llvm-mirror/test/CodeGen/AArch64/arm64-jumptable.ll
Michael Zolotukhin 728dc93610 [SimplifyCFG] Avoid behavior that is quadratic in the number of predecessors during instruction sinking.
If a block has N predecessors, then the current algorithm will try to
sink common code to this block N times (whenever we visit a
predecessor). Every attempt to sink the common code includes going
through all predecessors, so the complexity of the algorithm becomes
O(N^2).
With this patch we try to sink common code only when we visit the block
itself. With this, the complexity goes down to O(N).
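To make the complexity difference concrete, here is a minimal standalone C++ sketch (not the actual SimplifyCFG code; Block, trySinkCommonCode, visitOld and visitNew are invented names for illustration only):

  // Illustrative sketch only -- not the actual SimplifyCFG implementation.
  // Block, trySinkCommonCode, visitOld and visitNew are invented names used
  // to show the complexity difference described above.
  #include <cstdio>
  #include <vector>

  struct Block {
    std::vector<Block *> Preds; // predecessors of this block
    std::vector<Block *> Succs; // successors of this block
  };

  static unsigned StepsScanned = 0; // counts predecessor scans

  // One sinking attempt inspects every predecessor of BB: O(N) work.
  static void trySinkCommonCode(Block &BB) {
    for (Block *Pred : BB.Preds) {
      (void)Pred; // compare/sink the tail instructions of each predecessor
      ++StepsScanned;
    }
  }

  // Old scheme: each time a predecessor is visited, sinking into its
  // successors is re-attempted, so a block with N predecessors is processed
  // N times at O(N) each -- O(N^2) overall.
  static void visitOld(Block &BB) {
    for (Block *Succ : BB.Succs)
      trySinkCommonCode(*Succ);
  }

  // New scheme: sinking is attempted only once, when the block itself is
  // visited -- O(N) overall.
  static void visitNew(Block &BB) { trySinkCommonCode(BB); }

  int main() {
    const unsigned N = 4; // %exit.sink.split in the test below also has four predecessors
    Block Exit;
    std::vector<Block> Preds(N);
    for (Block &P : Preds) {
      P.Succs.push_back(&Exit);
      Exit.Preds.push_back(&P);
    }

    StepsScanned = 0;
    for (Block &P : Preds)
      visitOld(P);            // N visits x O(N) scan each
    std::printf("old: %u predecessor scans\n", StepsScanned); // prints 16

    StepsScanned = 0;
    visitNew(Exit);           // a single O(N) scan
    std::printf("new: %u predecessor scans\n", StepsScanned); // prints 4
    return 0;
  }

With four predecessors the old scheme performs 16 scans versus 4 for the new one, which is the O(N^2) vs. O(N) difference described above.
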
As a side effect, the code is now sunk at a slightly different point than
before (the order of simplifications has changed), so I had to adjust two
tests (note that neither test is meant to exercise SimplifyCFG itself):
* test/CodeGen/AArch64/arm64-jumptable.ll - the changes in this test mimic
what the previous implementation of SimplifyCFG would have produced.
* test/CodeGen/ARM/avoid-cpsr-rmw.ll - in this test I disabled common code
sinking via a command-line flag.

llvm-svn: 321236
2017-12-21 01:22:13 +00:00

; RUN: llc -mtriple=arm64-apple-ios < %s | FileCheck %s
; RUN: llc -mtriple=arm64-linux-gnu < %s | FileCheck %s --check-prefix=CHECK-LINUX
; <rdar://11417675>
define void @sum(i32 %a, i32* %to, i32 %c) {
entry:
  switch i32 %a, label %exit [
    i32 1, label %bb1
    i32 2, label %exit.sink.split
    i32 3, label %bb3
    i32 4, label %bb4
  ]
bb1:
  %b = add i32 %c, 1
  br label %exit.sink.split
bb3:
  br label %exit.sink.split
bb4:
  br label %exit.sink.split
exit.sink.split:
  %.sink = phi i32 [ 5, %bb4 ], [ %b, %bb1 ], [ 3, %bb3 ], [ %a, %entry ]
  store i32 %.sink, i32* %to
  br label %exit
exit:
  ret void
}
; CHECK-LABEL: sum:
; CHECK: adrp {{x[0-9]+}}, LJTI0_0@PAGE
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, LJTI0_0@PAGEOFF
; CHECK-LINUX-LABEL: sum:
; CHECK-LINUX: adrp {{x[0-9]+}}, .LJTI0_0
; CHECK-LINUX: add {{x[0-9]+}}, {{x[0-9]+}}, :lo12:.LJTI0_0