llvm-mirror/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
Kristof Beyls 7d64810efd [ARM] Make -mcpu=generic schedule for an in-order core (Cortex-A8).
The benchmarking summarized in
http://lists.llvm.org/pipermail/llvm-dev/2017-May/113525.html showed
this is beneficial for a wide range of cores.

As is to be expected, quite a few small adaptations are needed to the
regression tests, as the difference in scheduling results in:
- Quite a few small instruction schedule differences.
- A few changes in register allocation decisions caused by different
 instruction schedules.
- A few changes in IfConversion decisions, due to a difference in
 instruction schedule and/or the estimated cost of a branch mispredict.

llvm-svn: 306514
2017-06-28 07:07:03 +00:00
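As a rough, hypothetical illustration of the change (not part of the test below): compiling a small if-convertible function once with -mcpu=generic and once with -mcpu=cortex-a8 and diffing the output should now show largely matching instruction schedules for simple scalar code, since the generic model is made to schedule for Cortex-A8. The file name sketch.ll and the function @select_add are made up for this sketch only.

; Sketch only, not part of thumb2-ifcvt2.ll; names are hypothetical.
; llc < sketch.ll -mtriple=thumbv7-- -mcpu=generic   -o generic.s
; llc < sketch.ll -mtriple=thumbv7-- -mcpu=cortex-a8 -o a8.s
define i32 @select_add(i32 %a, i32 %b, i32 %c) {
entry:
  %cmp = icmp eq i32 %c, 0
  br i1 %cmp, label %then, label %exit

then:
  %add = add i32 %a, %b
  br label %exit

exit:
  %r = phi i32 [ %add, %then ], [ %a, %entry ]
  ret i32 %r
}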

; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 -arm-default-it | FileCheck %s
; RUN: llc < %s -mtriple=thumbv8-apple-ios -arm-atomic-cfg-tidy=0 -arm-no-restrict-it | FileCheck %s

define void @foo(i32 %X, i32 %Y) {
entry:
; CHECK-LABEL: foo:
; CHECK: it ne
; CHECK: cmpne
; CHECK: it hi
; CHECK: bxhi lr
  %tmp1 = icmp ult i32 %X, 4 ; <i1> [#uses=1]
  %tmp4 = icmp eq i32 %Y, 0 ; <i1> [#uses=1]
  %tmp7 = or i1 %tmp4, %tmp1 ; <i1> [#uses=1]
  br i1 %tmp7, label %cond_true, label %UnifiedReturnBlock

cond_true: ; preds = %entry
  %tmp10 = call i32 (...) @bar( ) ; <i32> [#uses=0]
  ret void

UnifiedReturnBlock: ; preds = %entry
  ret void
}

declare i32 @bar(...)

; FIXME: Need post-ifcvt branch folding to get rid of the extra br at end of BB1.

%struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }

define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
entry:
; CHECK-LABEL: CountTree:
; CHECK: bne
; CHECK: cmp
; CHECK: it eq
; CHECK: cmpeq
  br label %tailrecurse

tailrecurse: ; preds = %bb, %entry
  %tmp6 = load %struct.quad_struct*, %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
  %tmp9 = load %struct.quad_struct*, %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=2]
  %tmp12 = load %struct.quad_struct*, %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
  %tmp14 = icmp eq %struct.quad_struct* null, null ; <i1> [#uses=1]
  %tmp17 = icmp eq %struct.quad_struct* %tmp6, null ; <i1> [#uses=1]
  %tmp23 = icmp eq %struct.quad_struct* %tmp9, null ; <i1> [#uses=1]
  %tmp29 = icmp eq %struct.quad_struct* %tmp12, null ; <i1> [#uses=1]
  %bothcond = and i1 %tmp17, %tmp14 ; <i1> [#uses=1]
  %bothcond1 = and i1 %bothcond, %tmp23 ; <i1> [#uses=1]
  %bothcond2 = and i1 %bothcond1, %tmp29 ; <i1> [#uses=1]
  br i1 %bothcond2, label %return, label %bb

bb: ; preds = %tailrecurse
  %tmp41 = tail call fastcc i32 @CountTree( %struct.quad_struct* %tmp9 ) ; <i32> [#uses=0]
  br label %tailrecurse

return: ; preds = %tailrecurse
  ret i32 0
}

%struct.SString = type { i8*, i32, i32 }

declare void @abort()

define fastcc void @t1(%struct.SString* %word, i8 signext %c) {
entry:
; CHECK-LABEL: t1:
; CHECK: it ne
; CHECK: bxne lr
  %tmp1 = icmp eq %struct.SString* %word, null ; <i1> [#uses=1]
  br i1 %tmp1, label %cond_true, label %cond_false

cond_true: ; preds = %entry
  tail call void @abort( )
  unreachable

cond_false: ; preds = %entry
  ret void
}

define fastcc void @t2() nounwind {
entry:
; CHECK-LABEL: t2:
; CHECK: cmp r0, #0
; CHECK: %growMapping.exit
  br i1 undef, label %bb.i.i3, label %growMapping.exit

bb.i.i3: ; preds = %entry
  unreachable

growMapping.exit: ; preds = %entry
  unreachable
}