Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-23 19:23:23 +01:00)
Commit 0773b05cfa
This changes the definition of t2DoLoopStart from

    t2DoLoopStart rGPR

to

    GPRlr = t2DoLoopStart rGPR

This will hopefully mean that low overhead loops are more closely tied together, and we can more reliably generate loops without reverting or being at the whims of the register allocator.

This is a fairly simple change in itself, but it leads to a number of other required alterations:

- The hardware loop pass, if UsePhi is set, now generates loops of the form (see the sketch after this list):

      %start = llvm.start.loop.iterations(%N)
    loop:
      %p = phi [%start], [%dec]
      %dec = llvm.loop.decrement.reg(%p, 1)
      %c = icmp ne %dec, 0
      br %c, loop, exit

- For this a new llvm.start.loop.iterations intrinsic was added, identical to llvm.set.loop.iterations except that it produces a value, as seen above, gluing the loop together more strongly through def-use chains.
- This new intrinsic conceptually produces the same output as its input; SCEV is taught this so that the checks in MVETailPredication are not affected.
- Some minor changes are needed to the ARMLowOverheadLoops pass, but it has been left mostly as before. We should now more reliably be able to tell that the t2DoLoopStart is correct without having to prove it, but t2WhileLoopStart and tail-predicated loops will remain the same.
- And all the tests have been updated. There are a lot of them!

This patch on its own might cause more trouble than it helps, with more tail-predicated loops being reverted, but some additional patches can hopefully improve upon that to get to something that is better overall.

Differential Revision: https://reviews.llvm.org/D89881
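To make the new shape concrete, here is a minimal, self-contained LLVM IR sketch of the UsePhi hardware-loop form described above. The function @count_down and its empty body are hypothetical and only for illustration; the two intrinsics and the loop structure come from the patch, and the sketch assumes %N is known to be non-zero:

    declare i32 @llvm.start.loop.iterations.i32(i32)
    declare i32 @llvm.loop.decrement.reg.i32(i32, i32)

    ; Hypothetical example loop, assuming %N is known to be non-zero.
    define void @count_down(i32 %N) {
    entry:
      ; Conceptually returns its input, but gives the counter phi a def to
      ; feed on, so the loop hangs together through def-use chains.
      %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
      br label %loop

    loop:
      %p = phi i32 [ %start, %entry ], [ %dec, %loop ]
      ; ... loop body ...
      %dec = call i32 @llvm.loop.decrement.reg.i32(i32 %p, i32 1)
      %c = icmp ne i32 %dec, 0
      br i1 %c, label %loop, label %exit

    exit:
      ret void
    }

At the MIR level this is what lets the start instruction in the test below be written as "$lr = t2DoLoopStart renamable $lr" (a def of lr) rather than a bare use, which the arm-low-overhead-loops pass then rewrites to t2DLS/t2LEUpdate, as the CHECK lines show.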
376 lines · 20 KiB · YAML
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s

--- |
  define dso_local arm_aapcs_vfpcc float @insert_after_vdup_1(float* nocapture readonly %a, float* nocapture readonly %b, float %init, i32 %N) {
  entry:
    %cmp8.not = icmp eq i32 %N, 0
    %0 = add i32 %N, 3
    %1 = lshr i32 %0, 2
    %2 = shl nuw i32 %1, 2
    %3 = add i32 %2, -4
    %4 = lshr i32 %3, 2
    %5 = add nuw nsw i32 %4, 1
    br i1 %cmp8.not, label %for.cond.cleanup, label %vector.ph

  vector.ph: ; preds = %entry
    %6 = insertelement <4 x float> <float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %init, i32 0
    %start = call i32 @llvm.start.loop.iterations.i32(i32 %5)
    br label %vector.body

  vector.body: ; preds = %vector.body, %vector.ph
    %lsr.iv13 = phi float* [ %scevgep14, %vector.body ], [ %b, %vector.ph ]
    %lsr.iv = phi float* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
    %vec.phi = phi <4 x float> [ %6, %vector.ph ], [ %13, %vector.body ]
    %7 = phi i32 [ %start, %vector.ph ], [ %14, %vector.body ]
    %8 = phi i32 [ %N, %vector.ph ], [ %10, %vector.body ]
    %lsr.iv12 = bitcast float* %lsr.iv to <4 x float>*
    %lsr.iv1315 = bitcast float* %lsr.iv13 to <4 x float>*
    %9 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %8)
    %10 = sub i32 %8, 4
    %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %lsr.iv12, i32 4, <4 x i1> %9, <4 x float> undef)
    %wide.masked.load11 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %lsr.iv1315, i32 4, <4 x i1> %9, <4 x float> undef)
    %11 = fmul fast <4 x float> %wide.masked.load11, %wide.masked.load
    %12 = fadd fast <4 x float> %11, %vec.phi
    %13 = select <4 x i1> %9, <4 x float> %12, <4 x float> %vec.phi
    %scevgep = getelementptr float, float* %lsr.iv, i32 4
    %scevgep14 = getelementptr float, float* %lsr.iv13, i32 4
    %14 = call i32 @llvm.loop.decrement.reg.i32(i32 %7, i32 1)
    %15 = icmp ne i32 %14, 0
    br i1 %15, label %vector.body, label %middle.block

  middle.block: ; preds = %vector.body
    %16 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %13)
    br label %for.cond.cleanup

  for.cond.cleanup: ; preds = %middle.block, %entry
    %res.0.lcssa = phi float [ %init, %entry ], [ %16, %middle.block ]
    ret float %res.0.lcssa
  }

  ; Function Attrs: norecurse nounwind readonly
  define dso_local arm_aapcs_vfpcc float @insert_after_vdup_2(float* nocapture readonly %a, float* nocapture readonly %b, float %init, i32 %N) local_unnamed_addr #0 {
  entry:
    %shr = lshr i32 %N, 2
    %cmp9.not = icmp eq i32 %shr, 0
    %0 = add nuw nsw i32 %shr, 3
    %1 = lshr i32 %0, 2
    %2 = shl nuw nsw i32 %1, 2
    %3 = add nsw i32 %2, -4
    %4 = lshr i32 %3, 2
    %5 = add nuw nsw i32 %4, 1
    br i1 %cmp9.not, label %for.cond.cleanup, label %vector.ph

  vector.ph: ; preds = %entry
    %6 = insertelement <4 x float> <float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %init, i32 0
    %start = call i32 @llvm.start.loop.iterations.i32(i32 %5)
    br label %vector.body

  vector.body: ; preds = %vector.body, %vector.ph
    %lsr.iv14 = phi float* [ %scevgep15, %vector.body ], [ %b, %vector.ph ]
    %lsr.iv = phi float* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
    %vec.phi = phi <4 x float> [ %6, %vector.ph ], [ %13, %vector.body ]
    %7 = phi i32 [ %start, %vector.ph ], [ %14, %vector.body ]
    %8 = phi i32 [ %shr, %vector.ph ], [ %10, %vector.body ]
    %lsr.iv13 = bitcast float* %lsr.iv to <4 x float>*
    %lsr.iv1416 = bitcast float* %lsr.iv14 to <4 x float>*
    %9 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %8)
    %10 = sub i32 %8, 4
    %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %lsr.iv13, i32 4, <4 x i1> %9, <4 x float> undef)
    %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %lsr.iv1416, i32 4, <4 x i1> %9, <4 x float> undef)
    %11 = fmul fast <4 x float> %wide.masked.load12, %wide.masked.load
    %12 = fadd fast <4 x float> %11, %vec.phi
    %13 = select <4 x i1> %9, <4 x float> %12, <4 x float> %vec.phi
    %scevgep = getelementptr float, float* %lsr.iv, i32 4
    %scevgep15 = getelementptr float, float* %lsr.iv14, i32 4
    %14 = call i32 @llvm.loop.decrement.reg.i32(i32 %7, i32 1)
    %15 = icmp ne i32 %14, 0
    br i1 %15, label %vector.body, label %middle.block

  middle.block: ; preds = %vector.body
    %16 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %13)
    br label %for.cond.cleanup

  for.cond.cleanup: ; preds = %middle.block, %entry
    %res.0.lcssa = phi float [ %init, %entry ], [ %16, %middle.block ]
    ret float %res.0.lcssa
  }

  declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
  declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
  declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
  declare i32 @llvm.start.loop.iterations.i32(i32)
  declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
  declare <4 x i1> @llvm.arm.mve.vctp32(i32)

...
---
name: insert_after_vdup_1
alignment: 4
tracksRegLiveness: true
registers: []
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$s0', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
frameInfo:
  stackSize: 8
  offsetAdjustment: 0
  maxAlignment: 4
  localFrameSize: 0
  savePoint: ''
  restorePoint: ''
fixedStack: []
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites: []
constants:
  - id: 0
    value: 'float 0.000000e+00'
    alignment: 4
    isTargetSpecific: false
machineFunctionInfo: {}
body: |
  ; CHECK-LABEL: name: insert_after_vdup_1
  ; CHECK: bb.0.entry:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7, $s0
  ; CHECK: tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK: t2IT 0, 8, implicit-def $itstate
  ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $s0, implicit killed $itstate
  ; CHECK: bb.1.vector.ph:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $lr, $r0, $r1, $r2, $s0
  ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
  ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
  ; CHECK: renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
  ; CHECK: renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r3 = tLDRpci %const.0, 14 /* CC::al */, $noreg :: (load 4 from constant-pool)
  ; CHECK: $lr = t2DLS killed renamable $lr
  ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, undef renamable $q1
  ; CHECK: $s4 = VMOVS killed $s0, 14 /* CC::al */, $noreg, implicit killed $q1, implicit-def $q1
  ; CHECK: bb.2.vector.body:
  ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK: liveins: $lr, $q1, $r0, $r1, $r2
  ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
  ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
  ; CHECK: MVE_VPST 2, implicit $vpr
  ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv12, align 4)
  ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv1315, align 4)
  ; CHECK: renamable $q1 = MVE_VFMAf32 killed renamable $q1, killed renamable $q2, killed renamable $q0, 1, killed renamable $vpr
  ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
  ; CHECK: bb.3.middle.block:
  ; CHECK: liveins: $q1
  ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
  ; CHECK: renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit killed $q1
  ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
  ; CHECK: $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0
  ; CHECK: bb.4 (align 4):
  ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
  bb.0.entry:
    successors: %bb.1(0x80000000)
    liveins: $r0, $r1, $r2, $s0, $lr

    tCMPi8 renamable $r2, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
    t2IT 0, 8, implicit-def $itstate
    tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $s0, implicit killed $itstate

  bb.1.vector.ph:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r1, $r2, $s0, $lr

    frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
    frame-setup CFI_INSTRUCTION def_cfa_register $r7
    renamable $r3, dead $cpsr = tADDi3 renamable $r2, 3, 14 /* CC::al */, $noreg
    renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
    renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
    renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
    renamable $lr = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
    renamable $r3 = tLDRpci %const.0, 14 /* CC::al */, $noreg :: (load 4 from constant-pool)
    $lr = t2DoLoopStart renamable $lr
    renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, undef renamable $q1
    $s4 = VMOVS killed $s0, 14 /* CC::al */, $noreg, implicit killed $q1, implicit-def $q1

  bb.2.vector.body:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $lr, $q1, $r0, $r1, $r2

    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
    renamable $lr = t2LoopDec killed renamable $lr, 1
    MVE_VPST 2, implicit $vpr
    renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv12, align 4)
    renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv1315, align 4)
    renamable $q1 = MVE_VFMAf32 killed renamable $q1, killed renamable $q2, killed renamable $q0, 1, killed renamable $vpr
    t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14 /* CC::al */, $noreg

  bb.3.middle.block:
    liveins: $q1

    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
    renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit $q1
    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
    $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
    tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0

  bb.4 (align 4):
    CONSTPOOL_ENTRY 0, %const.0, 4

...
---
name: insert_after_vdup_2
alignment: 4
tracksRegLiveness: true
registers: []
liveins:
  - { reg: '$r0', virtual-reg: '' }
  - { reg: '$r1', virtual-reg: '' }
  - { reg: '$s0', virtual-reg: '' }
  - { reg: '$r2', virtual-reg: '' }
frameInfo:
  stackSize: 8
  offsetAdjustment: 0
  maxAlignment: 4
  savePoint: ''
  restorePoint: ''
fixedStack: []
stack:
  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites: []
constants:
  - id: 0
    value: 'float 0.000000e+00'
    alignment: 4
    isTargetSpecific: false
machineFunctionInfo: {}
body: |
  ; CHECK-LABEL: name: insert_after_vdup_2
  ; CHECK: bb.0.entry:
  ; CHECK: successors: %bb.1(0x80000000)
  ; CHECK: liveins: $lr, $r0, $r1, $r2, $r7, $s0
  ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
  ; CHECK: t2CMPrs killed renamable $r3, renamable $r2, 19, 14 /* CC::al */, $noreg, implicit-def $cpsr
  ; CHECK: t2IT 0, 8, implicit-def $itstate
  ; CHECK: tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $s0, implicit killed $itstate
  ; CHECK: bb.1.vector.ph:
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $lr, $r0, $r1, $r2, $s0
  ; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
  ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
  ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
  ; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
  ; CHECK: dead $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
  ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_register $r7
  ; CHECK: renamable $r3, dead $cpsr = tMOVi8 3, 14 /* CC::al */, $noreg
  ; CHECK: renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
  ; CHECK: renamable $lr = t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
  ; CHECK: renamable $r3 = tLDRpci %const.0, 14 /* CC::al */, $noreg :: (load 4 from constant-pool)
  ; CHECK: $lr = t2DLS killed renamable $lr
  ; CHECK: renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, undef renamable $q1
  ; CHECK: renamable $r2, dead $cpsr = tLSRri killed renamable $r2, 2, 14 /* CC::al */, $noreg
  ; CHECK: $s4 = VMOVS killed $s0, 14 /* CC::al */, $noreg, implicit killed $q1, implicit-def $q1
  ; CHECK: bb.2.vector.body:
  ; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
  ; CHECK: liveins: $lr, $q1, $r0, $r1, $r2
  ; CHECK: renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
  ; CHECK: renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
  ; CHECK: MVE_VPST 2, implicit $vpr
  ; CHECK: renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv13, align 4)
  ; CHECK: renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv1416, align 4)
  ; CHECK: renamable $q1 = MVE_VFMAf32 killed renamable $q1, killed renamable $q2, killed renamable $q0, 1, killed renamable $vpr
  ; CHECK: $lr = t2LEUpdate killed renamable $lr, %bb.2
  ; CHECK: bb.3.middle.block:
  ; CHECK: liveins: $q1
  ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
  ; CHECK: renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit killed $q1
  ; CHECK: renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
  ; CHECK: $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
  ; CHECK: tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0
  ; CHECK: bb.4 (align 4):
  ; CHECK: CONSTPOOL_ENTRY 0, %const.0, 4
  bb.0.entry:
    successors: %bb.1(0x80000000)
    liveins: $r0, $r1, $r2, $s0, $lr

    renamable $r3, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
    t2CMPrs killed renamable $r3, renamable $r2, 19, 14 /* CC::al */, $noreg, implicit-def $cpsr
    t2IT 0, 8, implicit-def $itstate
    tBX_RET 0 /* CC::eq */, killed $cpsr, implicit $s0, implicit killed $itstate

  bb.1.vector.ph:
    successors: %bb.2(0x80000000)
    liveins: $r0, $r1, $r2, $s0, $lr

    frame-setup tPUSH 14 /* CC::al */, $noreg, killed $lr, implicit-def $sp, implicit $sp
    frame-setup CFI_INSTRUCTION def_cfa_offset 8
    frame-setup CFI_INSTRUCTION offset $lr, -4
    frame-setup CFI_INSTRUCTION offset $r7, -8
    $r7 = frame-setup tMOVr $sp, 14 /* CC::al */, $noreg
    frame-setup CFI_INSTRUCTION def_cfa_register $r7
    renamable $r3, dead $cpsr = tMOVi8 3, 14 /* CC::al */, $noreg
    renamable $r3 = nuw nsw t2ADDrs killed renamable $r3, renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
    renamable $r3 = t2BICri killed renamable $r3, 3, 14 /* CC::al */, $noreg, $noreg
    renamable $r12 = t2SUBri killed renamable $r3, 4, 14 /* CC::al */, $noreg, $noreg
    renamable $r3, dead $cpsr = tMOVi8 1, 14 /* CC::al */, $noreg
    renamable $lr = t2ADDrs killed renamable $r3, killed renamable $r12, 19, 14 /* CC::al */, $noreg, $noreg
    renamable $r3 = tLDRpci %const.0, 14 /* CC::al */, $noreg :: (load 4 from constant-pool)
    $lr = t2DoLoopStart renamable $lr
    renamable $q1 = MVE_VDUP32 killed renamable $r3, 0, $noreg, undef renamable $q1
    renamable $r2, dead $cpsr = tLSRri killed renamable $r2, 2, 14 /* CC::al */, $noreg
    $s4 = VMOVS killed $s0, 14 /* CC::al */, $noreg, implicit killed $q1, implicit-def $q1

  bb.2.vector.body:
    successors: %bb.2(0x7c000000), %bb.3(0x04000000)
    liveins: $lr, $q1, $r0, $r1, $r2

    renamable $vpr = MVE_VCTP32 renamable $r2, 0, $noreg
    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 4, 14 /* CC::al */, $noreg
    renamable $lr = t2LoopDec killed renamable $lr, 1
    MVE_VPST 2, implicit $vpr
    renamable $r0, renamable $q0 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv13, align 4)
    renamable $r1, renamable $q2 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv1416, align 4)
    renamable $q1 = MVE_VFMAf32 killed renamable $q1, killed renamable $q2, killed renamable $q0, 1, killed renamable $vpr
    t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
    tB %bb.3, 14 /* CC::al */, $noreg

  bb.3.middle.block:
    liveins: $q1

    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS renamable $s6, renamable $s7, 14 /* CC::al */, $noreg
    renamable $s2 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s4, killed renamable $s5, 14 /* CC::al */, $noreg, implicit $q1
    renamable $s0 = nnan ninf nsz arcp contract afn reassoc VADDS killed renamable $s2, killed renamable $s0, 14 /* CC::al */, $noreg
    $sp = frame-destroy t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r7, def $lr
    tBX_RET 14 /* CC::al */, $noreg, implicit killed $s0

  bb.4 (align 4):
    CONSTPOOL_ENTRY 0, %const.0, 4

...