1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 19:23:23 +01:00
llvm-mirror/test/CodeGen/Thumb2/LowOverheadLoops/multiple-do-loops.mir
David Green 0773b05cfa [ARM] Alter t2DoLoopStart to define lr
This changes the definition of t2DoLoopStart from
t2DoLoopStart rGPR
to
GPRlr = t2DoLoopStart rGPR

This will hopefully mean that low overhead loops are more tied together,
and we can more reliably generate loops without reverting or being at
the whims of the register allocator.

This is a fairly simple change in itself, but leads to a number of other
required alterations.

 - The hardware loop pass, if UsePhi is set, now generates loops of the
   form:
       %start = llvm.start.loop.iterations(%N)
     loop:
       %p = phi [%start], [%dec]
       %dec = llvm.loop.decrement.reg(%p, 1)
       %c = icmp ne %dec, 0
       br %c, loop, exit
 - For this a new llvm.start.loop.iterations intrinsic was added, identical
   to llvm.set.loop.iterations but produces a value as seen above, gluing
   the loop together more through def-use chains.
 - This new intrinsic conceptually produces the same output as its input,
   which is taught to SCEV so that the checks in MVETailPredication are not
   affected.
 - Some minor changes are needed to the ARMLowOverheadLoop pass, but it has
   been left mostly as before. We should now more reliably be able to tell
   that the t2DoLoopStart is correct without having to prove it, but
   t2WhileLoopStart and tail-predicated loops will remain the same.
 - And all the tests have been updated. There are a lot of them!

This patch on its own might cause more trouble than it helps, with more
tail-predicated loops being reverted, but some additional patches can
hopefully improve upon that to get to something that is better overall.

Differential Revision: https://reviews.llvm.org/D89881
2020-11-10 15:57:58 +00:00

981 lines
52 KiB
YAML

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
--- |
; test1: two back-to-back low-overhead loops, each iterating ceil(N/4)
; times (the add/lshr/shl/add/lshr/add chain below builds that trip count
; for llvm.start.loop.iterations).
; Loop 1: a[i] = b[i] * c[i];  loop 2: a[i] = a[i] + (b[i] ^ c[i]).
define dso_local arm_aapcs_vfpcc void @test1(i32* noalias nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) local_unnamed_addr {
entry:
%cmp30 = icmp eq i32 %N, 0
%0 = add i32 %N, 3
%1 = lshr i32 %0, 2
%2 = shl nuw i32 %1, 2
%3 = add i32 %2, -4
%4 = lshr i32 %3, 2
%5 = add nuw nsw i32 %4, 1
br i1 %cmp30, label %for.cond.cleanup6, label %vector.ph
vector.ph: ; preds = %entry
%start1 = call i32 @llvm.start.loop.iterations.i32(i32 %5)
br label %vector.body
; First vectorized loop: VCTP32-predicated loads of b and c, multiply,
; masked store to a. %6/%11 carry the loop.decrement.reg counter chained
; from %start1; %7/%9 count remaining elements for the VCTP predicate.
vector.body: ; preds = %vector.body, %vector.ph
%lsr.iv68 = phi i32* [ %scevgep69, %vector.body ], [ %a, %vector.ph ]
%lsr.iv65 = phi i32* [ %scevgep66, %vector.body ], [ %c, %vector.ph ]
%lsr.iv62 = phi i32* [ %scevgep63, %vector.body ], [ %b, %vector.ph ]
%6 = phi i32 [ %start1, %vector.ph ], [ %11, %vector.body ]
%7 = phi i32 [ %N, %vector.ph ], [ %9, %vector.body ]
%lsr.iv6870 = bitcast i32* %lsr.iv68 to <4 x i32>*
%lsr.iv6567 = bitcast i32* %lsr.iv65 to <4 x i32>*
%lsr.iv6264 = bitcast i32* %lsr.iv62 to <4 x i32>*
%8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
%9 = sub i32 %7, 4
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv6264, i32 4, <4 x i1> %8, <4 x i32> undef)
%wide.masked.load35 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv6567, i32 4, <4 x i1> %8, <4 x i32> undef)
%10 = mul nsw <4 x i32> %wide.masked.load35, %wide.masked.load
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %10, <4 x i32>* %lsr.iv6870, i32 4, <4 x i1> %8)
%scevgep63 = getelementptr i32, i32* %lsr.iv62, i32 4
%scevgep66 = getelementptr i32, i32* %lsr.iv65, i32 4
%scevgep69 = getelementptr i32, i32* %lsr.iv68, i32 4
%11 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
%12 = icmp ne i32 %11, 0
br i1 %12, label %vector.body, label %for.cond4.preheader
; Re-derive the same ceil(N/4) trip count for the second loop.
for.cond4.preheader: ; preds = %vector.body
%13 = icmp eq i32 %N, 0
%14 = add i32 %N, 3
%15 = lshr i32 %14, 2
%16 = shl nuw i32 %15, 2
%17 = add i32 %16, -4
%18 = lshr i32 %17, 2
%19 = add nuw nsw i32 %18, 1
br i1 %13, label %for.cond.cleanup6, label %vector.ph39
vector.ph39: ; preds = %for.cond4.preheader
%start2 = call i32 @llvm.start.loop.iterations.i32(i32 %19)
br label %vector.body38
; Second vectorized loop: a[i] += b[i] ^ c[i], same predication scheme.
vector.body38: ; preds = %vector.body38, %vector.ph39
%lsr.iv59 = phi i32* [ %scevgep60, %vector.body38 ], [ %a, %vector.ph39 ]
%lsr.iv56 = phi i32* [ %scevgep57, %vector.body38 ], [ %c, %vector.ph39 ]
%lsr.iv = phi i32* [ %scevgep, %vector.body38 ], [ %b, %vector.ph39 ]
%20 = phi i32 [ %start2, %vector.ph39 ], [ %26, %vector.body38 ]
%21 = phi i32 [ %N, %vector.ph39 ], [ %23, %vector.body38 ]
%lsr.iv5961 = bitcast i32* %lsr.iv59 to <4 x i32>*
%lsr.iv5658 = bitcast i32* %lsr.iv56 to <4 x i32>*
%lsr.iv55 = bitcast i32* %lsr.iv to <4 x i32>*
%22 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %21)
%23 = sub i32 %21, 4
%wide.masked.load52 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv55, i32 4, <4 x i1> %22, <4 x i32> undef)
%wide.masked.load53 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv5658, i32 4, <4 x i1> %22, <4 x i32> undef)
%24 = xor <4 x i32> %wide.masked.load53, %wide.masked.load52
%wide.masked.load54 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv5961, i32 4, <4 x i1> %22, <4 x i32> undef)
%25 = add nsw <4 x i32> %wide.masked.load54, %24
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %25, <4 x i32>* %lsr.iv5961, i32 4, <4 x i1> %22)
%scevgep = getelementptr i32, i32* %lsr.iv, i32 4
%scevgep57 = getelementptr i32, i32* %lsr.iv56, i32 4
%scevgep60 = getelementptr i32, i32* %lsr.iv59, i32 4
%26 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %20, i32 1)
%27 = icmp ne i32 %26, 0
br i1 %27, label %vector.body38, label %for.cond.cleanup6
for.cond.cleanup6: ; preds = %vector.body38, %entry, %for.cond4.preheader
ret void
}
; Function Attrs: nofree norecurse nounwind
; test2: same two-loop shape as test1, but the first (multiply) loop runs
; over %div = N >> 1 elements and the second (xor/add) loop over %N.
; Note the first guard falls through to for.cond4.preheader (not the exit),
; so the second loop executes even when N >> 1 is zero.
define dso_local arm_aapcs_vfpcc void @test2(i32* noalias nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) local_unnamed_addr {
entry:
%div = lshr i32 %N, 1
%cmp30 = icmp eq i32 %div, 0
%0 = add nuw i32 %div, 3
%1 = lshr i32 %0, 2
%2 = shl nuw i32 %1, 2
%3 = add i32 %2, -4
%4 = lshr i32 %3, 2
%5 = add nuw nsw i32 %4, 1
br i1 %cmp30, label %for.cond4.preheader, label %vector.ph
vector.ph: ; preds = %entry
%start1 = call i32 @llvm.start.loop.iterations.i32(i32 %5)
br label %vector.body
; First loop: a[i] = b[i] * c[i] over %div elements, tail-predicated
; via llvm.arm.mve.vctp32 on the remaining-element count %7.
vector.body: ; preds = %vector.body, %vector.ph
%lsr.iv68 = phi i32* [ %scevgep69, %vector.body ], [ %a, %vector.ph ]
%lsr.iv65 = phi i32* [ %scevgep66, %vector.body ], [ %c, %vector.ph ]
%lsr.iv62 = phi i32* [ %scevgep63, %vector.body ], [ %b, %vector.ph ]
%6 = phi i32 [ %start1, %vector.ph ], [ %11, %vector.body ]
%7 = phi i32 [ %div, %vector.ph ], [ %9, %vector.body ]
%lsr.iv6870 = bitcast i32* %lsr.iv68 to <4 x i32>*
%lsr.iv6567 = bitcast i32* %lsr.iv65 to <4 x i32>*
%lsr.iv6264 = bitcast i32* %lsr.iv62 to <4 x i32>*
%8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
%9 = sub i32 %7, 4
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv6264, i32 4, <4 x i1> %8, <4 x i32> undef)
%wide.masked.load35 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv6567, i32 4, <4 x i1> %8, <4 x i32> undef)
%10 = mul nsw <4 x i32> %wide.masked.load35, %wide.masked.load
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %10, <4 x i32>* %lsr.iv6870, i32 4, <4 x i1> %8)
%scevgep63 = getelementptr i32, i32* %lsr.iv62, i32 4
%scevgep66 = getelementptr i32, i32* %lsr.iv65, i32 4
%scevgep69 = getelementptr i32, i32* %lsr.iv68, i32 4
%11 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
%12 = icmp ne i32 %11, 0
br i1 %12, label %vector.body, label %for.cond4.preheader
; Trip count for the second loop is ceil(N/4), derived from %N directly.
for.cond4.preheader: ; preds = %vector.body, %entry
%cmp528 = icmp eq i32 %N, 0
%13 = add i32 %N, 3
%14 = lshr i32 %13, 2
%15 = shl nuw i32 %14, 2
%16 = add i32 %15, -4
%17 = lshr i32 %16, 2
%18 = add nuw nsw i32 %17, 1
br i1 %cmp528, label %for.cond.cleanup6, label %vector.ph39
vector.ph39: ; preds = %for.cond4.preheader
%start2 = call i32 @llvm.start.loop.iterations.i32(i32 %18)
br label %vector.body38
; Second loop: a[i] += b[i] ^ c[i] over %N elements.
vector.body38: ; preds = %vector.body38, %vector.ph39
%lsr.iv59 = phi i32* [ %scevgep60, %vector.body38 ], [ %a, %vector.ph39 ]
%lsr.iv56 = phi i32* [ %scevgep57, %vector.body38 ], [ %c, %vector.ph39 ]
%lsr.iv = phi i32* [ %scevgep, %vector.body38 ], [ %b, %vector.ph39 ]
%19 = phi i32 [ %start2, %vector.ph39 ], [ %25, %vector.body38 ]
%20 = phi i32 [ %N, %vector.ph39 ], [ %22, %vector.body38 ]
%lsr.iv5961 = bitcast i32* %lsr.iv59 to <4 x i32>*
%lsr.iv5658 = bitcast i32* %lsr.iv56 to <4 x i32>*
%lsr.iv55 = bitcast i32* %lsr.iv to <4 x i32>*
%21 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %20)
%22 = sub i32 %20, 4
%wide.masked.load52 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv55, i32 4, <4 x i1> %21, <4 x i32> undef)
%wide.masked.load53 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv5658, i32 4, <4 x i1> %21, <4 x i32> undef)
%23 = xor <4 x i32> %wide.masked.load53, %wide.masked.load52
%wide.masked.load54 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv5961, i32 4, <4 x i1> %21, <4 x i32> undef)
%24 = add nsw <4 x i32> %wide.masked.load54, %23
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %24, <4 x i32>* %lsr.iv5961, i32 4, <4 x i1> %21)
%scevgep = getelementptr i32, i32* %lsr.iv, i32 4
%scevgep57 = getelementptr i32, i32* %lsr.iv56, i32 4
%scevgep60 = getelementptr i32, i32* %lsr.iv59, i32 4
%25 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %19, i32 1)
%26 = icmp ne i32 %25, 0
br i1 %26, label %vector.body38, label %for.cond.cleanup6
for.cond.cleanup6: ; preds = %vector.body38, %for.cond4.preheader
ret void
}
; Function Attrs: nofree norecurse nounwind
; test3: three consecutive tail-predicated loops:
;   1) a[i] = b[i] * c[i]        over N elements
;   2) a[i] += b[i] ^ c[i]       over N >> 1 elements
;   3) a[i] -= b[i] + c[i]       over N elements
define dso_local arm_aapcs_vfpcc void @test3(i32* noalias nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) local_unnamed_addr {
entry:
%cmp54 = icmp eq i32 %N, 0
%0 = add i32 %N, 3
%1 = lshr i32 %0, 2
%2 = shl nuw i32 %1, 2
%3 = add i32 %2, -4
%4 = lshr i32 %3, 2
%5 = add nuw nsw i32 %4, 1
br i1 %cmp54, label %for.cond.cleanup17, label %vector.ph
vector.ph: ; preds = %entry
%start1 = call i32 @llvm.start.loop.iterations.i32(i32 %5)
br label %vector.body
; Loop 1: multiply, over ceil(N/4) iterations.
vector.body: ; preds = %vector.body, %vector.ph
%lsr.iv123 = phi i32* [ %scevgep124, %vector.body ], [ %a, %vector.ph ]
%lsr.iv120 = phi i32* [ %scevgep121, %vector.body ], [ %c, %vector.ph ]
%lsr.iv117 = phi i32* [ %scevgep118, %vector.body ], [ %b, %vector.ph ]
%6 = phi i32 [ %start1, %vector.ph ], [ %11, %vector.body ]
%7 = phi i32 [ %N, %vector.ph ], [ %9, %vector.body ]
%lsr.iv123125 = bitcast i32* %lsr.iv123 to <4 x i32>*
%lsr.iv120122 = bitcast i32* %lsr.iv120 to <4 x i32>*
%lsr.iv117119 = bitcast i32* %lsr.iv117 to <4 x i32>*
%8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
%9 = sub i32 %7, 4
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv117119, i32 4, <4 x i1> %8, <4 x i32> undef)
%wide.masked.load62 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv120122, i32 4, <4 x i1> %8, <4 x i32> undef)
%10 = mul nsw <4 x i32> %wide.masked.load62, %wide.masked.load
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %10, <4 x i32>* %lsr.iv123125, i32 4, <4 x i1> %8)
%scevgep118 = getelementptr i32, i32* %lsr.iv117, i32 4
%scevgep121 = getelementptr i32, i32* %lsr.iv120, i32 4
%scevgep124 = getelementptr i32, i32* %lsr.iv123, i32 4
%11 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
%12 = icmp ne i32 %11, 0
br i1 %12, label %vector.body, label %for.cond4.preheader
; Trip count for loop 2 is derived from %div = N >> 1.
for.cond4.preheader: ; preds = %vector.body
%div = lshr i32 %N, 1
%cmp552 = icmp eq i32 %div, 0
%13 = add nuw i32 %div, 3
%14 = lshr i32 %13, 2
%15 = shl nuw i32 %14, 2
%16 = add i32 %15, -4
%17 = lshr i32 %16, 2
%18 = add nuw nsw i32 %17, 1
br i1 %cmp552, label %for.cond15.preheader, label %vector.ph66
vector.ph66: ; preds = %for.cond4.preheader
%start2 = call i32 @llvm.start.loop.iterations.i32(i32 %18)
br label %vector.body65
; Loop 2: a[i] += b[i] ^ c[i] over N >> 1 elements.
vector.body65: ; preds = %vector.body65, %vector.ph66
%lsr.iv114 = phi i32* [ %scevgep115, %vector.body65 ], [ %a, %vector.ph66 ]
%lsr.iv111 = phi i32* [ %scevgep112, %vector.body65 ], [ %c, %vector.ph66 ]
%lsr.iv108 = phi i32* [ %scevgep109, %vector.body65 ], [ %b, %vector.ph66 ]
%19 = phi i32 [ %start2, %vector.ph66 ], [ %25, %vector.body65 ]
%20 = phi i32 [ %div, %vector.ph66 ], [ %22, %vector.body65 ]
%lsr.iv114116 = bitcast i32* %lsr.iv114 to <4 x i32>*
%lsr.iv111113 = bitcast i32* %lsr.iv111 to <4 x i32>*
%lsr.iv108110 = bitcast i32* %lsr.iv108 to <4 x i32>*
%21 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %20)
%22 = sub i32 %20, 4
%wide.masked.load79 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv108110, i32 4, <4 x i1> %21, <4 x i32> undef)
%wide.masked.load80 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv111113, i32 4, <4 x i1> %21, <4 x i32> undef)
%23 = xor <4 x i32> %wide.masked.load80, %wide.masked.load79
%wide.masked.load81 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv114116, i32 4, <4 x i1> %21, <4 x i32> undef)
%24 = add nsw <4 x i32> %wide.masked.load81, %23
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %24, <4 x i32>* %lsr.iv114116, i32 4, <4 x i1> %21)
%scevgep109 = getelementptr i32, i32* %lsr.iv108, i32 4
%scevgep112 = getelementptr i32, i32* %lsr.iv111, i32 4
%scevgep115 = getelementptr i32, i32* %lsr.iv114, i32 4
%25 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %19, i32 1)
%26 = icmp ne i32 %25, 0
br i1 %26, label %vector.body65, label %for.cond15.preheader
; Trip count for loop 3 is ceil(N/4) again.
for.cond15.preheader: ; preds = %vector.body65, %for.cond4.preheader
%27 = icmp eq i32 %N, 0
%28 = add i32 %N, 3
%29 = lshr i32 %28, 2
%30 = shl nuw i32 %29, 2
%31 = add i32 %30, -4
%32 = lshr i32 %31, 2
%33 = add nuw nsw i32 %32, 1
br i1 %27, label %for.cond.cleanup17, label %vector.ph85
vector.ph85: ; preds = %for.cond15.preheader
%start3 = call i32 @llvm.start.loop.iterations.i32(i32 %33)
br label %vector.body84
; Loop 3: a[i] = a[i] - (b[i] + c[i]) over N elements.
vector.body84: ; preds = %vector.body84, %vector.ph85
%lsr.iv105 = phi i32* [ %scevgep106, %vector.body84 ], [ %a, %vector.ph85 ]
%lsr.iv102 = phi i32* [ %scevgep103, %vector.body84 ], [ %c, %vector.ph85 ]
%lsr.iv = phi i32* [ %scevgep, %vector.body84 ], [ %b, %vector.ph85 ]
%34 = phi i32 [ %start3, %vector.ph85 ], [ %40, %vector.body84 ]
%35 = phi i32 [ %N, %vector.ph85 ], [ %37, %vector.body84 ]
%lsr.iv105107 = bitcast i32* %lsr.iv105 to <4 x i32>*
%lsr.iv102104 = bitcast i32* %lsr.iv102 to <4 x i32>*
%lsr.iv101 = bitcast i32* %lsr.iv to <4 x i32>*
%36 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %35)
%37 = sub i32 %35, 4
%wide.masked.load98 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv101, i32 4, <4 x i1> %36, <4 x i32> undef)
%wide.masked.load99 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv102104, i32 4, <4 x i1> %36, <4 x i32> undef)
%wide.masked.load100 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv105107, i32 4, <4 x i1> %36, <4 x i32> undef)
%38 = add <4 x i32> %wide.masked.load99, %wide.masked.load98
%39 = sub <4 x i32> %wide.masked.load100, %38
call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %39, <4 x i32>* %lsr.iv105107, i32 4, <4 x i1> %36)
%scevgep = getelementptr i32, i32* %lsr.iv, i32 4
%scevgep103 = getelementptr i32, i32* %lsr.iv102, i32 4
%scevgep106 = getelementptr i32, i32* %lsr.iv105, i32 4
%40 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %34, i32 1)
%41 = icmp ne i32 %40, 0
br i1 %41, label %vector.body84, label %for.cond.cleanup17
for.cond.cleanup17: ; preds = %vector.body84, %entry, %for.cond15.preheader
ret void
}
; Declarations of the masked-memory, low-overhead-loop, and MVE tail-predication
; intrinsics used by the functions above.
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
declare i32 @llvm.start.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.arm.mve.vctp32(i32)
...
---
# MIR for test1: two t2DoLoopStart / t2LoopDec / t2LoopEnd loops, each with a
# MVE_VCTP32 predicate, which the arm-low-overhead-loops pass is expected to
# convert into tail-predicated MVE_DLSTP_32 / MVE_LETP loops (see the CHECK
# lines embedded in the body below).
name: test1
alignment: 2
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
failedISel: false
tracksRegLiveness: true
hasWinCFI: false
registers: []
liveins:
- { reg: '$r0', virtual-reg: '' }
- { reg: '$r1', virtual-reg: '' }
- { reg: '$r2', virtual-reg: '' }
- { reg: '$r3', virtual-reg: '' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
hasStackMap: false
hasPatchPoint: false
stackSize: 24
offsetAdjustment: -16
maxAlignment: 4
adjustsStack: false
hasCalls: false
stackProtector: ''
maxCallFrameSize: 0
cvBytesOfCalleeSavedRegisters: 0
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
savePoint: ''
restorePoint: ''
fixedStack: []
stack:
- { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r6', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 4, name: '', type: spill-slot, offset: -20, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 5, name: '', type: spill-slot, offset: -24, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r8', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites: []
constants: []
machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test1
; CHECK: bb.0.entry:
; CHECK: successors: %bb.6(0x30000000), %bb.1(0x50000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8
; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
; CHECK: early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14 /* CC::al */, $noreg
; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -24
; CHECK: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: tBcc %bb.6, 0 /* CC::eq */, killed $cpsr
; CHECK: bb.1.vector.ph:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
; CHECK: $r5 = tMOVr $r2, 14 /* CC::al */, $noreg
; CHECK: $r4 = tMOVr $r3, 14 /* CC::al */, $noreg
; CHECK: $r6 = tMOVr $r1, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_DLSTP_32 killed renamable $r4
; CHECK: bb.2.vector.body:
; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
; CHECK: renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg :: (load 16 from %ir.lsr.iv6264, align 4)
; CHECK: renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg :: (load 16 from %ir.lsr.iv6567, align 4)
; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg :: (store 16 into %ir.lsr.iv6870, align 4)
; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
; CHECK: bb.3.for.cond4.preheader:
; CHECK: successors: %bb.6(0x30000000), %bb.4(0x50000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: tCBZ $r3, %bb.6
; CHECK: bb.4.vector.ph39:
; CHECK: successors: %bb.5(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: $r12 = tMOVr $r0, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
; CHECK: bb.5.vector.body38:
; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r12
; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg :: (load 16 from %ir.lsr.iv55, align 4)
; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg :: (load 16 from %ir.lsr.iv5658, align 4)
; CHECK: renamable $r12, renamable $q2 = MVE_VLDRWU32_post killed renamable $r12, 16, 0, $noreg :: (load 16 from %ir.lsr.iv5961, align 4)
; CHECK: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg :: (store 16 into %ir.lsr.iv5961, align 4)
; CHECK: $r0 = tMOVr $r12, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.5
; CHECK: bb.6.for.cond.cleanup6:
; CHECK: $r8, $sp = t2LDR_POST $sp, 4, 14 /* CC::al */, $noreg
; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
; Input MIR for the pass starts here; the CHECK lines above describe the
; expected output after conversion to DLSTP/LETP form.
bb.0.entry:
successors: %bb.6(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8
frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
frame-setup CFI_INSTRUCTION def_cfa_offset 20
frame-setup CFI_INSTRUCTION offset $lr, -4
frame-setup CFI_INSTRUCTION offset $r7, -8
frame-setup CFI_INSTRUCTION offset $r6, -12
frame-setup CFI_INSTRUCTION offset $r5, -16
frame-setup CFI_INSTRUCTION offset $r4, -20
$r7 = frame-setup tADDrSPi $sp, 3, 14, $noreg
frame-setup CFI_INSTRUCTION def_cfa $r7, 8
early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14, $noreg
frame-setup CFI_INSTRUCTION offset $r8, -24
tCMPi8 renamable $r3, 0, 14, $noreg, implicit-def $cpsr
tBcc %bb.6, 0, killed $cpsr
bb.1.vector.ph:
successors: %bb.2(0x80000000)
liveins: $r0, $r1, $r2, $r3
renamable $r6, dead $cpsr = tADDi3 renamable $r3, 3, 14, $noreg
$r8 = tMOVr $r0, 14, $noreg
renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg
$r5 = tMOVr $r2, 14, $noreg
renamable $r12 = t2SUBri killed renamable $r6, 4, 14, $noreg, $noreg
renamable $r6, dead $cpsr = tMOVi8 1, 14, $noreg
$r4 = tMOVr $r3, 14, $noreg
renamable $lr = nuw nsw t2ADDrs killed renamable $r6, renamable $r12, 19, 14, $noreg, $noreg
$r6 = tMOVr $r1, 14, $noreg
$lr = t2DoLoopStart renamable $lr
bb.2.vector.body:
successors: %bb.2(0x7c000000), %bb.3(0x04000000)
liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r12
renamable $vpr = MVE_VCTP32 renamable $r4, 0, $noreg
MVE_VPST 4, implicit $vpr
renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv6264, align 4)
renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv6567, align 4)
renamable $r4, dead $cpsr = tSUBi8 killed renamable $r4, 4, 14, $noreg
renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
MVE_VPST 8, implicit $vpr
renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv6870, align 4)
renamable $lr = t2LoopDec killed renamable $lr, 1
t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
tB %bb.3, 14, $noreg
bb.3.for.cond4.preheader:
successors: %bb.6(0x30000000), %bb.4(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r12
tCBZ $r3, %bb.6
bb.4.vector.ph39:
successors: %bb.5(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r12
renamable $r6, dead $cpsr = tMOVi8 1, 14, $noreg
renamable $lr = nuw nsw t2ADDrs killed renamable $r6, killed renamable $r12, 19, 14, $noreg, $noreg
$r12 = tMOVr $r0, 14, $noreg
$lr = t2DoLoopStart renamable $lr
bb.5.vector.body38:
successors: %bb.5(0x7c000000), %bb.6(0x04000000)
liveins: $lr, $r0, $r1, $r2, $r3, $r12
renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg
MVE_VPST 2, implicit $vpr
renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv55, align 4)
renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv5658, align 4)
renamable $r12, renamable $q2 = MVE_VLDRWU32_post killed renamable $r12, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv5961, align 4)
renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
MVE_VPST 8, implicit $vpr
MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv5961, align 4)
renamable $lr = t2LoopDec killed renamable $lr, 1
$r0 = tMOVr $r12, 14, $noreg
t2LoopEnd renamable $lr, %bb.5, implicit-def dead $cpsr
tB %bb.6, 14, $noreg
bb.6.for.cond.cleanup6:
$r8, $sp = t2LDR_POST $sp, 4, 14, $noreg
tPOP_RET 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
...
---
name: test2
alignment: 2
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
failedISel: false
tracksRegLiveness: true
hasWinCFI: false
registers: []
liveins:
- { reg: '$r0', virtual-reg: '' }
- { reg: '$r1', virtual-reg: '' }
- { reg: '$r2', virtual-reg: '' }
- { reg: '$r3', virtual-reg: '' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
hasStackMap: false
hasPatchPoint: false
stackSize: 24
offsetAdjustment: -16
maxAlignment: 4
adjustsStack: false
hasCalls: false
stackProtector: ''
maxCallFrameSize: 0
cvBytesOfCalleeSavedRegisters: 0
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
savePoint: ''
restorePoint: ''
fixedStack: []
stack:
- { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r6', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 4, name: '', type: spill-slot, offset: -20, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 5, name: '', type: spill-slot, offset: -24, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r8', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites: []
constants: []
machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test2
; CHECK: bb.0.entry:
; CHECK: successors: %bb.3(0x30000000), %bb.1(0x50000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8
; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
; CHECK: early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14 /* CC::al */, $noreg
; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -24
; CHECK: renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
; CHECK: t2CMPrs killed renamable $r6, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: tBcc %bb.3, 0 /* CC::eq */, killed $cpsr
; CHECK: bb.1.vector.ph:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: renamable $r4, dead $cpsr = tLSRri renamable $r3, 1, 14 /* CC::al */, $noreg
; CHECK: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
; CHECK: $r5 = tMOVr $r1, 14 /* CC::al */, $noreg
; CHECK: $r6 = tMOVr $r2, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_DLSTP_32 killed renamable $r4
; CHECK: bb.2.vector.body:
; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
; CHECK: renamable $r5, renamable $q0 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg :: (load 16 from %ir.lsr.iv6264, align 4)
; CHECK: renamable $r6, renamable $q1 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg :: (load 16 from %ir.lsr.iv6567, align 4)
; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg :: (store 16 into %ir.lsr.iv6870, align 4)
; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
; CHECK: bb.3.for.cond4.preheader:
; CHECK: successors: %bb.6(0x30000000), %bb.4(0x50000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: tCBZ $r3, %bb.6
; CHECK: bb.4.vector.ph39:
; CHECK: successors: %bb.5(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: $r4 = tMOVr $r0, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
; CHECK: bb.5.vector.body38:
; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r4
; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg :: (load 16 from %ir.lsr.iv55, align 4)
; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg :: (load 16 from %ir.lsr.iv5658, align 4)
; CHECK: renamable $r4, renamable $q2 = MVE_VLDRWU32_post killed renamable $r4, 16, 0, $noreg :: (load 16 from %ir.lsr.iv5961, align 4)
; CHECK: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg :: (store 16 into %ir.lsr.iv5961, align 4)
; CHECK: $r0 = tMOVr $r4, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.5
; CHECK: bb.6.for.cond.cleanup6:
; CHECK: $r8, $sp = t2LDR_POST $sp, 4, 14 /* CC::al */, $noreg
; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
bb.0.entry:
successors: %bb.3(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8
frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
frame-setup CFI_INSTRUCTION def_cfa_offset 20
frame-setup CFI_INSTRUCTION offset $lr, -4
frame-setup CFI_INSTRUCTION offset $r7, -8
frame-setup CFI_INSTRUCTION offset $r6, -12
frame-setup CFI_INSTRUCTION offset $r5, -16
frame-setup CFI_INSTRUCTION offset $r4, -20
$r7 = frame-setup tADDrSPi $sp, 3, 14, $noreg
frame-setup CFI_INSTRUCTION def_cfa $r7, 8
early-clobber $sp = frame-setup t2STR_PRE killed $r8, $sp, -4, 14, $noreg
frame-setup CFI_INSTRUCTION offset $r8, -24
renamable $r6, dead $cpsr = tMOVi8 0, 14, $noreg
renamable $r12 = t2MOVi 1, 14, $noreg, $noreg
t2CMPrs killed renamable $r6, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
tBcc %bb.3, 0, killed $cpsr
bb.1.vector.ph:
successors: %bb.2(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r12
renamable $r6, dead $cpsr = tMOVi8 3, 14, $noreg
renamable $r4, dead $cpsr = tLSRri renamable $r3, 1, 14, $noreg
renamable $r6 = nuw t2ADDrs killed renamable $r6, renamable $r3, 11, 14, $noreg, $noreg
$r8 = tMOVr $r0, 14, $noreg
renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg
$r5 = tMOVr $r1, 14, $noreg
renamable $r6, dead $cpsr = tSUBi8 killed renamable $r6, 4, 14, $noreg
renamable $lr = nuw nsw t2ADDrs renamable $r12, killed renamable $r6, 19, 14, $noreg, $noreg
$r6 = tMOVr $r2, 14, $noreg
$lr = t2DoLoopStart renamable $lr
bb.2.vector.body:
successors: %bb.2(0x7c000000), %bb.3(0x04000000)
liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r12
renamable $vpr = MVE_VCTP32 renamable $r4, 0, $noreg
MVE_VPST 4, implicit $vpr
renamable $r5, renamable $q0 = MVE_VLDRWU32_post killed renamable $r5, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv6264, align 4)
renamable $r6, renamable $q1 = MVE_VLDRWU32_post killed renamable $r6, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv6567, align 4)
renamable $r4, dead $cpsr = tSUBi8 killed renamable $r4, 4, 14, $noreg
renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
MVE_VPST 8, implicit $vpr
renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv6870, align 4)
renamable $lr = t2LoopDec killed renamable $lr, 1
t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
tB %bb.3, 14, $noreg
bb.3.for.cond4.preheader:
successors: %bb.6(0x30000000), %bb.4(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r12
tCBZ $r3, %bb.6
bb.4.vector.ph39:
successors: %bb.5(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r12
renamable $r6, dead $cpsr = tADDi3 renamable $r3, 3, 14, $noreg
$r4 = tMOVr $r0, 14, $noreg
renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg
renamable $r6, dead $cpsr = tSUBi8 killed renamable $r6, 4, 14, $noreg
renamable $lr = nuw nsw t2ADDrs killed renamable $r12, killed renamable $r6, 19, 14, $noreg, $noreg
$lr = t2DoLoopStart renamable $lr
bb.5.vector.body38:
successors: %bb.5(0x7c000000), %bb.6(0x04000000)
liveins: $lr, $r0, $r1, $r2, $r3, $r4
renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg
MVE_VPST 2, implicit $vpr
renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv55, align 4)
renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv5658, align 4)
renamable $r4, renamable $q2 = MVE_VLDRWU32_post killed renamable $r4, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv5961, align 4)
renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
MVE_VPST 8, implicit $vpr
MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv5961, align 4)
renamable $lr = t2LoopDec killed renamable $lr, 1
$r0 = tMOVr $r4, 14, $noreg
t2LoopEnd renamable $lr, %bb.5, implicit-def dead $cpsr
tB %bb.6, 14, $noreg
bb.6.for.cond.cleanup6:
$r8, $sp = t2LDR_POST $sp, 4, 14, $noreg
tPOP_RET 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
...
---
# test3: one function containing three consecutive MVE vector loops
# (bb.2, bb.5 and bb.8).  Each input loop is written with
# t2DoLoopStart / t2LoopDec / t2LoopEnd and VCTP32/VPST-predicated
# loads and stores.  The autogenerated CHECK lines verify that the
# arm-low-overhead-loops pass (see the RUN line at the top of the file)
# rewrites every one of the three loops into tail-predicated form:
# MVE_DLSTP_32 / MVE_LETP, with the explicit VCTP/VPST predication and
# the manual element-count decrement (tSUBi8) removed.
name: test3
alignment: 2
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
failedISel: false
tracksRegLiveness: true
hasWinCFI: false
registers: []
liveins:
- { reg: '$r0', virtual-reg: '' }
- { reg: '$r1', virtual-reg: '' }
- { reg: '$r2', virtual-reg: '' }
- { reg: '$r3', virtual-reg: '' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
hasStackMap: false
hasPatchPoint: false
stackSize: 32
offsetAdjustment: -24
maxAlignment: 4
adjustsStack: false
hasCalls: false
stackProtector: ''
maxCallFrameSize: 0
cvBytesOfCalleeSavedRegisters: 0
hasOpaqueSPAdjustment: false
hasVAStart: false
hasMustTailInVarArgFunc: false
localFrameSize: 0
savePoint: ''
restorePoint: ''
fixedStack: []
# Spill slots for the callee-saved registers pushed in bb.0 (lr/r7/r6/r5/r4
# via tPUSH, then r10/r9/r8 via t2STMDB_UPD).
stack:
- { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r6', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r5', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 4, name: '', type: spill-slot, offset: -20, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 5, name: '', type: spill-slot, offset: -24, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r10', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 6, name: '', type: spill-slot, offset: -28, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r9', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 7, name: '', type: spill-slot, offset: -32, size: 4, alignment: 4,
stack-id: default, callee-saved-register: '$r8', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
callSites: []
constants: []
machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: test3
; CHECK: bb.0.entry:
; CHECK: successors: %bb.9(0x30000000), %bb.1(0x50000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r10
; CHECK: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 20
; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
; CHECK: frame-setup CFI_INSTRUCTION offset $r7, -8
; CHECK: frame-setup CFI_INSTRUCTION offset $r6, -12
; CHECK: frame-setup CFI_INSTRUCTION offset $r5, -16
; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -20
; CHECK: dead $r7 = frame-setup tADDrSPi $sp, 3, 14 /* CC::al */, $noreg
; CHECK: frame-setup CFI_INSTRUCTION def_cfa $r7, 8
; CHECK: $sp = frame-setup t2STMDB_UPD $sp, 14 /* CC::al */, $noreg, killed $r8, killed $r9, killed $r10
; CHECK: frame-setup CFI_INSTRUCTION offset $r10, -24
; CHECK: frame-setup CFI_INSTRUCTION offset $r9, -28
; CHECK: frame-setup CFI_INSTRUCTION offset $r8, -32
; CHECK: tCMPi8 renamable $r3, 0, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: tBcc %bb.9, 0 /* CC::eq */, killed $cpsr
; CHECK: bb.1.vector.ph:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: $r8 = tMOVr $r0, 14 /* CC::al */, $noreg
; CHECK: $r5 = tMOVr $r2, 14 /* CC::al */, $noreg
; CHECK: $r4 = tMOVr $r3, 14 /* CC::al */, $noreg
; CHECK: $r6 = tMOVr $r1, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_DLSTP_32 killed renamable $r4
; CHECK: bb.2.vector.body:
; CHECK: successors: %bb.2(0x7c000000), %bb.3(0x04000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r5, $r6, $r8
; CHECK: renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg :: (load 16 from %ir.lsr.iv117119, align 4)
; CHECK: renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg :: (load 16 from %ir.lsr.iv120122, align 4)
; CHECK: renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 0, killed $noreg :: (store 16 into %ir.lsr.iv123125, align 4)
; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.2
; CHECK: bb.3.for.cond4.preheader:
; CHECK: successors: %bb.6(0x30000000), %bb.4(0x50000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: renamable $r6, dead $cpsr = tMOVi8 0, 14 /* CC::al */, $noreg
; CHECK: t2CMPrs killed renamable $r6, renamable $r3, 11, 14 /* CC::al */, $noreg, implicit-def $cpsr
; CHECK: tBcc %bb.6, 0 /* CC::eq */, killed $cpsr
; CHECK: bb.4.vector.ph66:
; CHECK: successors: %bb.5(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: renamable $r5, dead $cpsr = tLSRri renamable $r3, 1, 14 /* CC::al */, $noreg
; CHECK: $r10 = tMOVr $r0, 14 /* CC::al */, $noreg
; CHECK: $r9 = tMOVr $r2, 14 /* CC::al */, $noreg
; CHECK: $r4 = tMOVr $r1, 14 /* CC::al */, $noreg
; CHECK: $r6 = tMOVr $r0, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_DLSTP_32 killed renamable $r5
; CHECK: bb.5.vector.body65:
; CHECK: successors: %bb.5(0x7c000000), %bb.6(0x04000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r6, $r9, $r10
; CHECK: renamable $r4, renamable $q0 = MVE_VLDRWU32_post killed renamable $r4, 16, 0, $noreg :: (load 16 from %ir.lsr.iv108110, align 4)
; CHECK: renamable $r9, renamable $q1 = MVE_VLDRWU32_post killed renamable $r9, 16, 0, $noreg :: (load 16 from %ir.lsr.iv111113, align 4)
; CHECK: renamable $r6, renamable $q2 = MVE_VLDRWU32_post killed renamable $r6, 16, 0, $noreg :: (load 16 from %ir.lsr.iv114116, align 4)
; CHECK: renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r10, 0, 0, killed $noreg :: (store 16 into %ir.lsr.iv114116, align 4)
; CHECK: $r10 = tMOVr $r6, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.5
; CHECK: bb.6.for.cond15.preheader:
; CHECK: successors: %bb.9(0x30000000), %bb.7(0x50000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: tCBZ $r3, %bb.9
; CHECK: bb.7.vector.ph85:
; CHECK: successors: %bb.8(0x80000000)
; CHECK: liveins: $r0, $r1, $r2, $r3
; CHECK: $r5 = tMOVr $r0, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_DLSTP_32 killed renamable $r3
; CHECK: bb.8.vector.body84:
; CHECK: successors: %bb.8(0x7c000000), %bb.9(0x04000000)
; CHECK: liveins: $lr, $r0, $r1, $r2, $r5
; CHECK: renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 0, $noreg :: (load 16 from %ir.lsr.iv101, align 4)
; CHECK: renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 0, $noreg :: (load 16 from %ir.lsr.iv102104, align 4)
; CHECK: renamable $r5, renamable $q2 = MVE_VLDRWU32_post killed renamable $r5, 16, 0, $noreg :: (load 16 from %ir.lsr.iv105107, align 4)
; CHECK: renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: renamable $q0 = MVE_VSUBi32 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
; CHECK: MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 0, killed $noreg :: (store 16 into %ir.lsr.iv105107, align 4)
; CHECK: $r0 = tMOVr $r5, 14 /* CC::al */, $noreg
; CHECK: $lr = MVE_LETP killed renamable $lr, %bb.8
; CHECK: bb.9.for.cond.cleanup17:
; CHECK: $sp = t2LDMIA_UPD $sp, 14 /* CC::al */, $noreg, def $r8, def $r9, def $r10
; CHECK: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
; Input MIR below: prologue and the guard (r3 == 0 skips all three loops).
bb.0.entry:
successors: %bb.9(0x30000000), %bb.1(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r4, $r5, $r6, $lr, $r8, $r9, $r10
frame-setup tPUSH 14, $noreg, killed $r4, killed $r5, killed $r6, killed $lr, implicit-def $sp, implicit $sp
frame-setup CFI_INSTRUCTION def_cfa_offset 20
frame-setup CFI_INSTRUCTION offset $lr, -4
frame-setup CFI_INSTRUCTION offset $r7, -8
frame-setup CFI_INSTRUCTION offset $r6, -12
frame-setup CFI_INSTRUCTION offset $r5, -16
frame-setup CFI_INSTRUCTION offset $r4, -20
$r7 = frame-setup tADDrSPi $sp, 3, 14, $noreg
frame-setup CFI_INSTRUCTION def_cfa $r7, 8
$sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r8, killed $r9, killed $r10
frame-setup CFI_INSTRUCTION offset $r10, -24
frame-setup CFI_INSTRUCTION offset $r9, -28
frame-setup CFI_INSTRUCTION offset $r8, -32
tCMPi8 renamable $r3, 0, 14, $noreg, implicit-def $cpsr
tBcc %bb.9, 0, killed $cpsr
; Loop 1 (bb.1/bb.2): VCTP32-predicated multiply of two streams, element
; count in r4, trip count computed into lr for t2DoLoopStart.
bb.1.vector.ph:
successors: %bb.2(0x80000000)
liveins: $r0, $r1, $r2, $r3
renamable $r6, dead $cpsr = tADDi3 renamable $r3, 3, 14, $noreg
$r8 = tMOVr $r0, 14, $noreg
renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg
$r5 = tMOVr $r2, 14, $noreg
renamable $r12 = t2SUBri killed renamable $r6, 4, 14, $noreg, $noreg
renamable $r6, dead $cpsr = tMOVi8 1, 14, $noreg
$r4 = tMOVr $r3, 14, $noreg
renamable $lr = nuw nsw t2ADDrs killed renamable $r6, renamable $r12, 19, 14, $noreg, $noreg
$r6 = tMOVr $r1, 14, $noreg
$lr = t2DoLoopStart renamable $lr
bb.2.vector.body:
successors: %bb.2(0x7c000000), %bb.3(0x04000000)
liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r12
renamable $vpr = MVE_VCTP32 renamable $r4, 0, $noreg
MVE_VPST 4, implicit $vpr
renamable $r6, renamable $q0 = MVE_VLDRWU32_post killed renamable $r6, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv117119, align 4)
renamable $r5, renamable $q1 = MVE_VLDRWU32_post killed renamable $r5, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv120122, align 4)
renamable $r4, dead $cpsr = tSUBi8 killed renamable $r4, 4, 14, $noreg
renamable $q0 = nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
MVE_VPST 8, implicit $vpr
renamable $r8 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r8, 16, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv123125, align 4)
renamable $lr = t2LoopDec killed renamable $lr, 1
t2LoopEnd renamable $lr, %bb.2, implicit-def dead $cpsr
tB %bb.3, 14, $noreg
; Loop 2 (bb.4/bb.5): VEOR + VADD over three streams, element count
; r3 >> 1 held in r5, guarded by the shifted compare in bb.3.
bb.3.for.cond4.preheader:
successors: %bb.6(0x30000000), %bb.4(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r12
renamable $r6, dead $cpsr = tMOVi8 0, 14, $noreg
renamable $r8 = t2MOVi 1, 14, $noreg, $noreg
t2CMPrs killed renamable $r6, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
tBcc %bb.6, 0, killed $cpsr
bb.4.vector.ph66:
successors: %bb.5(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r8, $r12
renamable $r6, dead $cpsr = tMOVi8 3, 14, $noreg
renamable $r5, dead $cpsr = tLSRri renamable $r3, 1, 14, $noreg
renamable $r6 = nuw t2ADDrs killed renamable $r6, renamable $r3, 11, 14, $noreg, $noreg
$r10 = tMOVr $r0, 14, $noreg
renamable $r6 = t2BICri killed renamable $r6, 3, 14, $noreg, $noreg
$r9 = tMOVr $r2, 14, $noreg
renamable $r6, dead $cpsr = tSUBi8 killed renamable $r6, 4, 14, $noreg
$r4 = tMOVr $r1, 14, $noreg
renamable $lr = nuw nsw t2ADDrs renamable $r8, killed renamable $r6, 19, 14, $noreg, $noreg
$r6 = tMOVr $r0, 14, $noreg
$lr = t2DoLoopStart renamable $lr
bb.5.vector.body65:
successors: %bb.5(0x7c000000), %bb.6(0x04000000)
liveins: $lr, $r0, $r1, $r2, $r3, $r4, $r5, $r6, $r8, $r9, $r10, $r12
renamable $vpr = MVE_VCTP32 renamable $r5, 0, $noreg
MVE_VPST 2, implicit $vpr
renamable $r4, renamable $q0 = MVE_VLDRWU32_post killed renamable $r4, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv108110, align 4)
renamable $r9, renamable $q1 = MVE_VLDRWU32_post killed renamable $r9, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv111113, align 4)
renamable $r6, renamable $q2 = MVE_VLDRWU32_post killed renamable $r6, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv114116, align 4)
renamable $q0 = MVE_VEOR killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
renamable $r5, dead $cpsr = tSUBi8 killed renamable $r5, 4, 14, $noreg
renamable $q0 = nsw MVE_VADDi32 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
MVE_VPST 8, implicit $vpr
MVE_VSTRWU32 killed renamable $q0, killed renamable $r10, 0, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv114116, align 4)
renamable $lr = t2LoopDec killed renamable $lr, 1
$r10 = tMOVr $r6, 14, $noreg
t2LoopEnd renamable $lr, %bb.5, implicit-def dead $cpsr
tB %bb.6, 14, $noreg
; Loop 3 (bb.7/bb.8): VADD + VSUB over three streams, element count in r3;
; its trip count reuses r12 computed back in bb.1.
bb.6.for.cond15.preheader:
successors: %bb.9(0x30000000), %bb.7(0x50000000)
liveins: $r0, $r1, $r2, $r3, $r8, $r12
tCBZ $r3, %bb.9
bb.7.vector.ph85:
successors: %bb.8(0x80000000)
liveins: $r0, $r1, $r2, $r3, $r8, $r12
renamable $lr = nuw nsw t2ADDrs killed renamable $r8, killed renamable $r12, 19, 14, $noreg, $noreg
$r5 = tMOVr $r0, 14, $noreg
$lr = t2DoLoopStart renamable $lr
bb.8.vector.body84:
successors: %bb.8(0x7c000000), %bb.9(0x04000000)
liveins: $lr, $r0, $r1, $r2, $r3, $r5
renamable $vpr = MVE_VCTP32 renamable $r3, 0, $noreg
MVE_VPST 2, implicit $vpr
renamable $r1, renamable $q0 = MVE_VLDRWU32_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv101, align 4)
renamable $r2, renamable $q1 = MVE_VLDRWU32_post killed renamable $r2, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv102104, align 4)
renamable $r5, renamable $q2 = MVE_VLDRWU32_post killed renamable $r5, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv105107, align 4)
renamable $q0 = MVE_VADDi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 4, 14, $noreg
renamable $q0 = MVE_VSUBi32 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
MVE_VPST 8, implicit $vpr
MVE_VSTRWU32 killed renamable $q0, killed renamable $r0, 0, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv105107, align 4)
renamable $lr = t2LoopDec killed renamable $lr, 1
$r0 = tMOVr $r5, 14, $noreg
t2LoopEnd renamable $lr, %bb.8, implicit-def dead $cpsr
tB %bb.9, 14, $noreg
bb.9.for.cond.cleanup17:
$sp = t2LDMIA_UPD $sp, 14, $noreg, def $r8, def $r9, def $r10
tPOP_RET 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $pc
...