From e4830a62b33ecf1b5b1dbf4809107a3db87240f7 Mon Sep 17 00:00:00 2001
From: David Green
Date: Mon, 26 Jul 2021 20:56:22 +0100
Subject: [PATCH] [ARM] Fixup vst4 test. NFC

---
 test/CodeGen/Thumb2/mve-vst4.ll | 1043 +++++++++++++++++--------------
 1 file changed, 567 insertions(+), 476 deletions(-)

diff --git a/test/CodeGen/Thumb2/mve-vst4.ll b/test/CodeGen/Thumb2/mve-vst4.ll
index 4d1e12f0c5e..f275049eddf 100644
--- a/test/CodeGen/Thumb2/mve-vst4.ll
+++ b/test/CodeGen/Thumb2/mve-vst4.ll
@@ -6,24 +6,28 @@ define void @vst4_v2i32(<2 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-LABEL: vst4_v2i32:
 ; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: .save {r4, lr}
-; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: add.w r6, r0, #16
 ; CHECK-NEXT: ldrd lr, r12, [r0]
 ; CHECK-NEXT: ldrd r3, r2, [r0, #8]
-; CHECK-NEXT: ldrd r4, r0, [r0, #16]
+; CHECK-NEXT: ldm r6, {r4, r5, r6}
 ; CHECK-NEXT: vmov q1[2], q1[0], lr, r3
 ; CHECK-NEXT: vmov q1[3], q1[1], r12, r2
-; CHECK-NEXT: vmov q0[2], q0[0], r4, r0
+; CHECK-NEXT: ldr r0, [r0, #28]
+; CHECK-NEXT: vmov q0[2], q0[0], r4, r6
 ; CHECK-NEXT: vmov.f64 d4, d2
+; CHECK-NEXT: vmov q0[3], q0[1], r5, r0
 ; CHECK-NEXT: vmov.f32 s9, s6
 ; CHECK-NEXT: vmov.f32 s10, s0
-; CHECK-NEXT: vmov.f32 s11, s0
-; CHECK-NEXT: vmov.f32 s0, s5
+; CHECK-NEXT: vmov.f32 s11, s2
 ; CHECK-NEXT: vstrw.32 q2, [r1]
-; CHECK-NEXT: vmov.f32 s1, s7
-; CHECK-NEXT: vmov.f32 s3, s2
-; CHECK-NEXT: vstrw.32 q0, [r1, #16]
-; CHECK-NEXT: pop {r4, pc}
+; CHECK-NEXT: vmov.f32 s8, s5
+; CHECK-NEXT: vmov.f32 s9, s7
+; CHECK-NEXT: vmov.f32 s10, s1
+; CHECK-NEXT: vmov.f32 s11, s3
+; CHECK-NEXT: vstrw.32 q2, [r1, #16]
+; CHECK-NEXT: pop {r4, r5, r6, pc}
 entry:
 %s1 = getelementptr <2 x i32>, <2 x i32>* %src, i32 0
 %l1 = load <2 x i32>, <2 x i32>* %s1, align 4
@@ -32,7 +36,7 @@ entry:
 %s3 = getelementptr <2 x i32>, <2 x i32>* %src, i32 2
 %l3 = load <2 x i32>, <2 x i32>* %s3, align 4
 %s4 = getelementptr <2 x i32>, <2 x i32>* %src, i32 3
- %l4 = load <2 x i32>, <2 x i32>* %s3, align 4
+ %l4 = load <2 x i32>, <2 x i32>* %s4, align 4
 %t1 = shufflevector <2 x i32> %l1, <2 x i32> %l2, <4 x i32>
 %t2 = shufflevector <2 x i32> %l3, <2 x i32> %l4, <4 x i32>
 %s = shufflevector <4 x i32> %t1, <4 x i32> %t2, <8 x i32>
@@ -43,10 +47,10 @@ entry:
 define void @vst4_v4i32(<4 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-LABEL: vst4_v4i32:
 ; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldrw.u32 q3, [r0, #48]
 ; CHECK-NEXT: vldrw.u32 q2, [r0, #32]
 ; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
 ; CHECK-NEXT: vldrw.u32 q0, [r0]
-; CHECK-NEXT: vmov q3, q2
 ; CHECK-NEXT: vst40.32 {q0, q1, q2, q3}, [r1]
 ; CHECK-NEXT: vst41.32 {q0, q1, q2, q3}, [r1]
 ; CHECK-NEXT: vst42.32 {q0, q1, q2, q3}, [r1]
@@ -60,7 +64,7 @@ entry:
 %s3 = getelementptr <4 x i32>, <4 x i32>* %src, i32 2
 %l3 = load <4 x i32>, <4 x i32>* %s3, align 4
 %s4 = getelementptr <4 x i32>, <4 x i32>* %src, i32 3
- %l4 = load <4 x i32>, <4 x i32>* %s3, align 4
+ %l4 = load <4 x i32>, <4 x i32>* %s4, align 4
 %t1 = shufflevector <4 x i32> %l1, <4 x i32> %l2, <8 x i32>
 %t2 = shufflevector <4 x i32> %l3, <4 x i32> %l4, <8 x i32>
 %s = shufflevector <8 x i32> %t1, <8 x i32> %t2, <16 x i32>
@@ -73,14 +77,14 @@ define void @vst4_v8i32(<8 x i32> *%src, <32 x i32> *%dst) {
 ; CHECK: @ %bb.0: @ %entry
 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vldrw.u32 q7, [r0, #96]
+; CHECK-NEXT: vldrw.u32 q3, [r0, #112]
 ; CHECK-NEXT: vldrw.u32 q6, [r0, #64]
 ; CHECK-NEXT: vldrw.u32
q2, [r0, #80] ; CHECK-NEXT: vldrw.u32 q5, [r0, #32] ; CHECK-NEXT: vldrw.u32 q1, [r0, #48] ; CHECK-NEXT: vldrw.u32 q4, [r0] ; CHECK-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-NEXT: vmov q7, q6 -; CHECK-NEXT: vmov q3, q2 ; CHECK-NEXT: vst40.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst41.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst42.32 {q4, q5, q6, q7}, [r1] @@ -99,7 +103,7 @@ entry: %s3 = getelementptr <8 x i32>, <8 x i32>* %src, i32 2 %l3 = load <8 x i32>, <8 x i32>* %s3, align 4 %s4 = getelementptr <8 x i32>, <8 x i32>* %src, i32 3 - %l4 = load <8 x i32>, <8 x i32>* %s3, align 4 + %l4 = load <8 x i32>, <8 x i32>* %s4, align 4 %t1 = shufflevector <8 x i32> %l1, <8 x i32> %l2, <16 x i32> %t2 = shufflevector <8 x i32> %l3, <8 x i32> %l4, <16 x i32> %s = shufflevector <16 x i32> %t1, <16 x i32> %t2, <32 x i32> @@ -116,64 +120,70 @@ define void @vst4_v16i32(<16 x i32> *%src, <64 x i32> *%dst) { ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: .pad #192 ; CHECK-NEXT: sub sp, #192 -; CHECK-NEXT: vldrw.u32 q2, [r0, #144] +; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldrw.u32 q7, [r0, #240] +; CHECK-NEXT: vldrw.u32 q3, [r0, #208] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill ; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldrw.u32 q7, [r0, #224] +; CHECK-NEXT: vldrw.u32 q2, [r0, #144] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill +; CHECK-NEXT: add r2, sp, #128 ; CHECK-NEXT: vldrw.u32 q1, [r0, #80] +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload +; CHECK-NEXT: add r2, sp, #128 ; CHECK-NEXT: vldrw.u32 q6, [r0, #176] ; CHECK-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-NEXT: vstmia r2, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Spill +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill +; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload +; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldrw.u32 q6, [r0, #160] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill ; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload +; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldrw.u32 q5, [r0, #112] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill +; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload +; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldrw.u32 q5, [r0, #96] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill +; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload +; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldrw.u32 q4, [r0, #48] +; CHECK-NEXT: vstmia sp, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Spill +; CHECK-NEXT: vldrw.u32 q3, [r0, #192] ; CHECK-NEXT: vldrw.u32 q2, [r0, #128] ; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill ; CHECK-NEXT: add r2, sp, #64 ; CHECK-NEXT: vldrw.u32 q1, [r0, #64] ; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload -; CHECK-NEXT: add r2, sp, #64 -; CHECK-NEXT: vmov q7, q6 ; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vldrw.u32 q6, [r0, #160] -; CHECK-NEXT: vmov q3, q2 -; CHECK-NEXT: vstmia sp, {d8, d9, d10, d11, d12, d13, d14, d15} @ 
64-byte Spill -; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vldrw.u32 q5, [r0, #112] -; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vldmia sp, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload -; CHECK-NEXT: vldrw.u32 q5, [r0, #96] -; CHECK-NEXT: vstmia sp, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill -; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vldrw.u32 q4, [r0, #48] -; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill -; CHECK-NEXT: add r2, sp, #64 -; CHECK-NEXT: vldmia sp, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload ; CHECK-NEXT: vldrw.u32 q4, [r0, #32] ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: vst40.32 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst41.32 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst42.32 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst43.32 {q0, q1, q2, q3}, [r0]! -; CHECK-NEXT: vldmia r2, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Reload -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vmov q7, q6 +; CHECK-NEXT: vldmia sp, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Reload ; CHECK-NEXT: vst40.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst41.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst42.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst43.32 {q0, q1, q2, q3}, [r0] -; CHECK-NEXT: vldmia r2, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Reload ; CHECK-NEXT: add.w r0, r1, #192 ; CHECK-NEXT: adds r1, #128 -; CHECK-NEXT: vmov q3, q2 ; CHECK-NEXT: vst40.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst41.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst42.32 {q4, q5, q6, q7}, [r1] +; CHECK-NEXT: vst43.32 {q4, q5, q6, q7}, [r1] +; CHECK-NEXT: add r1, sp, #128 +; CHECK-NEXT: vldmia r1, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Reload ; CHECK-NEXT: vst40.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst41.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst42.32 {q0, q1, q2, q3}, [r0] -; CHECK-NEXT: vst43.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst43.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: add sp, #192 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} @@ -187,7 +197,7 @@ entry: %s3 = getelementptr <16 x i32>, <16 x i32>* %src, i32 2 %l3 = load <16 x i32>, <16 x i32>* %s3, align 4 %s4 = getelementptr <16 x i32>, <16 x i32>* %src, i32 3 - %l4 = load <16 x i32>, <16 x i32>* %s3, align 4 + %l4 = load <16 x i32>, <16 x i32>* %s4, align 4 %t1 = shufflevector <16 x i32> %l1, <16 x i32> %l2, <32 x i32> %t2 = shufflevector <16 x i32> %l3, <16 x i32> %l4, <32 x i32> %s = shufflevector <32 x i32> %t1, <32 x i32> %t2, <64 x i32> @@ -198,37 +208,33 @@ entry: define void @vst4_v4i32_align1(<4 x i32> *%src, <16 x i32> *%dst) { ; CHECK-LABEL: vst4_v4i32_align1: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r7, lr} -; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: .vsave {d8, d9, d10} -; CHECK-NEXT: vpush {d8, d9, d10} +; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: vldrw.u32 q4, [r0] +; CHECK-NEXT: vldrw.u32 q2, [r0, #16] ; CHECK-NEXT: vldrw.u32 q0, [r0, #32] -; CHECK-NEXT: vldrw.u32 q1, [r0, #16] -; CHECK-NEXT: vmov r2, r3, d1 -; CHECK-NEXT: vmov r12, lr, d0 -; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: vmov s14, r2 -; CHECK-NEXT: vmov s18, r3 -; CHECK-NEXT: vmov s10, lr -; CHECK-NEXT: vmov s20, r12 -; CHECK-NEXT: vmov.f32 s16, s3 -; CHECK-NEXT: vmov.f32 s12, s2 -; CHECK-NEXT: 
vmov.f32 s8, s1 -; CHECK-NEXT: vmov.f32 s1, s4 -; CHECK-NEXT: vmov.f32 s17, s7 -; CHECK-NEXT: vmov.f32 s13, s6 -; CHECK-NEXT: vmov.f32 s9, s5 -; CHECK-NEXT: vmov.f32 s2, s20 -; CHECK-NEXT: vmov.f32 s19, s18 -; CHECK-NEXT: vmov.f32 s15, s14 -; CHECK-NEXT: vstrb.8 q4, [r1, #48] -; CHECK-NEXT: vmov.f32 s11, s10 -; CHECK-NEXT: vstrb.8 q3, [r1, #32] -; CHECK-NEXT: vmov.f32 s3, s20 -; CHECK-NEXT: vstrb.8 q2, [r1, #16] -; CHECK-NEXT: vstrb.8 q0, [r1] -; CHECK-NEXT: vpop {d8, d9, d10} -; CHECK-NEXT: pop {r7, pc} +; CHECK-NEXT: vldrw.u32 q1, [r0, #48] +; CHECK-NEXT: vmov.f32 s12, s17 +; CHECK-NEXT: vmov.f64 d10, d8 +; CHECK-NEXT: vmov.f32 s24, s19 +; CHECK-NEXT: vmov.f32 s13, s9 +; CHECK-NEXT: vmov.f32 s21, s8 +; CHECK-NEXT: vmov.f32 s25, s11 +; CHECK-NEXT: vmov.f32 s14, s1 +; CHECK-NEXT: vmov.f32 s22, s0 +; CHECK-NEXT: vmov.f32 s26, s3 +; CHECK-NEXT: vmov.f32 s0, s18 +; CHECK-NEXT: vmov.f32 s1, s10 +; CHECK-NEXT: vmov.f32 s15, s5 +; CHECK-NEXT: vmov.f32 s23, s4 +; CHECK-NEXT: vstrb.8 q3, [r1, #16] +; CHECK-NEXT: vmov.f32 s27, s7 +; CHECK-NEXT: vstrb.8 q5, [r1] +; CHECK-NEXT: vmov.f32 s3, s6 +; CHECK-NEXT: vstrb.8 q6, [r1, #48] +; CHECK-NEXT: vstrb.8 q0, [r1, #32] +; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: bx lr entry: %s1 = getelementptr <4 x i32>, <4 x i32>* %src, i32 0 %l1 = load <4 x i32>, <4 x i32>* %s1, align 4 @@ -237,7 +243,7 @@ entry: %s3 = getelementptr <4 x i32>, <4 x i32>* %src, i32 2 %l3 = load <4 x i32>, <4 x i32>* %s3, align 4 %s4 = getelementptr <4 x i32>, <4 x i32>* %src, i32 3 - %l4 = load <4 x i32>, <4 x i32>* %s3, align 4 + %l4 = load <4 x i32>, <4 x i32>* %s4, align 4 %t1 = shufflevector <4 x i32> %l1, <4 x i32> %l2, <8 x i32> %t2 = shufflevector <4 x i32> %l3, <4 x i32> %l4, <8 x i32> %s = shufflevector <8 x i32> %t1, <8 x i32> %t2, <16 x i32> @@ -250,26 +256,30 @@ entry: define void @vst4_v2i16(<2 x i16> *%src, <8 x i16> *%dst) { ; CHECK-LABEL: vst4_v2i16: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, lr} -; CHECK-NEXT: push {r4, lr} -; CHECK-NEXT: ldrh r2, [r0, #2] -; CHECK-NEXT: ldrh.w r12, [r0, #4] -; CHECK-NEXT: ldrh r3, [r0, #8] -; CHECK-NEXT: ldrh.w lr, [r0, #6] -; CHECK-NEXT: ldrh r4, [r0, #10] -; CHECK-NEXT: ldrh r0, [r0] -; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 +; CHECK-NEXT: .save {r4, r5, r6, lr} +; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: ldrh r3, [r0, #2] +; CHECK-NEXT: ldrh r2, [r0] +; CHECK-NEXT: ldrh.w r12, [r0, #10] +; CHECK-NEXT: ldrh.w lr, [r0, #4] +; CHECK-NEXT: vmov q1[2], q1[0], r2, r3 +; CHECK-NEXT: ldrh r4, [r0, #12] +; CHECK-NEXT: ldrh r5, [r0, #6] +; CHECK-NEXT: ldrh r6, [r0, #14] +; CHECK-NEXT: ldrh r0, [r0, #8] +; CHECK-NEXT: vmov q0[2], q0[0], r0, r12 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: vmov.16 q1[0], r0 ; CHECK-NEXT: vmov r0, s0 -; CHECK-NEXT: vmov.16 q0[0], r0 -; CHECK-NEXT: vmov.16 q0[1], r12 -; CHECK-NEXT: vmov.16 q0[2], r3 -; CHECK-NEXT: vmov.16 q0[3], r3 -; CHECK-NEXT: vmov.16 q0[4], r2 -; CHECK-NEXT: vmov.16 q0[5], lr -; CHECK-NEXT: vmov.16 q0[6], r4 -; CHECK-NEXT: vmov.16 q0[7], r4 -; CHECK-NEXT: vstrh.16 q0, [r1] -; CHECK-NEXT: pop {r4, pc} +; CHECK-NEXT: vmov.16 q1[1], lr +; CHECK-NEXT: vmov.16 q1[2], r0 +; CHECK-NEXT: vmov.16 q1[3], r4 +; CHECK-NEXT: vmov.16 q1[4], r3 +; CHECK-NEXT: vmov.16 q1[5], r5 +; CHECK-NEXT: vmov.16 q1[6], r12 +; CHECK-NEXT: vmov.16 q1[7], r6 +; CHECK-NEXT: vstrh.16 q1, [r1] +; CHECK-NEXT: pop {r4, r5, r6, pc} entry: %s1 = getelementptr <2 x i16>, <2 x i16>* %src, i32 0 %l1 = load <2 x i16>, <2 x i16>* %s1, align 4 @@ -278,7 +288,7 @@ entry: %s3 = getelementptr <2 x i16>, 
<2 x i16>* %src, i32 2 %l3 = load <2 x i16>, <2 x i16>* %s3, align 4 %s4 = getelementptr <2 x i16>, <2 x i16>* %src, i32 3 - %l4 = load <2 x i16>, <2 x i16>* %s3, align 4 + %l4 = load <2 x i16>, <2 x i16>* %s4, align 4 %t1 = shufflevector <2 x i16> %l1, <2 x i16> %l2, <4 x i32> %t2 = shufflevector <2 x i16> %l3, <2 x i16> %l4, <4 x i32> %s = shufflevector <4 x i16> %t1, <4 x i16> %t2, <8 x i32> @@ -289,36 +299,42 @@ entry: define void @vst4_v4i16(<4 x i16> *%src, <16 x i16> *%dst) { ; CHECK-LABEL: vst4_v4i16: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r7, lr} -; CHECK-NEXT: push {r4, r5, r7, lr} -; CHECK-NEXT: vldrh.u32 q0, [r0, #8] -; CHECK-NEXT: vldrh.u32 q2, [r0, #16] -; CHECK-NEXT: vmov lr, r12, d0 -; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: .save {r4, r5, r6, lr} +; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vldrh.u32 q0, [r0] -; CHECK-NEXT: vmov r4, r5, d1 -; CHECK-NEXT: vmov.16 q1[0], r4 -; CHECK-NEXT: vmov r0, r4, d5 -; CHECK-NEXT: vmov.16 q1[1], r2 -; CHECK-NEXT: vmov.16 q1[2], r0 -; CHECK-NEXT: vmov.16 q1[3], r0 -; CHECK-NEXT: vmov r0, r2, d4 -; CHECK-NEXT: vmov.16 q1[4], r5 -; CHECK-NEXT: vmov.16 q1[5], r3 -; CHECK-NEXT: vmov r3, r5, d0 -; CHECK-NEXT: vmov.16 q0[0], r3 -; CHECK-NEXT: vmov.16 q1[6], r4 -; CHECK-NEXT: vmov.16 q0[1], lr -; CHECK-NEXT: vmov.16 q1[7], r4 -; CHECK-NEXT: vmov.16 q0[2], r0 -; CHECK-NEXT: vstrh.16 q1, [r1, #16] +; CHECK-NEXT: vldrh.u32 q3, [r0, #8] +; CHECK-NEXT: vldrh.u32 q1, [r0, #16] +; CHECK-NEXT: vldrh.u32 q4, [r0, #24] +; CHECK-NEXT: vmov r3, r4, d1 +; CHECK-NEXT: vmov r5, r12, d0 +; CHECK-NEXT: vmov.16 q2[0], r3 +; CHECK-NEXT: vmov.16 q0[0], r5 +; CHECK-NEXT: vmov r0, r5, d7 +; CHECK-NEXT: vmov.16 q2[1], r0 +; CHECK-NEXT: vmov r2, lr, d3 +; CHECK-NEXT: vmov r0, r3, d9 +; CHECK-NEXT: vmov.16 q2[2], r2 +; CHECK-NEXT: vmov.16 q2[3], r0 +; CHECK-NEXT: vmov r0, r6, d8 +; CHECK-NEXT: vmov.16 q2[4], r4 +; CHECK-NEXT: vmov.16 q2[5], r5 +; CHECK-NEXT: vmov r4, r5, d6 +; CHECK-NEXT: vmov.16 q2[6], lr +; CHECK-NEXT: vmov.16 q0[1], r4 +; CHECK-NEXT: vmov.16 q2[7], r3 +; CHECK-NEXT: vmov r3, r2, d2 +; CHECK-NEXT: vmov.16 q0[2], r3 +; CHECK-NEXT: vstrh.16 q2, [r1, #16] ; CHECK-NEXT: vmov.16 q0[3], r0 -; CHECK-NEXT: vmov.16 q0[4], r5 -; CHECK-NEXT: vmov.16 q0[5], r12 +; CHECK-NEXT: vmov.16 q0[4], r12 +; CHECK-NEXT: vmov.16 q0[5], r5 ; CHECK-NEXT: vmov.16 q0[6], r2 -; CHECK-NEXT: vmov.16 q0[7], r2 +; CHECK-NEXT: vmov.16 q0[7], r6 ; CHECK-NEXT: vstrh.16 q0, [r1] -; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r4, r5, r6, pc} entry: %s1 = getelementptr <4 x i16>, <4 x i16>* %src, i32 0 %l1 = load <4 x i16>, <4 x i16>* %s1, align 4 @@ -327,7 +343,7 @@ entry: %s3 = getelementptr <4 x i16>, <4 x i16>* %src, i32 2 %l3 = load <4 x i16>, <4 x i16>* %s3, align 4 %s4 = getelementptr <4 x i16>, <4 x i16>* %src, i32 3 - %l4 = load <4 x i16>, <4 x i16>* %s3, align 4 + %l4 = load <4 x i16>, <4 x i16>* %s4, align 4 %t1 = shufflevector <4 x i16> %l1, <4 x i16> %l2, <8 x i32> %t2 = shufflevector <4 x i16> %l3, <4 x i16> %l4, <8 x i32> %s = shufflevector <8 x i16> %t1, <8 x i16> %t2, <16 x i32> @@ -338,10 +354,10 @@ entry: define void @vst4_v8i16(<8 x i16> *%src, <32 x i16> *%dst) { ; CHECK-LABEL: vst4_v8i16: ; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldrw.u32 q3, [r0, #48] ; CHECK-NEXT: vldrw.u32 q2, [r0, #32] ; CHECK-NEXT: vldrw.u32 q1, [r0, #16] ; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: vmov q3, q2 ; CHECK-NEXT: vst40.16 {q0, q1, q2, q3}, [r1] ; 
CHECK-NEXT: vst41.16 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst42.16 {q0, q1, q2, q3}, [r1] @@ -355,7 +371,7 @@ entry: %s3 = getelementptr <8 x i16>, <8 x i16>* %src, i32 2 %l3 = load <8 x i16>, <8 x i16>* %s3, align 4 %s4 = getelementptr <8 x i16>, <8 x i16>* %src, i32 3 - %l4 = load <8 x i16>, <8 x i16>* %s3, align 4 + %l4 = load <8 x i16>, <8 x i16>* %s4, align 4 %t1 = shufflevector <8 x i16> %l1, <8 x i16> %l2, <16 x i32> %t2 = shufflevector <8 x i16> %l3, <8 x i16> %l4, <16 x i32> %s = shufflevector <16 x i16> %t1, <16 x i16> %t2, <32 x i32> @@ -368,14 +384,14 @@ define void @vst4_v16i16(<16 x i16> *%src, <64 x i16> *%dst) { ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vldrw.u32 q7, [r0, #96] +; CHECK-NEXT: vldrw.u32 q3, [r0, #112] ; CHECK-NEXT: vldrw.u32 q6, [r0, #64] ; CHECK-NEXT: vldrw.u32 q2, [r0, #80] ; CHECK-NEXT: vldrw.u32 q5, [r0, #32] ; CHECK-NEXT: vldrw.u32 q1, [r0, #48] ; CHECK-NEXT: vldrw.u32 q4, [r0] ; CHECK-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-NEXT: vmov q7, q6 -; CHECK-NEXT: vmov q3, q2 ; CHECK-NEXT: vst40.16 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst41.16 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst42.16 {q4, q5, q6, q7}, [r1] @@ -394,7 +410,7 @@ entry: %s3 = getelementptr <16 x i16>, <16 x i16>* %src, i32 2 %l3 = load <16 x i16>, <16 x i16>* %s3, align 4 %s4 = getelementptr <16 x i16>, <16 x i16>* %src, i32 3 - %l4 = load <16 x i16>, <16 x i16>* %s3, align 4 + %l4 = load <16 x i16>, <16 x i16>* %s4, align 4 %t1 = shufflevector <16 x i16> %l1, <16 x i16> %l2, <32 x i32> %t2 = shufflevector <16 x i16> %l3, <16 x i16> %l4, <32 x i32> %s = shufflevector <32 x i16> %t1, <32 x i16> %t2, <64 x i32> @@ -405,56 +421,61 @@ entry: define void @vst4_v8i16_align1(<8 x i16> *%src, <32 x i16> *%dst) { ; CHECK-LABEL: vst4_v8i16_align1: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: vldrw.u32 q2, [r0, #16] -; CHECK-NEXT: vldrw.u32 q1, [r0] -; CHECK-NEXT: vmovx.f16 s2, s5 -; CHECK-NEXT: vmovx.f16 s12, s9 -; CHECK-NEXT: vins.f16 s2, s12 -; CHECK-NEXT: vldrw.u32 q3, [r0, #32] +; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vldrw.u32 q2, [r0, #48] +; CHECK-NEXT: vldrw.u32 q1, [r0, #32] +; CHECK-NEXT: vldrw.u32 q4, [r0, #16] +; CHECK-NEXT: vmovx.f16 s12, s5 ; CHECK-NEXT: vins.f16 s5, s9 -; CHECK-NEXT: vmovx.f16 s24, s11 -; CHECK-NEXT: vmovx.f16 s16, s13 -; CHECK-NEXT: vins.f16 s13, s13 -; CHECK-NEXT: vins.f16 s16, s16 -; CHECK-NEXT: vmov q5, q3 -; CHECK-NEXT: vmov.f32 s23, s16 -; CHECK-NEXT: vmovx.f16 s16, s8 -; CHECK-NEXT: vmov.f32 s0, s5 -; CHECK-NEXT: vmov.f32 s1, s21 -; CHECK-NEXT: vmov.f32 s3, s23 -; CHECK-NEXT: vmovx.f16 s20, s4 +; CHECK-NEXT: vmovx.f16 s0, s9 +; CHECK-NEXT: vmovx.f16 s20, s17 +; CHECK-NEXT: vins.f16 s12, s0 +; CHECK-NEXT: vmov q0, q1 +; CHECK-NEXT: vmov.f32 s3, s12 +; CHECK-NEXT: vldrw.u32 q3, [r0] +; CHECK-NEXT: vmovx.f16 s27, s4 ; CHECK-NEXT: vins.f16 s4, s8 -; CHECK-NEXT: vins.f16 s20, s16 -; CHECK-NEXT: vmov q4, q1 +; CHECK-NEXT: vmovx.f16 s2, s13 +; CHECK-NEXT: vins.f16 s13, s17 +; CHECK-NEXT: vins.f16 s2, s20 +; CHECK-NEXT: vmovx.f16 s20, s8 +; CHECK-NEXT: vins.f16 s27, s20 +; CHECK-NEXT: vmov.f32 s0, s13 +; CHECK-NEXT: vmovx.f16 s28, s12 +; CHECK-NEXT: vmovx.f16 s20, s16 +; CHECK-NEXT: vins.f16 s12, s16 +; CHECK-NEXT: vins.f16 s28, s20 +; CHECK-NEXT: vmov q5, q3 +; 
CHECK-NEXT: vmov.f32 s25, s4 +; CHECK-NEXT: vmov.f32 s22, s28 +; CHECK-NEXT: vmovx.f16 s28, s11 +; CHECK-NEXT: vmov.f32 s21, s4 ; CHECK-NEXT: vmovx.f16 s8, s10 -; CHECK-NEXT: vmov.f32 s18, s20 -; CHECK-NEXT: vmovx.f16 s22, s7 +; CHECK-NEXT: vmov.f32 s23, s27 +; CHECK-NEXT: vmovx.f16 s27, s7 +; CHECK-NEXT: vins.f16 s27, s28 ; CHECK-NEXT: vins.f16 s7, s11 -; CHECK-NEXT: vins.f16 s22, s24 -; CHECK-NEXT: vmovx.f16 s26, s6 -; CHECK-NEXT: vmovx.f16 s19, s12 -; CHECK-NEXT: vins.f16 s12, s12 -; CHECK-NEXT: vmov.f32 s20, s7 +; CHECK-NEXT: vmov.f32 s25, s7 +; CHECK-NEXT: vmovx.f16 s28, s19 +; CHECK-NEXT: vmovx.f16 s26, s15 +; CHECK-NEXT: vins.f16 s15, s19 +; CHECK-NEXT: vins.f16 s26, s28 +; CHECK-NEXT: vmovx.f16 s31, s6 ; CHECK-NEXT: vins.f16 s6, s10 -; CHECK-NEXT: vins.f16 s26, s8 -; CHECK-NEXT: vmov.f32 s17, s12 -; CHECK-NEXT: vmovx.f16 s23, s15 -; CHECK-NEXT: vins.f16 s15, s15 -; CHECK-NEXT: vmov.f32 s24, s6 -; CHECK-NEXT: vmov.f32 s21, s15 -; CHECK-NEXT: vmovx.f16 s27, s14 -; CHECK-NEXT: vins.f16 s14, s14 -; CHECK-NEXT: vins.f16 s19, s19 -; CHECK-NEXT: vmov.f32 s25, s14 -; CHECK-NEXT: vins.f16 s23, s23 -; CHECK-NEXT: vins.f16 s27, s27 -; CHECK-NEXT: vstrb.8 q5, [r1, #48] -; CHECK-NEXT: vstrb.8 q6, [r1, #32] +; CHECK-NEXT: vins.f16 s31, s8 +; CHECK-NEXT: vmov.f32 s29, s6 +; CHECK-NEXT: vmovx.f16 s4, s18 +; CHECK-NEXT: vmovx.f16 s30, s14 +; CHECK-NEXT: vmov.f32 s24, s15 +; CHECK-NEXT: vins.f16 s14, s18 +; CHECK-NEXT: vins.f16 s30, s4 +; CHECK-NEXT: vmov.f32 s28, s14 +; CHECK-NEXT: vstrb.8 q6, [r1, #48] +; CHECK-NEXT: vstrb.8 q7, [r1, #32] ; CHECK-NEXT: vstrb.8 q0, [r1, #16] -; CHECK-NEXT: vstrb.8 q4, [r1] -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: vstrb.8 q5, [r1] +; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: bx lr entry: %s1 = getelementptr <8 x i16>, <8 x i16>* %src, i32 0 @@ -464,7 +485,7 @@ entry: %s3 = getelementptr <8 x i16>, <8 x i16>* %src, i32 2 %l3 = load <8 x i16>, <8 x i16>* %s3, align 4 %s4 = getelementptr <8 x i16>, <8 x i16>* %src, i32 3 - %l4 = load <8 x i16>, <8 x i16>* %s3, align 4 + %l4 = load <8 x i16>, <8 x i16>* %s4, align 4 %t1 = shufflevector <8 x i16> %l1, <8 x i16> %l2, <16 x i32> %t2 = shufflevector <8 x i16> %l3, <8 x i16> %l4, <16 x i32> %s = shufflevector <16 x i16> %t1, <16 x i16> %t2, <32 x i32> @@ -477,26 +498,30 @@ entry: define void @vst4_v2i8(<2 x i8> *%src, <8 x i8> *%dst) { ; CHECK-LABEL: vst4_v2i8: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, lr} -; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: .save {r4, r5, r6, lr} +; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: ldrb r4, [r0, #5] +; CHECK-NEXT: ldrb r5, [r0, #4] ; CHECK-NEXT: ldrb r2, [r0] ; CHECK-NEXT: ldrb r3, [r0, #1] +; CHECK-NEXT: vmov q0[2], q0[0], r5, r4 +; CHECK-NEXT: vmov r5, s0 ; CHECK-NEXT: ldrb.w r12, [r0, #2] ; CHECK-NEXT: vmov q0[2], q0[0], r2, r3 ; CHECK-NEXT: ldrb.w lr, [r0, #3] ; CHECK-NEXT: vmov r2, s0 -; CHECK-NEXT: ldrb r4, [r0, #5] +; CHECK-NEXT: ldrb r6, [r0, #7] ; CHECK-NEXT: vmov.16 q0[0], r2 -; CHECK-NEXT: ldrb r0, [r0, #4] +; CHECK-NEXT: ldrb r0, [r0, #6] ; CHECK-NEXT: vmov.16 q0[1], r12 -; CHECK-NEXT: vmov.16 q0[2], r0 +; CHECK-NEXT: vmov.16 q0[2], r5 ; CHECK-NEXT: vmov.16 q0[3], r0 ; CHECK-NEXT: vmov.16 q0[4], r3 ; CHECK-NEXT: vmov.16 q0[5], lr ; CHECK-NEXT: vmov.16 q0[6], r4 -; CHECK-NEXT: vmov.16 q0[7], r4 +; CHECK-NEXT: vmov.16 q0[7], r6 ; CHECK-NEXT: vstrb.16 q0, [r1] -; CHECK-NEXT: pop {r4, pc} +; CHECK-NEXT: pop {r4, r5, r6, pc} entry: %s1 = getelementptr <2 x i8>, <2 x i8>* %src, i32 0 %l1 = load <2 x i8>, <2 x i8>* 
%s1, align 4 @@ -505,7 +530,7 @@ entry: %s3 = getelementptr <2 x i8>, <2 x i8>* %src, i32 2 %l3 = load <2 x i8>, <2 x i8>* %s3, align 4 %s4 = getelementptr <2 x i8>, <2 x i8>* %src, i32 3 - %l4 = load <2 x i8>, <2 x i8>* %s3, align 4 + %l4 = load <2 x i8>, <2 x i8>* %s4, align 4 %t1 = shufflevector <2 x i8> %l1, <2 x i8> %l2, <4 x i32> %t2 = shufflevector <2 x i8> %l3, <2 x i8> %l4, <4 x i32> %s = shufflevector <4 x i8> %t1, <4 x i8> %t2, <8 x i32> @@ -516,35 +541,44 @@ entry: define void @vst4_v4i8(<4 x i8> *%src, <16 x i8> *%dst) { ; CHECK-LABEL: vst4_v4i8: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r7, lr} -; CHECK-NEXT: push {r4, r5, r7, lr} -; CHECK-NEXT: vldrb.u32 q1, [r0] -; CHECK-NEXT: vldrb.u32 q0, [r0, #4] -; CHECK-NEXT: vldrb.u32 q2, [r0, #8] -; CHECK-NEXT: vmov r4, r5, d2 -; CHECK-NEXT: vmov lr, r12, d1 -; CHECK-NEXT: vmov r2, r3, d0 +; CHECK-NEXT: .save {r4, r5, r6, r7, lr} +; CHECK-NEXT: push {r4, r5, r6, r7, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vldrb.u32 q2, [r0] +; CHECK-NEXT: vldrb.u32 q3, [r0, #4] +; CHECK-NEXT: vldrb.u32 q1, [r0, #8] +; CHECK-NEXT: vldrb.u32 q4, [r0, #12] +; CHECK-NEXT: vmov r4, r5, d4 ; CHECK-NEXT: vmov.8 q0[0], r4 -; CHECK-NEXT: vmov r0, r4, d4 +; CHECK-NEXT: vmov r2, lr, d6 ; CHECK-NEXT: vmov.8 q0[1], r2 +; CHECK-NEXT: vmov r0, r4, d2 +; CHECK-NEXT: vmov r3, r12, d8 ; CHECK-NEXT: vmov.8 q0[2], r0 -; CHECK-NEXT: vmov.8 q0[3], r0 -; CHECK-NEXT: vmov r0, r2, d5 +; CHECK-NEXT: vmov.8 q0[3], r3 +; CHECK-NEXT: vmov r2, r7, d9 ; CHECK-NEXT: vmov.8 q0[4], r5 -; CHECK-NEXT: vmov.8 q0[5], r3 -; CHECK-NEXT: vmov r3, r5, d3 +; CHECK-NEXT: vmov r3, r5, d7 +; CHECK-NEXT: vmov.8 q0[5], lr ; CHECK-NEXT: vmov.8 q0[6], r4 -; CHECK-NEXT: vmov.8 q0[7], r4 -; CHECK-NEXT: vmov.8 q0[8], r3 -; CHECK-NEXT: vmov.8 q0[9], lr -; CHECK-NEXT: vmov.8 q0[10], r0 -; CHECK-NEXT: vmov.8 q0[11], r0 -; CHECK-NEXT: vmov.8 q0[12], r5 -; CHECK-NEXT: vmov.8 q0[13], r12 -; CHECK-NEXT: vmov.8 q0[14], r2 -; CHECK-NEXT: vmov.8 q0[15], r2 +; CHECK-NEXT: vmov r4, r0, d5 +; CHECK-NEXT: vmov.8 q0[7], r12 +; CHECK-NEXT: vmov.8 q0[8], r4 +; CHECK-NEXT: vmov r4, r6, d3 +; CHECK-NEXT: vmov.8 q0[9], r3 +; CHECK-NEXT: vmov.8 q0[10], r4 +; CHECK-NEXT: vmov.8 q0[11], r2 +; CHECK-NEXT: vmov.8 q0[12], r0 +; CHECK-NEXT: vmov.8 q0[13], r5 +; CHECK-NEXT: vmov.8 q0[14], r6 +; CHECK-NEXT: vmov.8 q0[15], r7 ; CHECK-NEXT: vstrb.8 q0, [r1] -; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop {r4, r5, r6, r7, pc} entry: %s1 = getelementptr <4 x i8>, <4 x i8>* %src, i32 0 %l1 = load <4 x i8>, <4 x i8>* %s1, align 4 @@ -553,7 +587,7 @@ entry: %s3 = getelementptr <4 x i8>, <4 x i8>* %src, i32 2 %l3 = load <4 x i8>, <4 x i8>* %s3, align 4 %s4 = getelementptr <4 x i8>, <4 x i8>* %src, i32 3 - %l4 = load <4 x i8>, <4 x i8>* %s3, align 4 + %l4 = load <4 x i8>, <4 x i8>* %s4, align 4 %t1 = shufflevector <4 x i8> %l1, <4 x i8> %l2, <8 x i32> %t2 = shufflevector <4 x i8> %l3, <4 x i8> %l4, <8 x i32> %s = shufflevector <8 x i8> %t1, <8 x i8> %t2, <16 x i32> @@ -564,17 +598,19 @@ entry: define void @vst4_v8i8(<8 x i8> *%src, <32 x i8> *%dst) { ; CHECK-LABEL: vst4_v8i8: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} ; CHECK-NEXT: vldrb.u16 q1, [r0] ; CHECK-NEXT: vldrb.u16 q2, [r0, #8] ; CHECK-NEXT: vldrb.u16 q3, [r0, #16] +; CHECK-NEXT: vldrb.u16 q4, 
[r0, #24] ; CHECK-NEXT: vmov.u16 r2, q1[4] ; CHECK-NEXT: vmov.8 q0[0], r2 ; CHECK-NEXT: vmov.u16 r2, q2[4] ; CHECK-NEXT: vmov.8 q0[1], r2 -; CHECK-NEXT: vmov.u16 r0, q3[4] -; CHECK-NEXT: vmov.8 q0[2], r0 +; CHECK-NEXT: vmov.u16 r2, q3[4] +; CHECK-NEXT: vmov.8 q0[2], r2 +; CHECK-NEXT: vmov.u16 r0, q4[4] ; CHECK-NEXT: vmov.8 q0[3], r0 ; CHECK-NEXT: vmov.u16 r0, q1[5] ; CHECK-NEXT: vmov.8 q0[4], r0 @@ -582,6 +618,7 @@ define void @vst4_v8i8(<8 x i8> *%src, <32 x i8> *%dst) { ; CHECK-NEXT: vmov.8 q0[5], r0 ; CHECK-NEXT: vmov.u16 r0, q3[5] ; CHECK-NEXT: vmov.8 q0[6], r0 +; CHECK-NEXT: vmov.u16 r0, q4[5] ; CHECK-NEXT: vmov.8 q0[7], r0 ; CHECK-NEXT: vmov.u16 r0, q1[6] ; CHECK-NEXT: vmov.8 q0[8], r0 @@ -589,6 +626,7 @@ define void @vst4_v8i8(<8 x i8> *%src, <32 x i8> *%dst) { ; CHECK-NEXT: vmov.8 q0[9], r0 ; CHECK-NEXT: vmov.u16 r0, q3[6] ; CHECK-NEXT: vmov.8 q0[10], r0 +; CHECK-NEXT: vmov.u16 r0, q4[6] ; CHECK-NEXT: vmov.8 q0[11], r0 ; CHECK-NEXT: vmov.u16 r0, q1[7] ; CHECK-NEXT: vmov.8 q0[12], r0 @@ -596,38 +634,43 @@ define void @vst4_v8i8(<8 x i8> *%src, <32 x i8> *%dst) { ; CHECK-NEXT: vmov.8 q0[13], r0 ; CHECK-NEXT: vmov.u16 r0, q3[7] ; CHECK-NEXT: vmov.8 q0[14], r0 +; CHECK-NEXT: vmov.u16 r0, q4[7] ; CHECK-NEXT: vmov.8 q0[15], r0 ; CHECK-NEXT: vmov.u16 r0, q1[0] -; CHECK-NEXT: vmov.8 q4[0], r0 +; CHECK-NEXT: vmov.8 q5[0], r0 ; CHECK-NEXT: vmov.u16 r0, q2[0] -; CHECK-NEXT: vmov.8 q4[1], r0 +; CHECK-NEXT: vmov.8 q5[1], r0 ; CHECK-NEXT: vmov.u16 r0, q3[0] -; CHECK-NEXT: vmov.8 q4[2], r0 -; CHECK-NEXT: vstrb.8 q0, [r1, #16] -; CHECK-NEXT: vmov.8 q4[3], r0 +; CHECK-NEXT: vmov.8 q5[2], r0 +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: vmov.8 q5[3], r0 ; CHECK-NEXT: vmov.u16 r0, q1[1] -; CHECK-NEXT: vmov.8 q4[4], r0 +; CHECK-NEXT: vmov.8 q5[4], r0 ; CHECK-NEXT: vmov.u16 r0, q2[1] -; CHECK-NEXT: vmov.8 q4[5], r0 +; CHECK-NEXT: vmov.8 q5[5], r0 ; CHECK-NEXT: vmov.u16 r0, q3[1] -; CHECK-NEXT: vmov.8 q4[6], r0 -; CHECK-NEXT: vmov.8 q4[7], r0 +; CHECK-NEXT: vmov.8 q5[6], r0 +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: vmov.8 q5[7], r0 ; CHECK-NEXT: vmov.u16 r0, q1[2] -; CHECK-NEXT: vmov.8 q4[8], r0 +; CHECK-NEXT: vmov.8 q5[8], r0 ; CHECK-NEXT: vmov.u16 r0, q2[2] -; CHECK-NEXT: vmov.8 q4[9], r0 +; CHECK-NEXT: vmov.8 q5[9], r0 ; CHECK-NEXT: vmov.u16 r0, q3[2] -; CHECK-NEXT: vmov.8 q4[10], r0 -; CHECK-NEXT: vmov.8 q4[11], r0 +; CHECK-NEXT: vmov.8 q5[10], r0 +; CHECK-NEXT: vmov.u16 r0, q4[2] +; CHECK-NEXT: vmov.8 q5[11], r0 ; CHECK-NEXT: vmov.u16 r0, q1[3] -; CHECK-NEXT: vmov.8 q4[12], r0 +; CHECK-NEXT: vmov.8 q5[12], r0 ; CHECK-NEXT: vmov.u16 r0, q2[3] -; CHECK-NEXT: vmov.8 q4[13], r0 +; CHECK-NEXT: vmov.8 q5[13], r0 ; CHECK-NEXT: vmov.u16 r0, q3[3] -; CHECK-NEXT: vmov.8 q4[14], r0 -; CHECK-NEXT: vmov.8 q4[15], r0 -; CHECK-NEXT: vstrb.8 q4, [r1] -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vmov.8 q5[14], r0 +; CHECK-NEXT: vmov.u16 r0, q4[3] +; CHECK-NEXT: vmov.8 q5[15], r0 +; CHECK-NEXT: vstrb.8 q0, [r1, #16] +; CHECK-NEXT: vstrb.8 q5, [r1] +; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: bx lr entry: %s1 = getelementptr <8 x i8>, <8 x i8>* %src, i32 0 @@ -637,7 +680,7 @@ entry: %s3 = getelementptr <8 x i8>, <8 x i8>* %src, i32 2 %l3 = load <8 x i8>, <8 x i8>* %s3, align 4 %s4 = getelementptr <8 x i8>, <8 x i8>* %src, i32 3 - %l4 = load <8 x i8>, <8 x i8>* %s3, align 4 + %l4 = load <8 x i8>, <8 x i8>* %s4, align 4 %t1 = shufflevector <8 x i8> %l1, <8 x i8> %l2, <16 x i32> %t2 = shufflevector <8 x i8> %l3, <8 x i8> %l4, <16 x i32> %s = shufflevector <16 x i8> %t1, <16 x i8> %t2, <32 x i32> @@ 
-648,10 +691,10 @@ entry: define void @vst4_v16i8(<16 x i8> *%src, <64 x i8> *%dst) { ; CHECK-LABEL: vst4_v16i8: ; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldrw.u32 q3, [r0, #48] ; CHECK-NEXT: vldrw.u32 q2, [r0, #32] ; CHECK-NEXT: vldrw.u32 q1, [r0, #16] ; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: vmov q3, q2 ; CHECK-NEXT: vst40.8 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst41.8 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst42.8 {q0, q1, q2, q3}, [r1] @@ -665,7 +708,7 @@ entry: %s3 = getelementptr <16 x i8>, <16 x i8>* %src, i32 2 %l3 = load <16 x i8>, <16 x i8>* %s3, align 4 %s4 = getelementptr <16 x i8>, <16 x i8>* %src, i32 3 - %l4 = load <16 x i8>, <16 x i8>* %s3, align 4 + %l4 = load <16 x i8>, <16 x i8>* %s4, align 4 %t1 = shufflevector <16 x i8> %l1, <16 x i8> %l2, <32 x i32> %t2 = shufflevector <16 x i8> %l3, <16 x i8> %l4, <32 x i32> %s = shufflevector <32 x i8> %t1, <32 x i8> %t2, <64 x i32> @@ -678,25 +721,29 @@ entry: define void @vst4_v2i64(<2 x i64> *%src, <8 x i64> *%dst) { ; CHECK-LABEL: vst4_v2i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vldrw.u32 q3, [r0] -; CHECK-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-NEXT: vldrw.u32 q1, [r0, #32] -; CHECK-NEXT: vmov.f64 d4, d6 -; CHECK-NEXT: vmov.f32 s9, s13 -; CHECK-NEXT: vmov.f32 s10, s0 -; CHECK-NEXT: vmov.f32 s11, s1 -; CHECK-NEXT: vmov.f32 s0, s14 -; CHECK-NEXT: vstrw.32 q2, [r1] -; CHECK-NEXT: vmov.f32 s1, s15 -; CHECK-NEXT: vmov.f64 d6, d2 -; CHECK-NEXT: vstrw.32 q0, [r1, #32] -; CHECK-NEXT: vmov.f32 s13, s5 -; CHECK-NEXT: vmov.f32 s14, s4 -; CHECK-NEXT: vmov.f32 s15, s5 -; CHECK-NEXT: vmov.f32 s4, s6 +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vldrw.u32 q4, [r0, #32] +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vldrw.u32 q0, [r0, #48] +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vmov.f64 d6, d8 +; CHECK-NEXT: vmov.f64 d10, d4 +; CHECK-NEXT: vmov.f32 s13, s17 +; CHECK-NEXT: vmov.f32 s21, s9 +; CHECK-NEXT: vmov.f32 s14, s0 +; CHECK-NEXT: vmov.f32 s22, s4 +; CHECK-NEXT: vmov.f32 s15, s1 +; CHECK-NEXT: vmov.f32 s23, s5 ; CHECK-NEXT: vstrw.32 q3, [r1, #16] -; CHECK-NEXT: vmov.f32 s5, s7 -; CHECK-NEXT: vstrw.32 q1, [r1, #48] +; CHECK-NEXT: vmov.f32 s0, s18 +; CHECK-NEXT: vstrw.32 q5, [r1] +; CHECK-NEXT: vmov.f32 s4, s10 +; CHECK-NEXT: vmov.f32 s1, s19 +; CHECK-NEXT: vmov.f32 s5, s11 +; CHECK-NEXT: vstrw.32 q0, [r1, #48] +; CHECK-NEXT: vstrw.32 q1, [r1, #32] +; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: bx lr entry: %s1 = getelementptr <2 x i64>, <2 x i64>* %src, i32 0 @@ -706,7 +753,7 @@ entry: %s3 = getelementptr <2 x i64>, <2 x i64>* %src, i32 2 %l3 = load <2 x i64>, <2 x i64>* %s3, align 4 %s4 = getelementptr <2 x i64>, <2 x i64>* %src, i32 3 - %l4 = load <2 x i64>, <2 x i64>* %s3, align 4 + %l4 = load <2 x i64>, <2 x i64>* %s4, align 4 %t1 = shufflevector <2 x i64> %l1, <2 x i64> %l2, <4 x i32> %t2 = shufflevector <2 x i64> %l3, <2 x i64> %l4, <4 x i32> %s = shufflevector <4 x i64> %t1, <4 x i64> %t2, <8 x i32> @@ -719,44 +766,57 @@ define void @vst4_v4i64(<4 x i64> *%src, <16 x i64> *%dst) { ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: vldrw.u32 q5, [r0] +; CHECK-NEXT: .pad #48 +; CHECK-NEXT: sub sp, #48 +; CHECK-NEXT: vldrw.u32 q7, [r0] ; CHECK-NEXT: vldrw.u32 q0, [r0, #32] -; CHECK-NEXT: vldrw.u32 q6, [r0, #16] -; CHECK-NEXT: vldrw.u32 q2, [r0, #48] -; CHECK-NEXT: vmov.f64 d6, d10 -; CHECK-NEXT: vldrw.u32 q4, [r0, #80] -; CHECK-NEXT: vldrw.u32 q1, 
[r0, #64] -; CHECK-NEXT: vmov.f32 s13, s21 -; CHECK-NEXT: vmov.f32 s14, s0 -; CHECK-NEXT: vmov.f32 s15, s1 -; CHECK-NEXT: vmov.f32 s0, s22 -; CHECK-NEXT: vstrw.32 q3, [r1] -; CHECK-NEXT: vmov.f32 s1, s23 -; CHECK-NEXT: vmov.f64 d10, d12 -; CHECK-NEXT: vstrw.32 q0, [r1, #32] -; CHECK-NEXT: vmov.f32 s21, s25 -; CHECK-NEXT: vmov.f32 s22, s8 -; CHECK-NEXT: vmov.f32 s23, s9 -; CHECK-NEXT: vmov.f32 s8, s26 -; CHECK-NEXT: vstrw.32 q5, [r1, #64] -; CHECK-NEXT: vmov.f32 s9, s27 -; CHECK-NEXT: vmov.f64 d12, d2 -; CHECK-NEXT: vstrw.32 q2, [r1, #96] -; CHECK-NEXT: vmov.f64 d14, d8 -; CHECK-NEXT: vmov.f32 s25, s5 -; CHECK-NEXT: vmov.f32 s29, s17 -; CHECK-NEXT: vmov.f32 s26, s4 +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vldrw.u32 q3, [r0, #64] +; CHECK-NEXT: vmov.f64 d12, d14 +; CHECK-NEXT: vldrw.u32 q2, [r0, #96] +; CHECK-NEXT: vstrw.32 q1, [sp, #16] @ 16-byte Spill +; CHECK-NEXT: vldrw.u32 q1, [r0, #80] +; CHECK-NEXT: vldrw.u32 q4, [r0, #48] +; CHECK-NEXT: vldrw.u32 q5, [r0, #112] +; CHECK-NEXT: vstrw.32 q1, [sp, #32] @ 16-byte Spill +; CHECK-NEXT: vmov.f32 s25, s29 +; CHECK-NEXT: vmov.f32 s26, s0 +; CHECK-NEXT: vmov.f32 s27, s1 +; CHECK-NEXT: vmov.f32 s0, s30 +; CHECK-NEXT: vstrw.32 q6, [r1] +; CHECK-NEXT: vmov.f32 s1, s31 +; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill +; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload +; CHECK-NEXT: vmov.f64 d2, d6 +; CHECK-NEXT: vmov.f32 s5, s13 +; CHECK-NEXT: vmov.f64 d14, d0 +; CHECK-NEXT: vmov.f32 s29, s1 +; CHECK-NEXT: vldrw.u32 q0, [sp, #32] @ 16-byte Reload +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vmov.f32 s7, s9 +; CHECK-NEXT: vmov.f32 s8, s14 +; CHECK-NEXT: vstrw.32 q1, [r1, #16] +; CHECK-NEXT: vmov.f32 s9, s15 +; CHECK-NEXT: vmov.f64 d6, d0 +; CHECK-NEXT: vstrw.32 q2, [r1, #48] +; CHECK-NEXT: vmov.f32 s13, s1 +; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload ; CHECK-NEXT: vmov.f32 s30, s16 -; CHECK-NEXT: vmov.f32 s27, s5 -; CHECK-NEXT: vmov.f32 s4, s6 -; CHECK-NEXT: vstrw.32 q6, [r1, #16] ; CHECK-NEXT: vmov.f32 s31, s17 -; CHECK-NEXT: vmov.f32 s16, s18 -; CHECK-NEXT: vstrw.32 q7, [r1, #80] -; CHECK-NEXT: vmov.f32 s5, s7 -; CHECK-NEXT: vmov.f32 s17, s19 -; CHECK-NEXT: vstrw.32 q1, [r1, #48] -; CHECK-NEXT: vstrw.32 q4, [r1, #112] +; CHECK-NEXT: vmov.f32 s16, s2 +; CHECK-NEXT: vstrw.32 q7, [r1, #64] +; CHECK-NEXT: vmov.f32 s17, s3 +; CHECK-NEXT: vldrw.u32 q0, [sp, #32] @ 16-byte Reload +; CHECK-NEXT: vmov.f32 s14, s20 +; CHECK-NEXT: vstrw.32 q4, [r1, #96] +; CHECK-NEXT: vmov.f32 s15, s21 +; CHECK-NEXT: vmov.f32 s20, s2 +; CHECK-NEXT: vstrw.32 q3, [r1, #80] +; CHECK-NEXT: vmov.f32 s21, s3 +; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload +; CHECK-NEXT: vstrw.32 q5, [r1, #112] +; CHECK-NEXT: vstrw.32 q0, [r1, #32] +; CHECK-NEXT: add sp, #48 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: bx lr entry: @@ -767,7 +827,7 @@ entry: %s3 = getelementptr <4 x i64>, <4 x i64>* %src, i32 2 %l3 = load <4 x i64>, <4 x i64>* %s3, align 4 %s4 = getelementptr <4 x i64>, <4 x i64>* %src, i32 3 - %l4 = load <4 x i64>, <4 x i64>* %s3, align 4 + %l4 = load <4 x i64>, <4 x i64>* %s4, align 4 %t1 = shufflevector <4 x i64> %l1, <4 x i64> %l2, <8 x i32> %t2 = shufflevector <4 x i64> %l3, <4 x i64> %l4, <8 x i32> %s = shufflevector <8 x i64> %t1, <8 x i64> %t2, <16 x i32> @@ -786,8 +846,8 @@ define void @vst4_v2f32(<2 x float> *%src, <8 x float> *%dst) { ; CHECK-NEXT: vldr s5, [r0, #12] ; CHECK-NEXT: vldr s2, [r0, #16] ; CHECK-NEXT: vldr s6, [r0, #20] -; CHECK-NEXT: vmov.f32 s3, s2 -; CHECK-NEXT: vmov.f32 s7, s6 +; CHECK-NEXT: vldr 
s3, [r0, #24] +; CHECK-NEXT: vldr s7, [r0, #28] ; CHECK-NEXT: vstrw.32 q0, [r1] ; CHECK-NEXT: vstrw.32 q1, [r1, #16] ; CHECK-NEXT: bx lr @@ -799,7 +859,7 @@ entry: %s3 = getelementptr <2 x float>, <2 x float>* %src, i32 2 %l3 = load <2 x float>, <2 x float>* %s3, align 4 %s4 = getelementptr <2 x float>, <2 x float>* %src, i32 3 - %l4 = load <2 x float>, <2 x float>* %s3, align 4 + %l4 = load <2 x float>, <2 x float>* %s4, align 4 %t1 = shufflevector <2 x float> %l1, <2 x float> %l2, <4 x i32> %t2 = shufflevector <2 x float> %l3, <2 x float> %l4, <4 x i32> %s = shufflevector <4 x float> %t1, <4 x float> %t2, <8 x i32> @@ -810,10 +870,10 @@ entry: define void @vst4_v4f32(<4 x float> *%src, <16 x float> *%dst) { ; CHECK-LABEL: vst4_v4f32: ; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldrw.u32 q3, [r0, #48] ; CHECK-NEXT: vldrw.u32 q2, [r0, #32] ; CHECK-NEXT: vldrw.u32 q1, [r0, #16] ; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: vmov q3, q2 ; CHECK-NEXT: vst40.32 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst41.32 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst42.32 {q0, q1, q2, q3}, [r1] @@ -827,7 +887,7 @@ entry: %s3 = getelementptr <4 x float>, <4 x float>* %src, i32 2 %l3 = load <4 x float>, <4 x float>* %s3, align 4 %s4 = getelementptr <4 x float>, <4 x float>* %src, i32 3 - %l4 = load <4 x float>, <4 x float>* %s3, align 4 + %l4 = load <4 x float>, <4 x float>* %s4, align 4 %t1 = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> %t2 = shufflevector <4 x float> %l3, <4 x float> %l4, <8 x i32> %s = shufflevector <8 x float> %t1, <8 x float> %t2, <16 x i32> @@ -840,14 +900,14 @@ define void @vst4_v8f32(<8 x float> *%src, <32 x float> *%dst) { ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} +; CHECK-NEXT: vldrw.u32 q7, [r0, #96] +; CHECK-NEXT: vldrw.u32 q3, [r0, #112] ; CHECK-NEXT: vldrw.u32 q6, [r0, #64] ; CHECK-NEXT: vldrw.u32 q2, [r0, #80] ; CHECK-NEXT: vldrw.u32 q5, [r0, #32] ; CHECK-NEXT: vldrw.u32 q1, [r0, #48] ; CHECK-NEXT: vldrw.u32 q4, [r0] ; CHECK-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-NEXT: vmov q7, q6 -; CHECK-NEXT: vmov q3, q2 ; CHECK-NEXT: vst40.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst41.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst42.32 {q4, q5, q6, q7}, [r1] @@ -866,7 +926,7 @@ entry: %s3 = getelementptr <8 x float>, <8 x float>* %src, i32 2 %l3 = load <8 x float>, <8 x float>* %s3, align 4 %s4 = getelementptr <8 x float>, <8 x float>* %src, i32 3 - %l4 = load <8 x float>, <8 x float>* %s3, align 4 + %l4 = load <8 x float>, <8 x float>* %s4, align 4 %t1 = shufflevector <8 x float> %l1, <8 x float> %l2, <16 x i32> %t2 = shufflevector <8 x float> %l3, <8 x float> %l4, <16 x i32> %s = shufflevector <16 x float> %t1, <16 x float> %t2, <32 x i32> @@ -883,64 +943,70 @@ define void @vst4_v16f32(<16 x float> *%src, <64 x float> *%dst) { ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: .pad #192 ; CHECK-NEXT: sub sp, #192 -; CHECK-NEXT: vldrw.u32 q2, [r0, #144] +; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldrw.u32 q7, [r0, #240] +; CHECK-NEXT: vldrw.u32 q3, [r0, #208] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill ; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldrw.u32 q7, [r0, #224] +; CHECK-NEXT: vldrw.u32 q2, [r0, #144] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill +; CHECK-NEXT: add r2, sp, #128 ; CHECK-NEXT: vldrw.u32 q1, [r0, #80] +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 
64-byte Reload +; CHECK-NEXT: add r2, sp, #128 ; CHECK-NEXT: vldrw.u32 q6, [r0, #176] ; CHECK-NEXT: vldrw.u32 q0, [r0, #16] -; CHECK-NEXT: vstmia r2, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Spill +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill +; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload +; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldrw.u32 q6, [r0, #160] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill ; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload +; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldrw.u32 q5, [r0, #112] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill +; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload +; CHECK-NEXT: add r2, sp, #64 +; CHECK-NEXT: vldrw.u32 q5, [r0, #96] +; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill +; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload +; CHECK-NEXT: add r2, sp, #128 +; CHECK-NEXT: vldrw.u32 q4, [r0, #48] +; CHECK-NEXT: vstmia sp, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Spill +; CHECK-NEXT: vldrw.u32 q3, [r0, #192] ; CHECK-NEXT: vldrw.u32 q2, [r0, #128] ; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill ; CHECK-NEXT: add r2, sp, #64 ; CHECK-NEXT: vldrw.u32 q1, [r0, #64] ; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload -; CHECK-NEXT: add r2, sp, #64 -; CHECK-NEXT: vmov q7, q6 ; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vldrw.u32 q6, [r0, #160] -; CHECK-NEXT: vmov q3, q2 -; CHECK-NEXT: vstmia sp, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill -; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vldrw.u32 q5, [r0, #112] -; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vldmia sp, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload -; CHECK-NEXT: vldrw.u32 q5, [r0, #96] -; CHECK-NEXT: vstmia sp, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill -; CHECK-NEXT: vldmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vldrw.u32 q4, [r0, #48] -; CHECK-NEXT: vstmia r2, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Spill -; CHECK-NEXT: add r2, sp, #64 -; CHECK-NEXT: vldmia sp, {d8, d9, d10, d11, d12, d13, d14, d15} @ 64-byte Reload ; CHECK-NEXT: vldrw.u32 q4, [r0, #32] ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: vst40.32 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst41.32 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst42.32 {q0, q1, q2, q3}, [r1] ; CHECK-NEXT: vst43.32 {q0, q1, q2, q3}, [r0]! 
-; CHECK-NEXT: vldmia r2, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Reload -; CHECK-NEXT: add r2, sp, #128 -; CHECK-NEXT: vmov q7, q6 +; CHECK-NEXT: vldmia sp, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Reload ; CHECK-NEXT: vst40.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst41.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst42.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst43.32 {q0, q1, q2, q3}, [r0] -; CHECK-NEXT: vldmia r2, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Reload ; CHECK-NEXT: add.w r0, r1, #192 ; CHECK-NEXT: adds r1, #128 -; CHECK-NEXT: vmov q3, q2 ; CHECK-NEXT: vst40.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst41.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst42.32 {q4, q5, q6, q7}, [r1] +; CHECK-NEXT: vst43.32 {q4, q5, q6, q7}, [r1] +; CHECK-NEXT: add r1, sp, #128 +; CHECK-NEXT: vldmia r1, {d0, d1, d2, d3, d4, d5, d6, d7} @ 64-byte Reload ; CHECK-NEXT: vst40.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst41.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: vst42.32 {q0, q1, q2, q3}, [r0] -; CHECK-NEXT: vst43.32 {q4, q5, q6, q7}, [r1] ; CHECK-NEXT: vst43.32 {q0, q1, q2, q3}, [r0] ; CHECK-NEXT: add sp, #192 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} @@ -954,7 +1020,7 @@ entry: %s3 = getelementptr <16 x float>, <16 x float>* %src, i32 2 %l3 = load <16 x float>, <16 x float>* %s3, align 4 %s4 = getelementptr <16 x float>, <16 x float>* %src, i32 3 - %l4 = load <16 x float>, <16 x float>* %s3, align 4 + %l4 = load <16 x float>, <16 x float>* %s4, align 4 %t1 = shufflevector <16 x float> %l1, <16 x float> %l2, <32 x i32> %t2 = shufflevector <16 x float> %l3, <16 x float> %l4, <32 x i32> %s = shufflevector <32 x float> %t1, <32 x float> %t2, <64 x i32> @@ -965,37 +1031,33 @@ entry: define void @vst4_v4f32_align1(<4 x float> *%src, <16 x float> *%dst) { ; CHECK-LABEL: vst4_v4f32_align1: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r7, lr} -; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12} -; CHECK-NEXT: vldrw.u32 q0, [r0, #32] +; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: vldrw.u32 q4, [r0] ; CHECK-NEXT: vldrw.u32 q2, [r0, #16] -; CHECK-NEXT: vldrw.u32 q1, [r0] -; CHECK-NEXT: vmov r2, r3, d1 -; CHECK-NEXT: vmov r12, lr, d0 -; CHECK-NEXT: vmov s18, r2 -; CHECK-NEXT: vmov s22, r3 -; CHECK-NEXT: vmov s14, lr -; CHECK-NEXT: vmov s24, r12 -; CHECK-NEXT: vmov.f32 s20, s7 -; CHECK-NEXT: vmov.f32 s16, s6 -; CHECK-NEXT: vmov.f32 s12, s5 -; CHECK-NEXT: vmov.f32 s5, s8 -; CHECK-NEXT: vmov.f32 s21, s11 +; CHECK-NEXT: vldrw.u32 q0, [r0, #32] +; CHECK-NEXT: vldrw.u32 q1, [r0, #48] +; CHECK-NEXT: vmov.f32 s12, s17 +; CHECK-NEXT: vmov.f64 d10, d8 +; CHECK-NEXT: vmov.f32 s24, s19 ; CHECK-NEXT: vmov.f32 s13, s9 -; CHECK-NEXT: vmov.f32 s17, s10 -; CHECK-NEXT: vmov.f32 s6, s24 -; CHECK-NEXT: vmov.f32 s23, s22 -; CHECK-NEXT: vmov.f32 s19, s2 -; CHECK-NEXT: vstrb.8 q5, [r1, #48] -; CHECK-NEXT: vmov.f32 s15, s14 -; CHECK-NEXT: vstrb.8 q4, [r1, #32] -; CHECK-NEXT: vmov.f32 s7, s0 +; CHECK-NEXT: vmov.f32 s21, s8 +; CHECK-NEXT: vmov.f32 s25, s11 +; CHECK-NEXT: vmov.f32 s14, s1 +; CHECK-NEXT: vmov.f32 s22, s0 +; CHECK-NEXT: vmov.f32 s26, s3 +; CHECK-NEXT: vmov.f32 s0, s18 +; CHECK-NEXT: vmov.f32 s1, s10 +; CHECK-NEXT: vmov.f32 s15, s5 +; CHECK-NEXT: vmov.f32 s23, s4 ; CHECK-NEXT: vstrb.8 q3, [r1, #16] -; CHECK-NEXT: vstrb.8 q1, [r1] -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12} -; CHECK-NEXT: pop {r7, pc} +; CHECK-NEXT: vmov.f32 s27, s7 +; CHECK-NEXT: vstrb.8 q5, [r1] +; CHECK-NEXT: 
vmov.f32 s3, s6 +; CHECK-NEXT: vstrb.8 q6, [r1, #48] +; CHECK-NEXT: vstrb.8 q0, [r1, #32] +; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13} +; CHECK-NEXT: bx lr entry: %s1 = getelementptr <4 x float>, <4 x float>* %src, i32 0 %l1 = load <4 x float>, <4 x float>* %s1, align 4 @@ -1004,7 +1066,7 @@ entry: %s3 = getelementptr <4 x float>, <4 x float>* %src, i32 2 %l3 = load <4 x float>, <4 x float>* %s3, align 4 %s4 = getelementptr <4 x float>, <4 x float>* %src, i32 3 - %l4 = load <4 x float>, <4 x float>* %s3, align 4 + %l4 = load <4 x float>, <4 x float>* %s4, align 4 %t1 = shufflevector <4 x float> %l1, <4 x float> %l2, <8 x i32> %t2 = shufflevector <4 x float> %l3, <4 x float> %l4, <8 x i32> %s = shufflevector <8 x float> %t1, <8 x float> %t2, <16 x i32> @@ -1020,8 +1082,8 @@ define void @vst4_v2f16(<2 x half> *%src, <8 x half> *%dst) { ; CHECK-NEXT: vldmia r0, {s0, s1} ; CHECK-NEXT: vldr s4, [r0, #8] ; CHECK-NEXT: vmovx.f16 s2, s0 +; CHECK-NEXT: vldr s5, [r0, #12] ; CHECK-NEXT: vins.f16 s0, s1 -; CHECK-NEXT: vmov.f32 s5, s4 ; CHECK-NEXT: vmovx.f16 s3, s4 ; CHECK-NEXT: vmovx.f16 s8, s1 ; CHECK-NEXT: vins.f16 s4, s5 @@ -1039,7 +1101,7 @@ entry: %s3 = getelementptr <2 x half>, <2 x half>* %src, i32 2 %l3 = load <2 x half>, <2 x half>* %s3, align 4 %s4 = getelementptr <2 x half>, <2 x half>* %src, i32 3 - %l4 = load <2 x half>, <2 x half>* %s3, align 4 + %l4 = load <2 x half>, <2 x half>* %s4, align 4 %t1 = shufflevector <2 x half> %l1, <2 x half> %l2, <4 x i32> %t2 = shufflevector <2 x half> %l3, <2 x half> %l4, <4 x i32> %s = shufflevector <4 x half> %t1, <4 x half> %t2, <8 x i32> @@ -1052,41 +1114,46 @@ define void @vst4_v4f16(<4 x half> *%src, <16 x half> *%dst) { ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r7, lr} ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: ldrd r2, r12, [r0] -; CHECK-NEXT: ldrd r3, lr, [r0, #8] -; CHECK-NEXT: vmov.32 q0[0], r2 -; CHECK-NEXT: ldrd r2, r0, [r0, #16] -; CHECK-NEXT: vmov.32 q1[0], r3 +; CHECK-NEXT: add.w lr, r0, #16 +; CHECK-NEXT: ldr r2, [r0, #28] +; CHECK-NEXT: ldm.w lr, {r3, r12, lr} +; CHECK-NEXT: vmov.32 q1[0], lr +; CHECK-NEXT: vmov.32 q1[1], r2 +; CHECK-NEXT: vmov.32 q0[0], r3 ; CHECK-NEXT: vmov.32 q0[1], r12 -; CHECK-NEXT: vmov.32 q1[1], lr +; CHECK-NEXT: ldrd r2, r12, [r0] ; CHECK-NEXT: vmov.f32 s2, s4 +; CHECK-NEXT: ldrd r3, r0, [r0, #8] ; CHECK-NEXT: vmov.f32 s3, s5 ; CHECK-NEXT: vmovx.f16 s12, s0 ; CHECK-NEXT: vins.f16 s0, s2 ; CHECK-NEXT: vmovx.f16 s4, s2 +; CHECK-NEXT: vmov.32 q2[0], r3 ; CHECK-NEXT: vins.f16 s12, s4 -; CHECK-NEXT: vmovx.f16 s6, s1 -; CHECK-NEXT: vins.f16 s1, s3 -; CHECK-NEXT: vmovx.f16 s8, s3 -; CHECK-NEXT: vins.f16 s6, s8 -; CHECK-NEXT: vmov.32 q2[0], r2 +; CHECK-NEXT: vmov.32 q1[0], r2 ; CHECK-NEXT: vmov.32 q2[1], r0 -; CHECK-NEXT: vmov.f32 s10, s8 -; CHECK-NEXT: vmov.f32 s11, s9 -; CHECK-NEXT: vmovx.f16 s3, s8 -; CHECK-NEXT: vins.f16 s8, s10 -; CHECK-NEXT: vmovx.f16 s14, s10 -; CHECK-NEXT: vmovx.f16 s7, s9 -; CHECK-NEXT: vins.f16 s3, s14 -; CHECK-NEXT: vins.f16 s9, s11 -; CHECK-NEXT: vmovx.f16 s14, s11 -; CHECK-NEXT: vins.f16 s7, s14 -; CHECK-NEXT: vmov.f32 s4, s1 -; CHECK-NEXT: vmov.f32 s1, s8 -; CHECK-NEXT: vmov.f32 s5, s9 -; CHECK-NEXT: vmov.f32 s2, s12 -; CHECK-NEXT: vstrh.16 q1, [r1, #16] -; CHECK-NEXT: vstrh.16 q0, [r1] +; CHECK-NEXT: vmov.32 q1[1], r12 +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vmov.f32 s7, s9 +; CHECK-NEXT: vmovx.f16 s14, s4 +; CHECK-NEXT: vins.f16 s4, s6 +; CHECK-NEXT: vmovx.f16 s8, s6 +; CHECK-NEXT: vins.f16 s14, s8 +; CHECK-NEXT: vmovx.f16 s11, s1 +; CHECK-NEXT: vins.f16 s1, s3 +; 
CHECK-NEXT: vmovx.f16 s13, s3
+; CHECK-NEXT: vins.f16 s11, s13
+; CHECK-NEXT: vmovx.f16 s10, s5
+; CHECK-NEXT: vins.f16 s5, s7
+; CHECK-NEXT: vmovx.f16 s13, s7
+; CHECK-NEXT: vins.f16 s10, s13
+; CHECK-NEXT: vmov.f32 s8, s5
+; CHECK-NEXT: vmov.f32 s5, s0
+; CHECK-NEXT: vmov.f32 s6, s14
+; CHECK-NEXT: vmov.f32 s9, s1
+; CHECK-NEXT: vmov.f32 s7, s12
+; CHECK-NEXT: vstrh.16 q2, [r1, #16]
+; CHECK-NEXT: vstrh.16 q1, [r1]
 ; CHECK-NEXT: pop {r7, pc}
 entry:
 %s1 = getelementptr <4 x half>, <4 x half>* %src, i32 0
@@ -1096,7 +1163,7 @@ entry:
 %s3 = getelementptr <4 x half>, <4 x half>* %src, i32 2
 %l3 = load <4 x half>, <4 x half>* %s3, align 4
 %s4 = getelementptr <4 x half>, <4 x half>* %src, i32 3
- %l4 = load <4 x half>, <4 x half>* %s3, align 4
+ %l4 = load <4 x half>, <4 x half>* %s4, align 4
 %t1 = shufflevector <4 x half> %l1, <4 x half> %l2, <8 x i32>
 %t2 = shufflevector <4 x half> %l3, <4 x half> %l4, <8 x i32>
 %s = shufflevector <8 x half> %t1, <8 x half> %t2, <16 x i32>
@@ -1107,10 +1174,10 @@ entry:
 define void @vst4_v8f16(<8 x half> *%src, <32 x half> *%dst) {
 ; CHECK-LABEL: vst4_v8f16:
 ; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vldrw.u32 q3, [r0, #48]
 ; CHECK-NEXT: vldrw.u32 q2, [r0, #32]
 ; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
 ; CHECK-NEXT: vldrw.u32 q0, [r0]
-; CHECK-NEXT: vmov q3, q2
 ; CHECK-NEXT: vst40.16 {q0, q1, q2, q3}, [r1]
 ; CHECK-NEXT: vst41.16 {q0, q1, q2, q3}, [r1]
 ; CHECK-NEXT: vst42.16 {q0, q1, q2, q3}, [r1]
@@ -1124,7 +1191,7 @@ entry:
 %s3 = getelementptr <8 x half>, <8 x half>* %src, i32 2
 %l3 = load <8 x half>, <8 x half>* %s3, align 4
 %s4 = getelementptr <8 x half>, <8 x half>* %src, i32 3
- %l4 = load <8 x half>, <8 x half>* %s3, align 4
+ %l4 = load <8 x half>, <8 x half>* %s4, align 4
 %t1 = shufflevector <8 x half> %l1, <8 x half> %l2, <16 x i32>
 %t2 = shufflevector <8 x half> %l3, <8 x half> %l4, <16 x i32>
 %s = shufflevector <16 x half> %t1, <16 x half> %t2, <32 x i32>
@@ -1137,14 +1204,14 @@ define void @vst4_v16f16(<16 x half> *%src, <64 x half> *%dst) {
 ; CHECK: @ %bb.0: @ %entry
 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vldrw.u32 q7, [r0, #96]
+; CHECK-NEXT: vldrw.u32 q3, [r0, #112]
 ; CHECK-NEXT: vldrw.u32 q6, [r0, #64]
 ; CHECK-NEXT: vldrw.u32 q2, [r0, #80]
 ; CHECK-NEXT: vldrw.u32 q5, [r0, #32]
 ; CHECK-NEXT: vldrw.u32 q1, [r0, #48]
 ; CHECK-NEXT: vldrw.u32 q4, [r0]
 ; CHECK-NEXT: vldrw.u32 q0, [r0, #16]
-; CHECK-NEXT: vmov q7, q6
-; CHECK-NEXT: vmov q3, q2
 ; CHECK-NEXT: vst40.16 {q4, q5, q6, q7}, [r1]
 ; CHECK-NEXT: vst41.16 {q4, q5, q6, q7}, [r1]
 ; CHECK-NEXT: vst42.16 {q4, q5, q6, q7}, [r1]
@@ -1163,7 +1230,7 @@ entry:
 %s3 = getelementptr <16 x half>, <16 x half>* %src, i32 2
 %l3 = load <16 x half>, <16 x half>* %s3, align 4
 %s4 = getelementptr <16 x half>, <16 x half>* %src, i32 3
- %l4 = load <16 x half>, <16 x half>* %s3, align 4
+ %l4 = load <16 x half>, <16 x half>* %s4, align 4
 %t1 = shufflevector <16 x half> %l1, <16 x half> %l2, <32 x i32>
 %t2 = shufflevector <16 x half> %l3, <16 x half> %l4, <32 x i32>
 %s = shufflevector <32 x half> %t1, <32 x half> %t2, <64 x i32>
@@ -1178,56 +1245,63 @@ define void @vst4_v8f16_align1(<8 x half> *%src, <32 x half> *%dst) {
 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT: .pad #8
 ; CHECK-NEXT: sub sp, #8
-; CHECK-NEXT: vldrw.u32 q4, [r0, #16]
-; CHECK-NEXT: vldrw.u32 q1, [r0]
-; CHECK-NEXT: vmovx.f16 s30, s5
-; CHECK-NEXT: vmovx.f16 s8, s17
-; CHECK-NEXT: vins.f16 s5, s17
-; CHECK-NEXT: vins.f16 s30, s8
-; CHECK-NEXT: vmovx.f16 s0, s4
-; CHECK-NEXT: vmovx.f16 s8, s16
-; CHECK-NEXT: vins.f16 s4, s16
-; CHECK-NEXT: vins.f16 s0, s8
-; CHECK-NEXT: vmovx.f16 s10, s7
-; CHECK-NEXT: vmovx.f16 s12, s19
-; CHECK-NEXT: vins.f16 s7, s19
-; CHECK-NEXT: vins.f16 s10, s12
-; CHECK-NEXT: vmovx.f16 s14, s6
-; CHECK-NEXT: vmovx.f16 s16, s18
-; CHECK-NEXT: vins.f16 s6, s18
-; CHECK-NEXT: vins.f16 s14, s16
-; CHECK-NEXT: vldrw.u32 q4, [r0, #32]
-; CHECK-NEXT: vstr s0, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: vmov.f32 s28, s5
-; CHECK-NEXT: vmovx.f16 s24, s17
-; CHECK-NEXT: vins.f16 s17, s17
-; CHECK-NEXT: vmovx.f16 s23, s16
-; CHECK-NEXT: vins.f16 s16, s16
-; CHECK-NEXT: vmovx.f16 s11, s19
-; CHECK-NEXT: vins.f16 s19, s19
-; CHECK-NEXT: vmovx.f16 s15, s18
-; CHECK-NEXT: vins.f16 s18, s18
-; CHECK-NEXT: vins.f16 s24, s24
-; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: vmov.f32 s3, s24
+; CHECK-NEXT: vldrw.u32 q7, [r0, #48]
+; CHECK-NEXT: vldrw.u32 q1, [r0, #32]
+; CHECK-NEXT: vldrw.u32 q6, [r0, #16]
+; CHECK-NEXT: vldrw.u32 q2, [r0]
+; CHECK-NEXT: vmovx.f16 s2, s5
+; CHECK-NEXT: vmovx.f16 s0, s29
+; CHECK-NEXT: vins.f16 s2, s0
+; CHECK-NEXT: vmovx.f16 s12, s25
+; CHECK-NEXT: vstr s2, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT: vmovx.f16 s2, s9
+; CHECK-NEXT: vins.f16 s5, s29
+; CHECK-NEXT: vins.f16 s2, s12
+; CHECK-NEXT: vmovx.f16 s19, s4
+; CHECK-NEXT: vmovx.f16 s12, s28
+; CHECK-NEXT: vins.f16 s9, s25
+; CHECK-NEXT: vins.f16 s19, s12
+; CHECK-NEXT: vmovx.f16 s14, s8
+; CHECK-NEXT: vmovx.f16 s12, s24
+; CHECK-NEXT: vins.f16 s14, s12
+; CHECK-NEXT: vins.f16 s4, s28
+; CHECK-NEXT: vstr s14, [sp] @ 4-byte Spill
+; CHECK-NEXT: vmovx.f16 s15, s7
+; CHECK-NEXT: vmovx.f16 s20, s31
+; CHECK-NEXT: vins.f16 s8, s24
+; CHECK-NEXT: vins.f16 s15, s20
+; CHECK-NEXT: vmovx.f16 s20, s27
+; CHECK-NEXT: vmovx.f16 s14, s11
+; CHECK-NEXT: vins.f16 s7, s31
+; CHECK-NEXT: vins.f16 s14, s20
+; CHECK-NEXT: vmovx.f16 s23, s6
+; CHECK-NEXT: vmovx.f16 s28, s30
+; CHECK-NEXT: vins.f16 s6, s30
+; CHECK-NEXT: vins.f16 s23, s28
+; CHECK-NEXT: vins.f16 s11, s27
+; CHECK-NEXT: vmovx.f16 s22, s10
+; CHECK-NEXT: vmovx.f16 s24, s26
+; CHECK-NEXT: vldr s28, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT: vins.f16 s22, s24
+; CHECK-NEXT: vins.f16 s10, s26
 ; CHECK-NEXT: vmov q6, q1
-; CHECK-NEXT: vmov.f32 s29, s1
-; CHECK-NEXT: vins.f16 s11, s11
-; CHECK-NEXT: vins.f16 s15, s15
-; CHECK-NEXT: vins.f16 s23, s23
-; CHECK-NEXT: vmov.f32 s8, s7
-; CHECK-NEXT: vmov.f32 s12, s6
-; CHECK-NEXT: vmov.f32 s9, s19
-; CHECK-NEXT: vmov.f32 s13, s18
-; CHECK-NEXT: vstrb.8 q2, [r1, #48]
-; CHECK-NEXT: vmov.f32 s31, s3
-; CHECK-NEXT: vldr s0, [sp, #4] @ 4-byte Reload
-; CHECK-NEXT: vmov.f32 s21, s16
-; CHECK-NEXT: vstrb.8 q3, [r1, #32]
-; CHECK-NEXT: vmov.f32 s26, s0
-; CHECK-NEXT: vstrb.8 q7, [r1, #16]
-; CHECK-NEXT: vmov.f32 s25, s16
-; CHECK-NEXT: vmov.f32 s27, s23
+; CHECK-NEXT: vmov.f32 s27, s28
+; CHECK-NEXT: vldr s28, [sp] @ 4-byte Reload
+; CHECK-NEXT: vmov.f32 s0, s9
+; CHECK-NEXT: vmov.f32 s1, s25
+; CHECK-NEXT: vmov.f32 s13, s7
+; CHECK-NEXT: vmov.f32 s21, s6
+; CHECK-NEXT: vmov.f32 s12, s11
+; CHECK-NEXT: vmov.f32 s20, s10
+; CHECK-NEXT: vstrb.8 q3, [r1, #48]
+; CHECK-NEXT: vmov.f32 s3, s27
+; CHECK-NEXT: vmov q6, q2
+; CHECK-NEXT: vmov.f32 s26, s28
+; CHECK-NEXT: vstrb.8 q5, [r1, #32]
+; CHECK-NEXT: vmov.f32 s25, s4
+; CHECK-NEXT: vstrb.8 q0, [r1, #16]
+; CHECK-NEXT: vmov.f32 s17, s4
+; CHECK-NEXT: vmov.f32 s27, s19
 ; CHECK-NEXT: vstrb.8 q6, [r1]
 ; CHECK-NEXT: add sp, #8
 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1240,7 +1314,7 @@ entry:
 %s3 = getelementptr <8 x half>, <8 x half>* %src, i32 2
 %l3 = load <8 x half>, <8 x half>* %s3, align 4
 %s4 = getelementptr <8 x half>, <8 x half>* %src, i32 3
- %l4 = load <8 x half>, <8 x half>* %s3, align 4
+ %l4 = load <8 x half>, <8 x half>* %s4, align 4
 %t1 = shufflevector <8 x half> %l1, <8 x half> %l2, <16 x i32>
 %t2 = shufflevector <8 x half> %l3, <8 x half> %l4, <16 x i32>
 %s = shufflevector <16 x half> %t1, <16 x half> %t2, <32 x i32>
@@ -1253,19 +1327,23 @@ entry:
 define void @vst4_v2f64(<2 x double> *%src, <8 x double> *%dst) {
 ; CHECK-LABEL: vst4_v2f64:
 ; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vldrw.u32 q1, [r0]
-; CHECK-NEXT: vldrw.u32 q0, [r0, #16]
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
 ; CHECK-NEXT: vldrw.u32 q2, [r0, #32]
-; CHECK-NEXT: vmov.f64 d6, d2
-; CHECK-NEXT: vmov.f64 d7, d0
-; CHECK-NEXT: vmov.f64 d0, d3
-; CHECK-NEXT: vstrw.32 q3, [r1]
-; CHECK-NEXT: vmov.f64 d2, d4
-; CHECK-NEXT: vstrw.32 q0, [r1, #32]
-; CHECK-NEXT: vmov.f64 d3, d4
-; CHECK-NEXT: vmov.f64 d4, d5
-; CHECK-NEXT: vstrw.32 q1, [r1, #16]
-; CHECK-NEXT: vstrw.32 q2, [r1, #48]
+; CHECK-NEXT: vldrw.u32 q3, [r0]
+; CHECK-NEXT: vldrw.u32 q0, [r0, #48]
+; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT: vmov.f64 d8, d4
+; CHECK-NEXT: vmov.f64 d10, d6
+; CHECK-NEXT: vmov.f64 d9, d0
+; CHECK-NEXT: vmov.f64 d11, d2
+; CHECK-NEXT: vstrw.32 q4, [r1, #16]
+; CHECK-NEXT: vmov.f64 d0, d5
+; CHECK-NEXT: vstrw.32 q5, [r1]
+; CHECK-NEXT: vmov.f64 d2, d7
+; CHECK-NEXT: vstrw.32 q0, [r1, #48]
+; CHECK-NEXT: vstrw.32 q1, [r1, #32]
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
 ; CHECK-NEXT: bx lr
 entry:
 %s1 = getelementptr <2 x double>, <2 x double>* %src, i32 0
@@ -1275,7 +1353,7 @@ entry:
 %s3 = getelementptr <2 x double>, <2 x double>* %src, i32 2
 %l3 = load <2 x double>, <2 x double>* %s3, align 4
 %s4 = getelementptr <2 x double>, <2 x double>* %src, i32 3
- %l4 = load <2 x double>, <2 x double>* %s3, align 4
+ %l4 = load <2 x double>, <2 x double>* %s4, align 4
 %t1 = shufflevector <2 x double> %l1, <2 x double> %l2, <4 x i32>
 %t2 = shufflevector <2 x double> %l3, <2 x double> %l4, <4 x i32>
 %s = shufflevector <4 x double> %t1, <4 x double> %t2, <8 x i32>
@@ -1288,32 +1366,45 @@ define void @vst4_v4f64(<4 x double> *%src, <16 x double> *%dst) {
 ; CHECK: @ %bb.0: @ %entry
 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: vldrw.u32 q5, [r0]
-; CHECK-NEXT: vldrw.u32 q0, [r0, #32]
-; CHECK-NEXT: vldrw.u32 q6, [r0, #16]
-; CHECK-NEXT: vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT: vmov.f64 d4, d10
+; CHECK-NEXT: .pad #64
+; CHECK-NEXT: sub sp, #64
+; CHECK-NEXT: vldrw.u32 q7, [r0, #80]
+; CHECK-NEXT: vldrw.u32 q6, [r0]
+; CHECK-NEXT: vldrw.u32 q5, [r0, #32]
+; CHECK-NEXT: vldrw.u32 q2, [r0, #64]
+; CHECK-NEXT: vstrw.32 q7, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT: vmov.f64 d14, d12
+; CHECK-NEXT: vldrw.u32 q1, [r0, #96]
+; CHECK-NEXT: vldrw.u32 q0, [r0, #16]
 ; CHECK-NEXT: vldrw.u32 q3, [r0, #48]
-; CHECK-NEXT: vldrw.u32 q1, [r0, #64]
-; CHECK-NEXT: vmov.f64 d5, d0
-; CHECK-NEXT: vmov.f64 d0, d11
-; CHECK-NEXT: vstrw.32 q2, [r1]
-; CHECK-NEXT: vmov.f64 d10, d12
-; CHECK-NEXT: vstrw.32 q0, [r1, #32]
-; CHECK-NEXT: vmov.f64 d11, d6
-; CHECK-NEXT: vmov.f64 d6, d13
-; CHECK-NEXT: vstrw.32 q5, [r1, #64]
-; CHECK-NEXT: vmov.f64 d12, d2
-; CHECK-NEXT: vstrw.32 q3, [r1, #96]
-; CHECK-NEXT: vmov.f64 d14, d8
-; CHECK-NEXT: vmov.f64 d13, d2
-; CHECK-NEXT: vmov.f64 d15, d8
-; CHECK-NEXT: vstrw.32 q6, [r1, #16]
-; CHECK-NEXT: vmov.f64 d2, d3
-; CHECK-NEXT: vstrw.32 q7, [r1, #80]
-; CHECK-NEXT: vmov.f64 d8, d9
+; CHECK-NEXT: vldrw.u32 q4, [r0, #112]
+; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vmov.f64 d15, d10
+; CHECK-NEXT: vstrw.32 q7, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT: vmov.f64 d14, d4
+; CHECK-NEXT: vmov.f64 d15, d2
+; CHECK-NEXT: vmov.f64 d2, d5
+; CHECK-NEXT: vstrw.32 q7, [sp] @ 16-byte Spill
+; CHECK-NEXT: vldrw.u32 q7, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vmov.f64 d4, d0
+; CHECK-NEXT: vldrw.u32 q0, [sp, #32] @ 16-byte Reload
 ; CHECK-NEXT: vstrw.32 q1, [r1, #48]
+; CHECK-NEXT: vmov.f64 d10, d13
+; CHECK-NEXT: vmov.f64 d12, d0
+; CHECK-NEXT: vstrw.32 q5, [r1, #32]
+; CHECK-NEXT: vmov.f64 d5, d6
+; CHECK-NEXT: vmov.f64 d13, d8
+; CHECK-NEXT: vstrw.32 q2, [r1, #64]
+; CHECK-NEXT: vmov.f64 d8, d1
+; CHECK-NEXT: vldrw.u32 q0, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT: vstrw.32 q6, [r1, #80]
+; CHECK-NEXT: vstrw.32 q0, [r1]
+; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
+; CHECK-NEXT: vmov.f64 d6, d15
 ; CHECK-NEXT: vstrw.32 q4, [r1, #112]
+; CHECK-NEXT: vstrw.32 q0, [r1, #16]
+; CHECK-NEXT: vstrw.32 q3, [r1, #96]
+; CHECK-NEXT: add sp, #64
 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT: bx lr
 entry:
@@ -1324,7 +1415,7 @@ entry:
 %s3 = getelementptr <4 x double>, <4 x double>* %src, i32 2
 %l3 = load <4 x double>, <4 x double>* %s3, align 4
 %s4 = getelementptr <4 x double>, <4 x double>* %src, i32 3
- %l4 = load <4 x double>, <4 x double>* %s3, align 4
+ %l4 = load <4 x double>, <4 x double>* %s4, align 4
 %t1 = shufflevector <4 x double> %l1, <4 x double> %l2, <8 x i32>
 %t2 = shufflevector <4 x double> %l3, <4 x double> %l4, <8 x i32>
 %s = shufflevector <8 x double> %t1, <8 x double> %t2, <16 x i32>