1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-10-19 11:02:59 +02:00

[X86][SSE] Added tests to ensure that consecutive loads including any/all volatiles are not combined

llvm-svn: 264225
This commit is contained in:
Simon Pilgrim 2016-03-24 00:14:37 +00:00
parent 082bed0b87
commit 618084030f
3 changed files with 275 additions and 0 deletions

View File

@@ -202,6 +202,7 @@ define <4 x float> @merge_4f32_f32_45zz(float* %ptr) nounwind uwtable noinline s
%res1 = insertelement <4 x float> %res0, float %val1, i32 1
ret <4 x float> %res1
}
define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline ssp {
; SSE2-LABEL: merge_4f32_f32_012u:
; SSE2: # BB#0:
@@ -654,3 +655,90 @@ define void @merge_4i32_i32_combine(<4 x i32>* %dst, i32* %src) {
store <4 x i32> %6, <4 x i32>* %dst
ret void
}
;
; consecutive loads including any/all volatiles may not be combined
;
; Both i64 loads below are volatile, so the backend must emit two independent
; scalar loads (movq, or movd+pinsrd on 32-bit) and recombine them, rather
; than folding the consecutive accesses into a single 128-bit vector load.
define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinline ssp {
; SSE-LABEL: merge_2i64_i64_12_volatile:
; SSE: # BB#0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: merge_2i64_i64_12_volatile:
; AVX: # BB#0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
;
; X32-SSE-LABEL: merge_2i64_i64_12_volatile:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT: pinsrd $1, 12(%eax), %xmm0
; X32-SSE-NEXT: pinsrd $2, 16(%eax), %xmm0
; X32-SSE-NEXT: pinsrd $3, 20(%eax), %xmm0
; X32-SSE-NEXT: retl
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
%ptr1 = getelementptr inbounds i64, i64* %ptr, i64 2
%val0 = load volatile i64, i64* %ptr0 ; volatile - must stay a separate load
%val1 = load volatile i64, i64* %ptr1 ; volatile - must stay a separate load
%res0 = insertelement <2 x i64> undef, i64 %val0, i32 0
%res1 = insertelement <2 x i64> %res0, i64 %val1, i32 1
ret <2 x i64> %res1
}
; Only the FIRST of the four consecutive f32 loads is volatile, but that is
; already enough to block the combine: the CHECK lines verify four separate
; scalar loads (movss + unpcklps/insertps) instead of one 128-bit load.
define <4 x float> @merge_4f32_f32_2345_volatile(float* %ptr) nounwind uwtable noinline ssp {
; SSE2-LABEL: merge_4f32_f32_2345_volatile:
; SSE2: # BB#0:
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_2345_volatile:
; SSE41: # BB#0:
; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_2345_volatile:
; AVX: # BB#0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; AVX-NEXT: retq
;
; X32-SSE-LABEL: merge_4f32_f32_2345_volatile:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X32-SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X32-SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X32-SSE-NEXT: retl
%ptr0 = getelementptr inbounds float, float* %ptr, i64 2
%ptr1 = getelementptr inbounds float, float* %ptr, i64 3
%ptr2 = getelementptr inbounds float, float* %ptr, i64 4
%ptr3 = getelementptr inbounds float, float* %ptr, i64 5
%val0 = load volatile float, float* %ptr0 ; single volatile load blocks the whole merge
%val1 = load float, float* %ptr1
%val2 = load float, float* %ptr2
%val3 = load float, float* %ptr3
%res0 = insertelement <4 x float> undef, float %val0, i32 0
%res1 = insertelement <4 x float> %res0, float %val1, i32 1
%res2 = insertelement <4 x float> %res1, float %val2, i32 2
%res3 = insertelement <4 x float> %res2, float %val3, i32 3
ret <4 x float> %res3
}

View File

@@ -641,3 +641,116 @@ define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounw
%resH = insertelement <32 x i8> %resG, i8 0, i8 17
ret <32 x i8> %resH
}
;
; consecutive loads including any/all volatiles may not be combined
;
; Both f64 loads are volatile: the CHECK lines verify two separate 8-byte
; loads (vmovsd + vmovhpd) rather than one 16-byte vmovupd; lane 2 is undef
; and lane 3 is zeroed via the xor + vinsertf128.
define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable noinline ssp {
; AVX1-LABEL: merge_4f64_f64_34uz_volatile:
; AVX1: # BB#0:
; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: merge_4f64_f64_34uz_volatile:
; AVX2: # BB#0:
; AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: merge_4f64_f64_34uz_volatile:
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_34uz_volatile:
; X32-AVX: # BB#0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-AVX-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 3
%ptr1 = getelementptr inbounds double, double* %ptr, i64 4
%val0 = load volatile double, double* %ptr0 ; volatile - must stay a separate load
%val1 = load volatile double, double* %ptr1 ; volatile - must stay a separate load
%res0 = insertelement <4 x double> undef, double %val0, i32 0
%res1 = insertelement <4 x double> %res0, double %val1, i32 1
%res3 = insertelement <4 x double> %res1, double 0.0, i32 3
ret <4 x double> %res3
}
; Sparse <16 x i16> build: elements 0 and F are volatile loads, 3/C/E are
; normal loads, 4/5/D are explicit zeros, the rest undef. The volatile
; endpoints keep every element loaded individually via pinsrw instead of
; allowing any wide consecutive-load combine.
define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind uwtable noinline ssp {
; AVX1-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; AVX1: # BB#0:
; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1
; AVX1-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX1-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; AVX2: # BB#0:
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1
; AVX2-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX2-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; AVX512F: # BB#0:
; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm1
; AVX512F-NEXT: vpinsrw $3, 6(%rdi), %xmm1, %xmm1
; AVX512F-NEXT: vpinsrw $4, 24(%rdi), %xmm0, %xmm0
; AVX512F-NEXT: vpinsrw $6, 28(%rdi), %xmm0, %xmm0
; AVX512F-NEXT: vpinsrw $7, 30(%rdi), %xmm0, %xmm0
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512F-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; X32-AVX: # BB#0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $0, (%eax), %xmm0, %xmm1
; X32-AVX-NEXT: vpinsrw $3, 6(%eax), %xmm1, %xmm1
; X32-AVX-NEXT: vpinsrw $4, 24(%eax), %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $6, 28(%eax), %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $7, 30(%eax), %xmm0, %xmm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
%ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
%ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
%ptrE = getelementptr inbounds i16, i16* %ptr, i64 14
%ptrF = getelementptr inbounds i16, i16* %ptr, i64 15
%val0 = load volatile i16, i16* %ptr0 ; volatile first element
%val3 = load i16, i16* %ptr3
%valC = load i16, i16* %ptrC
%valE = load i16, i16* %ptrE
%valF = load volatile i16, i16* %ptrF ; volatile last element
%res0 = insertelement <16 x i16> undef, i16 %val0, i16 0
%res3 = insertelement <16 x i16> %res0, i16 %val3, i16 3
%res4 = insertelement <16 x i16> %res3, i16 0, i16 4
%res5 = insertelement <16 x i16> %res4, i16 0, i16 5
%resC = insertelement <16 x i16> %res5, i16 %valC, i16 12
%resD = insertelement <16 x i16> %resC, i16 0, i16 13
%resE = insertelement <16 x i16> %resD, i16 %valE, i16 14
%resF = insertelement <16 x i16> %resE, i16 %valF, i16 15
ret <16 x i16> %resF
}

View File

@@ -642,3 +642,77 @@ define <64 x i8> @merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu
%res63 = insertelement <64 x i8> %res17, i8 0, i8 63
ret <64 x i8> %res63
}
;
; consecutive loads including any/all volatiles may not be combined
;
; Only the first load (element 0, at ptr+2) is volatile; elements 1 and 7
; are normal loads. The CHECK lines verify the low pair is still built from
; two separate 8-byte loads (vmovsd + vmovhpd) rather than one vmovupd.
define <8 x double> @merge_8f64_f64_23uuuuu9_volatile(double* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8f64_f64_23uuuuu9_volatile:
; ALL: # BB#0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; ALL-NEXT: vbroadcastsd 72(%rdi), %ymm1
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_23uuuuu9_volatile:
; X32-AVX512F: # BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; X32-AVX512F-NEXT: vbroadcastsd 72(%eax), %ymm1
; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 2
%ptr1 = getelementptr inbounds double, double* %ptr, i64 3
%ptr7 = getelementptr inbounds double, double* %ptr, i64 9
%val0 = load volatile double, double* %ptr0 ; volatile - blocks merging with %val1
%val1 = load double, double* %ptr1
%val7 = load double, double* %ptr7
%res0 = insertelement <8 x double> undef, double %val0, i32 0
%res1 = insertelement <8 x double> %res0, double %val1, i32 1
%res7 = insertelement <8 x double> %res1, double %val7, i32 7
ret <8 x double> %res7
}
; ALL five loads are volatile: every element must reach the vector through
; its own scalar load (vmovd / vpinsrd); no pair may be combined into a
; wider load even though 14 and 15 are adjacent in memory.
define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile(i32* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
; ALL: # BB#0:
; ALL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ALL-NEXT: vpinsrd $3, 12(%rdi), %xmm0, %xmm0
; ALL-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ALL-NEXT: vpinsrd $2, 56(%rdi), %xmm1, %xmm1
; ALL-NEXT: vpinsrd $3, 60(%rdi), %xmm1, %xmm1
; ALL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; ALL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
; X32-AVX512F: # BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX512F-NEXT: vpinsrd $3, 12(%eax), %xmm0, %xmm0
; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-AVX512F-NEXT: vpinsrd $2, 56(%eax), %xmm1, %xmm1
; X32-AVX512F-NEXT: vpinsrd $3, 60(%eax), %xmm1, %xmm1
; X32-AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; X32-AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
%ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
%ptrC = getelementptr inbounds i32, i32* %ptr, i64 12
%ptrE = getelementptr inbounds i32, i32* %ptr, i64 14
%ptrF = getelementptr inbounds i32, i32* %ptr, i64 15
%val0 = load volatile i32, i32* %ptr0
%val3 = load volatile i32, i32* %ptr3
%valC = load volatile i32, i32* %ptrC
%valE = load volatile i32, i32* %ptrE
%valF = load volatile i32, i32* %ptrF
%res0 = insertelement <16 x i32> undef, i32 %val0, i32 0
%res3 = insertelement <16 x i32> %res0, i32 %val3, i32 3
%resC = insertelement <16 x i32> %res3, i32 %valC, i32 12
%resE = insertelement <16 x i32> %resC, i32 %valE, i32 14
%resF = insertelement <16 x i32> %resE, i32 %valF, i32 15
ret <16 x i32> %resF
}