
[X86] Modify a few tests to not use icmps that are provably false.

These tests used comparisons such as unsigned less-than zero, which are always false because no unsigned number is less than zero.

I plan to teach DAG combine to fold these comparisons, so the tests need to stop relying on them.

llvm-svn: 324315
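
For illustration, a minimal IR sketch of the degenerate pattern and its replacement (not code from this commit; the function names are hypothetical):

    ; Provably false: no unsigned value is less than zero, so %mask is
    ; constant <2 x i1> zeroinitializer and the compare can be folded
    ; away entirely.
    define <2 x i1> @always_false(<2 x i32> %a) {
      %mask = icmp ult <2 x i32> %a, zeroinitializer
      ret <2 x i1> %mask
    }

    ; Replacement used by the updated tests: true exactly when an
    ; element is nonzero, so the compare survives optimization.
    define <2 x i1> @nonzero_mask(<2 x i32> %a) {
      %mask = icmp ne <2 x i32> %a, zeroinitializer
      ret <2 x i1> %mask
    }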
Craig Topper 2018-02-06 06:44:05 +00:00
parent f29f02b282
commit 794e34cca4
3 changed files with 15 additions and 17 deletions


@@ -2113,7 +2113,7 @@ define <2 x float> @ubto2f32(<2 x i32> %a) {
; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; NOVL-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
+; NOVL-NEXT: vptestmq %zmm0, %zmm0, %k1
; NOVL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; NOVL-NEXT: vcvtdq2ps %xmm0, %xmm0
; NOVL-NEXT: vzeroupper
@@ -2123,11 +2123,11 @@ define <2 x float> @ubto2f32(<2 x i32> %a) {
; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; VL-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
+; VL-NEXT: vptestmq %xmm0, %xmm0, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
; VL-NEXT: vcvtdq2ps %xmm0, %xmm0
; VL-NEXT: retq
-%mask = icmp ult <2 x i32> %a, zeroinitializer
+%mask = icmp ne <2 x i32> %a, zeroinitializer
%1 = uitofp <2 x i1> %mask to <2 x float>
ret <2 x float> %1
}
@@ -2137,7 +2137,7 @@ define <2 x double> @ubto2f64(<2 x i32> %a) {
; NOVL: # %bb.0:
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; NOVL-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
+; NOVL-NEXT: vptestmq %zmm0, %zmm0, %k1
; NOVL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0
; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
@@ -2148,11 +2148,11 @@ define <2 x double> @ubto2f64(<2 x i32> %a) {
; VL: # %bb.0:
; VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; VL-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
+; VL-NEXT: vptestmq %xmm0, %xmm0, %k1
; VL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
; VL-NEXT: vcvtudq2pd %xmm0, %xmm0
; VL-NEXT: retq
-%mask = icmp ult <2 x i32> %a, zeroinitializer
+%mask = icmp ne <2 x i32> %a, zeroinitializer
%1 = uitofp <2 x i1> %mask to <2 x double>
ret <2 x double> %1
}
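
A note on the checked output above: the compare against zero now lowers to the AVX-512 test-mask instructions (vptestmq here, vptestmd later in this commit) instead of an unsigned less-than compare. vptestm* sets each mask bit to (a[i] AND b[i]) != 0, so with the same register for both sources it reduces to an element-is-nonzero test, which is exactly the new icmp ne. A rough IR sketch of that equivalence (hypothetical function name, assuming the <2 x i64> element width of the q-suffixed form):

    ; k[i] = ((x[i] & x[i]) != 0) = (x[i] != 0),
    ; i.e. the effect of vptestmq %x, %x, %k
    define <2 x i1> @testm_q_equiv(<2 x i64> %x) {
      %and = and <2 x i64> %x, %x        ; x & x is just x
      %k = icmp ne <2 x i64> %and, zeroinitializer
      ret <2 x i1> %k
    }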


@@ -2911,7 +2911,7 @@ define <2 x float> @ubto2f32(<2 x i32> %a) {
; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.50]
-; GENERIC-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
+; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [5:1.00]
; GENERIC-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [3:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
@@ -2920,11 +2920,11 @@ define <2 x float> @ubto2f32(<2 x i32> %a) {
; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.33]
-; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
+; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
; SKX-NEXT: vcvtdq2ps %xmm0, %xmm0 # sched: [4:0.33]
; SKX-NEXT: retq # sched: [7:1.00]
-%mask = icmp ult <2 x i32> %a, zeroinitializer
+%mask = icmp ne <2 x i32> %a, zeroinitializer
%1 = uitofp <2 x i1> %mask to <2 x float>
ret <2 x float> %1
}
@@ -2934,7 +2934,7 @@ define <2 x double> @ubto2f64(<2 x i32> %a) {
; GENERIC: # %bb.0:
; GENERIC-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; GENERIC-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.50]
-; GENERIC-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
+; GENERIC-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [1:1.00]
; GENERIC-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [5:1.00]
; GENERIC-NEXT: vcvtudq2pd %xmm0, %xmm0 # sched: [4:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
@@ -2943,11 +2943,11 @@ define <2 x double> @ubto2f64(<2 x i32> %a) {
; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1 # sched: [1:0.33]
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] sched: [1:0.33]
-; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 # sched: [3:1.00]
+; SKX-NEXT: vptestmq %xmm0, %xmm0, %k1 # sched: [3:1.00]
; SKX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z} # sched: [7:0.50]
; SKX-NEXT: vcvtudq2pd %xmm0, %xmm0 # sched: [5:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
-%mask = icmp ult <2 x i32> %a, zeroinitializer
+%mask = icmp ne <2 x i32> %a, zeroinitializer
%1 = uitofp <2 x i1> %mask to <2 x double>
ret <2 x double> %1
}


@@ -25859,8 +25859,7 @@ define i32 @test_cmpm_rnd_zero(<16 x float> %a, <16 x float> %b) {
define i8 @mask_zero_lower(<4 x i32> %a) {
; VLX-LABEL: mask_zero_lower:
; VLX: # %bb.0:
-; VLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; VLX-NEXT: vptestmd %xmm0, %xmm0, %k0
; VLX-NEXT: kshiftlb $4, %k0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: # kill: def $al killed $al killed $eax
@@ -25869,15 +25868,14 @@ define i8 @mask_zero_lower(<4 x i32> %a) {
; NoVLX-LABEL: mask_zero_lower:
; NoVLX: # %bb.0:
; NoVLX-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; NoVLX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $12, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: # kill: def $al killed $al killed $eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
-%cmp = icmp ult <4 x i32> %a, zeroinitializer
+%cmp = icmp ne <4 x i32> %a, zeroinitializer
%concat = shufflevector <4 x i1> %cmp, <4 x i1> zeroinitializer, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
%cast = bitcast <8 x i1> %concat to i8
ret i8 %cast