
[X86] Make sure we don't remove sign extends of masks with AVX2 masked gathers.

With AVX2 we don't use k-registers; the gather instructions test the MSB of each mask element instead, so we need to make sure the mask is sign extended so that the i1 value lands in the MSB.

llvm-svn: 319405
Craig Topper 2017-11-30 06:31:31 +00:00
parent 51595f2d85
commit a9ba95e8f2
2 changed files with 52 additions and 7 deletions
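For context on the fix: the AVX2 gather instructions consult only the most significant bit of each mask element, so an i1 mask left in bit 0 silently deactivates every lane. A minimal standalone demonstration using the public intrinsics (a sketch for illustration only; the variable names and values are mine, not part of this commit; build with -mavx2):

// Demonstrates that AVX2 gathers read only the MSB of each mask lane.
#include <immintrin.h>
#include <cstdio>

int main() {
  int data[4] = {10, 20, 30, 40};
  __m128i idx      = _mm_setr_epi32(0, 1, 2, 3);
  __m128i passthru = _mm_set1_epi32(-7);   // kept in inactive lanes
  // i1 "true" left in bit 0 only: MSB clear, so every lane is inactive.
  __m128i lsb_mask  = _mm_set1_epi32(1);
  // The same i1 sign extended across the lane: MSB set, lanes active.
  __m128i sext_mask = _mm_set1_epi32(-1);
  __m128i r0 = _mm_mask_i32gather_epi32(passthru, data, idx, lsb_mask, 4);
  __m128i r1 = _mm_mask_i32gather_epi32(passthru, data, idx, sext_mask, 4);
  int a[4], b[4];
  _mm_storeu_si128((__m128i *)a, r0);
  _mm_storeu_si128((__m128i *)b, r1);
  printf("bit-0 mask:    %d %d %d %d\n", a[0], a[1], a[2], a[3]); // -7 -7 -7 -7
  printf("sign-ext mask: %d %d %d %d\n", b[0], b[1], b[2], b[3]); // 10 20 30 40
  return 0;
}

Dropping the sign extend turns the second case into the first, which is exactly the miscompile this patch prevents.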

lib/Target/X86/X86ISelLowering.cpp

@@ -35924,7 +35924,8 @@ static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
 }
 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
-                                    TargetLowering::DAGCombinerInfo &DCI) {
+                                    TargetLowering::DAGCombinerInfo &DCI,
+                                    const X86Subtarget &Subtarget) {
   SDLoc DL(N);
   // Pre-shrink oversized index elements to avoid triggering scalarization.
@@ -35967,7 +35968,7 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
   // the mask is v*i1. So the mask will be truncated anyway.
   // The SIGN_EXTEND_INREG may be dropped.
   SDValue Mask = N->getOperand(2);
-  if (Mask.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+  if (Subtarget.hasAVX512() && Mask.getOpcode() == ISD::SIGN_EXTEND_INREG) {
     SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
     NewOps[2] = Mask.getOperand(0);
     DAG.UpdateNodeOperands(N, NewOps);
@@ -37079,7 +37080,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case X86ISD::FMADDSUB:
   case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, Subtarget);
   case ISD::MGATHER:
-  case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI);
+  case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI, Subtarget);
   case X86ISD::TESTM: return combineTestM(N, DAG, Subtarget);
   case X86ISD::PCMPEQ:
   case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
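The test updates below show the code AVX2 must now emit to move the i1 into the MSB: vpslld $31 / vpsrad $31 for 32-bit mask lanes, and vpsllq $63 followed by vpcmpgtq against zero for 64-bit lanes, because AVX2 has no 64-bit arithmetic right shift (vpsraq arrived with AVX-512). A scalar model of the two idioms, as a sketch (the helper names are made up for illustration):

#include <cstdint>
#include <cstdio>

// 32-bit lanes: vpslld $31 / vpsrad $31 -- move bit 0 into the sign bit,
// then arithmetic-shift it back across the whole lane. (Right-shifting a
// negative value is arithmetic on all mainstream compilers.)
int32_t sext_lane32(uint32_t v) {
  return (int32_t)(v << 31) >> 31;
}

// 64-bit lanes: no vpsraq on AVX2, so emit vpsllq $63 plus a signed
// compare against zero (vpxor + vpcmpgtq); "0 > x" holds exactly when
// the MSB is set, and the compare already yields all-ones or all-zeros.
int64_t sext_lane64(uint64_t v) {
  int64_t shifted = (int64_t)(v << 63);  // vpsllq $63
  return 0 > shifted ? -1 : 0;           // vpcmpgtq against a zeroed register
}

int main() {
  printf("%d %d\n", sext_lane32(1), sext_lane32(2));           // -1 0: only bit 0 counts
  printf("%d %d\n", (int)sext_lane64(1), (int)sext_lane64(2)); // -1 0
  return 0;
}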

test/CodeGen/X86/avx2-masked-gather.ll

@@ -9,6 +9,9 @@ declare <2 x i32> @llvm.masked.gather.v2i32(<2 x i32*> %ptrs, i32 %align, <2 x i
 define <2 x i32> @masked_gather_v2i32(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i32> %passthro) {
 ; X86-LABEL: masked_gather_v2i32:
 ; X86: # BB#0: # %entry
+; X86-NEXT: vpsllq $63, %xmm0, %xmm0
+; X86-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X86-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
 ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -19,6 +22,9 @@ define <2 x i32> @masked_gather_v2i32(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i3
 ;
 ; X64-LABEL: masked_gather_v2i32:
 ; X64: # BB#0: # %entry
+; X64-NEXT: vpsllq $63, %xmm0, %xmm0
+; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT: vmovdqa (%rdi), %xmm2
 ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -57,6 +63,9 @@ entry:
 define <4 x i32> @masked_gather_v2i32_concat(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i32> %passthro) {
 ; X86-LABEL: masked_gather_v2i32_concat:
 ; X86: # BB#0: # %entry
+; X86-NEXT: vpsllq $63, %xmm0, %xmm0
+; X86-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X86-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
 ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -68,6 +77,9 @@ define <4 x i32> @masked_gather_v2i32_concat(<2 x i32*>* %ptr, <2 x i1> %masks,
 ;
 ; X64-LABEL: masked_gather_v2i32_concat:
 ; X64: # BB#0: # %entry
+; X64-NEXT: vpsllq $63, %xmm0, %xmm0
+; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT: vmovdqa (%rdi), %xmm2
 ; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
@@ -112,6 +124,8 @@ define <2 x float> @masked_gather_v2float(<2 x float*>* %ptr, <2 x i1> %masks, <
 ; X86-LABEL: masked_gather_v2float:
 ; X86: # BB#0: # %entry
 ; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; X86-NEXT: vpslld $31, %xmm0, %xmm0
+; X86-NEXT: vpsrad $31, %xmm0, %xmm0
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; X86-NEXT: vgatherdps %xmm0, (,%xmm2), %xmm1
@@ -121,6 +135,8 @@ define <2 x float> @masked_gather_v2float(<2 x float*>* %ptr, <2 x i1> %masks, <
 ; X64-LABEL: masked_gather_v2float:
 ; X64: # BB#0: # %entry
 ; X64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; X64-NEXT: vpslld $31, %xmm0, %xmm0
+; X64-NEXT: vpsrad $31, %xmm0, %xmm0
 ; X64-NEXT: vmovaps (%rdi), %xmm2
 ; X64-NEXT: vgatherqps %xmm0, (,%ymm2), %xmm1
 ; X64-NEXT: vmovaps %xmm1, %xmm0
@@ -159,6 +175,8 @@ define <4 x float> @masked_gather_v2float_concat(<2 x float*>* %ptr, <2 x i1> %m
 ; X86-LABEL: masked_gather_v2float_concat:
 ; X86: # BB#0: # %entry
 ; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; X86-NEXT: vpslld $31, %xmm0, %xmm0
+; X86-NEXT: vpsrad $31, %xmm0, %xmm0
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; X86-NEXT: vgatherdps %xmm0, (,%xmm2), %xmm1
@@ -168,6 +186,8 @@ define <4 x float> @masked_gather_v2float_concat(<2 x float*>* %ptr, <2 x i1> %m
 ; X64-LABEL: masked_gather_v2float_concat:
 ; X64: # BB#0: # %entry
 ; X64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
+; X64-NEXT: vpslld $31, %xmm0, %xmm0
+; X64-NEXT: vpsrad $31, %xmm0, %xmm0
 ; X64-NEXT: vmovaps (%rdi), %xmm2
 ; X64-NEXT: vgatherqps %xmm0, (,%ymm2), %xmm1
 ; X64-NEXT: vmovaps %xmm1, %xmm0
@@ -209,12 +229,16 @@ declare <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ptrs, i32 %align, <4 x i
 define <4 x i32> @masked_gather_v4i32(<4 x i32*> %ptrs, <4 x i1> %masks, <4 x i32> %passthro) {
 ; X86-LABEL: masked_gather_v4i32:
 ; X86: # BB#0: # %entry
+; X86-NEXT: vpslld $31, %xmm1, %xmm1
+; X86-NEXT: vpsrad $31, %xmm1, %xmm1
 ; X86-NEXT: vpgatherdd %xmm1, (,%xmm0), %xmm2
 ; X86-NEXT: vmovdqa %xmm2, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: masked_gather_v4i32:
 ; X64: # BB#0: # %entry
+; X64-NEXT: vpslld $31, %xmm1, %xmm1
+; X64-NEXT: vpsrad $31, %xmm1, %xmm1
 ; X64-NEXT: vpgatherqd %xmm1, (,%ymm0), %xmm2
 ; X64-NEXT: vmovdqa %xmm2, %xmm0
 ; X64-NEXT: vzeroupper
@@ -267,12 +291,16 @@ declare <4 x float> @llvm.masked.gather.v4float(<4 x float*> %ptrs, i32 %align,
 define <4 x float> @masked_gather_v4float(<4 x float*> %ptrs, <4 x i1> %masks, <4 x float> %passthro) {
 ; X86-LABEL: masked_gather_v4float:
 ; X86: # BB#0: # %entry
+; X86-NEXT: vpslld $31, %xmm1, %xmm1
+; X86-NEXT: vpsrad $31, %xmm1, %xmm1
 ; X86-NEXT: vgatherdps %xmm1, (,%xmm0), %xmm2
 ; X86-NEXT: vmovaps %xmm2, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: masked_gather_v4float:
 ; X64: # BB#0: # %entry
+; X64-NEXT: vpslld $31, %xmm1, %xmm1
+; X64-NEXT: vpsrad $31, %xmm1, %xmm1
 ; X64-NEXT: vgatherqps %xmm1, (,%ymm0), %xmm2
 ; X64-NEXT: vmovaps %xmm2, %xmm0
 ; X64-NEXT: vzeroupper
@@ -326,6 +354,8 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
 ; X86-LABEL: masked_gather_v8i32:
 ; X86: # BB#0: # %entry
 ; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-NEXT: vpslld $31, %ymm0, %ymm0
+; X86-NEXT: vpsrad $31, %ymm0, %ymm0
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vmovdqa (%eax), %ymm2
 ; X86-NEXT: vpgatherdd %ymm0, (,%ymm2), %ymm1
@@ -441,6 +471,8 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
 ; X86-LABEL: masked_gather_v8float:
 ; X86: # BB#0: # %entry
 ; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-NEXT: vpslld $31, %ymm0, %ymm0
+; X86-NEXT: vpsrad $31, %ymm0, %ymm0
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: vmovaps (%eax), %ymm2
 ; X86-NEXT: vgatherdps %ymm0, (,%ymm2), %ymm1
@@ -710,14 +742,20 @@ declare <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*> %ptrs, i32 %align, <2 x i
 define <2 x i64> @masked_gather_v2i64(<2 x i64*>* %ptr, <2 x i1> %masks, <2 x i64> %passthro) {
 ; X86-LABEL: masked_gather_v2i64:
 ; X86: # BB#0: # %entry
+; X86-NEXT: vpsllq $63, %xmm0, %xmm0
+; X86-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X86-NEXT: vpgatherqq %xmm0, (,%xmm2), %xmm1
+; X86-NEXT: vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
+; X86-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
+; X86-NEXT: vpgatherqq %xmm0, (,%xmm3), %xmm1
 ; X86-NEXT: vmovdqa %xmm1, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: masked_gather_v2i64:
 ; X64: # BB#0: # %entry
+; X64-NEXT: vpsllq $63, %xmm0, %xmm0
+; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT: vmovdqa (%rdi), %xmm2
 ; X64-NEXT: vpgatherqq %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT: vmovdqa %xmm1, %xmm0
@@ -755,14 +793,20 @@ declare <2 x double> @llvm.masked.gather.v2double(<2 x double*> %ptrs, i32 %alig
 define <2 x double> @masked_gather_v2double(<2 x double*>* %ptr, <2 x i1> %masks, <2 x double> %passthro) {
 ; X86-LABEL: masked_gather_v2double:
 ; X86: # BB#0: # %entry
+; X86-NEXT: vpsllq $63, %xmm0, %xmm0
+; X86-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
-; X86-NEXT: vgatherqpd %xmm0, (,%xmm2), %xmm1
+; X86-NEXT: vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
+; X86-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
+; X86-NEXT: vgatherqpd %xmm0, (,%xmm3), %xmm1
 ; X86-NEXT: vmovapd %xmm1, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: masked_gather_v2double:
 ; X64: # BB#0: # %entry
+; X64-NEXT: vpsllq $63, %xmm0, %xmm0
+; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
 ; X64-NEXT: vmovapd (%rdi), %xmm2
 ; X64-NEXT: vgatherqpd %xmm0, (,%xmm2), %xmm1
 ; X64-NEXT: vmovapd %xmm1, %xmm0