[X86][SSE] Fold SIGN_EXTEND(SIGN_EXTEND_VECTOR_INREG(X)) -> SIGN_EXTEND_VECTOR_INREG(X)
It should be possible to make this generic, but we're not great at checking legality of *_EXTEND_VECTOR_INREG ops, so I'm conservatively putting this inside X86ISelLowering.cpp.
parent 79a8f1c79c
commit 19d99cd594
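As a side note, the generic DAGCombiner version alluded to in the commit message might look roughly like the sketch below. This is only an illustration of the idea, not part of this commit; the placement in DAGCombiner::visitSIGN_EXTEND and the exact legality query are assumptions about how such a check could be phrased.

    // Hypothetical sketch (not in this commit): perform the fold in
    // DAGCombiner::visitSIGN_EXTEND, but only when the target can handle
    // SIGN_EXTEND_VECTOR_INREG at the wider result type.
    SDValue N0 = N->getOperand(0);
    EVT VT = N->getValueType(0);
    if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
        TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND_VECTOR_INREG, VT))
      return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, SDLoc(N), VT,
                         N0.getOperand(0));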
@@ -46769,10 +46769,14 @@ static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
     return V;
 
-  if (VT.isVector())
+  if (VT.isVector()) {
     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
       return R;
 
+    if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
+      return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
+  }
+
   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
     return NewAdd;
 
@@ -462,9 +462,8 @@ define <8 x double> @load_v8f64_v8i16(<8 x i16> %trigger, <8 x double>* %addr, <
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm5
-; AVX1-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm4
@@ -482,8 +481,7 @@ define <8 x double> @load_v8f64_v8i16(<8 x i16> %trigger, <8 x double>* %addr, <
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
 ; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT:    vpmovsxwq %xmm3, %ymm3
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxwq %xmm0, %ymm0
 ; AVX2-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm4
@@ -1782,9 +1780,8 @@ define <8 x i64> @load_v8i64_v8i16(<8 x i16> %trigger, <8 x i64>* %addr, <8 x i6
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm5
-; AVX1-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm4
@@ -1802,8 +1799,7 @@ define <8 x i64> @load_v8i64_v8i16(<8 x i16> %trigger, <8 x i64>* %addr, <8 x i6
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
 ; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT:    vpmovsxwq %xmm3, %ymm3
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxwq %xmm0, %ymm0
 ; AVX2-NEXT:    vpmaskmovq (%rdi), %ymm0, %ymm4