
Fix for bug 41512: lower INSERT_VECTOR_ELT(ZeroVec, 0, Elt) to SCALAR_TO_VECTOR(Elt) for all SSE flavors

Currently, LLVM uses pxor+pinsrb on SSE4+ for INSERT_VECTOR_ELT(ZeroVec, 0, Elt) instead of the much simpler movd.
INSERT_VECTOR_ELT(ZeroVec, 0, Elt) is an idiomatic construct used, e.g., for _mm_cvtsi32_si128(Elt) and for initializing the lowest element in _mm_set_epi32.
This inefficient lowering therefore causes significant performance degradation in certain cases when switching from SSSE3 to SSE4.
https://bugs.llvm.org/show_bug.cgi?id=41512
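
As an illustration (a minimal sketch of the affected idiom, not part of the patch; the before/after instruction sequences are taken from the test diff below):

    #include <immintrin.h>

    // _mm_cvtsi32_si128 builds a vector whose low element is Elt and whose
    // upper elements are zero -- i.e. INSERT_VECTOR_ELT(ZeroVec, 0, Elt).
    __m128i lowest_i32(int Elt) {
      return _mm_cvtsi32_si128(Elt);
    }

    // SSE4.1+ codegen before this patch:  pxor   %xmm0, %xmm0
    //                                     pinsrd $0, %edi, %xmm0
    // After this patch:                   movd   %edi, %xmm0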

Here, INSERT_VECTOR_ELT(ZeroVec, 0, Elt) is simply converted to SCALAR_TO_VECTOR(Elt) when applicable, since the latter is a closer match to the desired behavior and is always lowered efficiently to movd and the like. Because SCALAR_TO_VECTOR leaves the upper elements undefined, the result is wrapped in a shuffle that zeroes everything but element 0, which then folds into the single move.

Committed on behalf of @Serge_Preis (Serge Preis)

Differential Revision: https://reviews.llvm.org/D60852

llvm-svn: 359545
Simon Pilgrim 2019-04-30 10:18:25 +00:00
parent 9d0721b992
commit af2892e778
2 changed files with 37 additions and 101 deletions

lib/Target/X86/X86ISelLowering.cpp

@@ -17019,6 +17019,14 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
  }

  assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");

  // This will be just movd/movq/movss/movsd.
  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode()) &&
      (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
       EltVT == MVT::i64)) {
    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
    return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
  }

  // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
  // argument. SSE41 required for pinsrb.
  if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
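
For the other element types covered by the new fast path (see the movd/movq/movss/movsd comment above), the same idiom is reachable through the usual intrinsics; a minimal sketch, assuming x86-64 for the 64-bit integer case:

    #include <immintrin.h>

    __m128i lowest_i64(long long Elt) { return _mm_cvtsi64_si128(Elt); } // movq
    __m128  lowest_f32(float Elt)     { return _mm_set_ss(Elt); }        // movss
    __m128d lowest_f64(double Elt)    { return _mm_set_sd(Elt); }        // movsd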

test/CodeGen/X86/insertelement-zero.ll

@@ -500,45 +500,19 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
ret <32 x i8> %4
}
; FIXME: Prefer 'movd' over 'pinsr' to element 0.
define <4 x i32> @PR41512(i32 %x, i32 %y) {
; SSE2-LABEL: PR41512:
; SSE2: # %bb.0:
; SSE2-NEXT: movd %edi, %xmm0
; SSE2-NEXT: movd %esi, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: PR41512:
; SSE3: # %bb.0:
; SSE3-NEXT: movd %edi, %xmm0
; SSE3-NEXT: movd %esi, %xmm1
; SSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: PR41512:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movd %edi, %xmm0
; SSSE3-NEXT: movd %esi, %xmm1
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR41512:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrd $0, %edi, %xmm0
; SSE41-NEXT: pinsrd $0, %esi, %xmm1
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: retq
; SSE-LABEL: PR41512:
; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
; SSE-NEXT: movd %esi, %xmm1
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: PR41512:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm1
; AVX-NEXT: vpinsrd $0, %esi, %xmm0, %xmm0
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vmovd %edi, %xmm0
; AVX-NEXT: vmovd %esi, %xmm1
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%ins1 = insertelement <4 x i32> <i32 undef, i32 0, i32 undef, i32 undef>, i32 %x, i32 0
%ins2 = insertelement <4 x i32> <i32 undef, i32 0, i32 undef, i32 undef>, i32 %y, i32 0
@@ -547,46 +521,24 @@ define <4 x i32> @PR41512(i32 %x, i32 %y) {
}
define <4 x i64> @PR41512_v4i64(i64 %x, i64 %y) {
; SSE2-LABEL: PR41512_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %xmm0
; SSE2-NEXT: movq %rsi, %xmm1
; SSE2-NEXT: retq
;
; SSE3-LABEL: PR41512_v4i64:
; SSE3: # %bb.0:
; SSE3-NEXT: movq %rdi, %xmm0
; SSE3-NEXT: movq %rsi, %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: PR41512_v4i64:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %rdi, %xmm0
; SSSE3-NEXT: movq %rsi, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR41512_v4i64:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrq $0, %rdi, %xmm0
; SSE41-NEXT: pinsrq $0, %rsi, %xmm1
; SSE41-NEXT: retq
; SSE-LABEL: PR41512_v4i64:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %xmm0
; SSE-NEXT: movq %rsi, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: PR41512_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
; AVX1-NEXT: vpinsrq $0, %rsi, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vmovq %rsi, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR41512_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
; AVX2-NEXT: vpinsrq $0, %rsi, %xmm0, %xmm0
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vmovq %rsi, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
%ins1 = insertelement <4 x i64> <i64 undef, i64 0, i64 undef, i64 undef>, i64 %x, i32 0
%ins2 = insertelement <4 x i64> <i64 undef, i64 0, i64 undef, i64 undef>, i64 %y, i32 0
@@ -648,42 +600,18 @@ define <8 x float> @PR41512_v8f32(float %x, float %y) {
}
define <4 x i32> @PR41512_loads(i32* %p1, i32* %p2) {
; SSE2-LABEL: PR41512_loads:
; SSE2: # %bb.0:
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE3-LABEL: PR41512_loads:
; SSE3: # %bb.0:
; SSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE3-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE3-NEXT: retq
;
; SSSE3-LABEL: PR41512_loads:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR41512_loads:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pinsrd $0, (%rdi), %xmm0
; SSE41-NEXT: pinsrd $0, (%rsi), %xmm1
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: retq
; SSE-LABEL: PR41512_loads:
; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: PR41512_loads:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpinsrd $0, (%rdi), %xmm0, %xmm1
; AVX-NEXT: vpinsrd $0, (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%x = load i32, i32* %p1
%y = load i32, i32* %p2