[X86][AVX] insert_subvector(bitcast(v), bitcast(s), c1) -> bitcast(insert_subvector(v,s,c2))
Similar to what we already do in DAGCombiner, but this version also handles bitcasts from types with different scalar sizes, which x86 is better at handling.

Differential Revision: https://reviews.llvm.org/D57514

llvm-svn: 352773
This commit is contained in:
parent d6bb2bcfab
commit 1a3ae748a1
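To make the index adjustment concrete, here is a minimal standalone sketch of the rescaling arithmetic the new combine performs. The helper name rescaleInsertIndex and its plain-integer interface are hypothetical illustrations, not part of this patch or of the LLVM API:

#include <cassert>
#include <cstdio>

// Hedged sketch (not LLVM code): given the scalar size of the original
// insert_subvector result type (OpEltBits), the scalar size of the
// subvector's bitcast source (SubSrcBits), the original element count
// (NumElts) and insertion index c1 (IdxVal), compute the element count
// and index c2 of the rewritten insert_subvector.
struct Rescaled {
  unsigned NumElts; // element count of the rewritten vector type
  unsigned Idx;     // rewritten insertion index (c2)
  bool Valid;       // false when no legal rescaling exists
};

Rescaled rescaleInsertIndex(unsigned OpEltBits, unsigned SubSrcBits,
                            unsigned NumElts, unsigned IdxVal) {
  if (OpEltBits % SubSrcBits == 0) {
    // Narrower source scalars: the new type has Scale times as many
    // elements, so the index scales up by the same factor.
    unsigned Scale = OpEltBits / SubSrcBits;
    return {NumElts * Scale, IdxVal * Scale, true};
  }
  if (SubSrcBits % OpEltBits == 0) {
    // Wider source scalars: only valid when the index lands on a whole
    // wide element; element count and index both scale down.
    unsigned Scale = SubSrcBits / OpEltBits;
    if (IdxVal % Scale == 0)
      return {NumElts / Scale, IdxVal / Scale, true};
  }
  return {0, 0, false};
}

int main() {
  // insert_subvector(v8i32 v, bitcast(v2i64 s), 4)
  //   -> bitcast to v8i32 of insert_subvector(v4i64 v', s, 2)
  Rescaled A = rescaleInsertIndex(32, 64, 8, 4);
  assert(A.Valid && A.NumElts == 4 && A.Idx == 2);

  // insert_subvector(v4i64 v, bitcast(v4i32 s), 1)
  //   -> bitcast to v4i64 of insert_subvector(v8i32 v', s, 2)
  Rescaled B = rescaleInsertIndex(64, 32, 4, 1);
  assert(B.Valid && B.NumElts == 8 && B.Idx == 2);

  // Misaligned index in the wide-scalar case: no rescaling possible.
  assert(!rescaleInsertIndex(32, 64, 8, 3).Valid);
  std::printf("rescaling checks passed\n");
}

The two branches correspond to the IdxVal * Scale and IdxVal / Scale cases in the combine below.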
@@ -41573,6 +41573,42 @@ static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
    }
  }

  // Push subvector bitcasts to the output, adjusting the index as we go.
  // insert_subvector(bitcast(v), bitcast(s), c1) ->
  //  bitcast(insert_subvector(v,s,c2))
  // TODO: Move this to generic - which only supports same scalar sizes.
  if ((Vec.isUndef() || Vec.getOpcode() == ISD::BITCAST) &&
      SubVec.getOpcode() == ISD::BITCAST) {
    SDValue VecSrc = peekThroughBitcasts(Vec);
    SDValue SubVecSrc = peekThroughBitcasts(SubVec);
    MVT VecSrcSVT = VecSrc.getSimpleValueType().getScalarType();
    MVT SubVecSrcSVT = SubVecSrc.getSimpleValueType().getScalarType();
    if (Vec.isUndef() || VecSrcSVT == SubVecSrcSVT) {
      MVT NewOpVT;
      SDValue NewIdx;
      unsigned NumElts = OpVT.getVectorNumElements();
      unsigned EltSizeInBits = OpVT.getScalarSizeInBits();
      if ((EltSizeInBits % SubVecSrcSVT.getSizeInBits()) == 0) {
        unsigned Scale = EltSizeInBits / SubVecSrcSVT.getSizeInBits();
        NewOpVT = MVT::getVectorVT(SubVecSrcSVT, NumElts * Scale);
        NewIdx = DAG.getIntPtrConstant(IdxVal * Scale, dl);
      } else if ((SubVecSrcSVT.getSizeInBits() % EltSizeInBits) == 0) {
        unsigned Scale = SubVecSrcSVT.getSizeInBits() / EltSizeInBits;
        if ((IdxVal % Scale) == 0) {
          NewOpVT = MVT::getVectorVT(SubVecSrcSVT, NumElts / Scale);
          NewIdx = DAG.getIntPtrConstant(IdxVal / Scale, dl);
        }
      }
      if (NewIdx && DAG.getTargetLoweringInfo().isOperationLegal(
                        ISD::INSERT_SUBVECTOR, NewOpVT)) {
        SDValue Res = DAG.getBitcast(NewOpVT, VecSrc);
        Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewOpVT, Res, SubVecSrc,
                          NewIdx);
        return DAG.getBitcast(OpVT, Res);
      }
    }
  }

  // Fold two 16-byte or 32-byte subvector loads into one 32-byte or 64-byte
  // load:
  // (insert_subvector (insert_subvector undef, (load16 addr), 0),
@@ -1185,8 +1185,7 @@ define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) {
; AVX1-NEXT: vpsrad $4, %xmm1, %xmm2
; AVX1-NEXT: vpsrad $2, %xmm1, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm1, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm1, %xmm1
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
; AVX1-NEXT: vpsrld $28, %xmm2, %xmm3
@@ -1198,8 +1197,7 @@ define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) {
; AVX1-NEXT: vpsrad $4, %xmm2, %xmm3
; AVX1-NEXT: vpsrad $2, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
@@ -1406,8 +1404,7 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
; AVX1-NEXT: vpsrad $4, %xmm2, %xmm3
; AVX1-NEXT: vpsrad $2, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $28, %xmm3, %xmm4
@@ -1419,8 +1416,7 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
; AVX1-NEXT: vpsrad $4, %xmm3, %xmm4
; AVX1-NEXT: vpsrad $2, %xmm3, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm3, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6,7]
@@ -1435,8 +1431,7 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
; AVX1-NEXT: vpsrad $4, %xmm2, %xmm3
; AVX1-NEXT: vpsrad $2, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm2, %xmm4
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm3
; AVX1-NEXT: vpsrld $28, %xmm3, %xmm4
@@ -1448,8 +1443,7 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) {
; AVX1-NEXT: vpsrad $4, %xmm3, %xmm4
; AVX1-NEXT: vpsrad $2, %xmm3, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm3, %xmm5
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vpsrad $3, %xmm3, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]

@@ -271,12 +271,11 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
; X64SSE4-NEXT: movaps {{.*#+}} xmm0 = [42,1,2,3]
; X64SSE4-NEXT: retq
;
; X32AVX1-LABEL: elt7_v8i32:
; X32AVX1: # %bb.0:
; X32AVX1-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
; X32AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32AVX1-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4,5,6],ymm0[7]
; X32AVX1-NEXT: retl
; X32AVX-LABEL: elt7_v8i32:
; X32AVX: # %bb.0:
; X32AVX-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4,5,6],ymm0[7]
; X32AVX-NEXT: retl
;
; X64AVX1-LABEL: elt7_v8i32:
; X64AVX1: # %bb.0:
@@ -286,12 +285,6 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
; X64AVX1-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4,5,6],ymm0[7]
; X64AVX1-NEXT: retq
;
; X32AVX2-LABEL: elt7_v8i32:
; X32AVX2: # %bb.0:
; X32AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32AVX2-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4,5,6],ymm0[7]
; X32AVX2-NEXT: retl
;
; X64AVX2-LABEL: elt7_v8i32:
; X64AVX2: # %bb.0:
; X64AVX2-NEXT: vmovd %edi, %xmm0
@@ -299,12 +292,6 @@ define <8 x i32> @elt7_v8i32(i32 %x) {
; X64AVX2-NEXT: vpblendd {{.*#+}} ymm0 = mem[0,1,2,3,4,5,6],ymm0[7]
; X64AVX2-NEXT: retq
;
; X32AVX512F-LABEL: elt7_v8i32:
; X32AVX512F: # %bb.0:
; X32AVX512F-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
; X32AVX512F-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4,5,6],ymm0[7]
; X32AVX512F-NEXT: retl
;
; X64AVX512F-LABEL: elt7_v8i32:
; X64AVX512F: # %bb.0:
; X64AVX512F-NEXT: vmovd %edi, %xmm0

@@ -4145,8 +4145,7 @@ define <16 x i16> @shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3(<16 x i16> %a,
; AVX2-SLOW-LABEL: shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,3,2,3,4,5,6,7]
; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %ymm0
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3:
@@ -4158,8 +4157,7 @@ define <16 x i16> @shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3(<16 x i16> %a,
; AVX512VL-SLOW-LABEL: shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3:
; AVX512VL-SLOW: # %bb.0:
; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,3,2,3,4,5,6,7]
; AVX512VL-SLOW-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX512VL-SLOW-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX512VL-SLOW-NEXT: vpbroadcastd %xmm0, %ymm0
; AVX512VL-SLOW-NEXT: retq
;
; AVX512VL-FAST-LABEL: shuffle_v16i16_u_u_u_u_u_u_u_u_3_3_3_3_3_3_3_3:

@@ -264,21 +264,21 @@ define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm4[2,3],ymm0[2,3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm5[2,3],ymm0[2,3]
; AVX1-NEXT: vmovaps %ymm2, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm3, (%rdi)
; AVX1-NEXT: vmovaps %ymm0, 96(%rdi)
; AVX1-NEXT: vmovaps %ymm1, 64(%rdi)
; AVX1-NEXT: vmovaps %ymm2, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm4, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1563,81 +1563,71 @@ define void @interleaved_store_vf64_i8_stride4(<64 x i8> %a, <64 x i8> %b, <64 x
; AVX1-LABEL: interleaved_store_vf64_i8_stride4:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX1-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm11
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm12
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
; AVX1-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm13
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm14
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm14 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15]
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
; AVX1-NEXT: vmovdqa %xmm8, %xmm2
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm11, %ymm11
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm8, %ymm5
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7]
; AVX1-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm15 = xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm14 = xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm15 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm15, %ymm5
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm5[2,3],ymm0[2,3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm10[4],xmm6[4],xmm10[5],xmm6[5],xmm10[6],xmm6[6],xmm10[7],xmm6[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm9[0],xmm2[0],xmm9[1],xmm2[1],xmm9[2],xmm2[2],xmm9[3],xmm2[3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm9, %ymm6
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm9, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm6[2,3],ymm2[2,3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm14 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm3[4],xmm13[5],xmm3[5],xmm13[6],xmm3[6],xmm13[7],xmm3[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
; AVX1-NEXT: vinsertf128 $1, %xmm15, %ymm0, %ymm9
; AVX1-NEXT: vinsertf128 $1, %xmm14, %ymm5, %ymm10
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm5[2,3],ymm9[2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm4
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 16-byte Folded Reload
; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm11, %ymm8
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm11[2,3],ymm2[2,3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm8, %ymm2
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm7
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm6[2,3],ymm3[2,3]
; AVX1-NEXT: vmovaps %ymm3, 224(%rdi)
; AVX1-NEXT: vmovaps %ymm2, 192(%rdi)
; AVX1-NEXT: vmovaps %ymm7, 160(%rdi)
; AVX1-NEXT: vmovaps %ymm8, 128(%rdi)
; AVX1-NEXT: vmovaps %ymm1, 96(%rdi)
; AVX1-NEXT: vmovaps %ymm5, 64(%rdi)
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm3[2,3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm11 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm11, %ymm7
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm7[2,3],ymm1[2,3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
; AVX1-NEXT: vinsertf128 $1, %xmm14, %ymm9, %ymm6
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm11, %ymm4
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm15, %ymm5
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm8, %ymm3
; AVX1-NEXT: vmovaps %ymm3, 160(%rdi)
; AVX1-NEXT: vmovaps %ymm5, 128(%rdi)
; AVX1-NEXT: vmovaps %ymm4, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm10, (%rdi)
; AVX1-NEXT: vmovaps %ymm6, (%rdi)
; AVX1-NEXT: vmovaps %ymm2, 224(%rdi)
; AVX1-NEXT: vmovaps %ymm0, 192(%rdi)
; AVX1-NEXT: vmovaps %ymm1, 96(%rdi)
; AVX1-NEXT: vmovaps %ymm10, 64(%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;