[X86][SSE] Start shuffle combining from ANY_EXTEND_VECTOR_INREG on SSE targets
We already do this on AVX (+ for ZERO_EXTEND_VECTOR_INREG), but this enables it for all SSE targets - we attempted something similar back at rL357057 but hit issues with the ZERO_EXTEND_VECTOR_INREG handling (PR41249).

I'm still looking at the vector-mul.ll regression - which is due to 32-bit targets performing the load as an f64, resulting in the shuffle combiner thinking it has to create a shuffle in the float domain.
commit 38a46e3c56
parent e7682e1add
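The *_EXTEND_VECTOR_INREG nodes widen the low elements of a vector into wider lanes. A reduced IR sketch of the kind of pattern this combine targets (my own illustrative example, assuming the usual lowering of a low-half zext through a *_EXTEND_VECTOR_INREG node; it is not a test from this patch):

define <2 x i64> @extend_low_lanes(<4 x i32> %x) {
  ; Widen the low two i32 lanes to i64. On X86 this typically becomes a
  ; ZERO_EXTEND_VECTOR_INREG node, or ANY_EXTEND_VECTOR_INREG when later
  ; users only read the low bits (e.g. a pmuludq).
  %lo = shufflevector <4 x i32> %x, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
  %ext = zext <2 x i32> %lo to <2 x i64>
  ret <2 x i64> %ext
}

Starting the recursive shuffle combine from such nodes lets them merge with surrounding shuffles instead of being lowered in isolation, as the X86ISelLowering change below does.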
@@ -48671,6 +48671,7 @@ static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
   EVT VT = N->getValueType(0);
   SDValue In = N->getOperand(0);
+  unsigned Opcode = N->getOpcode();
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   // Try to merge vector loads and extend_inreg to an extload.
@@ -48679,7 +48680,7 @@ static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
     auto *Ld = cast<LoadSDNode>(In);
     if (Ld->isSimple()) {
       MVT SVT = In.getSimpleValueType().getVectorElementType();
-      ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG
+      ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
                                  ? ISD::SEXTLOAD
                                  : ISD::ZEXTLOAD;
       EVT MemVT =
@@ -48687,8 +48688,7 @@ static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
       if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
         SDValue Load =
             DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
-                           Ld->getPointerInfo(), MemVT,
-                           Ld->getOriginalAlign(),
+                           Ld->getPointerInfo(), MemVT, Ld->getOriginalAlign(),
                            Ld->getMemOperand()->getFlags());
         DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
         return Load;
@@ -48697,8 +48697,9 @@ static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
   }
 
   // Attempt to combine as a shuffle.
-  // TODO: SSE41 support
-  if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
+  // TODO: SSE ZERO_EXTEND_VECTOR_INREG support.
+  if (Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
+      (Opcode == ISD::ZERO_EXTEND_VECTOR_INREG && Subtarget.hasAVX())) {
    SDValue Op(N, 0);
    if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
      if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
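The new gate accepts ANY_EXTEND_VECTOR_INREG on every SSE target but still requires AVX for ZERO_EXTEND_VECTOR_INREG, whose SSE handling is what went wrong in PR41249. The reason an extend-in-reg can be fed to the shuffle combiner at all is that, on a little-endian x86 target, it is equivalent to a byte shuffle. A sketch of that equivalence in IR notation (my own illustration; the DAG node has no direct IR spelling):

define <8 x i16> @anyext_inreg_as_shuffle(<16 x i8> %x) {
  ; Place each of the low eight source bytes in the low byte of an i16
  ; lane and leave the high byte undefined - the same lane layout that
  ; ISD::ANY_EXTEND_VECTOR_INREG produces on a little-endian target.
  %bytes = shufflevector <16 x i8> %x, <16 x i8> undef,
      <16 x i32> <i32 0, i32 undef, i32 1, i32 undef, i32 2, i32 undef,
                  i32 3, i32 undef, i32 4, i32 undef, i32 5, i32 undef,
                  i32 6, i32 undef, i32 7, i32 undef>
  %wide = bitcast <16 x i8> %bytes to <8 x i16>
  ret <8 x i16> %wide
}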
@@ -91,9 +91,9 @@ define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1)
 define <8 x i64> @combine_zext_pmuludq_256(<8 x i32> %a) {
 ; SSE-LABEL: combine_zext_pmuludq_256:
 ; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,1,3,3]
 ; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
 ; SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [715827883,715827883]
 ; SSE-NEXT: pmuludq %xmm4, %xmm0
@@ -137,8 +137,8 @@ define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
 ; SSE2-NEXT: pmuludq %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,1,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,3,3]
 ; SSE2-NEXT: pmuludq %xmm3, %xmm1
 ; SSE2-NEXT: movdqa %xmm2, %xmm0
 ; SSE2-NEXT: retq
@@ -148,8 +148,8 @@ define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
 ; SSE42-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
 ; SSE42-NEXT: pmuludq %xmm3, %xmm2
-; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3]
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,1,3,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
 ; SSE42-NEXT: pmuludq %xmm3, %xmm1
 ; SSE42-NEXT: movdqa %xmm2, %xmm0
 ; SSE42-NEXT: retq
@@ -1033,10 +1033,10 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
 ; SSE41-LABEL: mul_v4i64_zero_upper:
 ; SSE41: # %bb.0: # %entry
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
 ; SSE41-NEXT: pmuludq %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
 ; SSE41-NEXT: pmuludq %xmm3, %xmm1
 ; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
 ; SSE41-NEXT: retq
@@ -1186,17 +1186,17 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
 ; SSE41-LABEL: mul_v8i64_zero_upper:
 ; SSE41: # %bb.0: # %entry
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,2,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,1,3,3]
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,2,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,1,3,3]
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
 ; SSE41-NEXT: pmuludq %xmm4, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
 ; SSE41-NEXT: pmuludq %xmm5, %xmm1
 ; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
 ; SSE41-NEXT: pmuludq %xmm6, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,2,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,3,3]
 ; SSE41-NEXT: pmuludq %xmm7, %xmm2
 ; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
 ; SSE41-NEXT: retq
@@ -1311,11 +1311,11 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
 ; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
 ; SSE41-NEXT: pmovsxwq %xmm3, %xmm6
 ; SSE41-NEXT: pmovsxwq %xmm0, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,1,3,3]
 ; SSE41-NEXT: pmuldq %xmm4, %xmm3
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
 ; SSE41-NEXT: pmuldq %xmm5, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,2,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,1,3,3]
 ; SSE41-NEXT: pmuldq %xmm6, %xmm4
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
 ; SSE41-NEXT: pmuldq %xmm7, %xmm0
@@ -30,20 +30,19 @@ define <4 x i64> @PR45808(<4 x i64> %0, <4 x i64> %1) {
 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
 ; SSE2-NEXT: por %xmm4, %xmm5
 ; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE2-NEXT: movaps %xmm5, %xmm6
-; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,1],xmm5[3,3]
-; SSE2-NEXT: psllq $63, %xmm6
-; SSE2-NEXT: psrad $31, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2-NEXT: pand %xmm6, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm6
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1,1,3]
-; SSE2-NEXT: xorps %xmm4, %xmm5
+; SSE2-NEXT: movaps {{.*#+}} xmm4 = <1,1,u,0>
+; SSE2-NEXT: xorps %xmm5, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,1,3,3]
 ; SSE2-NEXT: psllq $63, %xmm5
 ; SSE2-NEXT: psrad $31, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pandn %xmm3, %xmm5
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,1,1,3]
+; SSE2-NEXT: psllq $63, %xmm3
+; SSE2-NEXT: psrad $31, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
 ; SSE2-NEXT: pand %xmm3, %xmm0
 ; SSE2-NEXT: pandn %xmm2, %xmm3
 ; SSE2-NEXT: por %xmm3, %xmm0
@@ -835,17 +835,15 @@ define <16 x i8> @test_remconstant_16i8(<16 x i8> %a) nounwind {
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm2
 ; SSE41-NEXT: psrlw $8, %xmm2
+; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT: pand %xmm1, %xmm2
 ; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm3
 ; SSE41-NEXT: psrlw $8, %xmm3
+; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm3
+; SSE41-NEXT: pand %xmm1, %xmm3
 ; SSE41-NEXT: packuswb %xmm2, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pand %xmm3, %xmm1
-; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm2
-; SSE41-NEXT: pand %xmm3, %xmm2
-; SSE41-NEXT: packuswb %xmm2, %xmm1
-; SSE41-NEXT: psubb %xmm1, %xmm0
+; SSE41-NEXT: psubb %xmm3, %xmm0
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: test_remconstant_16i8:
@@ -1904,7 +1904,8 @@ define <2 x i64> @mul_v2i64_zext_cross_bb(<2 x i32>* %in, <2 x i32>* %y) {
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
-; X86-NEXT: pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0,0,1,1]
 ; X86-NEXT: pmuludq %xmm1, %xmm0
 ; X86-NEXT: retl
 ;
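This hunk shows the 32-bit regression the commit message refers to: the second zero-extending load is now a plain movq followed by a separate widening unpack instead of a single pmovzxdq. Judging from the define line in the hunk header, the test IR is roughly the following (a reconstruction, not copied from the test file); the point of the test is that the first load and extend sit in a different basic block from the multiply:

define <2 x i64> @mul_v2i64_zext_cross_bb(<2 x i32>* %in, <2 x i32>* %y) {
  %a = load <2 x i32>, <2 x i32>* %in
  %b = zext <2 x i32> %a to <2 x i64>
  br label %next

next:
  %c = load <2 x i32>, <2 x i32>* %y
  %d = zext <2 x i32> %c to <2 x i64>
  %e = mul <2 x i64> %b, %d
  ret <2 x i64> %e
}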
@@ -1567,14 +1567,14 @@ define i8 @test_v4i8(<4 x i8> %a0) {
 ; SSE2-LABEL: test_v4i8:
 ; SSE2: # %bb.0:
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
 ; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pmullw %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
 ; SSE2-NEXT: # kill: def $al killed $al killed $eax
 ; SSE2-NEXT: retq
 ;
@@ -1618,16 +1618,18 @@ define i8 @test_v8i8(<8 x i8> %a0) {
 define i8 @test_v8i8(<8 x i8> %a0) {
 ; SSE2-LABEL: test_v8i8:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,2,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,2,3,0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
 ; SSE2-NEXT: pmullw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm1, %xmm1
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pmullw %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
 ; SSE2-NEXT: # kill: def $al killed $al killed $eax
 ; SSE2-NEXT: retq
 ;
@@ -1637,13 +1639,11 @@ define i8 @test_v8i8(<8 x i8> %a0) {
 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; SSE41-NEXT: pmullw %xmm0, %xmm1
 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
 ; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pmullw %xmm0, %xmm1
+; SSE41-NEXT: movd %xmm1, %eax
 ; SSE41-NEXT: # kill: def $al killed $al killed $eax
 ; SSE41-NEXT: retq
 ;
@@ -1710,23 +1710,15 @@ define i8 @test_v16i8(<16 x i8> %a0) {
 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE41-NEXT: pand %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: packuswb %xmm3, %xmm0
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: pmullw %xmm2, %xmm0
-; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $8, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
 ; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $8, %xmm1
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: pmullw %xmm0, %xmm1
+; SSE41-NEXT: movd %xmm1, %eax
 ; SSE41-NEXT: # kill: def $al killed $al killed $eax
 ; SSE41-NEXT: retq
 ;
@@ -1900,30 +1892,17 @@ define i8 @test_v32i8(<32 x i8> %a0) {
 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pand %xmm1, %xmm4
-; SSE41-NEXT: pmullw %xmm2, %xmm3
-; SSE41-NEXT: pand %xmm1, %xmm3
-; SSE41-NEXT: packuswb %xmm4, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: pmullw %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE41-NEXT: pand %xmm1, %xmm2
-; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: packuswb %xmm3, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: pmullw %xmm0, %xmm2
-; SSE41-NEXT: pand %xmm2, %xmm1
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,1,1]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT: pmullw %xmm2, %xmm0
+; SSE41-NEXT: pmullw %xmm3, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $8, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
 ; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $8, %xmm1
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: pmullw %xmm0, %xmm1
+; SSE41-NEXT: movd %xmm1, %eax
 ; SSE41-NEXT: # kill: def $al killed $al killed $eax
 ; SSE41-NEXT: retq
 ;
@@ -2139,31 +2118,13 @@ define i8 @test_v64i8(<64 x i8> %a0) {
 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE41-NEXT: pmullw %xmm2, %xmm0
-; SSE41-NEXT: pmullw %xmm3, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb %xmm2, %xmm1
 ; SSE41-NEXT: pmullw %xmm4, %xmm5
-; SSE41-NEXT: pshufb %xmm2, %xmm5
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pand %xmm2, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: pmullw %xmm4, %xmm1
-; SSE41-NEXT: pand %xmm2, %xmm1
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE41-NEXT: pand %xmm2, %xmm1
-; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: pand %xmm1, %xmm2
-; SSE41-NEXT: packuswb %xmm3, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; SSE41-NEXT: pmullw %xmm3, %xmm5
+; SSE41-NEXT: pmullw %xmm0, %xmm5
+; SSE41-NEXT: pmullw %xmm1, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
+; SSE41-NEXT: pmullw %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SSE41-NEXT: pmullw %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: psrld $8, %xmm0
@@ -2446,14 +2407,14 @@ define i8 @test_v128i8(<128 x i8> %a0) {
 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm9 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE41-NEXT: pmullw %xmm6, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm10 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE41-NEXT: pmullw %xmm2, %xmm4
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm10 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm11 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE41-NEXT: pmullw %xmm7, %xmm3
 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
@@ -2464,43 +2425,17 @@ define i8 @test_v128i8(<128 x i8> %a0) {
 ; SSE41-NEXT: pmullw %xmm5, %xmm1
 ; SSE41-NEXT: pmullw %xmm4, %xmm1
 ; SSE41-NEXT: pmullw %xmm0, %xmm1
-; SSE41-NEXT: pmullw %xmm7, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb %xmm0, %xmm3
-; SSE41-NEXT: pmullw %xmm11, %xmm6
-; SSE41-NEXT: pshufb %xmm0, %xmm6
-; SSE41-NEXT: pmullw %xmm10, %xmm2
-; SSE41-NEXT: pshufb %xmm0, %xmm2
 ; SSE41-NEXT: pmullw %xmm8, %xmm9
-; SSE41-NEXT: pshufb %xmm0, %xmm9
-; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero,xmm9[4],zero,xmm9[5],zero,xmm9[6],zero,xmm9[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: pmullw %xmm5, %xmm2
-; SSE41-NEXT: pshufb %xmm0, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: pmullw %xmm5, %xmm3
-; SSE41-NEXT: pshufb %xmm0, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: pmullw %xmm3, %xmm2
-; SSE41-NEXT: pand %xmm4, %xmm2
-; SSE41-NEXT: packuswb %xmm0, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: pand %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: packuswb %xmm2, %xmm0
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: pmullw %xmm1, %xmm0
-; SSE41-NEXT: pand %xmm0, %xmm4
-; SSE41-NEXT: packuswb %xmm2, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; SSE41-NEXT: pmullw %xmm9, %xmm6
+; SSE41-NEXT: pmullw %xmm11, %xmm2
+; SSE41-NEXT: pmullw %xmm2, %xmm7
+; SSE41-NEXT: pmullw %xmm7, %xmm3
+; SSE41-NEXT: pmullw %xmm6, %xmm3
+; SSE41-NEXT: pmullw %xmm1, %xmm3
+; SSE41-NEXT: pmullw %xmm10, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; SSE41-NEXT: pmullw %xmm3, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SSE41-NEXT: pmullw %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: psrld $8, %xmm0
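The test_vNi8 functions above are multiply reductions over i8 vectors; starting the shuffle combine from ANY_EXTEND_VECTOR_INREG lets their reduction trees drop several pshufb/pmovzxbw/packuswb round-trips between the pmullw steps. Each test presumably wraps the reduction intrinsic of this era, along these lines (a hypothetical reconstruction, not copied from the test file):

define i8 @test_v8i8(<8 x i8> %a0) {
  ; Multiply all eight i8 lanes together into a single scalar.
  %r = call i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8> %a0)
  ret i8 %r
}
declare i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8>)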
@@ -5058,10 +5058,10 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
 ; SSE-LABEL: mul_add_const_v4i64_v4i32:
 ; SSE: # %bb.0:
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,3,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
 ; SSE-NEXT: pmuludq %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,3,3]
 ; SSE-NEXT: pmuludq %xmm3, %xmm1
 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
@@ -5084,10 +5084,10 @@ define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwin
 ; SSE-LABEL: mul_add_self_v4i64_v4i32:
 ; SSE: # %bb.0:
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,3,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
 ; SSE-NEXT: pmuludq %xmm2, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,3,3]
 ; SSE-NEXT: pmuludq %xmm3, %xmm1
 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; SSE-NEXT: paddd %xmm0, %xmm0
@@ -5110,10 +5110,10 @@ define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nou
 ; SSE-LABEL: mul_add_multiuse_v4i64_v4i32:
 ; SSE: # %bb.0:
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,3,3]
 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,1,3]
 ; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,3,3]
 ; SSE-NEXT: pmuludq %xmm3, %xmm1
 ; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm1[0,2]
 ; SSE-NEXT: paddd %xmm4, %xmm0