[X86][XOP] Added support for the lowering of 128-bit vector shifts to XOP shift instructions
The XOP shifts just have logical/arithmetic versions, and the left/right shifts are controlled by whether the shift amount is positive or negative. Because of this I've added new X86ISD nodes instead of trying to force them to use the existing shift nodes.

Additionally, Excavator cores (bdver4) support XOP and AVX2 - meaning that it should use the AVX2 shifts when it can and fall back to XOP in other cases.

Differential Revision: http://reviews.llvm.org/D8690

llvm-svn: 248878
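To make the sign-controlled direction concrete, here is a minimal scalar model of one lane of these shifts (an illustrative sketch only, not LLVM code; the function name is hypothetical, and it assumes the amount's magnitude is smaller than the element width): a positive per-element amount shifts left, a negative one shifts right, which is why the lowering below emits a `0 - Amt` subtraction before using VPSHL/VPSHA for SRL/SRA.

    #include <cstdint>
    #include <cstdio>

    // One lane of the XOP shift semantics: the sign of the per-element amount
    // selects the direction; 'Arithmetic' distinguishes VPSHA from VPSHL.
    // (Left-shifting negative values is only well-defined since C++20; the
    // example below exercises only the right-shift paths.)
    static int64_t xopShiftLane(int64_t Val, int8_t Amt, bool Arithmetic) {
      if (Amt >= 0)
        return Val << Amt;                     // positive amount: shift left
      if (Arithmetic)
        return Val >> -Amt;                    // VPSHA: arithmetic shift right
      return (int64_t)((uint64_t)Val >> -Amt); // VPSHL: logical shift right
    }

    int main() {
      // 'ashr x, 3' is lowered as VPSHA with a negated (splatted) amount of -3.
      printf("%lld\n", (long long)xopShiftLane(-64, -3, true));  // -8
      printf("%lld\n", (long long)xopShiftLane(-64, -3, false)); // 2305843009213693944
    }

The DAG sequence in the patch matches this model: for ISD::SRL/ISD::SRA the amount vector is first subtracted from zero, then a single X86ISD::VPSHL or X86ISD::VPSHA node is emitted.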
parent 894a1720db
commit d3e938c0f5
@@ -17893,18 +17893,28 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,

    // i64 SRA needs to be performed as partial shifts.
    if ((VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
        Op.getOpcode() == ISD::SRA)
        Op.getOpcode() == ISD::SRA && !Subtarget->hasXOP())
      return ArithmeticShiftRight64(ShiftAmt);

    if (VT == MVT::v16i8 || (Subtarget->hasInt256() && VT == MVT::v32i8)) {
      unsigned NumElts = VT.getVectorNumElements();
      MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

      if (Op.getOpcode() == ISD::SHL) {
        // Simple i8 add case
        if (ShiftAmt == 1)
          return DAG.getNode(ISD::ADD, dl, VT, R, R);
      // Simple i8 add case
      if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
        return DAG.getNode(ISD::ADD, dl, VT, R, R);

      // ashr(R, 7) === cmp_slt(R, 0)
      if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
        SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
        return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
      }

      // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
      if (VT == MVT::v16i8 && Subtarget->hasXOP())
        return SDValue();

      if (Op.getOpcode() == ISD::SHL) {
        // Make a large shift.
        SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT,
                                                 R, ShiftAmt, DAG);

@@ -17927,12 +17937,6 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
                           DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
      }
      if (Op.getOpcode() == ISD::SRA) {
        if (ShiftAmt == 7) {
          // ashr(R, 7) === cmp_slt(R, 0)
          SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
          return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
        }

        // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
        SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
        SmallVector<SDValue, 32> V(NumElts,

@@ -17949,7 +17953,7 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
  }

  // Special case in 32-bit mode, where i64 is expanded into high and low parts.
  if (!Subtarget->is64Bit() &&
  if (!Subtarget->is64Bit() && !Subtarget->hasXOP() &&
      (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64))) {

    // Peek through any splat that was introduced for i64 shift vectorization.

@@ -18103,11 +18107,26 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
    return V;

  if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
    return V;
    return V;

  if (SupportedVectorVarShift(VT, Subtarget, Op.getOpcode()))
    return Op;

  // XOP has 128-bit variable logical/arithmetic shifts.
  // +ve/-ve Amt = shift left/right.
  if (Subtarget->hasXOP() &&
      (VT == MVT::v2i64 || VT == MVT::v4i32 ||
       VT == MVT::v8i16 || VT == MVT::v16i8)) {
    if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) {
      SDValue Zero = getZeroVector(VT, Subtarget, DAG, dl);
      Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
    }
    if (Op.getOpcode() == ISD::SHL || Op.getOpcode() == ISD::SRL)
      return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
    if (Op.getOpcode() == ISD::SRA)
      return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
  }

  // 2i64 vector logical shifts can efficiently avoid scalarization - do the
  // shifts per-lane and then shuffle the partial results back together.
  if (VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) {

@@ -18296,7 +18315,8 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
    return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
  }

  if (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget->hasInt256())) {
  if (VT == MVT::v16i8 ||
      (VT == MVT::v32i8 && Subtarget->hasInt256() && !Subtarget->hasXOP())) {
    MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
    unsigned ShiftOpcode = Op->getOpcode();

@@ -18416,7 +18436,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
                       DAG.getNode(Op.getOpcode(), dl, ExtVT, R, Amt));
  }

  if (Subtarget->hasInt256() && VT == MVT::v16i16) {
  if (Subtarget->hasInt256() && !Subtarget->hasXOP() && VT == MVT::v16i16) {
    MVT ExtVT = MVT::v8i32;
    SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
    SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Amt, Z);

@@ -19820,6 +19840,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  case X86ISD::RDSEED: return "X86ISD::RDSEED";
  case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
  case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
  case X86ISD::VPSHA: return "X86ISD::VPSHA";
  case X86ISD::VPSHL: return "X86ISD::VPSHL";
  case X86ISD::FMADD: return "X86ISD::FMADD";
  case X86ISD::FMSUB: return "X86ISD::FMSUB";
  case X86ISD::FNMADD: return "X86ISD::FNMADD";
@@ -410,6 +410,9 @@ namespace llvm {
      /// SSE4A Extraction and Insertion.
      EXTRQI, INSERTQI,

      // XOP arithmetic/logical shifts
      VPSHA, VPSHL,

      // Vector multiply packed unsigned doubleword integers
      PMULUDQ,
      // Vector multiply packed signed doubleword integers
@@ -215,6 +215,13 @@ def X86vshli : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;

def X86vpshl : SDNode<"X86ISD::VPSHL",
                      SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisVec<2>]>>;
def X86vpsha : SDNode<"X86ISD::VPSHA",
                      SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                           SDTCisVec<2>]>>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
@@ -83,7 +83,42 @@ let ExeDomain = SSEPackedDouble in {
  defm VFRCZPD : xop2op256<0x81, "vfrczpd", int_x86_xop_vfrcz_pd_256, loadv4f64>;
}

multiclass xop3op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
multiclass xop3op<bits<8> opc, string OpcodeStr, SDNode OpNode,
                  ValueType vt128> {
  def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
           [(set VR128:$dst,
              (vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
           XOP_4VOp3, Sched<[WriteVarVecShift]>;
  def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
           [(set VR128:$dst,
              (vt128 (OpNode (vt128 VR128:$src1),
                     (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
           XOP_4V, VEX_W, Sched<[WriteVarVecShift, ReadAfterLd]>;
  def mr : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
           (ins i128mem:$src1, VR128:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
           [(set VR128:$dst,
              (vt128 (OpNode (vt128 (bitconvert (loadv2i64 addr:$src1))),
                     (vt128 VR128:$src2))))]>,
           XOP_4VOp3, Sched<[WriteVarVecShift, ReadAfterLd]>;
}

let ExeDomain = SSEPackedInt in {
  defm VPSHAB : xop3op<0x98, "vpshab", X86vpsha, v16i8>;
  defm VPSHAD : xop3op<0x9A, "vpshad", X86vpsha, v4i32>;
  defm VPSHAQ : xop3op<0x9B, "vpshaq", X86vpsha, v2i64>;
  defm VPSHAW : xop3op<0x99, "vpshaw", X86vpsha, v8i16>;
  defm VPSHLB : xop3op<0x94, "vpshlb", X86vpshl, v16i8>;
  defm VPSHLD : xop3op<0x96, "vpshld", X86vpshl, v4i32>;
  defm VPSHLQ : xop3op<0x97, "vpshlq", X86vpshl, v2i64>;
  defm VPSHLW : xop3op<0x95, "vpshlw", X86vpshl, v8i16>;
}

multiclass xop3op_int<bits<8> opc, string OpcodeStr, Intrinsic Int> {
  def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),

@@ -103,18 +138,10 @@ multiclass xop3op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
}

let ExeDomain = SSEPackedInt in {
  defm VPSHLW : xop3op<0x95, "vpshlw", int_x86_xop_vpshlw>;
  defm VPSHLQ : xop3op<0x97, "vpshlq", int_x86_xop_vpshlq>;
  defm VPSHLD : xop3op<0x96, "vpshld", int_x86_xop_vpshld>;
  defm VPSHLB : xop3op<0x94, "vpshlb", int_x86_xop_vpshlb>;
  defm VPSHAW : xop3op<0x99, "vpshaw", int_x86_xop_vpshaw>;
  defm VPSHAQ : xop3op<0x9B, "vpshaq", int_x86_xop_vpshaq>;
  defm VPSHAD : xop3op<0x9A, "vpshad", int_x86_xop_vpshad>;
  defm VPSHAB : xop3op<0x98, "vpshab", int_x86_xop_vpshab>;
  defm VPROTW : xop3op<0x91, "vprotw", int_x86_xop_vprotw>;
  defm VPROTQ : xop3op<0x93, "vprotq", int_x86_xop_vprotq>;
  defm VPROTD : xop3op<0x92, "vprotd", int_x86_xop_vprotd>;
  defm VPROTB : xop3op<0x90, "vprotb", int_x86_xop_vprotb>;
  defm VPROTW : xop3op_int<0x91, "vprotw", int_x86_xop_vprotw>;
  defm VPROTQ : xop3op_int<0x93, "vprotq", int_x86_xop_vprotq>;
  defm VPROTD : xop3op_int<0x92, "vprotd", int_x86_xop_vprotd>;
  defm VPROTB : xop3op_int<0x90, "vprotb", int_x86_xop_vprotb>;
}

multiclass xop3opimm<bits<8> opc, string OpcodeStr, Intrinsic Int> {
@@ -1661,7 +1661,15 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
  X86_INTRINSIC_DATA(ssse3_pshuf_b_128, INTR_TYPE_2OP, X86ISD::PSHUFB, 0),
  X86_INTRINSIC_DATA(ssse3_psign_b_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
  X86_INTRINSIC_DATA(ssse3_psign_d_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
  X86_INTRINSIC_DATA(ssse3_psign_w_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0)
  X86_INTRINSIC_DATA(ssse3_psign_w_128, INTR_TYPE_2OP, X86ISD::PSIGN, 0),
  X86_INTRINSIC_DATA(xop_vpshab, INTR_TYPE_2OP, X86ISD::VPSHA, 0),
  X86_INTRINSIC_DATA(xop_vpshad, INTR_TYPE_2OP, X86ISD::VPSHA, 0),
  X86_INTRINSIC_DATA(xop_vpshaq, INTR_TYPE_2OP, X86ISD::VPSHA, 0),
  X86_INTRINSIC_DATA(xop_vpshaw, INTR_TYPE_2OP, X86ISD::VPSHA, 0),
  X86_INTRINSIC_DATA(xop_vpshlb, INTR_TYPE_2OP, X86ISD::VPSHL, 0),
  X86_INTRINSIC_DATA(xop_vpshld, INTR_TYPE_2OP, X86ISD::VPSHL, 0),
  X86_INTRINSIC_DATA(xop_vpshlq, INTR_TYPE_2OP, X86ISD::VPSHL, 0),
  X86_INTRINSIC_DATA(xop_vpshlw, INTR_TYPE_2OP, X86ISD::VPSHL, 0)
};

/*
@@ -140,6 +140,12 @@ int X86TTIImpl::getArithmeticInstrCost(
    { ISD::SRA, MVT::v8i64, 1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to
    // customize them to detect the cases where shift amount is a scalar one.

@@ -153,7 +159,59 @@ int X86TTIImpl::getArithmeticInstrCost(
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> XOPCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8, 1 },
    { ISD::SRL, MVT::v16i8, 2 },
    { ISD::SRA, MVT::v16i8, 2 },
    { ISD::SHL, MVT::v8i16, 1 },
    { ISD::SRL, MVT::v8i16, 2 },
    { ISD::SRA, MVT::v8i16, 2 },
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 2 },
    { ISD::SRA, MVT::v4i32, 2 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 2 },
    { ISD::SRA, MVT::v2i64, 2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8, 2 },
    { ISD::SRL, MVT::v32i8, 4 },
    { ISD::SRA, MVT::v32i8, 4 },
    { ISD::SHL, MVT::v16i16, 2 },
    { ISD::SRL, MVT::v16i16, 4 },
    { ISD::SRA, MVT::v16i16, 4 },
    { ISD::SHL, MVT::v8i32, 2 },
    { ISD::SRL, MVT::v8i32, 4 },
    { ISD::SRA, MVT::v8i32, 4 },
    { ISD::SHL, MVT::v4i64, 2 },
    { ISD::SRL, MVT::v4i64, 4 },
    { ISD::SRA, MVT::v4i64, 4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    int Idx = CostTableLookup(XOPCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * XOPCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CustomCostTable[] = {
    { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
    { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

@@ -176,23 +234,11 @@ int X86TTIImpl::getArithmeticInstrCost(
    { ISD::UDIV, MVT::v4i64, 4*20 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }
  // Look for AVX2 lowering tricks.
  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    int Idx = CostTableLookup(AVX2CustomCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
      return LT.first * AVX2CustomCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
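As an aside on the cost-model hunks above: each table in getArithmeticInstrCost is consulted with the same lookup idiom. Below is a simplified, self-contained sketch of that idiom (LLVM's real CostTblEntry/CostTableLookup are templated helpers; the enum and struct names here are simplified stand-ins for illustration):

    #include <cstdio>

    // Simplified model of the CostTableLookup pattern used in the patch.
    enum Op { SHL, SRL, SRA };
    enum Ty { v16i8, v8i16, v4i32, v2i64 };

    struct CostTblEntry { Op ISD; Ty Type; int Cost; };

    static const CostTblEntry XOPCostTable[] = {
      // 128-bit XOP shifts: lefts cost 1, rights pay an extra negation.
      { SHL, v4i32, 1 }, { SRL, v4i32, 2 }, { SRA, v4i32, 2 },
    };

    // Linear scan returning -1 on no match, mirroring the
    // "if (Idx != -1) return LT.first * Table[Idx].Cost;" checks above.
    static int costTableLookup(const CostTblEntry *Tbl, int N, Op ISD, Ty T) {
      for (int I = 0; I < N; ++I)
        if (Tbl[I].ISD == ISD && Tbl[I].Type == T)
          return I;
      return -1;
    }

    int main() {
      int N = sizeof(XOPCostTable) / sizeof(XOPCostTable[0]);
      int Idx = costTableLookup(XOPCostTable, N, SRA, v4i32);
      if (Idx != -1)
        printf("cost = %d\n", XOPCostTable[Idx].Cost); // cost = 2
    }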
@@ -2,6 +2,8 @@
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=SSE41
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver2 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=XOP -check-prefix=XOPAVX
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=bdver4 -cost-model -analyze | FileCheck %s -check-prefix=CHECK -check-prefix=XOP -check-prefix=XOPAVX2


; Verify the cost of vector shift left instructions.

@@ -39,6 +41,7 @@ define <4 x i32> @test3(<4 x i32> %a) {
; SSE41: Found an estimated cost of 1 for instruction: %shl
; AVX: Found an estimated cost of 1 for instruction: %shl
; AVX2: Found an estimated cost of 1 for instruction: %shl
; XOP: Found an estimated cost of 1 for instruction: %shl


define <4 x i32> @test4(<4 x i32> %a) {

@@ -50,6 +53,7 @@ define <4 x i32> @test4(<4 x i32> %a) {
; SSE41: Found an estimated cost of 1 for instruction: %shl
; AVX: Found an estimated cost of 1 for instruction: %shl
; AVX2: Found an estimated cost of 1 for instruction: %shl
; XOP: Found an estimated cost of 1 for instruction: %shl


; On AVX2 we are able to lower the following shift into a single

@@ -66,6 +70,7 @@ define <2 x i64> @test5(<2 x i64> %a) {
; SSE41: Found an estimated cost of 4 for instruction: %shl
; AVX: Found an estimated cost of 4 for instruction: %shl
; AVX2: Found an estimated cost of 1 for instruction: %shl
; XOP: Found an estimated cost of 1 for instruction: %shl


; v16i16 and v8i32 shift left by non-uniform constant are lowered into

@@ -90,6 +95,8 @@ define <16 x i16> @test6(<16 x i16> %a) {
; SSE41: Found an estimated cost of 2 for instruction: %shl
; AVX: Found an estimated cost of 4 for instruction: %shl
; AVX2: Found an estimated cost of 1 for instruction: %shl
; XOPAVX: Found an estimated cost of 2 for instruction: %shl
; XOPAVX2: Found an estimated cost of 1 for instruction: %shl


; With SSE2 and SSE4.1, the vector shift cost for 'test7' is twice

@@ -105,6 +112,8 @@ define <8 x i32> @test7(<8 x i32> %a) {
; SSE41: Found an estimated cost of 2 for instruction: %shl
; AVX: Found an estimated cost of 4 for instruction: %shl
; AVX2: Found an estimated cost of 1 for instruction: %shl
; XOPAVX: Found an estimated cost of 2 for instruction: %shl
; XOPAVX2: Found an estimated cost of 1 for instruction: %shl


; On AVX2 we are able to lower the following shift into a single

@@ -121,6 +130,8 @@ define <4 x i64> @test8(<4 x i64> %a) {
; SSE41: Found an estimated cost of 8 for instruction: %shl
; AVX: Found an estimated cost of 8 for instruction: %shl
; AVX2: Found an estimated cost of 1 for instruction: %shl
; XOPAVX: Found an estimated cost of 2 for instruction: %shl
; XOPAVX2: Found an estimated cost of 1 for instruction: %shl


; Same as 'test6', with the difference that the cost is double.

@@ -134,6 +145,8 @@ define <32 x i16> @test9(<32 x i16> %a) {
; SSE41: Found an estimated cost of 4 for instruction: %shl
; AVX: Found an estimated cost of 8 for instruction: %shl
; AVX2: Found an estimated cost of 2 for instruction: %shl
; XOPAVX: Found an estimated cost of 4 for instruction: %shl
; XOPAVX2: Found an estimated cost of 2 for instruction: %shl


; Same as 'test7', except that now the cost is double.

@@ -147,6 +160,8 @@ define <16 x i32> @test10(<16 x i32> %a) {
; SSE41: Found an estimated cost of 4 for instruction: %shl
; AVX: Found an estimated cost of 8 for instruction: %shl
; AVX2: Found an estimated cost of 2 for instruction: %shl
; XOPAVX: Found an estimated cost of 4 for instruction: %shl
; XOPAVX2: Found an estimated cost of 2 for instruction: %shl


; On AVX2 we are able to lower the following shift into a sequence of

@@ -163,5 +178,5 @@ define <8 x i64> @test11(<8 x i64> %a) {
; SSE41: Found an estimated cost of 16 for instruction: %shl
; AVX: Found an estimated cost of 16 for instruction: %shl
; AVX2: Found an estimated cost of 2 for instruction: %shl
; XOPAVX: Found an estimated cost of 4 for instruction: %shl
; XOPAVX2: Found an estimated cost of 2 for instruction: %shl
@@ -2,6 +2,8 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2

@@ -67,6 +69,13 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpsubq %xmm3, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]

@@ -155,6 +164,18 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm1, %xmm2

@@ -276,6 +297,13 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1

@@ -437,6 +465,13 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: var_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]

@@ -521,6 +556,22 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v2i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v2i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero

@@ -557,6 +608,13 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOP-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2

@@ -591,6 +649,13 @@ define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movd %xmm1, %eax

@@ -763,6 +828,22 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]

@@ -875,6 +956,13 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOP-LABEL: constant_shift_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]

@@ -941,6 +1029,16 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1

@@ -1026,6 +1124,13 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: constant_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1

@@ -1172,6 +1277,13 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]

@@ -1271,6 +1383,13 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1

@@ -1295,6 +1414,11 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpsrad $5, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vpsrad $5, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrad $5, %xmm0

@@ -1314,6 +1438,11 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpsraw $3, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpsraw $3, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psraw $3, %xmm0

@@ -1341,6 +1470,13 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlw $3, %xmm0
@@ -1,5 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2

;
; Variable Shifts

@@ -40,6 +42,27 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshaq %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <4 x i64> %a, %b
  ret <4 x i64> %shift
}

@@ -79,6 +102,23 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshad %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <8 x i32> %a, %b
  ret <8 x i32> %shift
}

@@ -132,6 +172,30 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshaw %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; XOPAVX2-NEXT: vpshaw %xmm2, %xmm4, %xmm2
; XOPAVX2-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <16 x i16> %a, %b
  ret <16 x i16> %shift
}

@@ -219,6 +283,30 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshab %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; XOPAVX2-NEXT: vpshab %xmm2, %xmm4, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <32 x i8> %a, %b
  ret <32 x i8> %shift
}

@@ -250,6 +338,26 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
  %shift = ashr <4 x i64> %a, %splat
  ret <4 x i64> %shift

@@ -272,6 +380,23 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpsrad %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOPAVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
  %shift = ashr <8 x i32> %a, %splat
  ret <8 x i32> %shift

@@ -296,6 +421,25 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovd %xmm1, %eax
; XOPAVX1-NEXT: movzwl %ax, %eax
; XOPAVX1-NEXT: vmovd %eax, %xmm1
; XOPAVX1-NEXT: vpsraw %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vmovd %xmm1, %eax
; XOPAVX2-NEXT: movzwl %ax, %eax
; XOPAVX2-NEXT: vmovd %eax, %xmm1
; XOPAVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
  %shift = ashr <16 x i16> %a, %splat
  ret <16 x i16> %shift

@@ -379,6 +523,30 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshab %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; XOPAVX2-NEXT: vpshab %xmm2, %xmm4, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %shift = ashr <32 x i8> %a, %splat
  ret <32 x i8> %shift

@@ -414,6 +582,25 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshaq %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [4611686018427387904,72057594037927936,4294967296,2]
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
  ret <4 x i64> %shift
}

@@ -441,6 +628,19 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
  ret <8 x i32> %shift
}

@@ -490,6 +690,28 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshaw %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vpshaw %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX2-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
  ret <16 x i16> %shift
}

@@ -571,6 +793,26 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshab %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vpshab %xmm1, %xmm2, %xmm2
; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <32 x i8> %shift
}

@@ -598,6 +840,24 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; XOPAVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
  ret <4 x i64> %shift
}

@@ -615,6 +875,19 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpsrad $5, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsrad $5, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
  ret <8 x i32> %shift
}

@@ -632,6 +905,19 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpsraw $3, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsraw $3, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <16 x i16> %shift
}

@@ -661,6 +947,25 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshab %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
  %shift = ashr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  ret <32 x i8> %shift
}
@@ -2,6 +2,8 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2

@@ -43,6 +45,18 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v2i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v2i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]

@@ -124,6 +138,18 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm1, %xmm2

@@ -245,6 +271,13 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1

@@ -355,6 +388,13 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: var_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $5, %xmm1

@@ -404,6 +444,11 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero

@@ -436,6 +481,13 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOP-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2

@@ -470,6 +522,13 @@ define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; XOP-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movd %xmm1, %eax

@@ -580,6 +639,22 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]

@@ -653,6 +728,18 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v2i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v2i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1

@@ -712,6 +799,16 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1

@@ -797,6 +894,13 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: constant_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1

@@ -889,6 +993,13 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]

@@ -939,6 +1050,11 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX-NEXT: vpsrlq $7, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpsrlq $7, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlq $7, %xmm0

@@ -958,6 +1074,11 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpsrld $5, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vpsrld $5, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrld $5, %xmm0

@@ -977,6 +1098,11 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpsrlw $3, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlw $3, %xmm0

@@ -998,6 +1124,13 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
|
||||
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
|
||||
; XOP-NEXT: retq
|
||||
;
|
||||
; X32-SSE-LABEL: splatconstant_shift_v16i8:
|
||||
; X32-SSE: # BB#0:
|
||||
; X32-SSE-NEXT: psrlw $3, %xmm0
|
||||
|
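The XOP checks above all follow one pattern: the VPSHL* instructions only shift left, with negative per-element amounts meaning a right shift, so a logical right shift is lowered as a negation of the shift counts (vpxor to get zero, then vpsub*) followed by vpshl*. A minimal sketch in the same test style, assuming an -mattr=+xop run line; the function name is illustrative, not from this patch:

define <16 x i8> @lshr_v16i8_sketch(<16 x i8> %a, <16 x i8> %b) nounwind {
; Expected XOP lowering (per the checks above):
;   vpxor %xmm2, %xmm2, %xmm2
;   vpsubb %xmm1, %xmm2, %xmm1
;   vpshlb %xmm1, %xmm0, %xmm0
  %shift = lshr <16 x i8> %a, %b
  ret <16 x i8> %shift
}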
@@ -1,5 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2

;
; Variable Shifts
@@ -25,6 +27,23 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshlq %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <4 x i64> %a, %b
ret <4 x i64> %shift
}
@@ -64,6 +83,23 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshld %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <8 x i32> %a, %b
ret <8 x i32> %shift
}
@@ -117,6 +153,30 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshlw %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; XOPAVX2-NEXT: vpshlw %xmm2, %xmm4, %xmm2
; XOPAVX2-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -171,6 +231,30 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm4, %xmm2
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm4, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -192,6 +276,19 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = lshr <4 x i64> %a, %splat
ret <4 x i64> %shift
@@ -214,6 +311,23 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpsrld %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOPAVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = lshr <8 x i32> %a, %splat
ret <8 x i32> %shift
@@ -238,6 +352,25 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovd %xmm1, %eax
; XOPAVX1-NEXT: movzwl %ax, %eax
; XOPAVX1-NEXT: vmovd %eax, %xmm1
; XOPAVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vmovd %xmm1, %eax
; XOPAVX2-NEXT: movzwl %ax, %eax
; XOPAVX2-NEXT: vmovd %eax, %xmm1
; XOPAVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = lshr <16 x i16> %a, %splat
ret <16 x i16> %shift
@@ -292,6 +425,30 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm4, %xmm2
; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -318,6 +475,22 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshlq %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
@@ -345,6 +518,19 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
@@ -394,6 +580,28 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshlw %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vpshlw %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX2-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -446,6 +654,26 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX2-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm2, %xmm2
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -467,6 +695,19 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
@@ -484,6 +725,19 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrld $5, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpsrld $5, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsrld $5, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrld $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
@@ -501,6 +755,19 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpsrlw $3, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -522,6 +789,22 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
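XOP's vpshl* instructions are 128-bit only, so the 256-bit XOPAVX1 checks above split each ymm operand with vextractf128, negate and vpshl* each half, and reassemble with vinsertf128; where the target also has AVX2, the variable i32/i64 cases select the native vpsrlv* instead. A sketch, assuming the same XOPAVX1/XOPAVX2 run lines; the function name is illustrative:

define <4 x i64> @lshr_v4i64_sketch(<4 x i64> %a, <4 x i64> %b) nounwind {
; +xop,+avx: two negated vpshlq ops, one per 128-bit half.
; +xop,+avx2: a single vpsrlvq %ymm1, %ymm0, %ymm0.
  %shift = lshr <4 x i64> %a, %b
  ret <4 x i64> %shift
}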
@@ -2,6 +2,8 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
@@ -43,6 +45,16 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v2i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v2i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
@@ -94,6 +106,16 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pslld $23, %xmm1
@@ -206,6 +228,11 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; XOP-LABEL: var_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1
@@ -313,6 +340,11 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: var_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $5, %xmm1
@@ -361,6 +393,11 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
@@ -393,6 +430,13 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-NEXT: vpslld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOP-NEXT: vpslld %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
@@ -427,6 +471,13 @@ define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatvar_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
; XOP-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movd %xmm1, %eax
@@ -533,6 +584,19 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -605,6 +669,16 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v2i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v2i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -645,6 +719,16 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX2-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
@@ -671,6 +755,11 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pmullw .LCPI10_0, %xmm0
@@ -748,6 +837,11 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: constant_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
@@ -797,6 +891,11 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX-NEXT: vpsllq $7, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v2i64:
; XOP: # BB#0:
; XOP-NEXT: vpsllq $7, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllq $7, %xmm0
@@ -816,6 +915,11 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpslld $5, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v4i32:
; XOP: # BB#0:
; XOP-NEXT: vpslld $5, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pslld $5, %xmm0
@@ -835,6 +939,11 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v8i16:
; XOP: # BB#0:
; XOP-NEXT: vpsllw $3, %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $3, %xmm0
@@ -856,6 +965,11 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; XOP-LABEL: splatconstant_shift_v16i8:
; XOP: # BB#0:
; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $3, %xmm0
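For left shifts the amounts are used as-is, so the variable cases above need no vpxor/vpsub negation and collapse to a single vpshl* per 128-bit vector. A sketch, assuming an -mattr=+xop run line; the function name is illustrative:

define <8 x i16> @shl_v8i16_sketch(<8 x i16> %a, <8 x i16> %b) nounwind {
; Expected XOP lowering (per the checks above): vpshlw %xmm1, %xmm0, %xmm0
  %shift = shl <8 x i16> %a, %b
  ret <8 x i16> %shift
}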
@@ -1,5 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2

;
; Variable Shifts
@@ -25,6 +27,20 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshlq %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <4 x i64> %a, %b
ret <4 x i64> %shift
}
@@ -50,6 +66,20 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshld %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <8 x i32> %a, %b
ret <8 x i32> %shift
}
@@ -103,6 +133,24 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshlw %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vpshlw %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -153,6 +201,24 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: var_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm3, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm3, %xmm2
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -174,6 +240,19 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = shl <4 x i64> %a, %splat
ret <4 x i64> %shift
@@ -196,6 +275,23 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpslld %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpslld %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; XOPAVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = shl <8 x i32> %a, %splat
ret <8 x i32> %shift
@@ -220,6 +316,25 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vmovd %xmm1, %eax
; XOPAVX1-NEXT: movzwl %ax, %eax
; XOPAVX1-NEXT: vmovd %eax, %xmm1
; XOPAVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vmovd %xmm1, %eax
; XOPAVX2-NEXT: movzwl %ax, %eax
; XOPAVX2-NEXT: vmovd %eax, %xmm1
; XOPAVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = shl <16 x i16> %a, %splat
ret <16 x i16> %shift
@@ -270,6 +385,26 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm2
; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; XOPAVX2-NEXT: vpshlb %xmm3, %xmm2, %xmm2
; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -296,6 +431,19 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
@@ -313,6 +461,19 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
@@ -330,6 +491,19 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -378,6 +552,24 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: constant_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm1, %xmm1
; XOPAVX2-NEXT: vpshlb %xmm2, %xmm0, %xmm0
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -399,6 +591,19 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllq $7, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v4i64:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpsllq $7, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsllq $7, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v4i64:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllq $7, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
@@ -416,6 +621,19 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpslld $5, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v8i32:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpslld $5, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpslld $5, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v8i32:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpslld $5, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
@@ -433,6 +651,19 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v16i16:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vpsllw $3, %xmm0, %xmm1
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; XOPAVX1-NEXT: vpsllw $3, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v16i16:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -454,6 +685,21 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_shift_v32i8:
; XOPAVX1: # BB#0:
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm1, %xmm1
; XOPAVX1-NEXT: vpshlb %xmm2, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatconstant_shift_v32i8:
; XOPAVX2: # BB#0:
; XOPAVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shift = shl <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
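Note how the constant 256-bit left shifts above materialize the per-element amounts once (a single vmovdqa) and feed both vpshlb halves, whereas the logical right shift versions had to build negated constants first. A sketch, assuming the same XOPAVX1 run line; the function name is illustrative:

define <32 x i8> @shl_v32i8_sketch(<32 x i8> %a) nounwind {
; +xop,+avx: one vmovdqa of the amount vector, reused by vpshlb on each half.
  %shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
  ret <32 x i8> %shift
}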