Recommit r344877 "[X86] Stop promoting integer loads to vXi64"
I've included a fix to DAGCombiner::ForwardStoreValueToDirectLoad that I believe will prevent the previous miscompile.

Original commit message:

Theoretically this was done to simplify the amount of isel patterns that were needed. But it also meant that a substantial number of our isel patterns had to match an explicit bitcast. By making the vXi32/vXi16/vXi8 types legal for loads, DAG combiner should be able to change the load type to remove the bitcast.

I had to add some additional plain load instruction patterns and a few other special cases, but overall the isel table has reduced in size by ~12000 bytes. So it looks like this promotion was hurting us more than helping.

I still have one crash in vector-trunc.ll that I'm hoping @RKSimon can help with. It seems to relate to using getTargetConstantFromNode on a load that was shrunk due to an extract_subvector combine after the constant pool entry was created, so we end up decoding more mask elements than the load contains.

I'm hoping this patch will simplify the number of patterns needed to remove the and/or/xor promotion.

Reviewers: RKSimon, spatel

Reviewed By: RKSimon

Subscribers: llvm-commits, RKSimon

Differential Revision: https://reviews.llvm.org/D53306

llvm-svn: 344965
This commit is contained in: commit e7725047a0 (parent a6149dec85)
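The ForwardStoreValueToDirectLoad fix referenced above only forwards a stored value to a mismatched load when both memory types are scalar integers and the load is no wider than the store, truncating the forwarded value to the load's width. The sketch below is a minimal, standalone model of that guard; the MemType struct and forwardStoreToLoad are hypothetical names and this is not the real SelectionDAG/DAGCombiner API.

// Simplified, standalone model of the guarded store-to-load forwarding
// described above. Not the LLVM DAGCombiner code; illustrative only.
#include <cstdint>
#include <iostream>
#include <optional>

struct MemType {
  unsigned Bits;   // width of the memory access in bits
  bool IsVector;   // true for vector types
  bool IsInteger;  // true for integer types
};

// Forward the value written by a store to a later load of the same address.
// Only allowed when both accesses are scalar integers and the load is not
// wider than the store; the forwarded value is truncated to the load's
// width, mirroring the ISD::TRUNCATE node in the real combine.
std::optional<uint64_t> forwardStoreToLoad(uint64_t StoredVal,
                                           MemType STMemType,
                                           MemType LDMemType) {
  if (STMemType.Bits == LDMemType.Bits)
    return StoredVal; // Types match exactly; forward as-is.

  // The real code leaves vectors as a TODO (extract_subvector/bitcast).
  if (STMemType.IsVector || LDMemType.IsVector ||
      !STMemType.IsInteger || !LDMemType.IsInteger ||
      LDMemType.Bits > STMemType.Bits)
    return std::nullopt; // Give up; let the load read memory instead.

  // "Truncate" by masking down to the load's width.
  uint64_t Mask = (LDMemType.Bits >= 64) ? ~0ull
                                         : ((1ull << LDMemType.Bits) - 1);
  return StoredVal & Mask;
}

int main() {
  MemType St{32, /*IsVector=*/false, /*IsInteger=*/true};
  MemType Ld{16, /*IsVector=*/false, /*IsInteger=*/true};
  if (auto V = forwardStoreToLoad(0x12345678u, St, Ld))
    std::cout << std::hex << *V << "\n"; // prints 5678
}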
@@ -12897,7 +12897,8 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
if (!isTypeLegal(LDMemType))
continue;
if (STMemType != LDMemType) {
if (numVectorEltsOrZero(STMemType) == numVectorEltsOrZero(LDMemType) &&
// TODO: Support vectors? This requires extract_subvector/bitcast.
if (!STMemType.isVector() && !LDMemType.isVector() &&
STMemType.isInteger() && LDMemType.isInteger())
Val = DAG.getNode(ISD::TRUNCATE, SDLoc(LD), LDMemType, Val);
else
@@ -2890,21 +2890,17 @@ MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());

// If there is a load, it will be behind a bitcast. We don't need to check
// alignment on this load.
// Try to fold a load. No need to check alignment.
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
if (MayFoldLoad && N1->getOpcode() == ISD::BITCAST && N1->hasOneUse() &&
tryFoldLoad(Node, N1.getNode(), N1.getOperand(0), Tmp0, Tmp1, Tmp2,
Tmp3, Tmp4)) {
SDValue Load = N1.getOperand(0);
if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
Load.getOperand(0) };
N1.getOperand(0) };
SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
// Update the chain.
ReplaceUses(Load.getValue(1), SDValue(CNode, 2));
ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
// Record the mem-refs
CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(Load)->getMemOperand()});
CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
return CNode;
}
@@ -2927,22 +2923,18 @@ MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());

// If there is a load, it will be behind a bitcast. We don't need to check
// alignment on this load.
// Try to fold a load. No need to check alignment.
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
if (MayFoldLoad && N2->getOpcode() == ISD::BITCAST && N2->hasOneUse() &&
tryFoldLoad(Node, N2.getNode(), N2.getOperand(0), Tmp0, Tmp1, Tmp2,
Tmp3, Tmp4)) {
SDValue Load = N2.getOperand(0);
if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
Load.getOperand(0), InFlag };
N2.getOperand(0), InFlag };
SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
InFlag = SDValue(CNode, 3);
// Update the chain.
ReplaceUses(Load.getValue(1), SDValue(CNode, 2));
ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
// Record the mem-refs
CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(Load)->getMemOperand()});
CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
return CNode;
}
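In the two hunks above, PCMPISTR/PCMPESTR selection no longer has to look through an ISD::BITCAST to find the foldable load, because integer vector loads now keep their own type. The following standalone sketch models that difference; the Node struct and getFoldableLoad are hypothetical names, not the SelectionDAG API.

// Minimal model of why the old code peeked through a bitcast before folding
// the memory operand, while the new code folds the load directly.
#include <cassert>
#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string Op;                            // e.g. "load", "bitcast"
  std::vector<std::shared_ptr<Node>> Operands;
};

// Return the load to fold as a memory operand, or nullptr.
// If loads of the operand's own type are legal (the new situation), the
// operand itself may already be the load; previously it was wrapped in a
// bitcast from the promoted type and we had to peek through it.
std::shared_ptr<Node> getFoldableLoad(const std::shared_ptr<Node> &Operand) {
  if (Operand->Op == "load")
    return Operand;                          // new: fold the load directly
  if (Operand->Op == "bitcast" && !Operand->Operands.empty() &&
      Operand->Operands[0]->Op == "load")
    return Operand->Operands[0];             // old: peek through the bitcast
  return nullptr;
}

int main() {
  auto Load = std::make_shared<Node>(Node{"load", {}});
  auto Cast = std::make_shared<Node>(Node{"bitcast", {Load}});
  assert(getFoldableLoad(Cast) == Load);     // pre-patch operand shape
  assert(getFoldableLoad(Load) == Load);     // post-patch operand shape
  return 0;
}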
@@ -869,11 +869,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}

// Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64);
}

// Custom lower v2i64 and v2f64 selects.
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
@@ -1178,11 +1173,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (HasInt256)
setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

// Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64);
}

if (HasInt256) {
// Custom legalize 2x32 to get a little better code.
setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
@@ -1419,10 +1409,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::MGATHER, VT, Custom);
setOperationAction(ISD::MSCATTER, VT, Custom);
}
for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
setOperationPromotedToType(ISD::LOAD, VT, MVT::v8i64);
}

// Need to custom split v32i16/v64i8 bitcasts.
if (!Subtarget.hasBWI()) {
setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
@@ -5539,7 +5525,7 @@ static const Constant *getTargetConstantFromNode(SDValue Op) {
if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
return nullptr;

return dyn_cast<Constant>(CNode->getConstVal());
return CNode->getConstVal();
}

// Extract raw constant bits from constant pools.
@@ -6045,7 +6031,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPERMILPMask(C, MaskEltSize, Mask);
DecodeVPERMILPMask(C, MaskEltSize, VT.getSizeInBits(), Mask);
break;
}
return false;
@@ -6062,7 +6048,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodePSHUFBMask(C, Mask);
DecodePSHUFBMask(C, VT.getSizeInBits(), Mask);
break;
}
return false;
@@ -6124,7 +6110,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPERMIL2PMask(C, CtrlImm, MaskEltSize, Mask);
DecodeVPERMIL2PMask(C, CtrlImm, MaskEltSize, VT.getSizeInBits(), Mask);
break;
}
}
@@ -6141,7 +6127,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPPERMMask(C, Mask);
DecodeVPPERMMask(C, VT.getSizeInBits(), Mask);
break;
}
return false;
@@ -6158,7 +6144,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPERMVMask(C, MaskEltSize, Mask);
DecodeVPERMVMask(C, MaskEltSize, VT.getSizeInBits(), Mask);
break;
}
return false;
@@ -6172,7 +6158,7 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
Ops.push_back(N->getOperand(2));
SDValue MaskNode = N->getOperand(1);
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPERMV3Mask(C, MaskEltSize, Mask);
DecodeVPERMV3Mask(C, MaskEltSize, VT.getSizeInBits(), Mask);
break;
}
return false;
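The decoder changes above pass VT.getSizeInBits() so that the number of decoded mask elements is bounded by the shuffle node's width rather than by the width of the constant-pool entry, which can be wider when the load has been shrunk by an extract_subvector combine (the vector-trunc.ll crash mentioned in the commit message). Below is a rough standalone sketch of that idea with a hypothetical decoder that loosely mimics PSHUFB-style semantics; it is not the real DecodePSHUFBMask signature.

// Simplified sketch: decode only as many mask elements as the shuffle node
// has, even when the constant-pool data is wider than the (shrunk) load.
#include <cstdint>
#include <iostream>
#include <vector>

// RawConst models the bytes of a constant-pool entry that may be wider than
// the load that references it.
void decodeByteShuffleMask(const std::vector<uint8_t> &RawConst,
                           unsigned NodeSizeInBits,
                           std::vector<int> &Mask) {
  unsigned NumElts = NodeSizeInBits / 8; // one mask entry per byte lane
  for (unsigned i = 0; i != NumElts && i != RawConst.size(); ++i) {
    uint8_t B = RawConst[i];
    // High bit set means "zero this lane"; otherwise index modulo width.
    Mask.push_back((B & 0x80) ? -1 : int(B % NumElts));
  }
}

int main() {
  std::vector<uint8_t> WideConst(32, 1); // 256-bit constant-pool entry
  std::vector<int> Mask;
  decodeByteShuffleMask(WideConst, /*NodeSizeInBits=*/128, Mask);
  std::cout << Mask.size() << "\n";      // 16 elements, not 32
}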
@@ -66,21 +66,16 @@ class X86VectorVTInfo<int numelts, ValueType eltvt, RegisterClass rc,
!if (!eq (EltTypeName, "f64"), !cast<Operand>("sdmem"), ?));

// Load patterns
// Note: For 128/256-bit integer VT we choose loadv2i64/loadv4i64
// due to load promotion during legalization
PatFrag LdFrag = !cast<PatFrag>("load" #
!if (!eq (TypeVariantName, "i"),
!if (!eq (Size, 128), "v2i64",
!if (!eq (Size, 256), "v4i64",
!if (!eq (Size, 512), "v8i64",
VTName))), VTName));
PatFrag LdFrag = !cast<PatFrag>("load" # VTName);

PatFrag AlignedLdFrag = !cast<PatFrag>("alignedload" #
!if (!eq (TypeVariantName, "i"),
!if (!eq (Size, 128), "v2i64",
!if (!eq (Size, 256), "v4i64",
!if (!eq (Size, 512), "v8i64",
VTName))), VTName));
PatFrag i64LdFrag = !cast<PatFrag>("load" #
!if (!eq (TypeVariantName, "i"),
!if (!eq (Size, 128), "v2i64",
!if (!eq (Size, 256), "v4i64",
!if (!eq (Size, 512), "v8i64",
VTName))), VTName));

PatFrag AlignedLdFrag = !cast<PatFrag>("alignedload" # VTName);

PatFrag ScalarLdFrag = !cast<PatFrag>("load" # EltVT);
@ -518,10 +513,10 @@ multiclass vinsert_for_size_split<int Opcode, X86VectorVTInfo From,
|
||||
"vinsert" # From.EltTypeName # "x" # From.NumElts,
|
||||
"$src3, $src2, $src1", "$src1, $src2, $src3",
|
||||
(vinsert_insert:$src3 (To.VT To.RC:$src1),
|
||||
(From.VT (bitconvert (From.LdFrag addr:$src2))),
|
||||
(From.VT (From.LdFrag addr:$src2)),
|
||||
(iPTR imm)),
|
||||
(vinsert_for_mask:$src3 (To.VT To.RC:$src1),
|
||||
(From.VT (bitconvert (From.LdFrag addr:$src2))),
|
||||
(From.VT (From.LdFrag addr:$src2)),
|
||||
(iPTR imm))>, AVX512AIi8Base, EVEX_4V,
|
||||
EVEX_CD8<From.EltSize, From.CD8TupleForm>,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
@ -547,7 +542,7 @@ multiclass vinsert_for_size_lowering<string InstrStr, X86VectorVTInfo From,
|
||||
|
||||
def : Pat<(vinsert_insert:$ins
|
||||
(To.VT To.RC:$src1),
|
||||
(From.VT (bitconvert (From.LdFrag addr:$src2))),
|
||||
(From.VT (From.LdFrag addr:$src2)),
|
||||
(iPTR imm)),
|
||||
(To.VT (!cast<Instruction>(InstrStr#"rm")
|
||||
To.RC:$src1, addr:$src2,
|
||||
@ -680,9 +675,7 @@ let Predicates = p in {
|
||||
(vselect Cast.KRCWM:$mask,
|
||||
(bitconvert
|
||||
(vinsert_insert:$ins (To.VT To.RC:$src1),
|
||||
(From.VT
|
||||
(bitconvert
|
||||
(From.LdFrag addr:$src2))),
|
||||
(From.VT (From.LdFrag addr:$src2)),
|
||||
(iPTR imm))),
|
||||
Cast.ImmAllZerosV)),
|
||||
(!cast<Instruction>(InstrStr#"rmkz")
|
||||
@ -1374,7 +1367,7 @@ multiclass avx512_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
|
||||
defm rm : AVX512_maskable<opc, MRMSrcMem, _Dst, (outs _Dst.RC:$dst),
|
||||
(ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
|
||||
(_Dst.VT (X86SubVBroadcast
|
||||
(_Src.VT (bitconvert (_Src.LdFrag addr:$src)))))>,
|
||||
(_Src.VT (_Src.LdFrag addr:$src))))>,
|
||||
Sched<[SchedWriteShuffle.YMM.Folded]>,
|
||||
AVX5128IBase, EVEX;
|
||||
}
|
||||
@ -1389,7 +1382,7 @@ multiclass avx512_subvec_broadcast_rm_dq<bits<8> opc, string OpcodeStr,
|
||||
(ins _Src.MemOp:$src), OpcodeStr, "$src", "$src",
|
||||
(null_frag),
|
||||
(_Dst.VT (X86SubVBroadcast
|
||||
(_Src.VT (bitconvert (_Src.LdFrag addr:$src)))))>,
|
||||
(_Src.VT (_Src.LdFrag addr:$src))))>,
|
||||
Sched<[SchedWriteShuffle.YMM.Folded]>,
|
||||
AVX5128IBase, EVEX;
|
||||
}
|
||||
@ -1442,11 +1435,11 @@ defm VBROADCASTF64X4 : avx512_subvec_broadcast_rm<0x1b, "vbroadcastf64x4",
|
||||
let Predicates = [HasAVX512] in {
|
||||
def : Pat<(v16f32 (X86SubVBroadcast (loadv8f32 addr:$src))),
|
||||
(VBROADCASTF64X4rm addr:$src)>;
|
||||
def : Pat<(v16i32 (X86SubVBroadcast (bc_v8i32 (loadv4i64 addr:$src)))),
|
||||
def : Pat<(v16i32 (X86SubVBroadcast (loadv8i32 addr:$src))),
|
||||
(VBROADCASTI64X4rm addr:$src)>;
|
||||
def : Pat<(v32i16 (X86SubVBroadcast (bc_v16i16 (loadv4i64 addr:$src)))),
|
||||
def : Pat<(v32i16 (X86SubVBroadcast (loadv16i16 addr:$src))),
|
||||
(VBROADCASTI64X4rm addr:$src)>;
|
||||
def : Pat<(v64i8 (X86SubVBroadcast (bc_v32i8 (loadv4i64 addr:$src)))),
|
||||
def : Pat<(v64i8 (X86SubVBroadcast (loadv32i8 addr:$src))),
|
||||
(VBROADCASTI64X4rm addr:$src)>;
|
||||
|
||||
// Provide fallback in case the load node that is used in the patterns above
|
||||
@ -1474,9 +1467,9 @@ def : Pat<(v8f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
|
||||
(VBROADCASTF32X4rm addr:$src)>;
|
||||
def : Pat<(v8i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
|
||||
(VBROADCASTI32X4rm addr:$src)>;
|
||||
def : Pat<(v32i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v32i16 (X86SubVBroadcast (loadv8i16 addr:$src))),
|
||||
(VBROADCASTI32X4rm addr:$src)>;
|
||||
def : Pat<(v64i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v64i8 (X86SubVBroadcast (loadv16i8 addr:$src))),
|
||||
(VBROADCASTI32X4rm addr:$src)>;
|
||||
|
||||
// Patterns for selects of bitcasted operations.
|
||||
@ -1506,11 +1499,11 @@ def : Pat<(vselect VK8WM:$mask,
|
||||
VR512:$src0),
|
||||
(VBROADCASTF64X4rmk VR512:$src0, VK8WM:$mask, addr:$src)>;
|
||||
def : Pat<(vselect VK8WM:$mask,
|
||||
(bc_v8i64 (v16i32 (X86SubVBroadcast (bc_v8i32 (loadv4i64 addr:$src))))),
|
||||
(bc_v8i64 (v16i32 (X86SubVBroadcast (loadv8i32 addr:$src)))),
|
||||
(bc_v8i64 (v16i32 immAllZerosV))),
|
||||
(VBROADCASTI64X4rmkz VK8WM:$mask, addr:$src)>;
|
||||
def : Pat<(vselect VK8WM:$mask,
|
||||
(bc_v8i64 (v16i32 (X86SubVBroadcast (bc_v8i32 (loadv4i64 addr:$src))))),
|
||||
(bc_v8i64 (v16i32 (X86SubVBroadcast (loadv8i32 addr:$src)))),
|
||||
VR512:$src0),
|
||||
(VBROADCASTI64X4rmk VR512:$src0, VK8WM:$mask, addr:$src)>;
|
||||
}
|
||||
@ -1527,9 +1520,9 @@ def : Pat<(v4f64 (X86SubVBroadcast (loadv2f64 addr:$src))),
|
||||
(VBROADCASTF32X4Z256rm addr:$src)>;
|
||||
def : Pat<(v4i64 (X86SubVBroadcast (loadv2i64 addr:$src))),
|
||||
(VBROADCASTI32X4Z256rm addr:$src)>;
|
||||
def : Pat<(v16i16 (X86SubVBroadcast (bc_v8i16 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v16i16 (X86SubVBroadcast (loadv8i16 addr:$src))),
|
||||
(VBROADCASTI32X4Z256rm addr:$src)>;
|
||||
def : Pat<(v32i8 (X86SubVBroadcast (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v32i8 (X86SubVBroadcast (loadv16i8 addr:$src))),
|
||||
(VBROADCASTI32X4Z256rm addr:$src)>;
|
||||
|
||||
// Patterns for selects of bitcasted operations.
|
||||
@ -1591,11 +1584,11 @@ def : Pat<(vselect VK4WM:$mask,
|
||||
VR256X:$src0),
|
||||
(VBROADCASTF64X2Z128rmk VR256X:$src0, VK4WM:$mask, addr:$src)>;
|
||||
def : Pat<(vselect VK4WM:$mask,
|
||||
(bc_v4i64 (v8i32 (X86SubVBroadcast (bc_v4i32 (loadv2i64 addr:$src))))),
|
||||
(bc_v4i64 (v8i32 (X86SubVBroadcast (loadv4i32 addr:$src)))),
|
||||
(bc_v4i64 (v8i32 immAllZerosV))),
|
||||
(VBROADCASTI64X2Z128rmkz VK4WM:$mask, addr:$src)>;
|
||||
def : Pat<(vselect VK4WM:$mask,
|
||||
(bc_v4i64 (v8i32 (X86SubVBroadcast (bc_v4i32 (loadv2i64 addr:$src))))),
|
||||
(bc_v4i64 (v8i32 (X86SubVBroadcast (loadv4i32 addr:$src)))),
|
||||
VR256X:$src0),
|
||||
(VBROADCASTI64X2Z128rmk VR256X:$src0, VK4WM:$mask, addr:$src)>;
|
||||
}
|
||||
@ -1641,11 +1634,11 @@ def : Pat<(vselect VK8WM:$mask,
|
||||
VR512:$src0),
|
||||
(VBROADCASTF64X2rmk VR512:$src0, VK8WM:$mask, addr:$src)>;
|
||||
def : Pat<(vselect VK8WM:$mask,
|
||||
(bc_v8i64 (v16i32 (X86SubVBroadcast (bc_v4i32 (loadv2i64 addr:$src))))),
|
||||
(bc_v8i64 (v16i32 (X86SubVBroadcast (loadv4i32 addr:$src)))),
|
||||
(bc_v8i64 (v16i32 immAllZerosV))),
|
||||
(VBROADCASTI64X2rmkz VK8WM:$mask, addr:$src)>;
|
||||
def : Pat<(vselect VK8WM:$mask,
|
||||
(bc_v8i64 (v16i32 (X86SubVBroadcast (bc_v4i32 (loadv2i64 addr:$src))))),
|
||||
(bc_v8i64 (v16i32 (X86SubVBroadcast (loadv4i32 addr:$src)))),
|
||||
VR512:$src0),
|
||||
(VBROADCASTI64X2rmk VR512:$src0, VK8WM:$mask, addr:$src)>;
|
||||
}
|
||||
@ -1741,7 +1734,7 @@ let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain,
|
||||
(ins _.RC:$src2, _.MemOp:$src3),
|
||||
OpcodeStr, "$src3, $src2", "$src2, $src3",
|
||||
(_.VT (X86VPermt2 _.RC:$src2, IdxVT.RC:$src1,
|
||||
(_.VT (bitconvert (_.LdFrag addr:$src3))))), 1>,
|
||||
(_.VT (_.LdFrag addr:$src3)))), 1>,
|
||||
EVEX_4V, AVX5128IBase, Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
}
|
||||
@ -1859,7 +1852,7 @@ let Constraints = "$src1 = $dst", ExeDomain = _.ExeDomain in {
|
||||
(ins IdxVT.RC:$src2, _.MemOp:$src3),
|
||||
OpcodeStr, "$src3, $src2", "$src2, $src3",
|
||||
(_.VT (X86VPermt2 _.RC:$src1, IdxVT.RC:$src2,
|
||||
(bitconvert (_.LdFrag addr:$src3)))), 1>,
|
||||
(_.LdFrag addr:$src3))), 1>,
|
||||
EVEX_4V, AVX5128IBase, Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
}
|
||||
@ -2149,7 +2142,7 @@ multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, PatFrag OpNode,
|
||||
(outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
|
||||
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
||||
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
|
||||
(_.VT (bitconvert (_.LdFrag addr:$src2)))))]>,
|
||||
(_.VT (_.LdFrag addr:$src2))))]>,
|
||||
EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
let isCommutable = IsCommutable in
|
||||
def rrk : AVX512BI<opc, MRMSrcReg,
|
||||
@ -2165,8 +2158,7 @@ multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, PatFrag OpNode,
|
||||
"$dst {${mask}}, $src1, $src2}"),
|
||||
[(set _.KRC:$dst, (and _.KRCWM:$mask,
|
||||
(OpNode (_.VT _.RC:$src1),
|
||||
(_.VT (bitconvert
|
||||
(_.LdFrag addr:$src2))))))]>,
|
||||
(_.VT (_.LdFrag addr:$src2)))))]>,
|
||||
EVEX_4V, EVEX_K, Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
|
||||
@ -2291,7 +2283,7 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, PatFrag Frag,
|
||||
[(set _.KRC:$dst, (_.KVT
|
||||
(Frag:$cc
|
||||
(_.VT _.RC:$src1),
|
||||
(_.VT (bitconvert (_.LdFrag addr:$src2))),
|
||||
(_.VT (_.LdFrag addr:$src2)),
|
||||
cond)))]>,
|
||||
EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
let isCommutable = 1 in
|
||||
@ -2316,8 +2308,7 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, PatFrag Frag,
|
||||
(_.KVT
|
||||
(Frag:$cc
|
||||
(_.VT _.RC:$src1),
|
||||
(_.VT (bitconvert
|
||||
(_.LdFrag addr:$src2))),
|
||||
(_.VT (_.LdFrag addr:$src2)),
|
||||
cond))))]>,
|
||||
EVEX_4V, EVEX_K, Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
|
||||
@ -2352,13 +2343,13 @@ multiclass avx512_icmp_cc<bits<8> opc, string Suffix, PatFrag Frag,
|
||||
NotMemoryFoldable;
|
||||
}
|
||||
|
||||
def : Pat<(_.KVT (CommFrag:$cc (bitconvert (_.LdFrag addr:$src2)),
|
||||
def : Pat<(_.KVT (CommFrag:$cc (_.LdFrag addr:$src2),
|
||||
(_.VT _.RC:$src1), cond)),
|
||||
(!cast<Instruction>(Name#_.ZSuffix#"rmi")
|
||||
_.RC:$src1, addr:$src2, (CommFrag.OperandTransform $cc))>;
|
||||
|
||||
def : Pat<(and _.KRCWM:$mask,
|
||||
(_.KVT (CommFrag:$cc (bitconvert (_.LdFrag addr:$src2)),
|
||||
(_.KVT (CommFrag:$cc (_.LdFrag addr:$src2),
|
||||
(_.VT _.RC:$src1), cond))),
|
||||
(!cast<Instruction>(Name#_.ZSuffix#"rmik")
|
||||
_.KRCWM:$mask, _.RC:$src1, addr:$src2,
|
||||
@ -2544,7 +2535,7 @@ multiclass avx512_vcmp_common<X86FoldableSchedWrite sched, X86VectorVTInfo _,
|
||||
"vcmp${cc}"#_.Suffix,
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(X86cmpm (_.VT _.RC:$src1),
|
||||
(_.VT (bitconvert (_.LdFrag addr:$src2))),
|
||||
(_.VT (_.LdFrag addr:$src2)),
|
||||
imm:$cc)>,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
|
||||
@ -2732,7 +2723,7 @@ multiclass avx512_vector_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
||||
OpcodeStr##_.Suffix##mem#
|
||||
"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
||||
[(set _.KRC:$dst,(OpNode
|
||||
(_.VT (bitconvert (_.LdFrag addr:$src1))),
|
||||
(_.VT (_.LdFrag addr:$src1)),
|
||||
(i32 imm:$src2)))]>,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
def rmk : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
|
||||
@ -2740,7 +2731,7 @@ multiclass avx512_vector_fpclass<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
||||
OpcodeStr##_.Suffix##mem#
|
||||
"\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}",
|
||||
[(set _.KRC:$dst, (and _.KRCWM:$mask, (OpNode
|
||||
(_.VT (bitconvert (_.LdFrag addr:$src1))),
|
||||
(_.VT (_.LdFrag addr:$src1)),
|
||||
(i32 imm:$src2))))]>,
|
||||
EVEX_K, Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
def rmb : AVX512<opc, MRMSrcMem, (outs _.KRC:$dst),
|
||||
@ -3353,7 +3344,7 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, string Name,
|
||||
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
||||
!if(NoRMPattern, [],
|
||||
[(set _.RC:$dst,
|
||||
(_.VT (bitconvert (ld_frag addr:$src))))]),
|
||||
(_.VT (ld_frag addr:$src)))]),
|
||||
_.ExeDomain>, EVEX, Sched<[Sched.RM]>,
|
||||
EVEX2VEXOverride<EVEX2VEXOvrd#"rm">;
|
||||
|
||||
@ -3372,7 +3363,7 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, string Name,
|
||||
"${dst} {${mask}}, $src1}"),
|
||||
[(set _.RC:$dst, (_.VT
|
||||
(vselect _.KRCWM:$mask,
|
||||
(_.VT (bitconvert (ld_frag addr:$src1))),
|
||||
(_.VT (ld_frag addr:$src1)),
|
||||
(_.VT _.RC:$src0))))], _.ExeDomain>,
|
||||
EVEX, EVEX_K, Sched<[Sched.RM]>;
|
||||
}
|
||||
@ -3381,7 +3372,7 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, string Name,
|
||||
OpcodeStr #"\t{$src, ${dst} {${mask}} {z}|"#
|
||||
"${dst} {${mask}} {z}, $src}",
|
||||
[(set _.RC:$dst, (_.VT (vselect _.KRCWM:$mask,
|
||||
(_.VT (bitconvert (ld_frag addr:$src))), _.ImmAllZerosV)))],
|
||||
(_.VT (ld_frag addr:$src)), _.ImmAllZerosV)))],
|
||||
_.ExeDomain>, EVEX, EVEX_KZ, Sched<[Sched.RM]>;
|
||||
}
|
||||
def : Pat<(_.VT (mload addr:$ptr, _.KRCWM:$mask, undef)),
|
||||
@ -3681,6 +3672,20 @@ let Predicates = [HasBWI, NoVLX] in {
|
||||
}
|
||||
|
||||
let Predicates = [HasAVX512] in {
|
||||
// 512-bit load.
|
||||
def : Pat<(alignedloadv16i32 addr:$src),
|
||||
(VMOVDQA64Zrm addr:$src)>;
|
||||
def : Pat<(alignedloadv32i16 addr:$src),
|
||||
(VMOVDQA64Zrm addr:$src)>;
|
||||
def : Pat<(alignedloadv64i8 addr:$src),
|
||||
(VMOVDQA64Zrm addr:$src)>;
|
||||
def : Pat<(loadv16i32 addr:$src),
|
||||
(VMOVDQU64Zrm addr:$src)>;
|
||||
def : Pat<(loadv32i16 addr:$src),
|
||||
(VMOVDQU64Zrm addr:$src)>;
|
||||
def : Pat<(loadv64i8 addr:$src),
|
||||
(VMOVDQU64Zrm addr:$src)>;
|
||||
|
||||
// 512-bit store.
|
||||
def : Pat<(alignedstore (v16i32 VR512:$src), addr:$dst),
|
||||
(VMOVDQA64Zmr addr:$dst, VR512:$src)>;
|
||||
@ -3697,6 +3702,20 @@ let Predicates = [HasAVX512] in {
|
||||
}
|
||||
|
||||
let Predicates = [HasVLX] in {
|
||||
// 128-bit load.
|
||||
def : Pat<(alignedloadv4i32 addr:$src),
|
||||
(VMOVDQA64Z128rm addr:$src)>;
|
||||
def : Pat<(alignedloadv8i16 addr:$src),
|
||||
(VMOVDQA64Z128rm addr:$src)>;
|
||||
def : Pat<(alignedloadv16i8 addr:$src),
|
||||
(VMOVDQA64Z128rm addr:$src)>;
|
||||
def : Pat<(loadv4i32 addr:$src),
|
||||
(VMOVDQU64Z128rm addr:$src)>;
|
||||
def : Pat<(loadv8i16 addr:$src),
|
||||
(VMOVDQU64Z128rm addr:$src)>;
|
||||
def : Pat<(loadv16i8 addr:$src),
|
||||
(VMOVDQU64Z128rm addr:$src)>;
|
||||
|
||||
// 128-bit store.
|
||||
def : Pat<(alignedstore (v4i32 VR128X:$src), addr:$dst),
|
||||
(VMOVDQA64Z128mr addr:$dst, VR128X:$src)>;
|
||||
@ -3711,6 +3730,20 @@ let Predicates = [HasVLX] in {
|
||||
def : Pat<(store (v16i8 VR128X:$src), addr:$dst),
|
||||
(VMOVDQU64Z128mr addr:$dst, VR128X:$src)>;
|
||||
|
||||
// 256-bit load.
|
||||
def : Pat<(alignedloadv8i32 addr:$src),
|
||||
(VMOVDQA64Z256rm addr:$src)>;
|
||||
def : Pat<(alignedloadv16i16 addr:$src),
|
||||
(VMOVDQA64Z256rm addr:$src)>;
|
||||
def : Pat<(alignedloadv32i8 addr:$src),
|
||||
(VMOVDQA64Z256rm addr:$src)>;
|
||||
def : Pat<(loadv8i32 addr:$src),
|
||||
(VMOVDQU64Z256rm addr:$src)>;
|
||||
def : Pat<(loadv16i16 addr:$src),
|
||||
(VMOVDQU64Z256rm addr:$src)>;
|
||||
def : Pat<(loadv32i8 addr:$src),
|
||||
(VMOVDQU64Z256rm addr:$src)>;
|
||||
|
||||
// 256-bit store.
|
||||
def : Pat<(alignedstore (v8i32 VR256X:$src), addr:$dst),
|
||||
(VMOVDQA64Z256mr addr:$dst, VR256X:$src)>;
|
||||
@ -4495,7 +4528,7 @@ let Predicates = [HasAVX512] in {
|
||||
(VMOVDI2PDIZrm addr:$src)>;
|
||||
def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
|
||||
(VMOVDI2PDIZrm addr:$src)>;
|
||||
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
|
||||
(VMOVDI2PDIZrm addr:$src)>;
|
||||
def : Pat<(v4i32 (X86vzload addr:$src)),
|
||||
(VMOVDI2PDIZrm addr:$src)>;
|
||||
@ -4591,6 +4624,12 @@ let Predicates = [HasAVX512], AddedComplexity = 400 in {
|
||||
(VMOVNTDQAZrm addr:$src)>;
|
||||
def : Pat<(v8i64 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZrm addr:$src)>;
|
||||
def : Pat<(v16i32 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZrm addr:$src)>;
|
||||
def : Pat<(v32i16 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZrm addr:$src)>;
|
||||
def : Pat<(v64i8 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZrm addr:$src)>;
|
||||
}
|
||||
|
||||
let Predicates = [HasVLX], AddedComplexity = 400 in {
|
||||
@ -4607,6 +4646,12 @@ let Predicates = [HasVLX], AddedComplexity = 400 in {
|
||||
(VMOVNTDQAZ256rm addr:$src)>;
|
||||
def : Pat<(v4i64 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZ256rm addr:$src)>;
|
||||
def : Pat<(v8i32 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZ256rm addr:$src)>;
|
||||
def : Pat<(v16i16 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZ256rm addr:$src)>;
|
||||
def : Pat<(v32i8 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZ256rm addr:$src)>;
|
||||
|
||||
def : Pat<(alignednontemporalstore (v4i32 VR128X:$src), addr:$dst),
|
||||
(VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
|
||||
@ -4621,6 +4666,12 @@ let Predicates = [HasVLX], AddedComplexity = 400 in {
|
||||
(VMOVNTDQAZ128rm addr:$src)>;
|
||||
def : Pat<(v2i64 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZ128rm addr:$src)>;
|
||||
def : Pat<(v4i32 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZ128rm addr:$src)>;
|
||||
def : Pat<(v8i16 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZ128rm addr:$src)>;
|
||||
def : Pat<(v16i8 (alignednontemporalload addr:$src)),
|
||||
(VMOVNTDQAZ128rm addr:$src)>;
|
||||
}
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
@ -4639,8 +4690,7 @@ multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
||||
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
||||
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(_.VT (OpNode _.RC:$src1,
|
||||
(bitconvert (_.LdFrag addr:$src2))))>,
|
||||
(_.VT (OpNode _.RC:$src1, (_.LdFrag addr:$src2)))>,
|
||||
AVX512BIBase, EVEX_4V,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
@ -4771,7 +4821,7 @@ multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr,
|
||||
(ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
|
||||
(bitconvert (_Src.LdFrag addr:$src2))))>,
|
||||
(_Src.LdFrag addr:$src2)))>,
|
||||
AVX512BIBase, EVEX_4V,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
|
||||
@ -4876,7 +4926,7 @@ multiclass avx512_packs_rm<bits<8> opc, string OpcodeStr,
|
||||
(ins _Src.RC:$src1, _Src.MemOp:$src2), OpcodeStr,
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(_Dst.VT (OpNode (_Src.VT _Src.RC:$src1),
|
||||
(bitconvert (_Src.LdFrag addr:$src2))))>,
|
||||
(_Src.LdFrag addr:$src2)))>,
|
||||
EVEX_4V, EVEX_CD8<_Src.EltSize, CD8VF>,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
@ -5068,7 +5118,7 @@ multiclass avx512_logic_rm<bits<8> opc, string OpcodeStr,
|
||||
(_.i64VT (OpNode (bitconvert (_.VT _.RC:$src1)),
|
||||
(bitconvert (_.LdFrag addr:$src2)))),
|
||||
(_.VT (bitconvert (_.i64VT (OpNodeMsk _.RC:$src1,
|
||||
(bitconvert (_.LdFrag addr:$src2))))))>,
|
||||
(_.i64LdFrag addr:$src2)))))>,
|
||||
AVX512BIBase, EVEX_4V,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
@ -5729,7 +5779,7 @@ multiclass avx512_vptest<bits<8> opc, string OpcodeStr, PatFrag OpNode,
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(OpNode (bitconvert
|
||||
(_.i64VT (and _.RC:$src1,
|
||||
(bitconvert (_.LdFrag addr:$src2))))),
|
||||
(_.i64LdFrag addr:$src2)))),
|
||||
_.ImmAllZerosV)>,
|
||||
EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
@ -5893,7 +5943,7 @@ multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
|
||||
defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
|
||||
(ins _.MemOp:$src1, u8imm:$src2), OpcodeStr,
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(_.VT (OpNode (_.VT (bitconvert (_.LdFrag addr:$src1))),
|
||||
(_.VT (OpNode (_.VT (_.LdFrag addr:$src1)),
|
||||
(i8 imm:$src2)))>,
|
||||
Sched<[sched.Folded]>;
|
||||
}
|
||||
@ -5923,8 +5973,7 @@ multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
||||
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
||||
(ins _.RC:$src1, i128mem:$src2), OpcodeStr,
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(_.VT (OpNode _.RC:$src1,
|
||||
(SrcVT (bitconvert (loadv2i64 addr:$src2)))))>,
|
||||
(_.VT (OpNode _.RC:$src1, (SrcVT (load addr:$src2))))>,
|
||||
AVX512BIBase,
|
||||
EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
@ -6078,7 +6127,7 @@ multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
||||
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr,
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(_.VT (OpNode _.RC:$src1,
|
||||
(_.VT (bitconvert (_.LdFrag addr:$src2)))))>,
|
||||
(_.VT (_.LdFrag addr:$src2))))>,
|
||||
AVX5128IBase, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
@ -6178,7 +6227,7 @@ multiclass avx512_var_shift_int_lowering<string InstrStr, X86VectorVTInfo _,
|
||||
def : Pat<(_.VT (X86vsrav _.RC:$src1, _.RC:$src2)),
|
||||
(!cast<Instruction>(InstrStr#_.ZSuffix#rr) _.RC:$src1,
|
||||
_.RC:$src2)>;
|
||||
def : Pat<(_.VT (X86vsrav _.RC:$src1, (bitconvert (_.LdFrag addr:$src2)))),
|
||||
def : Pat<(_.VT (X86vsrav _.RC:$src1, (_.LdFrag addr:$src2))),
|
||||
(!cast<Instruction>(InstrStr#_.ZSuffix##rm)
|
||||
_.RC:$src1, addr:$src2)>;
|
||||
def : Pat<(_.VT (vselect _.KRCWM:$mask,
|
||||
@ -6186,7 +6235,7 @@ multiclass avx512_var_shift_int_lowering<string InstrStr, X86VectorVTInfo _,
|
||||
(!cast<Instruction>(InstrStr#_.ZSuffix#rrk) _.RC:$src0,
|
||||
_.KRC:$mask, _.RC:$src1, _.RC:$src2)>;
|
||||
def : Pat<(_.VT (vselect _.KRCWM:$mask,
|
||||
(X86vsrav _.RC:$src1, (bitconvert (_.LdFrag addr:$src2))),
|
||||
(X86vsrav _.RC:$src1, (_.LdFrag addr:$src2)),
|
||||
_.RC:$src0)),
|
||||
(!cast<Instruction>(InstrStr#_.ZSuffix##rmk) _.RC:$src0,
|
||||
_.KRC:$mask, _.RC:$src1, addr:$src2)>;
|
||||
@ -6195,7 +6244,7 @@ multiclass avx512_var_shift_int_lowering<string InstrStr, X86VectorVTInfo _,
|
||||
(!cast<Instruction>(InstrStr#_.ZSuffix#rrkz) _.KRC:$mask,
|
||||
_.RC:$src1, _.RC:$src2)>;
|
||||
def : Pat<(_.VT (vselect _.KRCWM:$mask,
|
||||
(X86vsrav _.RC:$src1, (bitconvert (_.LdFrag addr:$src2))),
|
||||
(X86vsrav _.RC:$src1, (_.LdFrag addr:$src2)),
|
||||
_.ImmAllZerosV)),
|
||||
(!cast<Instruction>(InstrStr#_.ZSuffix##rmkz) _.KRC:$mask,
|
||||
_.RC:$src1, addr:$src2)>;
|
||||
@ -6420,7 +6469,7 @@ multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(_.VT (OpNode
|
||||
_.RC:$src1,
|
||||
(Ctrl.VT (bitconvert(Ctrl.LdFrag addr:$src2)))))>,
|
||||
(Ctrl.VT (Ctrl.LdFrag addr:$src2))))>,
|
||||
T8PD, EVEX_4V, EVEX_CD8<_.EltSize, CD8VF>,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
defm rmb: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
|
||||
@ -7706,7 +7755,7 @@ multiclass avx512_vcvt_fp<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
|
||||
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
||||
(ins MemOp:$src), OpcodeStr#Alias, "$src", "$src",
|
||||
(_.VT (OpNode (_Src.VT
|
||||
(bitconvert (_Src.LdFrag addr:$src)))))>,
|
||||
(_Src.LdFrag addr:$src))))>,
|
||||
EVEX, Sched<[sched.Folded]>;
|
||||
|
||||
defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
||||
@ -8413,8 +8462,7 @@ multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src,
|
||||
defm rm : AVX512_maskable<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst),
|
||||
(ins x86memop:$src), "vcvtph2ps", "$src", "$src",
|
||||
(X86cvtph2ps (_src.VT
|
||||
(bitconvert
|
||||
(ld_frag addr:$src))))>,
|
||||
(ld_frag addr:$src)))>,
|
||||
T8PD, Sched<[sched.Folded]>;
|
||||
}
|
||||
|
||||
@ -8429,17 +8477,17 @@ multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src,
|
||||
}
|
||||
|
||||
let Predicates = [HasAVX512] in
|
||||
defm VCVTPH2PSZ : avx512_cvtph2ps<v16f32_info, v16i16x_info, f256mem, loadv4i64,
|
||||
defm VCVTPH2PSZ : avx512_cvtph2ps<v16f32_info, v16i16x_info, f256mem, load,
|
||||
WriteCvtPH2PSZ>,
|
||||
avx512_cvtph2ps_sae<v16f32_info, v16i16x_info, WriteCvtPH2PSZ>,
|
||||
EVEX, EVEX_V512, EVEX_CD8<32, CD8VH>;
|
||||
|
||||
let Predicates = [HasVLX] in {
|
||||
defm VCVTPH2PSZ256 : avx512_cvtph2ps<v8f32x_info, v8i16x_info, f128mem,
|
||||
loadv2i64, WriteCvtPH2PSY>, EVEX, EVEX_V256,
|
||||
load, WriteCvtPH2PSY>, EVEX, EVEX_V256,
|
||||
EVEX_CD8<32, CD8VH>;
|
||||
defm VCVTPH2PSZ128 : avx512_cvtph2ps<v4f32x_info, v8i16x_info, f64mem,
|
||||
loadv2i64, WriteCvtPH2PS>, EVEX, EVEX_V128,
|
||||
load, WriteCvtPH2PS>, EVEX, EVEX_V128,
|
||||
EVEX_CD8<32, CD8VH>;
|
||||
|
||||
// Pattern match vcvtph2ps of a scalar i64 load.
|
||||
@ -9383,7 +9431,7 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
(!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
|
||||
def : Pat<(v8i16 (InVecOp (v16i8 (vzload_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
|
||||
def : Pat<(v8i16 (InVecOp (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v8i16 (InVecOp (loadv16i8 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#BWZ128rm) addr:$src)>;
|
||||
}
|
||||
let Predicates = [HasVLX] in {
|
||||
@ -9393,7 +9441,7 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
(!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
|
||||
def : Pat<(v4i32 (InVecOp (v16i8 (vzload_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
|
||||
def : Pat<(v4i32 (InVecOp (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v4i32 (InVecOp (loadv16i8 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#BDZ128rm) addr:$src)>;
|
||||
|
||||
def : Pat<(v2i64 (InVecOp (bc_v16i8 (v4i32 (scalar_to_vector (extloadi32i16 addr:$src)))))),
|
||||
@ -9402,7 +9450,7 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
(!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
|
||||
def : Pat<(v2i64 (InVecOp (v16i8 (vzload_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
|
||||
def : Pat<(v2i64 (InVecOp (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v2i64 (InVecOp (loadv16i8 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#BQZ128rm) addr:$src)>;
|
||||
|
||||
def : Pat<(v4i32 (InVecOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
|
||||
@ -9413,7 +9461,7 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
(!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
|
||||
def : Pat<(v4i32 (InVecOp (v8i16 (vzload_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
|
||||
def : Pat<(v4i32 (InVecOp (bc_v8i16 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v4i32 (InVecOp (loadv8i16 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#WDZ128rm) addr:$src)>;
|
||||
|
||||
def : Pat<(v2i64 (InVecOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
|
||||
@ -9422,7 +9470,7 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
(!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
|
||||
def : Pat<(v2i64 (InVecOp (v8i16 (vzload_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
|
||||
def : Pat<(v2i64 (InVecOp (bc_v8i16 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v2i64 (InVecOp (loadv8i16 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#WQZ128rm) addr:$src)>;
|
||||
|
||||
def : Pat<(v2i64 (InVecOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
|
||||
@ -9433,12 +9481,12 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
(!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
|
||||
def : Pat<(v2i64 (InVecOp (v4i32 (vzload_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
|
||||
def : Pat<(v2i64 (InVecOp (bc_v4i32 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v2i64 (InVecOp (loadv4i32 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#DQZ128rm) addr:$src)>;
|
||||
}
|
||||
// 256-bit patterns
|
||||
let Predicates = [HasVLX, HasBWI] in {
|
||||
def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v16i16 (ExtOp (loadv16i8 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#BWZ256rm) addr:$src)>;
|
||||
def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#BWZ256rm) addr:$src)>;
|
||||
@ -9452,7 +9500,7 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
(!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
|
||||
def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
|
||||
def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v8i32 (ExtOp (loadv16i8 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#BDZ256rm) addr:$src)>;
|
||||
|
||||
def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
|
||||
@ -9461,10 +9509,10 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
(!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
|
||||
def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
|
||||
def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v4i64 (ExtOp (loadv16i8 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#BQZ256rm) addr:$src)>;
|
||||
|
||||
def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v8i32 (ExtOp (loadv8i16 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#WDZ256rm) addr:$src)>;
|
||||
def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#WDZ256rm) addr:$src)>;
|
||||
@ -9477,10 +9525,10 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
(!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
|
||||
def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
|
||||
def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v4i64 (ExtOp (loadv8i16 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#WQZ256rm) addr:$src)>;
|
||||
|
||||
def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v4i64 (ExtOp (loadv4i32 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#DQZ256rm) addr:$src)>;
|
||||
def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
|
||||
(!cast<I>(OpcPrefix#DQZ256rm) addr:$src)>;
|
||||
@ -9489,25 +9537,25 @@ multiclass AVX512_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
|
||||
}
|
||||
// 512-bit patterns
|
||||
let Predicates = [HasBWI] in {
|
||||
def : Pat<(v32i16 (ExtOp (bc_v32i8 (loadv4i64 addr:$src)))),
|
||||
def : Pat<(v32i16 (ExtOp (loadv32i8 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#BWZrm) addr:$src)>;
|
||||
}
|
||||
let Predicates = [HasAVX512] in {
|
||||
def : Pat<(v16i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v16i32 (ExtOp (loadv16i8 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#BDZrm) addr:$src)>;
|
||||
|
||||
def : Pat<(v8i64 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
|
||||
(!cast<I>(OpcPrefix#BQZrm) addr:$src)>;
|
||||
def : Pat<(v8i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v8i64 (ExtOp (loadv16i8 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#BQZrm) addr:$src)>;
|
||||
|
||||
def : Pat<(v16i32 (ExtOp (bc_v16i16 (loadv4i64 addr:$src)))),
|
||||
def : Pat<(v16i32 (ExtOp (loadv16i16 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#WDZrm) addr:$src)>;
|
||||
|
||||
def : Pat<(v8i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
|
||||
def : Pat<(v8i64 (ExtOp (loadv8i16 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#WQZrm) addr:$src)>;
|
||||
|
||||
def : Pat<(v8i64 (ExtOp (bc_v8i32 (loadv4i64 addr:$src)))),
|
||||
def : Pat<(v8i64 (ExtOp (loadv8i32 addr:$src))),
|
||||
(!cast<I>(OpcPrefix#DQZrm) addr:$src)>;
|
||||
}
|
||||
}
|
||||
@ -10412,7 +10460,7 @@ multiclass avx512_shuff_packed_128_common<bits<8> opc, string OpcodeStr,
|
||||
(_.VT
|
||||
(bitconvert
|
||||
(CastInfo.VT (X86Shuf128 _.RC:$src1,
|
||||
(bitconvert (_.LdFrag addr:$src2)),
|
||||
(CastInfo.LdFrag addr:$src2),
|
||||
(i8 imm:$src3)))))>,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>,
|
||||
EVEX2VEXOverride<EVEX2VEXOvrd#"rm">;
|
||||
@ -10578,7 +10626,7 @@ multiclass avx512_vpalign_mask_lowering<string OpcodeStr, SDNode OpNode,
|
||||
def : Pat<(To.VT (vselect To.KRCWM:$mask,
|
||||
(bitconvert
|
||||
(From.VT (OpNode From.RC:$src1,
|
||||
(bitconvert (To.LdFrag addr:$src2)),
|
||||
(From.LdFrag addr:$src2),
|
||||
imm:$src3))),
|
||||
To.RC:$src0)),
|
||||
(!cast<Instruction>(OpcodeStr#"rmik") To.RC:$src0, To.KRCWM:$mask,
|
||||
@ -10588,7 +10636,7 @@ multiclass avx512_vpalign_mask_lowering<string OpcodeStr, SDNode OpNode,
|
||||
def : Pat<(To.VT (vselect To.KRCWM:$mask,
|
||||
(bitconvert
|
||||
(From.VT (OpNode From.RC:$src1,
|
||||
(bitconvert (To.LdFrag addr:$src2)),
|
||||
(From.LdFrag addr:$src2),
|
||||
imm:$src3))),
|
||||
To.ImmAllZerosV)),
|
||||
(!cast<Instruction>(OpcodeStr#"rmikz") To.KRCWM:$mask,
|
||||
@ -11732,7 +11780,7 @@ multiclass VBMI2_shift_var_rm<bits<8> Op, string OpStr, SDNode OpNode,
|
||||
(ins VTI.RC:$src2, VTI.MemOp:$src3), OpStr,
|
||||
"$src3, $src2", "$src2, $src3",
|
||||
(VTI.VT (OpNode VTI.RC:$src1, VTI.RC:$src2,
|
||||
(VTI.VT (bitconvert (VTI.LdFrag addr:$src3)))))>,
|
||||
(VTI.VT (VTI.LdFrag addr:$src3))))>,
|
||||
AVX512FMA3Base,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
@ -11835,8 +11883,7 @@ multiclass VNNI_rmb<bits<8> Op, string OpStr, SDNode OpNode,
|
||||
(ins VTI.RC:$src2, VTI.MemOp:$src3), OpStr,
|
||||
"$src3, $src2", "$src2, $src3",
|
||||
(VTI.VT (OpNode VTI.RC:$src1, VTI.RC:$src2,
|
||||
(VTI.VT (bitconvert
|
||||
(VTI.LdFrag addr:$src3)))))>,
|
||||
(VTI.VT (VTI.LdFrag addr:$src3))))>,
|
||||
EVEX_4V, EVEX_CD8<32, CD8VF>, T8PD,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
defm mb : AVX512_maskable_3src<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst),
|
||||
@ -11892,7 +11939,7 @@ multiclass VPSHUFBITQMB_rm<X86FoldableSchedWrite sched, X86VectorVTInfo VTI> {
|
||||
"vpshufbitqmb",
|
||||
"$src2, $src1", "$src1, $src2",
|
||||
(X86Vpshufbitqmb (VTI.VT VTI.RC:$src1),
|
||||
(VTI.VT (bitconvert (VTI.LdFrag addr:$src2))))>,
|
||||
(VTI.VT (VTI.LdFrag addr:$src2)))>,
|
||||
EVEX_4V, EVEX_CD8<8, CD8VF>, T8PD,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
|
@ -648,21 +648,28 @@ def sdmem : Operand<v2f64> {
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
// 128-bit load pattern fragments
|
||||
// NOTE: all 128-bit integer vector loads are promoted to v2i64
|
||||
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
|
||||
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
|
||||
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
|
||||
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
|
||||
def loadv8i16 : PatFrag<(ops node:$ptr), (v8i16 (load node:$ptr))>;
|
||||
def loadv16i8 : PatFrag<(ops node:$ptr), (v16i8 (load node:$ptr))>;
|
||||
|
||||
// 256-bit load pattern fragments
|
||||
// NOTE: all 256-bit integer vector loads are promoted to v4i64
|
||||
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
|
||||
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
|
||||
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
|
||||
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
|
||||
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
|
||||
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
|
||||
def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
|
||||
def loadv16i16 : PatFrag<(ops node:$ptr), (v16i16 (load node:$ptr))>;
|
||||
def loadv32i8 : PatFrag<(ops node:$ptr), (v32i8 (load node:$ptr))>;
|
||||
|
||||
// 512-bit load pattern fragments
|
||||
def loadv16f32 : PatFrag<(ops node:$ptr), (v16f32 (load node:$ptr))>;
|
||||
def loadv8f64 : PatFrag<(ops node:$ptr), (v8f64 (load node:$ptr))>;
|
||||
def loadv8i64 : PatFrag<(ops node:$ptr), (v8i64 (load node:$ptr))>;
|
||||
def loadv8f64 : PatFrag<(ops node:$ptr), (v8f64 (load node:$ptr))>;
|
||||
def loadv8i64 : PatFrag<(ops node:$ptr), (v8i64 (load node:$ptr))>;
|
||||
def loadv16i32 : PatFrag<(ops node:$ptr), (v16i32 (load node:$ptr))>;
|
||||
def loadv32i16 : PatFrag<(ops node:$ptr), (v32i16 (load node:$ptr))>;
|
||||
def loadv64i8 : PatFrag<(ops node:$ptr), (v64i8 (load node:$ptr))>;
|
||||
|
||||
// 128-/256-/512-bit extload pattern fragments
|
||||
def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
|
||||
@ -690,15 +697,27 @@ def alignedloadv2f64 : PatFrag<(ops node:$ptr),
|
||||
(v2f64 (alignedload node:$ptr))>;
|
||||
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
|
||||
(v2i64 (alignedload node:$ptr))>;
|
||||
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
|
||||
(v4i32 (alignedload node:$ptr))>;
|
||||
def alignedloadv8i16 : PatFrag<(ops node:$ptr),
|
||||
(v8i16 (alignedload node:$ptr))>;
|
||||
def alignedloadv16i8 : PatFrag<(ops node:$ptr),
|
||||
(v16i8 (alignedload node:$ptr))>;
|
||||
|
||||
// 256-bit aligned load pattern fragments
|
||||
// NOTE: all 256-bit integer vector loads are promoted to v4i64
|
||||
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
|
||||
(v8f32 (alignedload node:$ptr))>;
|
||||
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
|
||||
(v4f64 (alignedload node:$ptr))>;
|
||||
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
|
||||
(v4i64 (alignedload node:$ptr))>;
|
||||
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
|
||||
(v8f32 (alignedload node:$ptr))>;
|
||||
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
|
||||
(v4f64 (alignedload node:$ptr))>;
|
||||
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
|
||||
(v4i64 (alignedload node:$ptr))>;
|
||||
def alignedloadv8i32 : PatFrag<(ops node:$ptr),
|
||||
(v8i32 (alignedload node:$ptr))>;
|
||||
def alignedloadv16i16 : PatFrag<(ops node:$ptr),
|
||||
(v16i16 (alignedload node:$ptr))>;
|
||||
def alignedloadv32i8 : PatFrag<(ops node:$ptr),
|
||||
(v32i8 (alignedload node:$ptr))>;
|
||||
|
||||
// 512-bit aligned load pattern fragments
|
||||
def alignedloadv16f32 : PatFrag<(ops node:$ptr),
|
||||
@ -707,6 +726,12 @@ def alignedloadv8f64 : PatFrag<(ops node:$ptr),
|
||||
(v8f64 (alignedload node:$ptr))>;
|
||||
def alignedloadv8i64 : PatFrag<(ops node:$ptr),
|
||||
(v8i64 (alignedload node:$ptr))>;
|
||||
def alignedloadv16i32 : PatFrag<(ops node:$ptr),
|
||||
(v16i32 (alignedload node:$ptr))>;
|
||||
def alignedloadv32i16 : PatFrag<(ops node:$ptr),
|
||||
(v32i16 (alignedload node:$ptr))>;
|
||||
def alignedloadv64i8 : PatFrag<(ops node:$ptr),
|
||||
(v64i8 (alignedload node:$ptr))>;
|
||||
|
||||
// Like 'load', but uses special alignment checks suitable for use in
|
||||
// memory operands in most SSE instructions, which are required to
|
||||
@ -725,6 +750,9 @@ def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
|
||||
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
|
||||
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
|
||||
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
|
||||
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
|
||||
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop node:$ptr))>;
|
||||
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
|
||||
|
||||
def X86masked_gather : SDNode<"X86ISD::MGATHER",
|
||||
SDTypeProfile<2, 3, [SDTCisVec<0>,
|
||||
|
File diff suppressed because it is too large
@ -11,32 +11,32 @@
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
multiclass xop2op<bits<8> opc, string OpcodeStr, Intrinsic Int, PatFrag memop> {
|
||||
multiclass xop2op<bits<8> opc, string OpcodeStr, Intrinsic Int> {
|
||||
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
|
||||
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
||||
[(set VR128:$dst, (Int VR128:$src))]>, XOP, Sched<[SchedWritePHAdd.XMM]>;
|
||||
def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
|
||||
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
||||
[(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP,
|
||||
[(set VR128:$dst, (Int (load addr:$src)))]>, XOP,
|
||||
Sched<[SchedWritePHAdd.XMM.Folded, SchedWritePHAdd.XMM.ReadAfterFold]>;
|
||||
}
|
||||
|
||||
let ExeDomain = SSEPackedInt in {
|
||||
defm VPHSUBWD : xop2op<0xE2, "vphsubwd", int_x86_xop_vphsubwd, loadv2i64>;
|
||||
defm VPHSUBDQ : xop2op<0xE3, "vphsubdq", int_x86_xop_vphsubdq, loadv2i64>;
|
||||
defm VPHSUBBW : xop2op<0xE1, "vphsubbw", int_x86_xop_vphsubbw, loadv2i64>;
|
||||
defm VPHADDWQ : xop2op<0xC7, "vphaddwq", int_x86_xop_vphaddwq, loadv2i64>;
|
||||
defm VPHADDWD : xop2op<0xC6, "vphaddwd", int_x86_xop_vphaddwd, loadv2i64>;
|
||||
defm VPHADDUWQ : xop2op<0xD7, "vphadduwq", int_x86_xop_vphadduwq, loadv2i64>;
|
||||
defm VPHADDUWD : xop2op<0xD6, "vphadduwd", int_x86_xop_vphadduwd, loadv2i64>;
|
||||
defm VPHADDUDQ : xop2op<0xDB, "vphaddudq", int_x86_xop_vphaddudq, loadv2i64>;
|
||||
defm VPHADDUBW : xop2op<0xD1, "vphaddubw", int_x86_xop_vphaddubw, loadv2i64>;
|
||||
defm VPHADDUBQ : xop2op<0xD3, "vphaddubq", int_x86_xop_vphaddubq, loadv2i64>;
|
||||
defm VPHADDUBD : xop2op<0xD2, "vphaddubd", int_x86_xop_vphaddubd, loadv2i64>;
|
||||
defm VPHADDDQ : xop2op<0xCB, "vphadddq", int_x86_xop_vphadddq, loadv2i64>;
|
||||
defm VPHADDBW : xop2op<0xC1, "vphaddbw", int_x86_xop_vphaddbw, loadv2i64>;
|
||||
defm VPHADDBQ : xop2op<0xC3, "vphaddbq", int_x86_xop_vphaddbq, loadv2i64>;
|
||||
defm VPHADDBD : xop2op<0xC2, "vphaddbd", int_x86_xop_vphaddbd, loadv2i64>;
|
||||
defm VPHSUBWD : xop2op<0xE2, "vphsubwd", int_x86_xop_vphsubwd>;
|
||||
defm VPHSUBDQ : xop2op<0xE3, "vphsubdq", int_x86_xop_vphsubdq>;
|
||||
defm VPHSUBBW : xop2op<0xE1, "vphsubbw", int_x86_xop_vphsubbw>;
|
||||
defm VPHADDWQ : xop2op<0xC7, "vphaddwq", int_x86_xop_vphaddwq>;
|
||||
defm VPHADDWD : xop2op<0xC6, "vphaddwd", int_x86_xop_vphaddwd>;
|
||||
defm VPHADDUWQ : xop2op<0xD7, "vphadduwq", int_x86_xop_vphadduwq>;
|
||||
defm VPHADDUWD : xop2op<0xD6, "vphadduwd", int_x86_xop_vphadduwd>;
|
||||
defm VPHADDUDQ : xop2op<0xDB, "vphaddudq", int_x86_xop_vphaddudq>;
|
||||
defm VPHADDUBW : xop2op<0xD1, "vphaddubw", int_x86_xop_vphaddubw>;
|
||||
defm VPHADDUBQ : xop2op<0xD3, "vphaddubq", int_x86_xop_vphaddubq>;
|
||||
defm VPHADDUBD : xop2op<0xD2, "vphaddubd", int_x86_xop_vphaddubd>;
|
||||
defm VPHADDDQ : xop2op<0xCB, "vphadddq", int_x86_xop_vphadddq>;
|
||||
defm VPHADDBW : xop2op<0xC1, "vphaddbw", int_x86_xop_vphaddbw>;
|
||||
defm VPHADDBQ : xop2op<0xC3, "vphaddbq", int_x86_xop_vphaddbq>;
|
||||
defm VPHADDBD : xop2op<0xC2, "vphaddbd", int_x86_xop_vphaddbd>;
|
||||
}
|
||||
|
||||
// Scalar load 2 addr operand instructions
|
||||
@ -48,47 +48,47 @@ multiclass xop2opsld<bits<8> opc, string OpcodeStr, Intrinsic Int,
|
||||
[(set VR128:$dst, (Int VR128:$src))]>, XOP, Sched<[sched]>;
|
||||
def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins memop:$src),
|
||||
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
||||
[(set VR128:$dst, (Int (bitconvert mem_cpat:$src)))]>, XOP,
|
||||
[(set VR128:$dst, (Int mem_cpat:$src))]>, XOP,
|
||||
Sched<[sched.Folded, sched.ReadAfterFold]>;
|
||||
}
|
||||
|
||||
multiclass xop2op128<bits<8> opc, string OpcodeStr, Intrinsic Int,
|
||||
PatFrag memop, X86FoldableSchedWrite sched> {
|
||||
X86FoldableSchedWrite sched> {
|
||||
def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (Int VR128:$src))]>, XOP, Sched<[sched]>;
def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP,
[(set VR128:$dst, (Int (load addr:$src)))]>, XOP,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}

multiclass xop2op256<bits<8> opc, string OpcodeStr, Intrinsic Int,
PatFrag memop, X86FoldableSchedWrite sched> {
X86FoldableSchedWrite sched> {
def Yrr : IXOP<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (Int VR256:$src))]>, XOP, VEX_L, Sched<[sched]>;
def Yrm : IXOP<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP, VEX_L,
[(set VR256:$dst, (Int (load addr:$src)))]>, XOP, VEX_L,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}

let ExeDomain = SSEPackedSingle in {
defm VFRCZSS : xop2opsld<0x82, "vfrczss", int_x86_xop_vfrcz_ss,
ssmem, sse_load_f32, SchedWriteFRnd.Scl>;
defm VFRCZPS : xop2op128<0x80, "vfrczps", int_x86_xop_vfrcz_ps, loadv4f32,
defm VFRCZPS : xop2op128<0x80, "vfrczps", int_x86_xop_vfrcz_ps,
SchedWriteFRnd.XMM>;
defm VFRCZPS : xop2op256<0x80, "vfrczps", int_x86_xop_vfrcz_ps_256, loadv8f32,
defm VFRCZPS : xop2op256<0x80, "vfrczps", int_x86_xop_vfrcz_ps_256,
SchedWriteFRnd.YMM>;
}

let ExeDomain = SSEPackedDouble in {
defm VFRCZSD : xop2opsld<0x83, "vfrczsd", int_x86_xop_vfrcz_sd,
sdmem, sse_load_f64, SchedWriteFRnd.Scl>;
defm VFRCZPD : xop2op128<0x81, "vfrczpd", int_x86_xop_vfrcz_pd, loadv2f64,
defm VFRCZPD : xop2op128<0x81, "vfrczpd", int_x86_xop_vfrcz_pd,
SchedWriteFRnd.XMM>;
defm VFRCZPD : xop2op256<0x81, "vfrczpd", int_x86_xop_vfrcz_pd_256, loadv4f64,
defm VFRCZPD : xop2op256<0x81, "vfrczpd", int_x86_xop_vfrcz_pd_256,
SchedWriteFRnd.YMM>;
}

@ -105,13 +105,13 @@ multiclass xop3op<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode (vt128 VR128:$src1),
(vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
(vt128 (load addr:$src2)))))]>,
XOP_4V, VEX_W, Sched<[sched.Folded, sched.ReadAfterFold]>;
def mr : IXOP<opc, MRMSrcMem4VOp3, (outs VR128:$dst),
(ins i128mem:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode (vt128 (bitconvert (loadv2i64 addr:$src1))),
(vt128 (OpNode (vt128 (load addr:$src1)),
(vt128 VR128:$src2))))]>,
XOP, Sched<[sched.Folded, sched.ReadAfterFold]>;
// For disassembler
@ -150,7 +150,7 @@ multiclass xop3opimm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins i128mem:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode (vt128 (bitconvert (loadv2i64 addr:$src1))), imm:$src2)))]>,
(vt128 (OpNode (vt128 (load addr:$src1)), imm:$src2)))]>,
XOP, Sched<[sched.Folded, sched.ReadAfterFold]>;
}

@ -181,7 +181,7 @@ multiclass xop4opm2<bits<8> opc, string OpcodeStr, Intrinsic Int,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(Int VR128:$src1, (bitconvert (loadv2i64 addr:$src2)),
(Int VR128:$src1, (load addr:$src2),
VR128:$src3))]>, XOP_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
}

@ -260,7 +260,7 @@ multiclass xopvpcom<bits<8> opc, string Suffix, SDNode OpNode, ValueType vt128,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode (vt128 VR128:$src1),
(vt128 (bitconvert (loadv2i64 addr:$src2))),
(vt128 (load addr:$src2)),
imm:$cc)))]>,
XOP_4V, Sched<[sched.Folded, sched.ReadAfterFold]>;
let isAsmParserOnly = 1, hasSideEffects = 0 in {
@ -279,7 +279,7 @@ multiclass xopvpcom<bits<8> opc, string Suffix, SDNode OpNode, ValueType vt128,
}
}

def : Pat<(OpNode (bitconvert (loadv2i64 addr:$src2)),
def : Pat<(OpNode (load addr:$src2),
(vt128 VR128:$src1), imm:$cc),
(!cast<Instruction>(NAME#"mi") VR128:$src1, addr:$src2,
(CommuteVPCOMCC imm:$cc))>;
@ -310,14 +310,14 @@ multiclass xop4op<bits<8> opc, string OpcodeStr, SDNode OpNode,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2),
(vt128 (bitconvert (loadv2i64 addr:$src3))))))]>,
(vt128 (load addr:$src3)))))]>,
XOP_4V, VEX_W, Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
def rmr : IXOPi8Reg<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(v16i8 (OpNode (vt128 VR128:$src1), (vt128 (bitconvert (loadv2i64 addr:$src2))),
(v16i8 (OpNode (vt128 VR128:$src1), (vt128 (load addr:$src2)),
(vt128 VR128:$src3))))]>,
XOP_4V, Sched<[sched.Folded, sched.ReadAfterFold,
// 128mem:$src2
@ -401,8 +401,7 @@ multiclass xop_vpermil2<bits<8> Opc, string OpcodeStr, RegisterClass RC,
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
[(set RC:$dst,
(VT (X86vpermil2 RC:$src1, RC:$src2,
(bitconvert (IntLdFrag addr:$src3)),
(VT (X86vpermil2 RC:$src1, RC:$src2, (IntLdFrag addr:$src3),
(i8 imm:$src4))))]>, VEX_W,
Sched<[sched.Folded, sched.ReadAfterFold, sched.ReadAfterFold]>;
def mr : IXOP5<Opc, MRMSrcMem, (outs RC:$dst),
@ -437,10 +436,10 @@ let ExeDomain = SSEPackedDouble in {

let ExeDomain = SSEPackedSingle in {
defm VPERMIL2PS : xop_vpermil2<0x48, "vpermil2ps", VR128, i128mem, f128mem,
v4f32, loadv4f32, loadv2i64,
v4f32, loadv4f32, loadv4i32,
SchedWriteFVarShuffle.XMM>;
defm VPERMIL2PSY : xop_vpermil2<0x48, "vpermil2ps", VR256, i256mem, f256mem,
v8f32, loadv8f32, loadv4i64,
v8f32, loadv8f32, loadv8i32,
SchedWriteFVarShuffle.YMM>, VEX_L;
}

@ -1391,7 +1391,7 @@ static const Constant *getConstantFromPool(const MachineInstr &MI,
if (ConstantEntry.isMachineConstantPoolEntry())
return nullptr;

auto *C = dyn_cast<Constant>(ConstantEntry.Val.ConstVal);
const Constant *C = ConstantEntry.Val.ConstVal;
assert((!C || ConstantEntry.getType() == C->getType()) &&
"Expected a constant of the same type!");
return C;
@ -1594,6 +1594,18 @@ void X86AsmPrinter::EmitSEHInstruction(const MachineInstr *MI) {
}
}

static unsigned getRegisterWidth(const MCOperandInfo &Info) {
if (Info.RegClass == X86::VR128RegClassID ||
Info.RegClass == X86::VR128XRegClassID)
return 128;
if (Info.RegClass == X86::VR256RegClassID ||
Info.RegClass == X86::VR256XRegClassID)
return 256;
if (Info.RegClass == X86::VR512RegClassID)
return 512;
llvm_unreachable("Unknown register class!");
}

void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
X86MCInstLower MCInstLowering(*MF, *this);
const X86RegisterInfo *RI =
@ -1879,8 +1891,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {

const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
SmallVector<int, 64> Mask;
DecodePSHUFBMask(C, Mask);
DecodePSHUFBMask(C, Width, Mask);
if (!Mask.empty())
OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask),
!EnablePrintSchedInfo);
@ -1951,8 +1964,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {

const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
SmallVector<int, 16> Mask;
DecodeVPERMILPMask(C, ElSize, Mask);
DecodeVPERMILPMask(C, ElSize, Width, Mask);
if (!Mask.empty())
OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask),
!EnablePrintSchedInfo);
@ -1982,8 +1996,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {

const MachineOperand &MaskOp = MI->getOperand(6);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
SmallVector<int, 16> Mask;
DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Mask);
DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask);
if (!Mask.empty())
OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask),
!EnablePrintSchedInfo);
@ -1999,8 +2014,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {

const MachineOperand &MaskOp = MI->getOperand(6);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
SmallVector<int, 16> Mask;
DecodeVPPERMMask(C, Mask);
DecodeVPPERMMask(C, Width, Mask);
if (!Mask.empty())
OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask),
!EnablePrintSchedInfo);

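In each of these blocks the printer now computes a Width from the destination register class (the new getRegisterWidth helper above) and hands it to the mask decoder, so the decoded element count is bounded by the register even when the constant's own type is wider. A minimal standalone sketch of that idea follows, with illustrative names and a simplified PSHUFB rule rather than the LLVM implementation:

// Standalone sketch (illustrative, not the LLVM sources): the number of
// decoded mask elements follows the register width, not however wide the
// constant happens to be.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int> decodeByteShuffleMask(const std::vector<uint8_t> &RawConst,
                                       unsigned RegWidthBits) {
  assert((RegWidthBits == 128 || RegWidthBits == 256 || RegWidthBits == 512) &&
         "Unexpected register width");
  unsigned NumElts = RegWidthBits / 8; // element count comes from the register
  std::vector<int> Mask(NumElts, -1);  // -1 marks an undef element
  unsigned Avail = std::min<unsigned>(NumElts, RawConst.size());
  for (unsigned I = 0; I != Avail; ++I) {
    uint8_t B = RawConst[I];
    // Simplified PSHUFB rule: bit 7 zeroes the byte (marked -2), the low bits
    // pick a source byte within the same 16-byte lane.
    Mask[I] = (B & 0x80) ? -2 : int((I & ~15u) + (B & 0x0F));
  }
  return Mask;
}
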
@ -112,11 +112,10 @@ static bool extractConstantMask(const Constant *C, unsigned MaskEltSizeInBits,
return true;
}

void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
(void)MaskTySize;
assert((MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512) &&
void DecodePSHUFBMask(const Constant *C, unsigned Width,
SmallVectorImpl<int> &ShuffleMask) {
assert((Width == 128 || Width == 256 || Width == 512) &&
C->getType()->getPrimitiveSizeInBits() >= Width &&
"Unexpected vector size.");

// The shuffle mask requires a byte vector.
@ -125,7 +124,7 @@ void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
if (!extractConstantMask(C, 8, UndefElts, RawMask))
return;

unsigned NumElts = RawMask.size();
unsigned NumElts = Width / 8;
assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
"Unexpected number of vector elements.");

@ -151,12 +150,10 @@ void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
}
}

void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
void DecodeVPERMILPMask(const Constant *C, unsigned ElSize, unsigned Width,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
(void)MaskTySize;
assert((MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512) &&
assert((Width == 128 || Width == 256 || Width == 512) &&
C->getType()->getPrimitiveSizeInBits() >= Width &&
"Unexpected vector size.");
assert((ElSize == 32 || ElSize == 64) && "Unexpected vector element size.");

@ -166,7 +163,7 @@ void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
if (!extractConstantMask(C, ElSize, UndefElts, RawMask))
return;

unsigned NumElts = RawMask.size();
unsigned NumElts = Width / ElSize;
unsigned NumEltsPerLane = 128 / ElSize;
assert((NumElts == 2 || NumElts == 4 || NumElts == 8 || NumElts == 16) &&
"Unexpected number of vector elements.");
@ -189,11 +186,13 @@ void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
}

void DecodeVPERMIL2PMask(const Constant *C, unsigned M2Z, unsigned ElSize,
unsigned Width,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
(void)MaskTySize;
assert((MaskTySize == 128 || MaskTySize == 256) && "Unexpected vector size.");
assert((MaskTySize == 128 || MaskTySize == 256) &&
Width >= MaskTySize && "Unexpected vector size.");

// The shuffle mask requires elements the same size as the target.
APInt UndefElts;
@ -201,7 +200,7 @@ void DecodeVPERMIL2PMask(const Constant *C, unsigned M2Z, unsigned ElSize,
if (!extractConstantMask(C, ElSize, UndefElts, RawMask))
return;

unsigned NumElts = RawMask.size();
unsigned NumElts = Width / ElSize;
unsigned NumEltsPerLane = 128 / ElSize;
assert((NumElts == 2 || NumElts == 4 || NumElts == 8) &&
"Unexpected number of vector elements.");
@ -242,9 +241,12 @@ void DecodeVPERMIL2PMask(const Constant *C, unsigned M2Z, unsigned ElSize,
}
}

void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
assert(C->getType()->getPrimitiveSizeInBits() == 128 &&
"Unexpected vector size.");
void DecodeVPPERMMask(const Constant *C, unsigned Width,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
(void)MaskTySize;
assert(Width == 128 && Width >= MaskTySize && "Unexpected vector size.");

// The shuffle mask requires a byte vector.
APInt UndefElts;
@ -252,7 +254,7 @@ void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
if (!extractConstantMask(C, 8, UndefElts, RawMask))
return;

unsigned NumElts = RawMask.size();
unsigned NumElts = Width / 8;
assert(NumElts == 16 && "Unexpected number of vector elements.");

for (unsigned i = 0; i != NumElts; ++i) {
@ -291,12 +293,10 @@ void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
}
}

void DecodeVPERMVMask(const Constant *C, unsigned ElSize,
void DecodeVPERMVMask(const Constant *C, unsigned ElSize, unsigned Width,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
(void)MaskTySize;
assert((MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512) &&
assert((Width == 128 || Width == 256 || Width == 512) &&
C->getType()->getPrimitiveSizeInBits() >= Width &&
"Unexpected vector size.");
assert((ElSize == 8 || ElSize == 16 || ElSize == 32 || ElSize == 64) &&
"Unexpected vector element size.");
@ -307,7 +307,7 @@ void DecodeVPERMVMask(const Constant *C, unsigned ElSize,
if (!extractConstantMask(C, ElSize, UndefElts, RawMask))
return;

unsigned NumElts = RawMask.size();
unsigned NumElts = Width / ElSize;

for (unsigned i = 0; i != NumElts; ++i) {
if (UndefElts[i]) {
@ -319,12 +319,10 @@ void DecodeVPERMVMask(const Constant *C, unsigned ElSize,
}
}

void DecodeVPERMV3Mask(const Constant *C, unsigned ElSize,
void DecodeVPERMV3Mask(const Constant *C, unsigned ElSize, unsigned Width,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
(void)MaskTySize;
assert((MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512) &&
assert((Width == 128 || Width == 256 || Width == 512) &&
C->getType()->getPrimitiveSizeInBits() >= Width &&
"Unexpected vector size.");
assert((ElSize == 8 || ElSize == 16 || ElSize == 32 || ElSize == 64) &&
"Unexpected vector element size.");
@ -335,7 +333,7 @@ void DecodeVPERMV3Mask(const Constant *C, unsigned ElSize,
if (!extractConstantMask(C, ElSize, UndefElts, RawMask))
return;

unsigned NumElts = RawMask.size();
unsigned NumElts = Width / ElSize;

for (unsigned i = 0; i != NumElts; ++i) {
if (UndefElts[i]) {

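With the added Width parameter each decoder computes NumElts as Width / ElSize (or Width / 8 for byte masks) instead of RawMask.size(), and only asserts that the constant is at least Width bits wide. A small self-contained illustration of that arithmetic, using assumed example values:

// Assumed example values, not taken from the sources: a 256-bit VPERMILPS
// whose mask constant may be wider than the register being decoded.
#include <cstdio>

int main() {
  unsigned Width = 256;                   // destination register width in bits
  unsigned ElSize = 32;                   // mask element size in bits
  unsigned NumElts = Width / ElSize;      // 8 elements get decoded, no more
  unsigned NumEltsPerLane = 128 / ElSize; // 4 elements per 128-bit lane
  std::printf("NumElts=%u NumEltsPerLane=%u\n", NumElts, NumEltsPerLane);
  return 0;
}
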
@ -26,25 +26,28 @@ class Constant;
class MVT;

/// Decode a PSHUFB mask from an IR-level vector constant.
void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
void DecodePSHUFBMask(const Constant *C, unsigned Width,
SmallVectorImpl<int> &ShuffleMask);

/// Decode a VPERMILP variable mask from an IR-level vector constant.
void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
void DecodeVPERMILPMask(const Constant *C, unsigned ElSize, unsigned Width,
SmallVectorImpl<int> &ShuffleMask);

/// Decode a VPERMILP2 variable mask from an IR-level vector constant.
void DecodeVPERMIL2PMask(const Constant *C, unsigned MatchImm, unsigned ElSize,
unsigned Width,
SmallVectorImpl<int> &ShuffleMask);

/// Decode a VPPERM variable mask from an IR-level vector constant.
void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask);
void DecodeVPPERMMask(const Constant *C, unsigned Width,
SmallVectorImpl<int> &ShuffleMask);

/// Decode a VPERM W/D/Q/PS/PD mask from an IR-level vector constant.
void DecodeVPERMVMask(const Constant *C, unsigned ElSize,
void DecodeVPERMVMask(const Constant *C, unsigned ElSize, unsigned Width,
SmallVectorImpl<int> &ShuffleMask);

/// Decode a VPERMT2 W/D/Q/PS/PD mask from an IR-level vector constant.
void DecodeVPERMV3Mask(const Constant *C, unsigned ElSize,
void DecodeVPERMV3Mask(const Constant *C, unsigned ElSize, unsigned Width,
SmallVectorImpl<int> &ShuffleMask);

} // llvm namespace

@ -224,7 +224,7 @@ entry:
define <16 x i16> @shuffle_v16i16_4501_mem(<16 x i16>* %a, <16 x i16>* %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v16i16_4501_mem:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[0,1],ymm0[0,1]

@ -1630,7 +1630,7 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-SLOW-NEXT: vbroadcastsd 24(%rsi), %ymm5
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,3,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
@ -1654,19 +1654,19 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-FAST-NEXT: vbroadcastsd %xmm2, %ymm4
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[1,1,2,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2,3],ymm0[4],ymm4[5,6],ymm0[7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm4 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm4 = [5,6,5,6,5,6,7,7]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm4, %ymm1
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[1,1,2,2]
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm2[1,1,2,2]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm5 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm5 = [5,6,5,6,5,6,7,7]
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm5, %ymm1
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
; AVX2-FAST-NEXT: vbroadcastsd 24(%rsi), %ymm2
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
; AVX2-FAST-NEXT: vmovups %ymm1, 64(%rdi)
; AVX2-FAST-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,3,3]
; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
; AVX2-FAST-NEXT: vmovups %ymm0, 64(%rdi)
; AVX2-FAST-NEXT: vmovups %ymm4, 32(%rdi)
; AVX2-FAST-NEXT: vmovups %ymm3, (%rdi)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq

@ -57,9 +57,9 @@ define <16 x i8> @test5(<16 x i8> %V) {
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: movdqa %xmm1, (%rax)
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [1,1]
; CHECK-NEXT: movdqa %xmm1, (%rax)
; CHECK-NEXT: pshufb %xmm1, %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [1,1]
; CHECK-NEXT: movaps %xmm1, (%rax)
; CHECK-NEXT: pshufb (%rax), %xmm0
; CHECK-NEXT: retq
store <2 x i64> <i64 1, i64 0>, <2 x i64>* undef, align 16
%l = load <2 x i64>, <2 x i64>* undef, align 16

@ -13,6 +13,7 @@ define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) noun
; X32-SSE-NEXT: subl $384, %esp # imm = 0x180
; X32-SSE-NEXT: movl 88(%ebp), %ecx
; X32-SSE-NEXT: movdqa 72(%ebp), %xmm0
; X32-SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; X32-SSE-NEXT: xorps %xmm1, %xmm1
; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
@ -21,7 +22,6 @@ define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) noun
; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; X32-SSE-NEXT: movdqa %xmm0, {{[0-9]+}}(%esp)
; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X32-SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)

@ -693,20 +693,20 @@ define void @test_sdiv_pow2_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
; X86-NEXT: movdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
; X86-NEXT: movdqa {{.*#+}} xmm3 = [31,0,31,0]
; X86-NEXT: movdqa %xmm2, %xmm4
; X86-NEXT: psrlq %xmm3, %xmm4
; X86-NEXT: movdqa {{.*#+}} xmm2 = [31,0,31,0]
; X86-NEXT: movdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
; X86-NEXT: movdqa %xmm3, %xmm4
; X86-NEXT: psrlq %xmm2, %xmm4
; X86-NEXT: movl $31, %ecx
; X86-NEXT: movd %ecx, %xmm5
; X86-NEXT: psrlq %xmm5, %xmm2
; X86-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1]
; X86-NEXT: psrlq %xmm5, %xmm3
; X86-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1]
; X86-NEXT: movdqa %xmm1, %xmm4
; X86-NEXT: psrlq %xmm3, %xmm4
; X86-NEXT: psrlq %xmm2, %xmm4
; X86-NEXT: psrlq %xmm5, %xmm1
; X86-NEXT: movsd {{.*#+}} xmm1 = xmm4[0],xmm1[1]
; X86-NEXT: xorpd %xmm2, %xmm1
; X86-NEXT: psubq %xmm2, %xmm1
; X86-NEXT: xorpd %xmm3, %xmm1
; X86-NEXT: psubq %xmm3, %xmm1
; X86-NEXT: pand {{\.LCPI.*}}, %xmm1
; X86-NEXT: psrlq $29, %xmm1
; X86-NEXT: paddq %xmm0, %xmm1

@ -121,10 +121,21 @@ define <8 x i32> @load_splat_8i32_4i32_01010101(<4 x i32>* %ptr) nounwind uwtabl
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: load_splat_8i32_4i32_01010101:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
; AVX1-LABEL: load_splat_8i32_4i32_01010101:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_8i32_4i32_01010101:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8i32_4i32_01010101:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <4 x i32>, <4 x i32>* %ptr
%ret = shufflevector <4 x i32> %ld, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@ -138,21 +149,10 @@ define <8 x i32> @load_splat_8i32_8i32_01010101(<8 x i32>* %ptr) nounwind uwtabl
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_8i32_8i32_01010101:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_8i32_8i32_01010101:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_8i32_8i32_01010101:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
; AVX-LABEL: load_splat_8i32_8i32_01010101:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
entry:
%ld = load <8 x i32>, <8 x i32>* %ptr
%ret = shufflevector <8 x i32> %ld, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@ -246,10 +246,21 @@ define <16 x i16> @load_splat_16i16_8i16_0123012301230123(<8 x i16>* %ptr) nounw
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: load_splat_16i16_8i16_0123012301230123:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
; AVX1-LABEL: load_splat_16i16_8i16_0123012301230123:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_16i16_8i16_0123012301230123:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i16_8i16_0123012301230123:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <8 x i16>, <8 x i16>* %ptr
%ret = shufflevector <8 x i16> %ld, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3,i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@ -263,21 +274,10 @@ define <16 x i16> @load_splat_16i16_16i16_0101010101010101(<16 x i16>* %ptr) nou
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: load_splat_16i16_16i16_0101010101010101:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_16i16_16i16_0101010101010101:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastss (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_16i16_16i16_0101010101010101:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastss (%rdi), %ymm0
; AVX512-NEXT: retq
; AVX-LABEL: load_splat_16i16_16i16_0101010101010101:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastss (%rdi), %ymm0
; AVX-NEXT: retq
entry:
%ld = load <16 x i16>, <16 x i16>* %ptr
%ret = shufflevector <16 x i16> %ld, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
@ -446,10 +446,21 @@ define <32 x i8> @load_splat_32i8_16i8_01234567012345670123456701234567(<16 x i8
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
; AVX1-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_splat_32i8_16i8_01234567012345670123456701234567:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX512-NEXT: retq
entry:
%ld = load <16 x i8>, <16 x i8>* %ptr
%ret = shufflevector <16 x i8> %ld, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>