mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-26 04:32:44 +01:00
AVX512: Remove VSHRI kmask patterns from TD file. It is incorrect to use kshiftw to implement VSHRI for v4i1: bits 15-4 are undef, so the upper bits of v4i1 may not be zeroed. v4i1 should be zero-extended to v16i1 (or any natively supported vector).
Differential Revision: http://reviews.llvm.org/D17763 llvm-svn: 262797
This commit is contained in:
parent
2c4c7d140a
commit
d0d6119cbd
@ -4702,7 +4702,8 @@ static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
|
||||
}
|
||||
|
||||
/// Insert i1-subvector to i1-vector.
|
||||
static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
|
||||
static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG,
|
||||
const X86Subtarget &Subtarget) {
|
||||
|
||||
SDLoc dl(Op);
|
||||
SDValue Vec = Op.getOperand(0);
|
||||
@ -4732,43 +4733,71 @@ static SDValue Insert1BitVector(SDValue Op, SelectionDAG &DAG) {
|
||||
// 3. Subvector should be inserted in the middle (for example v2i1
|
||||
// to v16i1, index 2)
|
||||
|
||||
// extend to natively supported kshift
|
||||
MVT MinVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
|
||||
MVT WideOpVT = OpVT;
|
||||
if (OpVT.getSizeInBits() < MinVT.getStoreSizeInBits())
|
||||
WideOpVT = MinVT;
|
||||
|
||||
SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
|
||||
SDValue Undef = DAG.getUNDEF(OpVT);
|
||||
SDValue WideSubVec =
|
||||
DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef, SubVec, ZeroIdx);
|
||||
if (Vec.isUndef())
|
||||
return DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
|
||||
DAG.getConstant(IdxVal, dl, MVT::i8));
|
||||
SDValue Undef = DAG.getUNDEF(WideOpVT);
|
||||
SDValue WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
|
||||
Undef, SubVec, ZeroIdx);
|
||||
|
||||
// Extract sub-vector if require.
|
||||
auto ExtractSubVec = [&](SDValue V) {
|
||||
return (WideOpVT == OpVT) ? V : DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
|
||||
OpVT, V, ZeroIdx);
|
||||
};
|
||||
|
||||
if (Vec.isUndef()) {
|
||||
if (IdxVal != 0) {
|
||||
SDValue ShiftBits = DAG.getConstant(IdxVal, dl, MVT::i8);
|
||||
WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec, ShiftBits);
|
||||
}
|
||||
return ExtractSubVec(WideSubVec);
|
||||
}
|
||||
|
||||
if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
|
||||
NumElems = WideOpVT.getVectorNumElements();
|
||||
unsigned ShiftLeft = NumElems - SubVecNumElems;
|
||||
unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
|
||||
WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, WideSubVec,
|
||||
DAG.getConstant(ShiftLeft, dl, MVT::i8));
|
||||
return ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, OpVT, WideSubVec,
|
||||
DAG.getConstant(ShiftRight, dl, MVT::i8)) : WideSubVec;
|
||||
Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
|
||||
DAG.getConstant(ShiftLeft, dl, MVT::i8));
|
||||
Vec = ShiftRight ? DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec,
|
||||
DAG.getConstant(ShiftRight, dl, MVT::i8)) : Vec;
|
||||
return ExtractSubVec(Vec);
|
||||
}
|
||||
|
||||
if (IdxVal == 0) {
|
||||
// Zero lower bits of the Vec
|
||||
SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
|
||||
Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
|
||||
Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
|
||||
// Merge them together
|
||||
return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
|
||||
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
|
||||
Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
|
||||
Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
|
||||
// Merge them together, SubVec should be zero extended.
|
||||
WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
|
||||
getZeroVector(WideOpVT, Subtarget, DAG, dl),
|
||||
SubVec, ZeroIdx);
|
||||
Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
|
||||
return ExtractSubVec(Vec);
|
||||
}
|
||||
|
||||
// Simple case when we put subvector in the upper part
|
||||
if (IdxVal + SubVecNumElems == NumElems) {
|
||||
// Zero upper bits of the Vec
|
||||
WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec,
|
||||
DAG.getConstant(IdxVal, dl, MVT::i8));
|
||||
WideSubVec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, WideSubVec,
|
||||
DAG.getConstant(IdxVal, dl, MVT::i8));
|
||||
SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
|
||||
Vec = DAG.getNode(X86ISD::VSHLI, dl, OpVT, Vec, ShiftBits);
|
||||
Vec = DAG.getNode(X86ISD::VSRLI, dl, OpVT, Vec, ShiftBits);
|
||||
return DAG.getNode(ISD::OR, dl, OpVT, Vec, WideSubVec);
|
||||
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
|
||||
Vec = DAG.getNode(X86ISD::VSHLI, dl, WideOpVT, Vec, ShiftBits);
|
||||
Vec = DAG.getNode(X86ISD::VSRLI, dl, WideOpVT, Vec, ShiftBits);
|
||||
Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, WideSubVec);
|
||||
return ExtractSubVec(Vec);
|
||||
}
|
||||
// Subvector should be inserted in the middle - use shuffle
|
||||
WideSubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Undef,
|
||||
SubVec, ZeroIdx);
|
||||
SmallVector<int, 64> Mask;
|
||||
for (unsigned i = 0; i < NumElems; ++i)
|
||||
Mask.push_back(i >= IdxVal && i < IdxVal + SubVecNumElems ?
|
||||
@ -12661,7 +12690,7 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
|
||||
return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
|
||||
|
||||
if (OpVT.getVectorElementType() == MVT::i1)
|
||||
return Insert1BitVector(Op, DAG);
|
||||
return Insert1BitVector(Op, DAG, Subtarget);
|
||||
|
||||
return SDValue();
|
||||
}
|
||||
|
@ -2445,7 +2445,6 @@ multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
|
||||
let Predicates = [HasBWI] in {
|
||||
defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode>,
|
||||
VEX, TAPD, VEX_W;
|
||||
let Predicates = [HasDQI] in
|
||||
defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode>,
|
||||
VEX, TAPD;
|
||||
}
|
||||
@ -2482,88 +2481,53 @@ let Predicates = [HasAVX512] in {
|
||||
def : Pat<(i1 1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
|
||||
def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSHIFTRWri (KSET1W), (i8 15)), VK1)>;
|
||||
}
|
||||
def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
|
||||
(v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;
|
||||
def : Pat<(v8i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
|
||||
(v8i1 (COPY_TO_REGCLASS VK32:$src, VK8))>;
|
||||
def : Pat<(v8i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
|
||||
(v8i1 (COPY_TO_REGCLASS VK64:$src, VK8))>;
|
||||
|
||||
// Patterns for kmask insert_subvector/extract_subvector to/from index=0
|
||||
multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
|
||||
RegisterClass RC, ValueType VT> {
|
||||
def : Pat<(subVT (extract_subvector (VT RC:$src), (iPTR 0))),
|
||||
(subVT (COPY_TO_REGCLASS RC:$src, subRC))>;
|
||||
|
||||
def : Pat<(VT (insert_subvector undef, subRC:$src, (iPTR 0))),
|
||||
(VT (COPY_TO_REGCLASS subRC:$src, RC))>;
|
||||
}
|
||||
|
||||
defm : operation_subvector_mask_lowering<VK2, v2i1, VK4, v4i1>;
|
||||
defm : operation_subvector_mask_lowering<VK2, v2i1, VK8, v8i1>;
|
||||
defm : operation_subvector_mask_lowering<VK2, v2i1, VK16, v16i1>;
|
||||
defm : operation_subvector_mask_lowering<VK2, v2i1, VK32, v32i1>;
|
||||
defm : operation_subvector_mask_lowering<VK2, v2i1, VK64, v64i1>;
|
||||
|
||||
defm : operation_subvector_mask_lowering<VK4, v4i1, VK8, v8i1>;
|
||||
defm : operation_subvector_mask_lowering<VK4, v4i1, VK16, v16i1>;
|
||||
defm : operation_subvector_mask_lowering<VK4, v4i1, VK32, v32i1>;
|
||||
defm : operation_subvector_mask_lowering<VK4, v4i1, VK64, v64i1>;
|
||||
|
||||
defm : operation_subvector_mask_lowering<VK8, v8i1, VK16, v16i1>;
|
||||
defm : operation_subvector_mask_lowering<VK8, v8i1, VK32, v32i1>;
|
||||
defm : operation_subvector_mask_lowering<VK8, v8i1, VK64, v64i1>;
|
||||
|
||||
defm : operation_subvector_mask_lowering<VK16, v16i1, VK32, v32i1>;
|
||||
defm : operation_subvector_mask_lowering<VK16, v16i1, VK64, v64i1>;
|
||||
|
||||
defm : operation_subvector_mask_lowering<VK32, v32i1, VK64, v64i1>;
|
||||
|
||||
def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
|
||||
(v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;
|
||||
|
||||
def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 0))),
|
||||
(v16i1 (COPY_TO_REGCLASS VK32:$src, VK16))>;
|
||||
def : Pat<(v16i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
|
||||
(v16i1 (COPY_TO_REGCLASS VK64:$src, VK16))>;
|
||||
|
||||
def : Pat<(v16i1 (extract_subvector (v32i1 VK32:$src), (iPTR 16))),
|
||||
(v16i1 (COPY_TO_REGCLASS (KSHIFTRDri VK32:$src, (i8 16)), VK16))>;
|
||||
|
||||
def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 0))),
|
||||
(v32i1 (COPY_TO_REGCLASS VK64:$src, VK32))>;
|
||||
|
||||
def : Pat<(v32i1 (extract_subvector (v64i1 VK64:$src), (iPTR 32))),
|
||||
(v32i1 (COPY_TO_REGCLASS (KSHIFTRQri VK64:$src, (i8 32)), VK32))>;
|
||||
|
||||
def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
|
||||
(v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
|
||||
|
||||
def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
|
||||
(v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
|
||||
|
||||
def : Pat<(v4i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
|
||||
(v4i1 (COPY_TO_REGCLASS VK2:$src, VK4))>;
|
||||
|
||||
def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
|
||||
(v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
|
||||
def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
|
||||
(v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
|
||||
|
||||
def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
|
||||
(v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;
|
||||
|
||||
def : Pat<(v32i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
|
||||
(v32i1 (COPY_TO_REGCLASS VK2:$src, VK32))>;
|
||||
def : Pat<(v32i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
|
||||
(v32i1 (COPY_TO_REGCLASS VK4:$src, VK32))>;
|
||||
def : Pat<(v32i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
|
||||
(v32i1 (COPY_TO_REGCLASS VK8:$src, VK32))>;
|
||||
def : Pat<(v32i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
|
||||
(v32i1 (COPY_TO_REGCLASS VK16:$src, VK32))>;
|
||||
|
||||
def : Pat<(v64i1 (insert_subvector undef, VK2:$src, (iPTR 0))),
|
||||
(v64i1 (COPY_TO_REGCLASS VK2:$src, VK64))>;
|
||||
def : Pat<(v64i1 (insert_subvector undef, VK4:$src, (iPTR 0))),
|
||||
(v64i1 (COPY_TO_REGCLASS VK4:$src, VK64))>;
|
||||
def : Pat<(v64i1 (insert_subvector undef, VK8:$src, (iPTR 0))),
|
||||
(v64i1 (COPY_TO_REGCLASS VK8:$src, VK64))>;
|
||||
def : Pat<(v64i1 (insert_subvector undef, VK16:$src, (iPTR 0))),
|
||||
(v64i1 (COPY_TO_REGCLASS VK16:$src, VK64))>;
|
||||
def : Pat<(v64i1 (insert_subvector undef, VK32:$src, (iPTR 0))),
|
||||
(v64i1 (COPY_TO_REGCLASS VK32:$src, VK64))>;
|
||||
|
||||
|
||||
def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
|
||||
(v8i1 (COPY_TO_REGCLASS
|
||||
(KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16),
|
||||
(I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
|
||||
|
||||
def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
|
||||
(v8i1 (COPY_TO_REGCLASS
|
||||
(KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16),
|
||||
(I8Imm $imm)), VK8))>, Requires<[HasAVX512, NoDQI]>;
|
||||
|
||||
def : Pat<(v4i1 (X86vshli VK4:$src, (i8 imm:$imm))),
|
||||
(v4i1 (COPY_TO_REGCLASS
|
||||
(KSHIFTLWri (COPY_TO_REGCLASS VK4:$src, VK16),
|
||||
(I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
|
||||
|
||||
def : Pat<(v4i1 (X86vsrli VK4:$src, (i8 imm:$imm))),
|
||||
(v4i1 (COPY_TO_REGCLASS
|
||||
(KSHIFTRWri (COPY_TO_REGCLASS VK4:$src, VK16),
|
||||
(I8Imm $imm)), VK4))>, Requires<[HasAVX512]>;
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// AVX-512 - Aligned and unaligned load and store
|
||||
//
|
||||
|
@ -60,9 +60,12 @@ define <8 x i1> @test4(<4 x i1> %a, <4 x i1>%b) {
|
||||
; CHECK: # BB#0:
|
||||
; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
|
||||
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
|
||||
; CHECK-NEXT: vpslld $31, %xmm1, %xmm0
|
||||
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k1
|
||||
; CHECK-NEXT: kshiftlb $4, %k1, %k1
|
||||
; CHECK-NEXT: kshiftlb $4, %k0, %k0
|
||||
; CHECK-NEXT: kshiftrb $4, %k0, %k1
|
||||
; CHECK-NEXT: korb %k0, %k1, %k0
|
||||
; CHECK-NEXT: kshiftrb $4, %k0, %k0
|
||||
; CHECK-NEXT: korb %k1, %k0, %k0
|
||||
; CHECK-NEXT: vpmovm2w %k0, %xmm0
|
||||
; CHECK-NEXT: retq
|
||||
|
||||
@ -75,9 +78,12 @@ define <4 x i1> @test5(<2 x i1> %a, <2 x i1>%b) {
|
||||
; CHECK: # BB#0:
|
||||
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
|
||||
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
|
||||
; CHECK-NEXT: kshiftlw $2, %k0, %k0
|
||||
; CHECK-NEXT: kshiftrw $2, %k0, %k1
|
||||
; CHECK-NEXT: korw %k0, %k1, %k0
|
||||
; CHECK-NEXT: vpsllq $63, %xmm1, %xmm0
|
||||
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k1
|
||||
; CHECK-NEXT: kshiftlb $2, %k1, %k1
|
||||
; CHECK-NEXT: kshiftlb $2, %k0, %k0
|
||||
; CHECK-NEXT: kshiftrb $2, %k0, %k0
|
||||
; CHECK-NEXT: korb %k1, %k0, %k0
|
||||
; CHECK-NEXT: vpmovm2d %k0, %xmm0
|
||||
; CHECK-NEXT: retq
|
||||
|
||||
@ -90,9 +96,12 @@ define <16 x i1> @test6(<2 x i1> %a, <2 x i1>%b) {
|
||||
; CHECK: # BB#0:
|
||||
; CHECK-NEXT: vpsllq $63, %xmm0, %xmm0
|
||||
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k0
|
||||
; CHECK-NEXT: kshiftlw $2, %k0, %k0
|
||||
; CHECK-NEXT: kshiftrw $2, %k0, %k1
|
||||
; CHECK-NEXT: korw %k0, %k1, %k0
|
||||
; CHECK-NEXT: vpsllq $63, %xmm1, %xmm0
|
||||
; CHECK-NEXT: vptestmq %xmm0, %xmm0, %k1
|
||||
; CHECK-NEXT: kshiftlb $2, %k1, %k1
|
||||
; CHECK-NEXT: kshiftlb $2, %k0, %k0
|
||||
; CHECK-NEXT: kshiftrb $2, %k0, %k0
|
||||
; CHECK-NEXT: korb %k1, %k0, %k0
|
||||
; CHECK-NEXT: kunpckbw %k0, %k0, %k0
|
||||
; CHECK-NEXT: vpmovm2b %k0, %xmm0
|
||||
; CHECK-NEXT: retq
|
||||
@ -106,9 +115,12 @@ define <32 x i1> @test7(<4 x i1> %a, <4 x i1>%b) {
|
||||
; CHECK: # BB#0:
|
||||
; CHECK-NEXT: vpslld $31, %xmm0, %xmm0
|
||||
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k0
|
||||
; CHECK-NEXT: vpslld $31, %xmm1, %xmm0
|
||||
; CHECK-NEXT: vptestmd %xmm0, %xmm0, %k1
|
||||
; CHECK-NEXT: kshiftlb $4, %k1, %k1
|
||||
; CHECK-NEXT: kshiftlb $4, %k0, %k0
|
||||
; CHECK-NEXT: kshiftrb $4, %k0, %k1
|
||||
; CHECK-NEXT: korb %k0, %k1, %k0
|
||||
; CHECK-NEXT: kshiftrb $4, %k0, %k0
|
||||
; CHECK-NEXT: korb %k1, %k0, %k0
|
||||
; CHECK-NEXT: kunpckbw %k0, %k0, %k0
|
||||
; CHECK-NEXT: kunpckwd %k0, %k0, %k0
|
||||
; CHECK-NEXT: vpmovm2b %k0, %ymm0
|
||||
|
@ -939,8 +939,8 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
|
||||
; SKX: # BB#0:
|
||||
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
|
||||
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k0
|
||||
; SKX-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX-NEXT: kshiftlb $6, %k0, %k0
|
||||
; SKX-NEXT: kshiftrb $6, %k0, %k1
|
||||
; SKX-NEXT: vscatterqps %xmm0, (,%ymm1) {%k1}
|
||||
; SKX-NEXT: retq
|
||||
;
|
||||
@ -949,8 +949,8 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
|
||||
; SKX_32-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
|
||||
; SKX_32-NEXT: vpsllq $63, %xmm2, %xmm2
|
||||
; SKX_32-NEXT: vptestmq %xmm2, %xmm2, %k0
|
||||
; SKX_32-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX_32-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX_32-NEXT: kshiftlb $6, %k0, %k0
|
||||
; SKX_32-NEXT: kshiftrb $6, %k0, %k1
|
||||
; SKX_32-NEXT: vscatterdps %xmm0, (,%xmm1) {%k1}
|
||||
; SKX_32-NEXT: retl
|
||||
call void @llvm.masked.scatter.v2f32(<2 x float> %a1, <2 x float*> %ptr, i32 4, <2 x i1> %mask)
|
||||
@ -984,8 +984,8 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
|
||||
; SKX: # BB#0:
|
||||
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
|
||||
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k0
|
||||
; SKX-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX-NEXT: kshiftlb $6, %k0, %k0
|
||||
; SKX-NEXT: kshiftrb $6, %k0, %k1
|
||||
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
|
||||
; SKX-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
|
||||
; SKX-NEXT: retq
|
||||
@ -994,8 +994,8 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
|
||||
; SKX_32: # BB#0:
|
||||
; SKX_32-NEXT: vpsllq $63, %xmm2, %xmm2
|
||||
; SKX_32-NEXT: vptestmq %xmm2, %xmm2, %k0
|
||||
; SKX_32-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX_32-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX_32-NEXT: kshiftlb $6, %k0, %k0
|
||||
; SKX_32-NEXT: kshiftrb $6, %k0, %k1
|
||||
; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
|
||||
; SKX_32-NEXT: vpscatterqd %xmm0, (,%ymm1) {%k1}
|
||||
; SKX_32-NEXT: retl
|
||||
@ -1043,8 +1043,8 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
|
||||
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
|
||||
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
|
||||
; SKX-NEXT: vptestmq %xmm1, %xmm1, %k0
|
||||
; SKX-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX-NEXT: kshiftlb $6, %k0, %k0
|
||||
; SKX-NEXT: kshiftrb $6, %k0, %k1
|
||||
; SKX-NEXT: vgatherdps (%rdi,%xmm0,4), %xmm2 {%k1}
|
||||
; SKX-NEXT: vmovaps %zmm2, %zmm0
|
||||
; SKX-NEXT: retq
|
||||
@ -1054,8 +1054,8 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
|
||||
; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
|
||||
; SKX_32-NEXT: vpsllq $63, %xmm1, %xmm1
|
||||
; SKX_32-NEXT: vptestmq %xmm1, %xmm1, %k0
|
||||
; SKX_32-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX_32-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX_32-NEXT: kshiftlb $6, %k0, %k0
|
||||
; SKX_32-NEXT: kshiftrb $6, %k0, %k1
|
||||
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; SKX_32-NEXT: vgatherdps (%eax,%xmm0,4), %xmm2 {%k1}
|
||||
; SKX_32-NEXT: vmovaps %zmm2, %zmm0
|
||||
|
@ -707,8 +707,8 @@ define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
|
||||
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
|
||||
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
|
||||
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
|
||||
; SKX-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX-NEXT: kshiftlw $14, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $14, %k0, %k1
|
||||
; SKX-NEXT: vmovups %xmm1, (%rdi) {%k1}
|
||||
; SKX-NEXT: retq
|
||||
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
|
||||
@ -801,8 +801,8 @@ define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %
|
||||
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
|
||||
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
|
||||
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
|
||||
; SKX-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX-NEXT: kshiftlw $14, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $14, %k0, %k1
|
||||
; SKX-NEXT: vmovups (%rdi), %xmm1 {%k1}
|
||||
; SKX-NEXT: vmovaps %zmm1, %zmm0
|
||||
; SKX-NEXT: retq
|
||||
@ -856,8 +856,8 @@ define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
|
||||
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
|
||||
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
|
||||
; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
|
||||
; SKX-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX-NEXT: kshiftlw $14, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $14, %k0, %k1
|
||||
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
|
||||
; SKX-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1}
|
||||
; SKX-NEXT: vpmovsxdq %xmm0, %xmm0
|
||||
@ -903,8 +903,8 @@ define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
|
||||
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
|
||||
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
|
||||
; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
|
||||
; SKX-NEXT: kshiftlw $2, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $2, %k0, %k1
|
||||
; SKX-NEXT: kshiftlw $14, %k0, %k0
|
||||
; SKX-NEXT: kshiftrw $14, %k0, %k1
|
||||
; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1} {z}
|
||||
; SKX-NEXT: retq
|
||||
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
|
||||
|
Loading…
Reference in New Issue
Block a user