Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-25 20:23:11 +01:00)
[VectorUtils] rename scaleShuffleMask to narrowShuffleMaskElts; NFC
As proposed in D77881, we'll have the related widening operation, so this name becomes too vague. While here, change the function signature to take an 'int' rather than 'size_t' for the scaling factor, add an assert for overflow of 32-bits, and improve the documentation comments.
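For illustration, here is a minimal standalone sketch of the computation the renamed function performs, using plain STL types instead of LLVM's ArrayRef and SmallVectorImpl; the helper name narrowMaskSketch is ours for this sketch, not LLVM API:

#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

// Each mask index expands to Scale sequential indices into the equivalent
// vector of narrower elements; negative (sentinel) indices are repeated.
static std::vector<int> narrowMaskSketch(int Scale, const std::vector<int> &Mask) {
  assert(Scale > 0 && "Unexpected scaling factor");
  std::vector<int> Scaled;
  Scaled.reserve(Mask.size() * Scale);
  for (int MaskElt : Mask) {
    if (MaskElt >= 0)
      assert((uint64_t)Scale * MaskElt + (Scale - 1) <=
                 (uint64_t)std::numeric_limits<int32_t>::max() &&
             "Overflowed 32-bits");
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      Scaled.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
  return Scaled;
}

// With Scale = 4, the v4i32 mask <3, 2, 0, -1> becomes the v16i8 mask
// <12, 13, 14, 15, 8, 9, 10, 11, 0, 1, 2, 3, -1, -1, -1, -1>, matching the
// example in the VectorUtils.h doc comment in the diff below.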
commit 8105dac4cc (parent c68409d10e)
@@ -328,19 +328,19 @@ const Value *getSplatValue(const Value *V);
 /// not limited by finding a scalar source value to a splatted vector.
 bool isSplatValue(const Value *V, int Index = -1, unsigned Depth = 0);
 
-/// Scale a shuffle or target shuffle mask, replacing each mask index with the
-/// scaled sequential indices for an equivalent mask of narrowed elements.
-/// Mask elements that are less than 0 (sentinel values) are repeated in the
-/// output mask.
+/// Replace each shuffle mask index with the scaled sequential indices for an
+/// equivalent mask of narrowed elements. Mask elements that are less than 0
+/// (sentinel values) are repeated in the output mask.
 ///
 /// Example with Scale = 4:
 ///   <4 x i32> <3, 2, 0, -1> -->
 ///   <16 x i8> <12, 13, 14, 15, 8, 9, 10, 11, 0, 1, 2, 3, -1, -1, -1, -1>
 ///
-/// This is the reverse process of "canWidenShuffleElements", but can always
-/// succeed.
-void scaleShuffleMask(size_t Scale, ArrayRef<int> Mask,
-                      SmallVectorImpl<int> &ScaledMask);
+/// This is the reverse process of widening shuffle mask elements, but it always
+/// succeeds because the indexes can always be multiplied (scaled up) to map to
+/// narrower vector elements.
+void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
+                           SmallVectorImpl<int> &ScaledMask);
 
 /// Compute a map of integer instructions to their minimum legal type
 /// size.
@@ -398,8 +398,8 @@ bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
   return false;
 }
 
-void llvm::scaleShuffleMask(size_t Scale, ArrayRef<int> Mask,
-                            SmallVectorImpl<int> &ScaledMask) {
+void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
+                                 SmallVectorImpl<int> &ScaledMask) {
   assert(Scale > 0 && "Unexpected scaling factor");
 
   // Fast-path: if no scaling, then it is just a copy.
@@ -409,9 +409,15 @@ void llvm::scaleShuffleMask(size_t Scale, ArrayRef<int> Mask,
   }
 
   ScaledMask.clear();
-  for (int MaskElt : Mask)
-    for (int ScaleElt = 0; ScaleElt != (int)Scale; ++ScaleElt)
-      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + ScaleElt);
+  for (int MaskElt : Mask) {
+    if (MaskElt >= 0) {
+      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <=
+                 std::numeric_limits<int32_t>::max() &&
+             "Overflowed 32-bits");
+    }
+    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
+      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
+  }
 }
 
 MapVector<Instruction *, uint64_t>
@@ -19841,8 +19841,8 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
       ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0);
       SmallVector<int, 8> InnerMask;
       SmallVector<int, 8> OuterMask;
-      scaleShuffleMask(InnerScale, InnerSVN->getMask(), InnerMask);
-      scaleShuffleMask(OuterScale, SVN->getMask(), OuterMask);
+      narrowShuffleMaskElts(InnerScale, InnerSVN->getMask(), InnerMask);
+      narrowShuffleMaskElts(OuterScale, SVN->getMask(), OuterMask);
 
       // Merge the shuffle masks.
       SmallVector<int, 8> NewMask;
@@ -7332,8 +7332,8 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
 
     size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
     SmallVector<int, 64> Mask0, Mask1;
-    scaleShuffleMask(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
-    scaleShuffleMask(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
+    narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
+    narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
     for (size_t i = 0; i != MaskSize; ++i) {
       if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
         Mask.push_back(SM_SentinelUndef);
@@ -7391,7 +7391,7 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
     if ((NumSubElts % SubMask.size()) == 0) {
       int Scale = NumSubElts / SubMask.size();
       SmallVector<int,64> ScaledSubMask;
-      scaleShuffleMask(Scale, SubMask, ScaledSubMask);
+      narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
       SubMask = ScaledSubMask;
     } else {
       int Scale = SubMask.size() / NumSubElts;
@@ -16359,7 +16359,7 @@ static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
   SmallVector<int, 2> RepeatedMask;
   if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
     SmallVector<int, 4> PSHUFDMask;
-    scaleShuffleMask(2, RepeatedMask, PSHUFDMask);
+    narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
     return DAG.getBitcast(
         MVT::v4i64,
         DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
@@ -17008,7 +17008,7 @@ static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
     SmallVector<int, 2> Widened256Mask;
     if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
       Widened128Mask.clear();
-      llvm::scaleShuffleMask(2, Widened256Mask, Widened128Mask);
+      narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
     }
 
     // Try to lower to vshuf64x2/vshuf32x4.
@@ -17159,7 +17159,7 @@ static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
   SmallVector<int, 2> Repeated128Mask;
   if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
     SmallVector<int, 4> PSHUFDMask;
-    scaleShuffleMask(2, Repeated128Mask, PSHUFDMask);
+    narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
     return DAG.getBitcast(
         MVT::v8i64,
         DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
@@ -20247,7 +20247,7 @@ static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
     // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
     SmallVector<int, 64> Mask;
     int Scale = 64 / OutVT.getScalarSizeInBits();
-    scaleShuffleMask(Scale, { 0, 2, 1, 3 }, Mask);
+    narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
     Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
 
     if (DstVT.is256BitVector())
@@ -33693,7 +33693,7 @@ static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
       // Narrow the repeated mask to create 32-bit element permutes.
       SmallVector<int, 4> WordMask = RepeatedMask;
       if (MaskScalarSizeInBits == 64)
-        scaleShuffleMask(2, RepeatedMask, WordMask);
+        narrowShuffleMaskElts(2, RepeatedMask, WordMask);
 
       Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
       ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
@@ -34146,7 +34146,7 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
   if (BaseMaskEltSizeInBits > 64) {
     assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
     int MaskScale = BaseMaskEltSizeInBits / 64;
-    scaleShuffleMask(MaskScale, BaseMask, Mask);
+    narrowShuffleMaskElts(MaskScale, BaseMask, Mask);
   } else {
     Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
   }
@@ -38294,7 +38294,7 @@ static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
   if ((NumSrcElts % Mask.size()) == 0) {
     SmallVector<int, 16> ScaledMask;
     int Scale = NumSrcElts / Mask.size();
-    scaleShuffleMask(Scale, Mask, ScaledMask);
+    narrowShuffleMaskElts(Scale, Mask, ScaledMask);
     Mask = std::move(ScaledMask);
   } else if ((Mask.size() % NumSrcElts) == 0) {
     // Simplify Mask based on demanded element.
@@ -336,8 +336,8 @@ void X86InterleavedAccessGroup::interleave8bitStride4VF8(
 
   createUnpackShuffleMask(VT, MaskLowTemp1, true, false);
   createUnpackShuffleMask(VT, MaskHighTemp1, false, false);
-  scaleShuffleMask(2, MaskHighTemp1, MaskHighWord);
-  scaleShuffleMask(2, MaskLowTemp1, MaskLowWord);
+  narrowShuffleMaskElts(2, MaskHighTemp1, MaskHighWord);
+  narrowShuffleMaskElts(2, MaskLowTemp1, MaskLowWord);
   // IntrVec1Low = c0 m0 c1 m1 c2 m2 c3 m3 c4 m4 c5 m5 c6 m6 c7 m7
   // IntrVec2Low = y0 k0 y1 k1 y2 k2 y3 k3 y4 k4 y5 k5 y6 k6 y7 k7
   Value *IntrVec1Low =
@@ -384,8 +384,8 @@ void X86InterleavedAccessGroup::interleave8bitStride4(
 
   createUnpackShuffleMask(HalfVT, MaskLowTemp, true, false);
   createUnpackShuffleMask(HalfVT, MaskHighTemp, false, false);
-  scaleShuffleMask(2, MaskLowTemp, LowHighMask[0]);
-  scaleShuffleMask(2, MaskHighTemp, LowHighMask[1]);
+  narrowShuffleMaskElts(2, MaskLowTemp, LowHighMask[0]);
+  narrowShuffleMaskElts(2, MaskHighTemp, LowHighMask[1]);
 
   // IntrVec1Low  = c0 m0 c1 m1 ... c7 m7  | c16 m16 c17 m17 ... c23 m23
   // IntrVec1High = c8 m8 c9 m9 ... c15 m15 | c24 m24 c25 m25 ... c31 m31
@@ -1967,7 +1967,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
       assert(XNumElts % VWidth == 0 && "Unexpected vector bitcast");
       unsigned ScaleFactor = XNumElts / VWidth;
       SmallVector<int, 16> ScaledMask;
-      scaleShuffleMask(ScaleFactor, Mask, ScaledMask);
+      narrowShuffleMaskElts(ScaleFactor, Mask, ScaledMask);
 
       // If the shuffled source vector simplifies, cast that value to this
       // shuffle's type.
@@ -298,7 +298,7 @@ static bool foldBitcastShuf(Instruction &I, const TargetTransformInfo &TTI) {
   SmallVector<int, 16> NewMask;
   assert(DestNumElts % SrcNumElts == 0 && "Unexpected shuffle mask");
   unsigned ScaleFactor = DestNumElts / SrcNumElts;
-  scaleShuffleMask(ScaleFactor, Mask, NewMask);
+  narrowShuffleMaskElts(ScaleFactor, Mask, NewMask);
   Value *Shuf = Builder.CreateShuffleVector(CastV, UndefValue::get(DestTy),
                                             NewMask);
   I.replaceAllUsesWith(Shuf);
@@ -98,11 +98,11 @@ TEST_F(BasicTest, isSplat) {
   EXPECT_FALSE(isSplatValue(SplatWithUndefC));
 }
 
-TEST_F(BasicTest, scaleShuffleMask) {
+TEST_F(BasicTest, narrowShuffleMaskElts) {
   SmallVector<int, 16> ScaledMask;
-  scaleShuffleMask(1, {3,2,0,-2}, ScaledMask);
+  narrowShuffleMaskElts(1, {3,2,0,-2}, ScaledMask);
   EXPECT_EQ(makeArrayRef(ScaledMask), makeArrayRef({3,2,0,-2}));
-  scaleShuffleMask(4, {3,2,0,-1}, ScaledMask);
+  narrowShuffleMaskElts(4, {3,2,0,-1}, ScaledMask);
   EXPECT_EQ(makeArrayRef(ScaledMask), makeArrayRef({12,13,14,15,8,9,10,11,0,1,2,3,-1,-1,-1,-1}));
 }
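For contrast with the narrowing direction exercised by the test above, here is a hedged sketch of the widening operation the commit message anticipates from D77881. The name widenMaskSketch, its signature, and the exact failure conditions are illustrative assumptions, not the committed LLVM API; unlike narrowing, widening can fail, because each group of Scale adjacent narrow indices must be either all-sentinel or a contiguous run starting at a Scale-aligned index:

#include <cstddef>
#include <vector>

static bool widenMaskSketch(int Scale, const std::vector<int> &Mask,
                            std::vector<int> &WidenedMask) {
  if (Scale <= 0 || Mask.size() % Scale != 0)
    return false;
  WidenedMask.clear();
  for (std::size_t I = 0; I != Mask.size(); I += Scale) {
    int First = Mask[I];
    // Groups of sentinels widen to one sentinel; index groups must be
    // contiguous and aligned to a whole wide element.
    bool Ok = (First < 0 || First % Scale == 0);
    for (int J = 1; J != Scale && Ok; ++J)
      Ok = (First < 0 ? Mask[I + J] == First : Mask[I + J] == First + J);
    if (!Ok)
      return false;
    WidenedMask.push_back(First < 0 ? First : First / Scale);
  }
  return true;
}

// Round-tripping the doc-comment example: widening
// <12, 13, 14, 15, 8, 9, 10, 11, 0, 1, 2, 3, -1, -1, -1, -1> by 4 recovers
// <3, 2, 0, -1>, which is why the old name "scaleShuffleMask" became
// ambiguous once both directions exist.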