1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-10-21 20:12:56 +02:00

[X86][SSE] Added common helper for shuffle mask constant pool decodes.

The shuffle mask decodes have a large amount of repeated code extracting/splitting mask values from Constant data.

This patch pulls all of this duplicated code into a single helper function to identify undef elements and combine/split constant integer data into the requested shuffle mask elements.

Updated PSHUFB/VPERMIL/VPERMIL2/VPPERM decoders to use it (VPERMV/VPERMV3 could be converted as well in the future).

llvm-svn: 282720
This commit is contained in:
Simon Pilgrim 2016-09-29 15:25:48 +00:00
parent 3fc7eac79f
commit e14120722d

View File

@ -14,6 +14,7 @@
#include "X86ShuffleDecodeConstantPool.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
@ -23,10 +24,12 @@
namespace llvm {
void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
// It is not an error for the PSHUFB mask to not be a vector of i8 because the
// constant pool uniques constants by their bit representation.
static bool extractConstantMask(const Constant *C, unsigned MaskEltSizeInBits,
SmallBitVector &UndefElts,
SmallVectorImpl<uint64_t> &RawMask) {
// It is not an error for shuffle masks to not be a vector of
// MaskEltSizeInBits because the constant pool uniques constants by their
// bit representation.
// e.g. the following take up the same space in the constant pool:
// i128 -170141183420855150465331762880109871104
//
@ -34,165 +37,155 @@ void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
//
// <4 x i32> <i32 -2147483648, i32 -2147483648,
// i32 -2147483648, i32 -2147483648>
Type *CstTy = C->getType();
if (!CstTy->isVectorTy())
return false;
#ifndef NDEBUG
unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
assert(MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512);
#endif
Type *CstEltTy = CstTy->getVectorElementType();
if (!CstEltTy->isIntegerTy())
return false;
if (!MaskTy->isVectorTy())
return;
int NumElts = MaskTy->getVectorNumElements();
unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits();
unsigned NumCstElts = CstTy->getVectorNumElements();
Type *EltTy = MaskTy->getVectorElementType();
if (!EltTy->isIntegerTy())
return;
// The shuffle mask requires a byte vector - decode cases with
// wider elements as well.
unsigned BitWidth = cast<IntegerType>(EltTy)->getBitWidth();
if ((BitWidth % 8) != 0)
return;
int Scale = BitWidth / 8;
int NumBytes = NumElts * Scale;
ShuffleMask.reserve(NumBytes);
for (int i = 0; i != NumElts; ++i) {
// Extract all the undef/constant element data and pack into single bitsets.
APInt UndefBits(CstSizeInBits, 0);
APInt MaskBits(CstSizeInBits, 0);
for (unsigned i = 0; i != NumCstElts; ++i) {
Constant *COp = C->getAggregateElement(i);
if (!COp) {
ShuffleMask.clear();
return;
} else if (isa<UndefValue>(COp)) {
ShuffleMask.append(Scale, SM_SentinelUndef);
if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp)))
return false;
if (isa<UndefValue>(COp)) {
APInt EltUndef = APInt::getLowBitsSet(CstSizeInBits, CstEltSizeInBits);
UndefBits |= EltUndef.shl(i * CstEltSizeInBits);
continue;
}
APInt APElt = cast<ConstantInt>(COp)->getValue();
for (int j = 0; j != Scale; ++j) {
// For AVX vectors with 32 bytes the base of the shuffle is the 16-byte
// lane of the vector we're inside.
int Base = ((i * Scale) + j) & ~0xf;
uint64_t Element = APElt.getLoBits(8).getZExtValue();
APElt = APElt.lshr(8);
// If the high bit (7) of the byte is set, the element is zeroed.
if (Element & (1 << 7))
ShuffleMask.push_back(SM_SentinelZero);
else {
// Only the least significant 4 bits of the byte are used.
int Index = Base + (Element & 0xf);
ShuffleMask.push_back(Index);
}
}
APInt EltBits = cast<ConstantInt>(COp)->getValue();
EltBits = EltBits.zextOrTrunc(CstSizeInBits);
MaskBits |= EltBits.shl(i * CstEltSizeInBits);
}
assert(NumBytes == (int)ShuffleMask.size() && "Unexpected shuffle mask size");
// Now extract the undef/constant bit data into the raw shuffle masks.
assert((CstSizeInBits % MaskEltSizeInBits) == 0 && "");
unsigned NumMaskElts = CstSizeInBits / MaskEltSizeInBits;
UndefElts = SmallBitVector(NumMaskElts, false);
RawMask.resize(NumMaskElts, 0);
for (unsigned i = 0; i != NumMaskElts; ++i) {
APInt EltUndef = UndefBits.lshr(i * MaskEltSizeInBits);
EltUndef = EltUndef.zextOrTrunc(MaskEltSizeInBits);
// Only treat the element as UNDEF if all bits are UNDEF, otherwise
// treat it as zero.
if (EltUndef.countPopulation() == MaskEltSizeInBits) {
UndefElts[i] = true;
RawMask[i] = 0;
continue;
}
APInt EltBits = MaskBits.lshr(i * MaskEltSizeInBits);
EltBits = EltBits.zextOrTrunc(MaskEltSizeInBits);
RawMask[i] = EltBits.getZExtValue();
}
return true;
}
void DecodePSHUFBMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
assert(MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512);
// The shuffle mask requires a byte vector.
SmallBitVector UndefElts;
SmallVector<uint64_t, 32> RawMask;
if (!extractConstantMask(C, 8, UndefElts, RawMask))
return;
unsigned NumElts = RawMask.size();
assert((NumElts == 16 || NumElts == 32 || NumElts == 64) &&
"Unexpected number of vector elements.");
for (unsigned i = 0; i != NumElts; ++i) {
if (UndefElts[i]) {
ShuffleMask.push_back(SM_SentinelUndef);
continue;
}
uint64_t Element = RawMask[i];
// If the high bit (7) of the byte is set, the element is zeroed.
if (Element & (1 << 7))
ShuffleMask.push_back(SM_SentinelZero);
else {
// For AVX vectors with 32 bytes the base of the shuffle is the 16-byte
// lane of the vector we're inside.
unsigned Base = i & ~0xf;
// Only the least significant 4 bits of the byte are used.
int Index = Base + (Element & 0xf);
ShuffleMask.push_back(Index);
}
}
}
// Decode a VPERMILPS/VPERMILPD constant-pool shuffle mask into ShuffleMask
// indices. \p ElSize is the element width in bits (32 for PS, 64 for PD).
void DecodeVPERMILPMask(const Constant *C, unsigned ElSize,
                        SmallVectorImpl<int> &ShuffleMask) {
  Type *MaskTy = C->getType();
  unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
  (void)MaskTySize; // Only used in asserts; silence NDEBUG unused warning.
  assert(MaskTySize == 128 || MaskTySize == 256 || MaskTySize == 512);
  assert(ElSize == 32 || ElSize == 64);

  // The shuffle mask requires elements the same size as the target.
  SmallBitVector UndefElts;
  SmallVector<uint64_t, 8> RawMask;
  if (!extractConstantMask(C, ElSize, UndefElts, RawMask))
    return;

  unsigned NumElts = RawMask.size();
  // VPERMILP shuffles within 128-bit lanes.
  unsigned NumEltsPerLane = 128 / ElSize;
  assert((NumElts == 2 || NumElts == 4 || NumElts == 8 || NumElts == 16) &&
         "Unexpected number of vector elements.");

  for (unsigned i = 0; i != NumElts; ++i) {
    if (UndefElts[i]) {
      ShuffleMask.push_back(SM_SentinelUndef);
      continue;
    }

    // Base the index on the 128-bit lane containing element i, then add the
    // per-lane selector: bit[1] for 64-bit elements, bits[1:0] for 32-bit.
    int Index = i & ~(NumEltsPerLane - 1);
    uint64_t Element = RawMask[i];
    if (ElSize == 64)
      Index += (Element >> 1) & 0x1;
    else
      Index += Element & 0x3;

    ShuffleMask.push_back(Index);
  }
}
void DecodeVPERMIL2PMask(const Constant *C, unsigned M2Z, unsigned ElSize,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
unsigned MaskTySize = MaskTy->getPrimitiveSizeInBits();
if (MaskTySize != 128 && MaskTySize != 256)
assert(MaskTySize == 128 || MaskTySize == 256);
// The shuffle mask requires elements the same size as the target.
SmallBitVector UndefElts;
SmallVector<uint64_t, 8> RawMask;
if (!extractConstantMask(C, ElSize, UndefElts, RawMask))
return;
// Only support vector types.
if (!MaskTy->isVectorTy())
return;
// Make sure its an integer type.
Type *VecEltTy = MaskTy->getVectorElementType();
if (!VecEltTy->isIntegerTy())
return;
// Support any element type from byte up to element size.
// This is necessary primarily because 64-bit elements get split to 32-bit
// in the constant pool on 32-bit target.
unsigned EltTySize = VecEltTy->getIntegerBitWidth();
if (EltTySize < 8 || EltTySize > ElSize)
return;
unsigned NumElements = MaskTySize / ElSize;
assert((NumElements == 2 || NumElements == 4 || NumElements == 8) &&
unsigned NumElts = RawMask.size();
unsigned NumEltsPerLane = 128 / ElSize;
assert((NumElts == 2 || NumElts == 4 || NumElts == 8) &&
"Unexpected number of vector elements.");
ShuffleMask.reserve(NumElements);
unsigned NumElementsPerLane = 128 / ElSize;
unsigned Factor = ElSize / EltTySize;
for (unsigned i = 0; i < NumElements; ++i) {
Constant *COp = C->getAggregateElement(i * Factor);
if (!COp) {
ShuffleMask.clear();
return;
} else if (isa<UndefValue>(COp)) {
for (unsigned i = 0; i != NumElts; ++i) {
if (UndefElts[i]) {
ShuffleMask.push_back(SM_SentinelUndef);
continue;
}
@ -201,7 +194,7 @@ void DecodeVPERMIL2PMask(const Constant *C, unsigned M2Z, unsigned ElSize,
// Bits[3] - Match Bit.
// Bits[2:1] - (Per Lane) PD Shuffle Mask.
// Bits[2:0] - (Per Lane) PS Shuffle Mask.
uint64_t Selector = cast<ConstantInt>(COp)->getZExtValue();
uint64_t Selector = RawMask[i];
unsigned MatchBit = (Selector >> 3) & 0x1;
// M2Z[0:1] MatchBit
@ -215,51 +208,34 @@ void DecodeVPERMIL2PMask(const Constant *C, unsigned M2Z, unsigned ElSize,
continue;
}
int Index = i & ~(NumElementsPerLane - 1);
int Index = i & ~(NumEltsPerLane - 1);
if (ElSize == 64)
Index += (Selector >> 1) & 0x1;
else
Index += Selector & 0x3;
int Src = (Selector >> 2) & 0x1;
Index += Src * NumElements;
Index += Src * NumElts;
ShuffleMask.push_back(Index);
}
// TODO: Handle funny-looking vectors too.
}
void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
assert(MaskTy->getPrimitiveSizeInBits() == 128);
// Only support vector types.
if (!MaskTy->isVectorTy())
// The shuffle mask requires a byte vector.
SmallBitVector UndefElts;
SmallVector<uint64_t, 32> RawMask;
if (!extractConstantMask(C, 8, UndefElts, RawMask))
return;
// Make sure its an integer type.
Type *VecEltTy = MaskTy->getVectorElementType();
if (!VecEltTy->isIntegerTy())
return;
unsigned NumElts = RawMask.size();
assert(NumElts == 16 && "Unexpected number of vector elements.");
// The shuffle mask requires a byte vector - decode cases with
// wider elements as well.
unsigned BitWidth = cast<IntegerType>(VecEltTy)->getBitWidth();
if ((BitWidth % 8) != 0)
return;
int NumElts = MaskTy->getVectorNumElements();
int Scale = BitWidth / 8;
int NumBytes = NumElts * Scale;
ShuffleMask.reserve(NumBytes);
for (int i = 0; i != NumElts; ++i) {
Constant *COp = C->getAggregateElement(i);
if (!COp) {
ShuffleMask.clear();
return;
} else if (isa<UndefValue>(COp)) {
ShuffleMask.append(Scale, SM_SentinelUndef);
for (unsigned i = 0; i != NumElts; ++i) {
if (UndefElts[i]) {
ShuffleMask.push_back(SM_SentinelUndef);
continue;
}
@ -275,26 +251,22 @@ void DecodeVPPERMMask(const Constant *C, SmallVectorImpl<int> &ShuffleMask) {
// 4 - 00h (zero - fill).
// 5 - FFh (ones - fill).
// 6 - Most significant bit of source byte replicated in all bit positions.
// 7 - Invert most significant bit of source byte and replicate in all bit positions.
APInt MaskElt = cast<ConstantInt>(COp)->getValue();
for (int j = 0; j != Scale; ++j) {
APInt Index = MaskElt.getLoBits(5);
APInt PermuteOp = MaskElt.lshr(5).getLoBits(3);
MaskElt = MaskElt.lshr(8);
// 7 - Invert most significant bit of source byte and replicate in all bit
// positions.
uint64_t Element = RawMask[i];
uint64_t Index = Element & 0x1F;
uint64_t PermuteOp = (Element >> 5) & 0x7;
if (PermuteOp == 4) {
ShuffleMask.push_back(SM_SentinelZero);
continue;
}
if (PermuteOp != 0) {
ShuffleMask.clear();
return;
}
ShuffleMask.push_back((int)Index.getZExtValue());
if (PermuteOp == 4) {
ShuffleMask.push_back(SM_SentinelZero);
continue;
}
if (PermuteOp != 0) {
ShuffleMask.clear();
return;
}
ShuffleMask.push_back((int)Index);
}
assert(NumBytes == (int)ShuffleMask.size() && "Unexpected shuffle mask size");
}
void DecodeVPERMVMask(const Constant *C, MVT VT,