1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-10-18 10:32:48 +02:00

Reland [GlobalISel] NFC: Have LLT::getSizeInBits/Bytes return a TypeSize.

This patch relands https://reviews.llvm.org/D104454, but fixes some failing
builds on Mac OS, which apparently has a different definition for size_t
that caused an 'ambiguous operator overload' error for the implicit conversion
of TypeSize to a scalar value.

This reverts commit b732e6c9a8438e5204ac96c8ca76f9b11abf98ff.
This commit is contained in:
Sander de Smalen 2021-06-27 16:07:19 +01:00
parent 74fd9c1f8e
commit a82143ea32
7 changed files with 43 additions and 24 deletions

View File

@@ -595,7 +595,7 @@ bool InstructionSelector::executeMatchTable(
case GIM_CheckPointerToAny: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
int64_t SizeInBits = MatchTable[CurrentIdx++];
uint64_t SizeInBits = MatchTable[CurrentIdx++];
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["

View File

@@ -67,7 +67,7 @@ public:
assert(!EC.isScalar() && "invalid number of vector elements");
assert(!ScalarTy.isVector() && "invalid vector element type");
return LLT{ScalarTy.isPointer(), /*isVector=*/true, EC,
ScalarTy.getSizeInBits(),
ScalarTy.getSizeInBits().getFixedSize(),
ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
}
@@ -100,12 +100,14 @@ public:
return EC.isScalar() ? ScalarTy : LLT::vector(EC, ScalarTy);
}
static LLT scalarOrVector(ElementCount EC, unsigned ScalarSize) {
return scalarOrVector(EC, LLT::scalar(ScalarSize));
static LLT scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
assert(ScalarSize <= std::numeric_limits<unsigned>::max() &&
"Not enough bits in LLT to represent size");
return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
}
explicit LLT(bool isPointer, bool isVector, ElementCount EC,
unsigned SizeInBits, unsigned AddressSpace) {
uint64_t SizeInBits, unsigned AddressSpace) {
init(isPointer, isVector, EC, SizeInBits, AddressSpace);
}
explicit LLT() : IsPointer(false), IsVector(false), RawData(0) {}
@@ -148,18 +150,19 @@ public:
}
/// Returns the total size of the type. Must only be called on sized types.
unsigned getSizeInBits() const {
TypeSize getSizeInBits() const {
if (isPointer() || isScalar())
return getScalarSizeInBits();
// FIXME: This should return a TypeSize in order to work for scalable
// vectors.
return getScalarSizeInBits() * getElementCount().getKnownMinValue();
return TypeSize::Fixed(getScalarSizeInBits());
auto EC = getElementCount();
return TypeSize(getScalarSizeInBits() * EC.getKnownMinValue(),
EC.isScalable());
}
/// Returns the total size of the type in bytes, i.e. number of whole bytes
/// needed to represent the size in bits. Must only be called on sized types.
unsigned getSizeInBytes() const {
return (getSizeInBits() + 7) / 8;
TypeSize getSizeInBytes() const {
TypeSize BaseSize = getSizeInBits();
return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
}
LLT getScalarType() const {
@@ -199,11 +202,11 @@ public:
getElementType());
}
assert(getSizeInBits() % Factor == 0);
return scalar(getSizeInBits() / Factor);
assert(getScalarSizeInBits() % Factor == 0);
return scalar(getScalarSizeInBits() / Factor);
}
bool isByteSized() const { return (getSizeInBits() & 7) == 0; }
bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); }
unsigned getScalarSizeInBits() const {
assert(RawData != 0 && "Invalid Type");
@@ -333,8 +336,10 @@ private:
return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
}
void init(bool IsPointer, bool IsVector, ElementCount EC, unsigned SizeInBits,
void init(bool IsPointer, bool IsVector, ElementCount EC, uint64_t SizeInBits,
unsigned AddressSpace) {
assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
"Not enough bits in LLT to represent size");
this->IsPointer = IsPointer;
this->IsVector = IsVector;
if (!IsVector) {

View File

@@ -355,7 +355,7 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
assert(OrigRegs.size() == 1);
LLT OrigTy = MRI.getType(OrigRegs[0]);
unsigned SrcSize = PartLLT.getSizeInBits() * Regs.size();
unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
if (SrcSize == OrigTy.getSizeInBits())
B.buildMerge(OrigRegs[0], Regs);
else {

View File

@@ -1565,7 +1565,7 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
Register SrcReg = getOrCreateVReg(**AI);
LLT SrcTy = MRI->getType(SrcReg);
if (SrcTy.isPointer())
MinPtrSize = std::min(SrcTy.getSizeInBits(), MinPtrSize);
MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
SrcRegs.push_back(SrcReg);
}

View File

@@ -1117,7 +1117,8 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
DstOps[0].getLLTTy(*getMRI());
}) &&
"type mismatch in output list");
assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
assert((TypeSize::ScalarTy)DstOps.size() *
DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
"input operands do not cover output register");
break;
@@ -1131,7 +1132,8 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
SrcOps[0].getLLTTy(*getMRI());
}) &&
"type mismatch in input list");
assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
assert((TypeSize::ScalarTy)SrcOps.size() *
SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
"input operands do not cover output register");
if (SrcOps.size() == 1)
@@ -1182,7 +1184,8 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
SrcOps[0].getLLTTy(*getMRI());
}) &&
"type mismatch in input list");
assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
assert((TypeSize::ScalarTy)SrcOps.size() *
SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
"input scalars do not exactly cover the output vector register");
break;
@@ -1215,7 +1218,8 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
SrcOps[0].getLLTTy(*getMRI()));
}) &&
"type mismatch in input list");
assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
assert((TypeSize::ScalarTy)SrcOps.size() *
SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
"input vectors do not exactly cover the output vector register");
break;

View File

@@ -81,6 +81,9 @@ TEST(LowLevelTypeTest, Vector) {
EXPECT_EQ(EC, VTy.getElementCount());
if (!EC.isScalable())
EXPECT_EQ(S * EC.getFixedValue(), VTy.getSizeInBits());
else
EXPECT_EQ(TypeSize::Scalable(S * EC.getKnownMinValue()),
VTy.getSizeInBits());
// Test equality operators.
EXPECT_TRUE(VTy == VTy);

View File

@@ -182,7 +182,13 @@ public:
assert((!Ty.isVector() || Ty.isScalable() == Other.Ty.isScalable()) &&
"Unexpected mismatch of scalable property");
return Ty.getSizeInBits() < Other.Ty.getSizeInBits();
return Ty.isVector()
? std::make_tuple(Ty.isScalable(),
Ty.getSizeInBits().getKnownMinSize()) <
std::make_tuple(Other.Ty.isScalable(),
Other.Ty.getSizeInBits().getKnownMinSize())
: Ty.getSizeInBits().getFixedSize() <
Other.Ty.getSizeInBits().getFixedSize();
}
bool operator==(const LLTCodeGen &B) const { return Ty == B.Ty; }
@@ -3788,7 +3794,8 @@ Optional<unsigned> GlobalISelEmitter::getMemSizeBitsFromPredicate(const TreePred
return None;
// Align so unusual types like i1 don't get rounded down.
return llvm::alignTo(MemTyOrNone->get().getSizeInBits(), 8);
return llvm::alignTo(
static_cast<unsigned>(MemTyOrNone->get().getSizeInBits()), 8);
}
Expected<InstructionMatcher &> GlobalISelEmitter::addBuiltinPredicates(