
TableGen: Add address space to matchers

Currently AMDGPU uses a CodePatPred to check address spaces from the
MachineMemOperand. Introduce a new first-class property so that the
existing patterns can be easily modified to use the newly generated
predicate, which will also be handled for GlobalISel.

I would prefer these to match against the pointer type of the
instruction, but that would be difficult to get working with
SelectionDAG compatibility. This is much easier for now and will avoid
a painful tablegen rewrite for all the loads and stores.

I'm also not sure if there's a better way to encode multiple address
spaces in the table than prefixing the list with the number of entries
to expect.

llvm-svn: 366128
Matt Arsenault 2019-07-15 20:59:42 +00:00
parent efeedd6709
commit 100b40be1a
7 changed files with 253 additions and 4 deletions

include/llvm/CodeGen/GlobalISel/InstructionSelector.h

@@ -138,6 +138,16 @@ enum {
  /// - MMOIdx - MMO index
  /// - Size - The size in bytes of the memory access
  GIM_CheckMemorySizeEqualTo,

  /// Check the address space of the memory access for the given machine
  /// memory operand.
  /// - InsnID - Instruction ID
  /// - MMOIdx - MMO index
  /// - NumAddrSpace - Number of valid address spaces
  /// - AddrSpaceN - An allowed address space of the memory access
  /// - AddrSpaceN+1 ...
  GIM_CheckMemoryAddressSpace,

  /// Check the size of the memory access for the given machine memory operand
  /// against the size of an operand.
  /// - InsnID - Instruction ID
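To make the GIM_CheckMemoryAddressSpace encoding concrete: a predicate that
accepts address spaces 123 and 455 (the values used by pat_frag_b in the test
file below) is emitted as the count followed by the allowed values:

  GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*/455,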

include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h

@@ -370,6 +370,45 @@ bool InstructionSelector::executeMatchTable(
        return false;
      break;
    }

    case GIM_CheckMemoryAddressSpace: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
      // This accepts a list of possible address spaces.
      const int NumAddrSpace = MatchTable[CurrentIdx++];

      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      // We still need to jump to the end of the address space list even if
      // a match is found early.
      const uint64_t LastIdx = CurrentIdx + NumAddrSpace;

      const MachineMemOperand *MMO =
          *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
      const unsigned MMOAddrSpace = MMO->getAddrSpace();

      bool Success = false;
      for (int I = 0; I != NumAddrSpace; ++I) {
        unsigned AddrSpace = MatchTable[CurrentIdx++];
        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                        dbgs() << "addrspace(" << MMOAddrSpace << ") vs "
                               << AddrSpace << '\n');

        if (AddrSpace == MMOAddrSpace) {
          Success = true;
          break;
        }
      }

      CurrentIdx = LastIdx;
      if (!Success && handleReject() == RejectAndGiveUp)
        return false;
      break;
    }

    case GIM_CheckMemorySizeEqualTo: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
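Cursor bookkeeping aside (CurrentIdx must always end up at LastIdx), the loop
above is a plain membership test. A minimal standalone sketch of that core
check, assuming the usual LLVM headers; mmoAddrSpaceMatches is a hypothetical
helper name, not part of this patch:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/CodeGen/MachineMemOperand.h"
  using namespace llvm;

  // Hypothetical distillation of GIM_CheckMemoryAddressSpace: the check
  // succeeds if the MMO's address space appears anywhere in the allowed list.
  static bool mmoAddrSpaceMatches(const MachineMemOperand &MMO,
                                  ArrayRef<unsigned> AllowedAddrSpaces) {
    return is_contained(AllowedAddrSpaces, MMO.getAddrSpace());
  }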

include/llvm/Target/TargetSelectionDAG.td

@@ -737,6 +737,10 @@ class PatFrags<dag ops, list<dag> frags, code pred = [{}],
  // cast<StoreSDNode>(N)->isTruncatingStore();
  bit IsTruncStore = ?;

  // cast<MemSDNode>(N)->getAddressSpace() == one of AddressSpaces
  // If this is empty, accept any address space.
  list<int> AddressSpaces = ?;

  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Monotonic
  bit IsAtomicOrderingMonotonic = ?;
  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Acquire

@@ -762,6 +766,8 @@ class PatFrags<dag ops, list<dag> frags, code pred = [{}],
  // cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
  // cast<StoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
  ValueType ScalarMemoryVT = ?;

  // TODO: Add alignment
}

// PatFrag - A version of PatFrags matching only a single fragment.

test/TableGen/address-space-patfrags.td

@@ -0,0 +1,85 @@
// RUN: llvm-tblgen -gen-dag-isel -I %p/../../include %s 2>&1 | FileCheck -check-prefix=SDAG %s
// RUN: llvm-tblgen -gen-global-isel -optimize-match-table=false -I %p/../../include %s -o - < %s | FileCheck -check-prefix=GISEL %s
include "llvm/Target/Target.td"
def TestTargetInstrInfo : InstrInfo;
def TestTarget : Target {
let InstructionSet = TestTargetInstrInfo;
}
def R0 : Register<"r0"> { let Namespace = "MyTarget"; }
def GPR32 : RegisterClass<"MyTarget", [i32], 32, (add R0)>;
// With one address space
def pat_frag_a : PatFrag <(ops node:$ptr), (load node:$ptr), [{}]> {
let AddressSpaces = [ 999 ];
let IsLoad = 1; // FIXME: Can this be inferred?
let MemoryVT = i32;
}
// With multiple address spaces
def pat_frag_b : PatFrag <(ops node:$ptr), (load node:$ptr), [{}]> {
let AddressSpaces = [ 123, 455 ];
let IsLoad = 1; // FIXME: Can this be inferred?
let MemoryVT = i32;
}
def inst_a : Instruction {
let OutOperandList = (outs GPR32:$dst);
let InOperandList = (ins GPR32:$src);
}
def inst_b : Instruction {
let OutOperandList = (outs GPR32:$dst);
let InOperandList = (ins GPR32:$src);
}
// SDAG: case 2: {
// SDAG: // Predicate_pat_frag_a
// SDAG-NEXT: SDNode *N = Node;
// SDAG-NEXT: (void)N;
// SDAG-NEXT: unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();
// SDAG-NEXT: if (AddrSpace != 999)
// SDAG-NEXT: return false;
// SDAG-NEXT: if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
// SDAG-NEXT: return true;
// GISEL: GIM_Try, /*On fail goto*//*Label 0*/ 47, // Rule ID 0 //
// GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
// GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/1, /*AddrSpace*/999,
// GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/4,
// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
def : Pat <
  (pat_frag_a GPR32:$src),
  (inst_a GPR32:$src)
>;
// SDAG: case 3: {
// SDAG-NEXT: // Predicate_pat_frag_b
// SDAG-NEXT: SDNode *N = Node;
// SDAG-NEXT: (void)N;
// SDAG-NEXT: unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();
// SDAG-NEXT: if (AddrSpace != 123 && AddrSpace != 455)
// SDAG-NEXT: return false;
// SDAG-NEXT: if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
// SDAG-NEXT: return true;
// GISEL: GIM_Try, /*On fail goto*//*Label 1*/ 95, // Rule ID 1 //
// GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
// GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*/455,
// GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/4,
// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
def : Pat <
  (pat_frag_b GPR32:$src),
  (inst_b GPR32:$src)
>;

utils/TableGen/CodeGenDAGPatterns.cpp

@@ -954,13 +954,33 @@ std::string TreePredicateFn::getPredCode() const {
}
  if (isLoad() || isStore() || isAtomic()) {
    if (ListInit *AddressSpaces = getAddressSpaces()) {
      Code += "unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();\n"
              " if (";

      bool First = true;
      for (Init *Val : AddressSpaces->getValues()) {
        if (First)
          First = false;
        else
          Code += " && ";

        IntInit *IntVal = dyn_cast<IntInit>(Val);
        if (!IntVal) {
          PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
                          "AddressSpaces element must be integer");
        }

        Code += "AddrSpace != " + utostr(IntVal->getValue());
      }

      Code += ")\nreturn false;\n";
    }

    Record *MemoryVT = getMemoryVT();

    if (MemoryVT)
      Code += ("if (cast<MemSDNode>(N)->getMemoryVT() != MVT::" +
               MemoryVT->getName() + ") return false;\n")
                  .str();
  }
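For reference, with AddressSpaces = [123, 455] (pat_frag_b in the test file
above), the string built here expands to a predicate body of the shape the
SDAG FileCheck lines verify:

  unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();
  if (AddrSpace != 123 && AddrSpace != 455)
    return false;
  if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
  return true;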
@@ -1149,6 +1169,14 @@ Record *TreePredicateFn::getMemoryVT() const {
    return nullptr;
  return R->getValueAsDef("MemoryVT");
}

ListInit *TreePredicateFn::getAddressSpaces() const {
  Record *R = getOrigPatFragRecord()->getRecord();
  if (R->isValueUnset("AddressSpaces"))
    return nullptr;
  return R->getValueAsListInit("AddressSpaces");
}

Record *TreePredicateFn::getScalarMemoryVT() const {
  Record *R = getOrigPatFragRecord()->getRecord();
  if (R->isValueUnset("ScalarMemoryVT"))

utils/TableGen/CodeGenDAGPatterns.h

@@ -593,6 +593,8 @@ public:
  /// ValueType record for the memory VT.
  Record *getScalarMemoryVT() const;

  ListInit *getAddressSpaces() const;

  // If true, indicates that GlobalISel-based C++ code was supplied.
  bool hasGISelPredicateCode() const;
  std::string getGISelPredicateCode() const;

utils/TableGen/GlobalISelEmitter.cpp

@@ -232,6 +232,23 @@ static std::string explainPredicates(const TreePatternNode *N) {
  if (Record *VT = P.getScalarMemoryVT())
    Explanation += (" ScalarVT(MemVT)=" + VT->getName()).str();

  if (ListInit *AddrSpaces = P.getAddressSpaces()) {
    raw_string_ostream OS(Explanation);
    OS << " AddressSpaces=[";

    StringRef AddrSpaceSeparator;
    for (Init *Val : AddrSpaces->getValues()) {
      IntInit *IntVal = dyn_cast<IntInit>(Val);
      if (!IntVal)
        continue;

      OS << AddrSpaceSeparator << IntVal->getValue();
      AddrSpaceSeparator = ", ";
    }

    OS << ']';
  }
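  // For example, a predicate restricted to address spaces 123 and 455
  // (pat_frag_b in the test file) appends " AddressSpaces=[123, 455]" to
  // the explanation string.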
  if (P.isAtomicOrderingMonotonic())
    Explanation += " monotonic";
  if (P.isAtomicOrderingAcquire())
@@ -308,6 +325,12 @@ static Error isTrivialOperatorNode(const TreePatternNode *N) {
      continue;
    }

    if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
      const ListInit *AddrSpaces = Predicate.getAddressSpaces();
      if (AddrSpaces && !AddrSpaces->empty())
        continue;
    }

    if (Predicate.isAtomic() && Predicate.getMemoryVT())
      continue;
@@ -1028,6 +1051,7 @@ public:
    IPM_AtomicOrderingMMO,
    IPM_MemoryLLTSize,
    IPM_MemoryVsLLTSize,
    IPM_MemoryAddressSpace,
    IPM_GenericPredicate,
    OPM_SameOperand,
    OPM_ComplexPattern,
@@ -1789,6 +1813,42 @@ public:
  }
};

class MemoryAddressSpacePredicateMatcher : public InstructionPredicateMatcher {
protected:
  unsigned MMOIdx;
  SmallVector<unsigned, 4> AddrSpaces;

public:
  MemoryAddressSpacePredicateMatcher(unsigned InsnVarID, unsigned MMOIdx,
                                     ArrayRef<unsigned> AddrSpaces)
      : InstructionPredicateMatcher(IPM_MemoryAddressSpace, InsnVarID),
        MMOIdx(MMOIdx), AddrSpaces(AddrSpaces.begin(), AddrSpaces.end()) {}

  static bool classof(const PredicateMatcher *P) {
    return P->getKind() == IPM_MemoryAddressSpace;
  }

  bool isIdentical(const PredicateMatcher &B) const override {
    if (!InstructionPredicateMatcher::isIdentical(B))
      return false;
    auto *Other = cast<MemoryAddressSpacePredicateMatcher>(&B);
    return MMOIdx == Other->MMOIdx && AddrSpaces == Other->AddrSpaces;
  }

  void emitPredicateOpcodes(MatchTable &Table,
                            RuleMatcher &Rule) const override {
    Table << MatchTable::Opcode("GIM_CheckMemoryAddressSpace")
          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
          << MatchTable::Comment("MMO") << MatchTable::IntValue(MMOIdx)
          // Encode number of address spaces to expect.
          << MatchTable::Comment("NumAddrSpace")
          << MatchTable::IntValue(AddrSpaces.size());
    for (unsigned AS : AddrSpaces)
      Table << MatchTable::Comment("AddrSpace") << MatchTable::IntValue(AS);

    Table << MatchTable::LineBreak;
  }
};
/// Generates code to check that the size of an MMO is less-than, equal-to, or
/// greater than a given LLT.
class MemoryVsLLTSizePredicateMatcher : public InstructionPredicateMatcher {
@@ -3210,7 +3270,26 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
      continue;
    }

    // An address space check is needed in all contexts if there is one.
    if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
      if (const ListInit *AddrSpaces = Predicate.getAddressSpaces()) {
        SmallVector<unsigned, 4> ParsedAddrSpaces;

        for (Init *Val : AddrSpaces->getValues()) {
          IntInit *IntVal = dyn_cast<IntInit>(Val);
          if (!IntVal)
            return failedImport("Address space is not an integer");

          ParsedAddrSpaces.push_back(IntVal->getValue());
        }

        if (!ParsedAddrSpaces.empty()) {
          InsnMatcher.addPredicate<MemoryAddressSpacePredicateMatcher>(
              0, ParsedAddrSpaces);
        }
      }
    }

    // G_LOAD is used for both non-extending and any-extending loads.
    if (Predicate.isLoad() && Predicate.isNonExtLoad()) {
      InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
          0, MemoryVsLLTSizePredicateMatcher::EqualTo, 0);