Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-24 03:33:20 +01:00)
[AArch64][AsmParser] Make parse function for VectorLists generic to other vector types.
Summary:
This patch adds 'RegisterKind' to the VectorListOp structure, so that this
operand type can be reused for SVE vector lists in a later patch. It also
refactors the 'tryParseVectorList' function so that it can be used directly
in the ParserMethod of an operand. The parser can now parse multiple kinds
of vectors and recover if there is no match.

This is patch [3/6] in a series to add assembler/disassembler support for
SVE's contiguous ST1 (scalar+imm) instructions.

Reviewers: fhahn, rengolin, javed.absar, huntergr, SjoerdMeijer, t.p.northover, echristo, evandro

Reviewed By: rengolin

Subscribers: kristof.beyls, llvm-commits

Differential Revision: https://reviews.llvm.org/D45429

llvm-svn: 329900
parent 392e36367a
commit 8a797c7bba
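The "recover if there is no match" behaviour in the summary refers to the MatchOperand_NoMatch convention used by custom operand parsers in the AArch64 assembler: instead of asserting on an unexpected token, the routine reports that the operand is not of its kind and restores the lexer, so the generated matcher can try another operand class. A minimal sketch of that convention, written as AArch64AsmParser member-function pseudocode (tryParseSomeVectorList and looksLikeThisListKind are illustrative stand-ins, not names from the patch):

// Sketch: how a ParserMethod-style routine reports "not mine" instead of
// failing hard, so the generated matcher can try other operand classes.
OperandMatchResultTy
AArch64AsmParser::tryParseSomeVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Wrong leading token: definitely not a vector list, let others try.
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat '{'.

  if (!looksLikeThisListKind()) { // stand-in for the real register/suffix checks
    // Put the '{' back so a different list operand (e.g. SVE vs. Neon)
    // can be matched against the same tokens.
    Parser.getLexer().UnLex(LCurly);
    return MatchOperand_NoMatch;
  }

  // ... parse the registers, then build and push the operand onto Operands ...
  return MatchOperand_Success;
}

The ExpectMatch parameter added in the parser changes below lets a caller that already knows the braces must introduce a vector list (such as parseNeonVectorList) turn the no-match case into a hard "vector register expected" error instead.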
@@ -489,24 +489,24 @@ def V128_lo : RegisterOperand<FPR128_lo, "printVRegOperand"> {
   let ParserMatchClass = VectorRegLoAsmOperand;
 }

-class TypedVecListAsmOperand<int count, int regsize, int lanes, string kind>
+class TypedVecListAsmOperand<int count, int regsize, int lanes, int eltsize>
     : AsmOperandClass {
-  let Name = "TypedVectorList" # count # "_" # lanes # kind;
+  let Name = "TypedVectorList" # count # "_" # lanes # eltsize;

   let PredicateMethod
-      = "isTypedVectorList<" # count # ", " # lanes # ", '" # kind # "'>";
+      = "isTypedVectorList<RegKind::NeonVector, " # count # ", " # lanes # ", " # eltsize # ">";
   let RenderMethod = "addVectorList" # regsize # "Operands<" # count # ">";
 }

-class TypedVecListRegOperand<RegisterClass Reg, int lanes, string kind>
+class TypedVecListRegOperand<RegisterClass Reg, int lanes, string eltsize>
     : RegisterOperand<Reg, "printTypedVectorList<" # lanes # ", '"
-                         # kind # "'>">;
+                         # eltsize # "'>">;

 multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
   // With implicit types (probably on instruction instead). E.g. { v0, v1 }
   def _64AsmOperand : AsmOperandClass {
     let Name = NAME # "64";
-    let PredicateMethod = "isImplicitlyTypedVectorList<" # count # ">";
+    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
     let RenderMethod = "addVectorList64Operands<" # count # ">";
   }

@@ -516,7 +516,7 @@ multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {

   def _128AsmOperand : AsmOperandClass {
     let Name = NAME # "128";
-    let PredicateMethod = "isImplicitlyTypedVectorList<" # count # ">";
+    let PredicateMethod = "isImplicitlyTypedVectorList<RegKind::NeonVector, " # count # ">";
     let RenderMethod = "addVectorList128Operands<" # count # ">";
   }

@@ -527,25 +527,25 @@ multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
   // 64-bit register lists with explicit type.

   // { v0.8b, v1.8b }
-  def _8bAsmOperand : TypedVecListAsmOperand<count, 64, 8, "b">;
+  def _8bAsmOperand : TypedVecListAsmOperand<count, 64, 8, 8>;
   def "8b" : TypedVecListRegOperand<Reg64, 8, "b"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8bAsmOperand");
   }

   // { v0.4h, v1.4h }
-  def _4hAsmOperand : TypedVecListAsmOperand<count, 64, 4, "h">;
+  def _4hAsmOperand : TypedVecListAsmOperand<count, 64, 4, 16>;
   def "4h" : TypedVecListRegOperand<Reg64, 4, "h"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4hAsmOperand");
   }

   // { v0.2s, v1.2s }
-  def _2sAsmOperand : TypedVecListAsmOperand<count, 64, 2, "s">;
+  def _2sAsmOperand : TypedVecListAsmOperand<count, 64, 2, 32>;
   def "2s" : TypedVecListRegOperand<Reg64, 2, "s"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2sAsmOperand");
   }

   // { v0.1d, v1.1d }
-  def _1dAsmOperand : TypedVecListAsmOperand<count, 64, 1, "d">;
+  def _1dAsmOperand : TypedVecListAsmOperand<count, 64, 1, 64>;
   def "1d" : TypedVecListRegOperand<Reg64, 1, "d"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_1dAsmOperand");
   }
@@ -553,49 +553,49 @@ multiclass VectorList<int count, RegisterClass Reg64, RegisterClass Reg128> {
   // 128-bit register lists with explicit type

   // { v0.16b, v1.16b }
-  def _16bAsmOperand : TypedVecListAsmOperand<count, 128, 16, "b">;
+  def _16bAsmOperand : TypedVecListAsmOperand<count, 128, 16, 8>;
   def "16b" : TypedVecListRegOperand<Reg128, 16, "b"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_16bAsmOperand");
   }

   // { v0.8h, v1.8h }
-  def _8hAsmOperand : TypedVecListAsmOperand<count, 128, 8, "h">;
+  def _8hAsmOperand : TypedVecListAsmOperand<count, 128, 8, 16>;
   def "8h" : TypedVecListRegOperand<Reg128, 8, "h"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_8hAsmOperand");
   }

   // { v0.4s, v1.4s }
-  def _4sAsmOperand : TypedVecListAsmOperand<count, 128, 4, "s">;
+  def _4sAsmOperand : TypedVecListAsmOperand<count, 128, 4, 32>;
   def "4s" : TypedVecListRegOperand<Reg128, 4, "s"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_4sAsmOperand");
   }

   // { v0.2d, v1.2d }
-  def _2dAsmOperand : TypedVecListAsmOperand<count, 128, 2, "d">;
+  def _2dAsmOperand : TypedVecListAsmOperand<count, 128, 2, 64>;
   def "2d" : TypedVecListRegOperand<Reg128, 2, "d"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_2dAsmOperand");
   }

   // { v0.b, v1.b }
-  def _bAsmOperand : TypedVecListAsmOperand<count, 128, 0, "b">;
+  def _bAsmOperand : TypedVecListAsmOperand<count, 128, 0, 8>;
   def "b" : TypedVecListRegOperand<Reg128, 0, "b"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_bAsmOperand");
   }

   // { v0.h, v1.h }
-  def _hAsmOperand : TypedVecListAsmOperand<count, 128, 0, "h">;
+  def _hAsmOperand : TypedVecListAsmOperand<count, 128, 0, 16>;
   def "h" : TypedVecListRegOperand<Reg128, 0, "h"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_hAsmOperand");
   }

   // { v0.s, v1.s }
-  def _sAsmOperand : TypedVecListAsmOperand<count, 128, 0, "s">;
+  def _sAsmOperand : TypedVecListAsmOperand<count, 128, 0, 32>;
   def "s" : TypedVecListRegOperand<Reg128, 0, "s"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_sAsmOperand");
   }

   // { v0.d, v1.d }
-  def _dAsmOperand : TypedVecListAsmOperand<count, 128, 0, "d">;
+  def _dAsmOperand : TypedVecListAsmOperand<count, 128, 0, 64>;
   def "d" : TypedVecListRegOperand<Reg128, 0, "d"> {
     let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_dAsmOperand");
   }
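For orientation only: the PredicateMethod and RenderMethod strings above are spliced into the TableGen-generated matcher (AArch64GenAsmMatcher.inc), which calls them on the parsed AArch64Operand. Assuming that wiring, replacing the suffix character with a RegKind plus an element bit width changes the generated predicate call for a two-register list such as { v0.8b, v1.8b } roughly as follows (a sketch, not the actual generated code):

// 'Op' stands for the AArch64Operand being checked; the real call sites are
// emitted by TableGen and look different in their surroundings.
bool WasNeon8b = Op.isTypedVectorList<2, 8, 'b'>();                    // before this patch
bool IsNeon8b  = Op.isTypedVectorList<RegKind::NeonVector, 2, 8, 8>(); // after this patch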
@@ -140,7 +140,9 @@ private:
   template <bool ParseSuffix>
   OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
   OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
-  bool tryParseVectorList(OperandVector &Operands);
+  template <RegKind VectorKind>
+  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
+                                          bool ExpectMatch = false);
   OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);

 public:
@@ -217,7 +219,8 @@ private:
     unsigned RegNum;
     unsigned Count;
     unsigned NumElements;
-    unsigned ElementKind;
+    unsigned ElementWidth;
+    RegKind RegisterKind;
   };

   struct VectorIndexOp {
@@ -861,18 +864,23 @@ public:

   /// Is this a vector list with the type implicit (presumably attached to the
   /// instruction itself)?
-  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
+  template <RegKind VectorKind, unsigned NumRegs>
+  bool isImplicitlyTypedVectorList() const {
     return Kind == k_VectorList && VectorList.Count == NumRegs &&
-           !VectorList.ElementKind;
+           VectorList.NumElements == 0 &&
+           VectorList.RegisterKind == VectorKind;
   }

-  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
+  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
+            unsigned ElementWidth>
   bool isTypedVectorList() const {
     if (Kind != k_VectorList)
       return false;
     if (VectorList.Count != NumRegs)
       return false;
-    if (VectorList.ElementKind != ElementKind)
+    if (VectorList.RegisterKind != VectorKind)
       return false;
+    if (VectorList.ElementWidth != ElementWidth)
+      return false;
     return VectorList.NumElements == NumElements;
   }
@@ -1590,30 +1598,14 @@ public:

   static std::unique_ptr<AArch64Operand>
   CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
-                   unsigned ElementWidth, SMLoc S, SMLoc E, MCContext &Ctx) {
+                   unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
+                   MCContext &Ctx) {
     auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
     Op->VectorList.RegNum = RegNum;
     Op->VectorList.Count = Count;
     Op->VectorList.NumElements = NumElements;
-    switch (ElementWidth) {
-    case 0:
-      Op->VectorList.ElementKind = 0;
-      break;
-    case 8:
-      Op->VectorList.ElementKind = 'b';
-      break;
-    case 16:
-      Op->VectorList.ElementKind = 'h';
-      break;
-    case 32:
-      Op->VectorList.ElementKind = 's';
-      break;
-    case 64:
-      Op->VectorList.ElementKind = 'd';
-      break;
-    default:
-      llvm_unreachable("Unsupported elementwidth");
-    }
+    Op->VectorList.ElementWidth = ElementWidth;
+    Op->VectorList.RegisterKind = RegisterKind;
     Op->StartLoc = S;
     Op->EndLoc = E;
     return Op;
@@ -2883,30 +2875,50 @@ bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
   return false;
 }

 /// parseVectorList - Parse a vector list operand for vector instructions.
-bool AArch64AsmParser::tryParseVectorList(OperandVector &Operands) {
+template <RegKind VectorKind>
+OperandMatchResultTy
+AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
+                                     bool ExpectMatch) {
   MCAsmParser &Parser = getParser();
-  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
+  if (!Parser.getTok().is(AsmToken::LCurly))
+    return MatchOperand_NoMatch;

   // Wrapper around parse function
-  auto ParseVector = [this](int &Reg, StringRef &Kind, SMLoc Loc) {
-    if (tryParseVectorRegister(Reg, Kind, RegKind::NeonVector) ==
-        MatchOperand_Success) {
+  auto ParseVector = [this, &Parser](int &Reg, StringRef &Kind, SMLoc Loc,
+                                     bool NoMatchIsError) {
+    auto RegTok = Parser.getTok();
+    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
+    if (ParseRes == MatchOperand_Success) {
       if (parseVectorKind(Kind, RegKind::NeonVector))
-        return true;
+        return ParseRes;
       llvm_unreachable("Expected a valid vector kind");
     }

-    Error(Loc, "vector register expected");
-    return false;
+    if (RegTok.isNot(AsmToken::Identifier) ||
+        ParseRes == MatchOperand_ParseFail ||
+        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
+      Error(Loc, "vector register expected");
+      return MatchOperand_ParseFail;
+    }
+
+    return MatchOperand_NoMatch;
   };

   SMLoc S = getLoc();
+  auto LCurly = Parser.getTok();
   Parser.Lex(); // Eat left bracket token.

   StringRef Kind;
   int FirstReg = -1;
-  if (!ParseVector(FirstReg, Kind, getLoc()))
-    return true;
+  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
+
+  // Put back the original left bracket if there was no match, so that
+  // different types of list-operands can be matched (e.g. SVE, Neon).
+  if (ParseRes == MatchOperand_NoMatch)
+    Parser.getLexer().UnLex(LCurly);
+
+  if (ParseRes != MatchOperand_Success)
+    return ParseRes;

   int64_t PrevReg = FirstReg;
   unsigned Count = 1;
@@ -2916,17 +2928,21 @@ bool AArch64AsmParser::tryParseVectorList(OperandVector &Operands) {
     StringRef NextKind;

     int Reg;
-    if (!ParseVector(Reg, NextKind, getLoc()))
-      return true;
+    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
+    if (ParseRes != MatchOperand_Success)
+      return ParseRes;

     // Any Kind suffices must match on all regs in the list.
-    if (Kind != NextKind)
-      return Error(Loc, "mismatched register size suffix");
+    if (Kind != NextKind) {
+      Error(Loc, "mismatched register size suffix");
+      return MatchOperand_ParseFail;
+    }

     unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

     if (Space == 0 || Space > 3) {
-      return Error(Loc, "invalid number of vectors");
+      Error(Loc, "invalid number of vectors");
+      return MatchOperand_ParseFail;
     }

     Count += Space;
@@ -2936,16 +2952,22 @@ bool AArch64AsmParser::tryParseVectorList(OperandVector &Operands) {
       SMLoc Loc = getLoc();
       StringRef NextKind;
       int Reg;
-      if (!ParseVector(Reg, NextKind, getLoc()))
-        return true;
+      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
+      if (ParseRes != MatchOperand_Success)
+        return ParseRes;
+
       // Any Kind suffices must match on all regs in the list.
-      if (Kind != NextKind)
-        return Error(Loc, "mismatched register size suffix");
+      if (Kind != NextKind) {
+        Error(Loc, "mismatched register size suffix");
+        return MatchOperand_ParseFail;
+      }

       // Registers must be incremental (with wraparound at 31)
       if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
-          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
-        return Error(Loc, "registers must be sequential");
+          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
+        Error(Loc, "registers must be sequential");
+        return MatchOperand_ParseFail;
+      }

       PrevReg = Reg;
       ++Count;
@@ -2953,27 +2975,31 @@ bool AArch64AsmParser::tryParseVectorList(OperandVector &Operands) {
   }

   if (parseToken(AsmToken::RCurly, "'}' expected"))
-    return true;
+    return MatchOperand_ParseFail;

-  if (Count > 4)
-    return Error(S, "invalid number of vectors");
+  if (Count > 4) {
+    Error(S, "invalid number of vectors");
+    return MatchOperand_ParseFail;
+  }

   unsigned NumElements = 0;
   unsigned ElementWidth = 0;
   if (!Kind.empty()) {
-    if (const auto &VK = parseVectorKind(Kind, RegKind::NeonVector))
+    if (const auto &VK = parseVectorKind(Kind, VectorKind))
       std::tie(NumElements, ElementWidth) = *VK;
   }

   Operands.push_back(AArch64Operand::CreateVectorList(
-      FirstReg, Count, NumElements, ElementWidth, S, getLoc(), getContext()));
+      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
+      getContext()));

-  return false;
+  return MatchOperand_Success;
 }

 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
-  if (tryParseVectorList(Operands))
+  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
+  if (ParseRes != MatchOperand_Success)
     return true;

   return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
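The payoff of templating the routine over RegKind is the reuse promised in the summary: a later patch in the series can point an SVE vector-list operand at the same function. A hypothetical wiring (the entry-point name is illustrative; the actual SVE list operands land in a follow-up patch) might look like:

// Hypothetical follow-up use: reuse the templated parser for SVE data-vector
// lists. ExpectMatch stays at its default (false), so a '{ ... }' that turns
// out to contain Neon registers is un-lexed and reported as
// MatchOperand_NoMatch rather than a hard error, letting the matcher fall
// through to the Neon vector-list operand classes.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEVectorList(OperandVector &Operands) {
  return tryParseVectorList<RegKind::SVEDataVector>(Operands);
}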