
[AArch64][SVE] Asm: Add SVE (Z) Register definitions and parsing support

Patch [3/5] in a series to add assembler/disassembler support for AArch64 SVE unpredicated ADD/SUB instructions.

To summarise, this patch adds:

 * SVE register definitions
 * Methods to parse SVE register operands (see the illustrative operand forms after this list)
 * Methods to print SVE register operands
 * A new RegKind, SVEDataVector, to distinguish SVE data vectors from other register types such as scalar registers and Neon vectors.
 * k_SVEDataRegister and SVEDataRegOp to describe SVE registers (to be extended by further patches with e.g. ElementWidth and the shift-extend type).
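
To illustrate, these are the operand forms the new parsing code is intended to accept (a sketch for illustration only, not taken from this patch's tests; register numbers are arbitrary):

    z0                              unsuffixed SVE data vector, any element width (ZPRAny)
    z1.b, z2.h, z3.s, z4.d, z5.q    suffixed forms, element widths of 8, 16, 32, 64 and 128 bits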


Patch by Sander De Smalen.

Reviewed by: rengolin

Differential Revision: https://reviews.llvm.org/D39089

llvm-svn: 317590
Florian Hahn, 2017-11-07 16:45:48 +00:00
commit 6277740a24 (parent 51e5f9cc30)
5 changed files with 330 additions and 1 deletion

File: lib/Target/AArch64/AArch64RegisterInfo.td

@@ -32,6 +32,12 @@ let Namespace = "AArch64" in {
def qsub : SubRegIndex<64>;
def sube64 : SubRegIndex<64>;
def subo64 : SubRegIndex<64>;
// SVE
def zsub : SubRegIndex<128>;
// Note: zsub_hi should never be used directly because it represents
// the scalable part of the SVE vector and cannot be manipulated as a
// subvector in the same way the lower 128 bits can.
def zsub_hi : SubRegIndex<128>;
// Note: Code depends on these having consecutive numbers
def dsub0 : SubRegIndex<64>;
def dsub1 : SubRegIndex<64>;
@@ -645,3 +651,119 @@ def XSeqPairClassOperand :
//===----- END: v8.1a atomic CASP register operands -----------------------===//
// The parts of the SVE registers that don't overlap the Neon registers.
// These are only used as part of clobber lists.
def Z0_HI : AArch64Reg<0, "z0_hi">;
def Z1_HI : AArch64Reg<1, "z1_hi">;
def Z2_HI : AArch64Reg<2, "z2_hi">;
def Z3_HI : AArch64Reg<3, "z3_hi">;
def Z4_HI : AArch64Reg<4, "z4_hi">;
def Z5_HI : AArch64Reg<5, "z5_hi">;
def Z6_HI : AArch64Reg<6, "z6_hi">;
def Z7_HI : AArch64Reg<7, "z7_hi">;
def Z8_HI : AArch64Reg<8, "z8_hi">;
def Z9_HI : AArch64Reg<9, "z9_hi">;
def Z10_HI : AArch64Reg<10, "z10_hi">;
def Z11_HI : AArch64Reg<11, "z11_hi">;
def Z12_HI : AArch64Reg<12, "z12_hi">;
def Z13_HI : AArch64Reg<13, "z13_hi">;
def Z14_HI : AArch64Reg<14, "z14_hi">;
def Z15_HI : AArch64Reg<15, "z15_hi">;
def Z16_HI : AArch64Reg<16, "z16_hi">;
def Z17_HI : AArch64Reg<17, "z17_hi">;
def Z18_HI : AArch64Reg<18, "z18_hi">;
def Z19_HI : AArch64Reg<19, "z19_hi">;
def Z20_HI : AArch64Reg<20, "z20_hi">;
def Z21_HI : AArch64Reg<21, "z21_hi">;
def Z22_HI : AArch64Reg<22, "z22_hi">;
def Z23_HI : AArch64Reg<23, "z23_hi">;
def Z24_HI : AArch64Reg<24, "z24_hi">;
def Z25_HI : AArch64Reg<25, "z25_hi">;
def Z26_HI : AArch64Reg<26, "z26_hi">;
def Z27_HI : AArch64Reg<27, "z27_hi">;
def Z28_HI : AArch64Reg<28, "z28_hi">;
def Z29_HI : AArch64Reg<29, "z29_hi">;
def Z30_HI : AArch64Reg<30, "z30_hi">;
def Z31_HI : AArch64Reg<31, "z31_hi">;
// SVE variable-size vector registers
let SubRegIndices = [zsub,zsub_hi] in {
def Z0 : AArch64Reg<0, "z0", [Q0, Z0_HI]>, DwarfRegNum<[96]>;
def Z1 : AArch64Reg<1, "z1", [Q1, Z1_HI]>, DwarfRegNum<[97]>;
def Z2 : AArch64Reg<2, "z2", [Q2, Z2_HI]>, DwarfRegNum<[98]>;
def Z3 : AArch64Reg<3, "z3", [Q3, Z3_HI]>, DwarfRegNum<[99]>;
def Z4 : AArch64Reg<4, "z4", [Q4, Z4_HI]>, DwarfRegNum<[100]>;
def Z5 : AArch64Reg<5, "z5", [Q5, Z5_HI]>, DwarfRegNum<[101]>;
def Z6 : AArch64Reg<6, "z6", [Q6, Z6_HI]>, DwarfRegNum<[102]>;
def Z7 : AArch64Reg<7, "z7", [Q7, Z7_HI]>, DwarfRegNum<[103]>;
def Z8 : AArch64Reg<8, "z8", [Q8, Z8_HI]>, DwarfRegNum<[104]>;
def Z9 : AArch64Reg<9, "z9", [Q9, Z9_HI]>, DwarfRegNum<[105]>;
def Z10 : AArch64Reg<10, "z10", [Q10, Z10_HI]>, DwarfRegNum<[106]>;
def Z11 : AArch64Reg<11, "z11", [Q11, Z11_HI]>, DwarfRegNum<[107]>;
def Z12 : AArch64Reg<12, "z12", [Q12, Z12_HI]>, DwarfRegNum<[108]>;
def Z13 : AArch64Reg<13, "z13", [Q13, Z13_HI]>, DwarfRegNum<[109]>;
def Z14 : AArch64Reg<14, "z14", [Q14, Z14_HI]>, DwarfRegNum<[110]>;
def Z15 : AArch64Reg<15, "z15", [Q15, Z15_HI]>, DwarfRegNum<[111]>;
def Z16 : AArch64Reg<16, "z16", [Q16, Z16_HI]>, DwarfRegNum<[112]>;
def Z17 : AArch64Reg<17, "z17", [Q17, Z17_HI]>, DwarfRegNum<[113]>;
def Z18 : AArch64Reg<18, "z18", [Q18, Z18_HI]>, DwarfRegNum<[114]>;
def Z19 : AArch64Reg<19, "z19", [Q19, Z19_HI]>, DwarfRegNum<[115]>;
def Z20 : AArch64Reg<20, "z20", [Q20, Z20_HI]>, DwarfRegNum<[116]>;
def Z21 : AArch64Reg<21, "z21", [Q21, Z21_HI]>, DwarfRegNum<[117]>;
def Z22 : AArch64Reg<22, "z22", [Q22, Z22_HI]>, DwarfRegNum<[118]>;
def Z23 : AArch64Reg<23, "z23", [Q23, Z23_HI]>, DwarfRegNum<[119]>;
def Z24 : AArch64Reg<24, "z24", [Q24, Z24_HI]>, DwarfRegNum<[120]>;
def Z25 : AArch64Reg<25, "z25", [Q25, Z25_HI]>, DwarfRegNum<[121]>;
def Z26 : AArch64Reg<26, "z26", [Q26, Z26_HI]>, DwarfRegNum<[122]>;
def Z27 : AArch64Reg<27, "z27", [Q27, Z27_HI]>, DwarfRegNum<[123]>;
def Z28 : AArch64Reg<28, "z28", [Q28, Z28_HI]>, DwarfRegNum<[124]>;
def Z29 : AArch64Reg<29, "z29", [Q29, Z29_HI]>, DwarfRegNum<[125]>;
def Z30 : AArch64Reg<30, "z30", [Q30, Z30_HI]>, DwarfRegNum<[126]>;
def Z31 : AArch64Reg<31, "z31", [Q31, Z31_HI]>, DwarfRegNum<[127]>;
}
class SVERegOp <string Suffix, AsmOperandClass C,
RegisterClass RC> : RegisterOperand<RC> {
let PrintMethod = !if(!eq(Suffix, ""),
"printSVERegOp<>",
"printSVERegOp<'" # Suffix # "'>");
let ParserMatchClass = C;
}
class ZPRRegOp <string Suffix, AsmOperandClass C,
RegisterClass RC> : SVERegOp<Suffix, C, RC> {}
//******************************************************************************
// SVE vector register class
def ZPR : RegisterClass<"AArch64",
[nxv16i8, nxv8i16, nxv4i32, nxv2i64,
nxv2f16, nxv4f16, nxv8f16,
nxv1f32, nxv2f32, nxv4f32,
nxv1f64, nxv2f64],
128, (sequence "Z%u", 0, 31)> {
let Size = 128;
}
class ZPRAsmOperand <string name, int Width>: AsmOperandClass {
let Name = "SVE" # name # "Reg";
let PredicateMethod = "isSVEDataVectorRegOfWidth<" # Width # ">";
let RenderMethod = "addRegOperands";
let ParserMethod = "tryParseSVEDataVector<"
# !if(!eq(Width, -1), "false", "true") # ">";
}
def ZPRAsmOpAny : ZPRAsmOperand<"VectorAny", -1>;
def ZPRAsmOp8 : ZPRAsmOperand<"VectorB", 8>;
def ZPRAsmOp16 : ZPRAsmOperand<"VectorH", 16>;
def ZPRAsmOp32 : ZPRAsmOperand<"VectorS", 32>;
def ZPRAsmOp64 : ZPRAsmOperand<"VectorD", 64>;
def ZPRAsmOp128 : ZPRAsmOperand<"VectorQ", 128>;
def ZPRAny : ZPRRegOp<"", ZPRAsmOpAny, ZPR>;
def ZPR8 : ZPRRegOp<"b", ZPRAsmOp8, ZPR>;
def ZPR16 : ZPRRegOp<"h", ZPRAsmOp16, ZPR>;
def ZPR32 : ZPRRegOp<"s", ZPRAsmOp32, ZPR>;
def ZPR64 : ZPRRegOp<"d", ZPRAsmOp64, ZPR>;
def ZPR128 : ZPRRegOp<"q", ZPRAsmOp128, ZPR>;

File: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp

@@ -59,7 +59,7 @@ using namespace llvm;
namespace {
-enum class RegKind {Scalar, NeonVector};
+enum class RegKind {Scalar, NeonVector, SVEDataVector};
class AArch64AsmParser : public MCTargetAsmParser {
private:
@@ -82,6 +82,7 @@ private:
unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
int tryParseRegister();
int tryMatchVectorRegister(StringRef &Kind, bool expected);
int tryParseSVEDataVectorRegister(const AsmToken &Tok, StringRef &Kind);
bool parseRegister(OperandVector &Operands);
bool parseSymbolicImmVal(const MCExpr *&ImmVal);
bool parseVectorList(OperandVector &Operands);
@@ -130,6 +131,8 @@ private:
OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
bool tryParseNeonVectorRegister(OperandVector &Operands);
OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
template <bool ParseSuffix>
OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
public:
enum AArch64MatchResultTy {
@@ -197,6 +200,8 @@ private:
struct RegOp {
unsigned RegNum;
RegKind Kind;
int ElementWidth;
};
struct VectorListOp {
@@ -820,6 +825,17 @@ public:
Reg.RegNum);
}
template <unsigned Class = AArch64::ZPRRegClassID>
bool isSVEDataVectorReg() const {
return (Kind == k_Register && Reg.Kind == RegKind::SVEDataVector) &&
AArch64MCRegisterClasses[Class].contains(getReg());
}
template <int ElementWidth> bool isSVEDataVectorRegOfWidth() const {
return isSVEDataVectorReg() &&
(ElementWidth == -1 || Reg.ElementWidth == ElementWidth);
}
bool isGPR32as64() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
@@ -1580,6 +1596,18 @@ public:
return Op;
}
static std::unique_ptr<AArch64Operand>
CreateReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
SMLoc S, SMLoc E, MCContext &Ctx) {
auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
Op->Reg.RegNum = RegNum;
Op->Reg.ElementWidth = ElementWidth;
Op->Reg.Kind = Kind;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
static std::unique_ptr<AArch64Operand>
CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
@@ -1860,6 +1888,57 @@ static bool isValidVectorKind(StringRef Name) {
.Default(false);
}
static unsigned matchSVEDataVectorRegName(StringRef Name) {
return StringSwitch<unsigned>(Name.lower())
.Case("z0", AArch64::Z0)
.Case("z1", AArch64::Z1)
.Case("z2", AArch64::Z2)
.Case("z3", AArch64::Z3)
.Case("z4", AArch64::Z4)
.Case("z5", AArch64::Z5)
.Case("z6", AArch64::Z6)
.Case("z7", AArch64::Z7)
.Case("z8", AArch64::Z8)
.Case("z9", AArch64::Z9)
.Case("z10", AArch64::Z10)
.Case("z11", AArch64::Z11)
.Case("z12", AArch64::Z12)
.Case("z13", AArch64::Z13)
.Case("z14", AArch64::Z14)
.Case("z15", AArch64::Z15)
.Case("z16", AArch64::Z16)
.Case("z17", AArch64::Z17)
.Case("z18", AArch64::Z18)
.Case("z19", AArch64::Z19)
.Case("z20", AArch64::Z20)
.Case("z21", AArch64::Z21)
.Case("z22", AArch64::Z22)
.Case("z23", AArch64::Z23)
.Case("z24", AArch64::Z24)
.Case("z25", AArch64::Z25)
.Case("z26", AArch64::Z26)
.Case("z27", AArch64::Z27)
.Case("z28", AArch64::Z28)
.Case("z29", AArch64::Z29)
.Case("z30", AArch64::Z30)
.Case("z31", AArch64::Z31)
.Default(0);
}
static bool isValidSVEKind(StringRef Name) {
return StringSwitch<bool>(Name.lower())
.Case(".b", true)
.Case(".h", true)
.Case(".s", true)
.Case(".d", true)
.Case(".q", true)
.Default(false);
}
static bool isSVEDataVectorRegister(StringRef Name) {
return Name[0] == 'z';
}
static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
char &ElementKind) {
assert(isValidVectorKind(Name));
@@ -1897,6 +1976,9 @@ unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
case RegKind::NeonVector:
RegNum = MatchNeonVectorRegName(Name);
break;
case RegKind::SVEDataVector:
RegNum = matchSVEDataVectorRegName(Name);
break;
}
if (!RegNum) {
@@ -1924,6 +2006,9 @@ int AArch64AsmParser::tryParseRegister() {
return -1;
std::string lowerCase = Tok.getString().lower();
if (isSVEDataVectorRegister(lowerCase))
return -1;
unsigned RegNum = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
// Also handle a few aliases of registers.
if (RegNum == 0)
@@ -2620,6 +2705,35 @@ bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
return false;
}
// tryParseSVEDataVectorRegister - Try to parse an SVE vector register name with
// an optional kind specifier. If it is a register specifier, eat the token
// and return it.
int AArch64AsmParser::tryParseSVEDataVectorRegister(const AsmToken &Tok,
StringRef &Kind) {
if (Tok.isNot(AsmToken::Identifier))
return -1;
StringRef Name = Tok.getString();
// If there is a kind specifier, it's separated from the register name by
// a '.'.
size_t Start = 0, Next = Name.find('.');
StringRef Head = Name.slice(Start, Next);
unsigned RegNum = matchRegisterNameAlias(Head, RegKind::SVEDataVector);
if (RegNum) {
if (Next != StringRef::npos) {
Kind = Name.slice(Next, StringRef::npos);
if (!isValidSVEKind(Kind)) {
TokError("invalid sve vector kind qualifier");
return -1;
}
}
return RegNum;
}
return -1;
}
/// parseRegister - Parse a non-vector register operand.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
SMLoc S = getLoc();
@@ -4188,6 +4302,17 @@ bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
return Error(SRegLoc, "vector register without type specifier expected");
}
if (RegNum == static_cast<unsigned>(-1)) {
StringRef Kind;
RegisterKind = RegKind::SVEDataVector;
int RegNumTmp = tryParseSVEDataVectorRegister(Parser.getTok(), Kind);
if (RegNumTmp != -1)
Parser.Lex();
RegNum = RegNumTmp;
if (!Kind.empty())
return Error(SRegLoc, "sve vector register without type specifier expected");
}
if (RegNum == static_cast<unsigned>(-1))
return Error(SRegLoc, "register name or alias expected");
@@ -4413,3 +4538,38 @@ AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
return MatchOperand_Success;
}
template <bool ParseSuffix>
OperandMatchResultTy
AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
MCAsmParser &Parser = getParser();
const SMLoc S = getLoc();
// Check for an SVE vector register specifier first.
StringRef Kind;
int RegNum = tryParseSVEDataVectorRegister(Parser.getTok(), Kind);
if (RegNum == -1)
return MatchOperand_NoMatch;
// Eat the SVE register token.
Parser.Lex();
if (ParseSuffix && Kind.empty())
return MatchOperand_NoMatch;
unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
.Case("", -1)
.Case(".b", 8)
.Case(".h", 16)
.Case(".s", 32)
.Case(".d", 64)
.Case(".q", 128)
.Default(0);
if (!ElementWidth)
return MatchOperand_NoMatch;
Operands.push_back(
AArch64Operand::CreateReg(RegNum, RegKind::SVEDataVector, ElementWidth,
S, S, getContext()));
return MatchOperand_Success;
}

File: lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp

@@ -85,6 +85,9 @@ static DecodeStatus DecodeDDDRegisterClass(MCInst &Inst, unsigned RegNo,
static DecodeStatus DecodeDDDDRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeZPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void *Decoder);
static DecodeStatus DecodeFixedPointScaleImm32(MCInst &Inst, unsigned Imm,
uint64_t Address,
@@ -436,6 +439,27 @@ static DecodeStatus DecodeGPR32spRegisterClass(MCInst &Inst, unsigned RegNo,
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned ZPRDecoderTable[] = {
AArch64::Z0, AArch64::Z1, AArch64::Z2, AArch64::Z3,
AArch64::Z4, AArch64::Z5, AArch64::Z6, AArch64::Z7,
AArch64::Z8, AArch64::Z9, AArch64::Z10, AArch64::Z11,
AArch64::Z12, AArch64::Z13, AArch64::Z14, AArch64::Z15,
AArch64::Z16, AArch64::Z17, AArch64::Z18, AArch64::Z19,
AArch64::Z20, AArch64::Z21, AArch64::Z22, AArch64::Z23,
AArch64::Z24, AArch64::Z25, AArch64::Z26, AArch64::Z27,
AArch64::Z28, AArch64::Z29, AArch64::Z30, AArch64::Z31
};
static DecodeStatus DecodeZPRRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
const void* Decoder) {
if (RegNo > 31)
return Fail;
unsigned Register = ZPRDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return Success;
}
static const unsigned VectorDecoderTable[] = {
AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3, AArch64::Q4,

File: lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp

@@ -1340,3 +1340,23 @@ void AArch64InstPrinter::printComplexRotationOp(const MCInst *MI, unsigned OpNo,
O << "#" << (Val * Angle) + Remainder;
}
template <char suffix>
void AArch64InstPrinter::printSVERegOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O) {
switch (suffix) {
case 0:
case 'b':
case 'h':
case 's':
case 'd':
case 'q':
break;
default: llvm_unreachable("Invalid kind specifier.");
}
unsigned Reg = MI->getOperand(OpNum).getReg();
O << getRegisterName(Reg);
if (suffix != 0)
O << '.' << suffix;
}

File: lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h

@@ -165,6 +165,9 @@ protected:
void printGPRSeqPairsClassOperand(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O);
template <char = 0>
void printSVERegOp(const MCInst *MI, unsigned OpNum,
const MCSubtargetInfo &STI, raw_ostream &O);
};
class AArch64AppleInstPrinter : public AArch64InstPrinter {