Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2025-02-01 05:01:59 +01:00)

[ARM] Add MVE vector bit-operations (register inputs).

This includes all the obvious bitwise operations (AND, OR, BIC, ORN, MVN) in register-to-register forms, and the immediate forms of AND/OR/BIC/ORN; byte-order reverse instructions; and the VMOVs that access a single lane of a vector.

Some of those VMOVs (specifically, the ones that access a 32-bit lane) share an encoding with existing instructions that were disassembled as accessing half of a d-register (e.g. `vmov.32 r0, d1[0]`), but in 8.1-M they're now written as accessing a quarter of a q-register (e.g. `vmov.32 r0, q0[2]`). The older syntax is still accepted by the assembler.

Reviewers: dmgreen, samparker, SjoerdMeijer, t.p.northover

Subscribers: javed.absar, kristof.beyls, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D62673

llvm-svn: 363838

This commit is contained in:
parent 41fc96e85e
commit bc0266a9ee
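Editor's note: to make the lane-syntax change described above concrete, here is a minimal standalone sketch (not part of the patch; the helper name is hypothetical) of how a 32-bit lane written against a d-register maps onto the enclosing q-register: the q-register number is half the d-register number, and the odd/even half of the d-register pair folds into the lane index.

#include <cstdio>
#include <utility>

// Hypothetical helper, not from the patch: convert the pre-8.1-M spelling
// "d<DReg>[<Lane>]" of a 32-bit lane into the v8.1-M spelling "q<QReg>[<QLane>]".
// Each q-register overlays two consecutive d-registers, so QReg = DReg / 2 and
// an odd d-register contributes two extra lanes to the index.
static std::pair<unsigned, unsigned> dLaneToQLane(unsigned DReg, unsigned Lane) {
  return {DReg / 2, (DReg % 2) * 2 + Lane};
}

int main() {
  auto [QReg, QLane] = dLaneToQLane(1, 0);                // d-register form: vmov.32 r0, d1[0]
  std::printf("vmov.32 r0, q%u[%u]\n", QReg, QLane);      // prints "vmov.32 r0, q0[2]"
  return 0;
}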
@@ -465,6 +465,8 @@ class VFP2AsmPseudo<string asm, dag iops, dag oops = (outs)>
    : AsmPseudoInst<asm, iops, oops>, Requires<[HasVFP2]>;
class NEONAsmPseudo<string asm, dag iops, dag oops = (outs)>
    : AsmPseudoInst<asm, iops, oops>, Requires<[HasNEON]>;
class MVEAsmPseudo<string asm, dag iops, dag oops = (outs)>
    : AsmPseudoInst<asm, iops, oops>, Requires<[HasMVEInt]>;

// Pseudo instructions for the code generator.
class PseudoInst<dag oops, dag iops, InstrItinClass itin, list<dag> pattern>
@@ -461,6 +461,19 @@ def rot_imm : Operand<i32>, PatLeaf<(i32 imm), [{
  let ParserMatchClass = RotImmAsmOperand;
}

// Vector indexing
class MVEVectorIndexOperand<int NumLanes> : AsmOperandClass {
  let Name = "MVEVectorIndex"#NumLanes;
  let RenderMethod = "addMVEVectorIndexOperands";
  let PredicateMethod = "isVectorIndexInRange<"#NumLanes#">";
}

class MVEVectorIndex<int NumLanes> : Operand<i32> {
  let PrintMethod = "printVectorIndex";
  let ParserMatchClass = MVEVectorIndexOperand<NumLanes>;
  let MIOperandInfo = (ops i32imm);
}

// shift_imm: An integer that encodes a shift amount and the type of shift
// (asr or lsl). The 6-bit immediate encodes as:
//    {5}     0 ==> lsl
@@ -10,6 +10,44 @@
//
//===----------------------------------------------------------------------===//

class ExpandImmAsmOp<string shift> : AsmOperandClass {
  let Name = !strconcat("ExpandImm", shift);
  let PredicateMethod = !strconcat("isExpImm<", shift, ">");
  let RenderMethod = "addImmOperands";
}
class InvertedExpandImmAsmOp<string shift, string size> : AsmOperandClass {
  let Name = !strconcat("InvertedExpandImm", shift, "_", size);
  let PredicateMethod = !strconcat("isInvertedExpImm<", shift, ",", size, ">");
  let RenderMethod = "addImmOperands";
}

class ExpandImm<string shift> : Operand<i32> {
  let ParserMatchClass = ExpandImmAsmOp<shift>;
  let EncoderMethod = !strconcat("getExpandedImmOpValue<",shift,",false>");
  let DecoderMethod = !strconcat("DecodeExpandedImmOperand<",shift,">");
  let PrintMethod = "printExpandedImmOperand";
}
class InvertedExpandImm<string shift, string size> : Operand<i32> {
  let ParserMatchClass = InvertedExpandImmAsmOp<shift, size>;
  let EncoderMethod = !strconcat("getExpandedImmOpValue<",shift,",true>");
  let PrintMethod = "printExpandedImmOperand";
  // No decoder method needed, because this operand type is only used
  // by aliases (VAND and VORN)
}

def expzero00 : ExpandImm<"0">;
def expzero08 : ExpandImm<"8">;
def expzero16 : ExpandImm<"16">;
def expzero24 : ExpandImm<"24">;

def expzero00inv16 : InvertedExpandImm<"0", "16">;
def expzero08inv16 : InvertedExpandImm<"8", "16">;

def expzero00inv32 : InvertedExpandImm<"0", "32">;
def expzero08inv32 : InvertedExpandImm<"8", "32">;
def expzero16inv32 : InvertedExpandImm<"16", "32">;
def expzero24inv32 : InvertedExpandImm<"24", "32">;

// VPT condition mask
def vpt_mask : Operand<i32> {
  let PrintMethod = "printVPTMask";
@@ -113,6 +151,17 @@ class MVE_MI_with_pred<dag oops, dag iops, InstrItinClass itin, string asm,
  let DecoderNamespace = "MVE";
}

class MVE_VMOV_lane_base<dag oops, dag iops, InstrItinClass itin, string asm,
                         string suffix, string ops, string cstr,
                         list<dag> pattern>
  : Thumb2I<oops, iops, AddrModeNone, 4, itin, asm,
            !if(!eq(suffix, ""), "", "." # suffix) # "\t" # ops,
            cstr, pattern>,
    Requires<[HasV8_1MMainline, HasMVEInt]> {
  let D = MVEDomain;
  let DecoderNamespace = "MVE";
}

class MVE_ScalarShift<string iname, dag oops, dag iops, string asm, string cstr,
                      list<dag> pattern=[]>
  : MVE_MI_with_pred<oops, iops, NoItinerary, iname, asm, cstr, pattern> {
@@ -1217,6 +1266,261 @@ def MVE_VSHL_immi32 : MVE_VSHL_imm<"i32", (ins imm0_31:$imm)> {
}
// end of mve_shift instructions

// start of mve_bit instructions

class MVE_bit_arith<dag oops, dag iops, string iname, string suffix,
                    string ops, string cstr, list<dag> pattern=[]>
  : MVE_p<oops, iops, NoItinerary, iname, suffix, ops, vpred_r, cstr, pattern> {
  bits<4> Qd;
  bits<4> Qm;

  let Inst{22} = Qd{3};
  let Inst{15-13} = Qd{2-0};
  let Inst{5} = Qm{3};
  let Inst{3-1} = Qm{2-0};
}

def MVE_VBIC : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm),
                             "vbic", "", "$Qd, $Qn, $Qm", ""> {
  bits<4> Qn;

  let Inst{28} = 0b0;
  let Inst{25-23} = 0b110;
  let Inst{21-20} = 0b01;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b0;
  let Inst{12-8} = 0b00001;
  let Inst{7} = Qn{3};
  let Inst{6} = 0b1;
  let Inst{4} = 0b1;
  let Inst{0} = 0b0;
}

class MVE_VREV<string iname, string suffix, bits<2> size, bits<2> bit_8_7>
  : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qm), iname,
                  suffix, "$Qd, $Qm", ""> {

  let Inst{28} = 0b1;
  let Inst{25-23} = 0b111;
  let Inst{21-20} = 0b11;
  let Inst{19-18} = size;
  let Inst{17-16} = 0b00;
  let Inst{12-9} = 0b0000;
  let Inst{8-7} = bit_8_7;
  let Inst{6} = 0b1;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
}

def MVE_VREV64_8  : MVE_VREV<"vrev64", "8", 0b00, 0b00>;
def MVE_VREV64_16 : MVE_VREV<"vrev64", "16", 0b01, 0b00>;
def MVE_VREV64_32 : MVE_VREV<"vrev64", "32", 0b10, 0b00>;

def MVE_VREV32_8  : MVE_VREV<"vrev32", "8", 0b00, 0b01>;
def MVE_VREV32_16 : MVE_VREV<"vrev32", "16", 0b01, 0b01>;

def MVE_VREV16_8  : MVE_VREV<"vrev16", "8", 0b00, 0b10>;

def MVE_VMVN : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qm),
                             "vmvn", "", "$Qd, $Qm", ""> {
  let Inst{28} = 0b1;
  let Inst{25-23} = 0b111;
  let Inst{21-16} = 0b110000;
  let Inst{12-6} = 0b0010111;
  let Inst{4} = 0b0;
  let Inst{0} = 0b0;
}

class MVE_bit_ops<string iname, bits<2> bit_21_20, bit bit_28>
  : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm),
                  iname, "", "$Qd, $Qn, $Qm", ""> {
  bits<4> Qn;

  let Inst{28} = bit_28;
  let Inst{25-23} = 0b110;
  let Inst{21-20} = bit_21_20;
  let Inst{19-17} = Qn{2-0};
  let Inst{16} = 0b0;
  let Inst{12-8} = 0b00001;
  let Inst{7} = Qn{3};
  let Inst{6} = 0b1;
  let Inst{4} = 0b1;
  let Inst{0} = 0b0;
}

def MVE_VEOR : MVE_bit_ops<"veor", 0b00, 0b1>;
def MVE_VORN : MVE_bit_ops<"vorn", 0b11, 0b0>;
def MVE_VORR : MVE_bit_ops<"vorr", 0b10, 0b0>;
def MVE_VAND : MVE_bit_ops<"vand", 0b00, 0b0>;

// add ignored suffixes as aliases

foreach s=["s8", "s16", "s32", "u8", "u16", "u32", "i8", "i16", "i32", "f16", "f32"] in {
  def : MVEInstAlias<"vbic${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
                     (MVE_VBIC MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
  def : MVEInstAlias<"veor${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
                     (MVE_VEOR MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
  def : MVEInstAlias<"vorn${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
                     (MVE_VORN MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
  def : MVEInstAlias<"vorr${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
                     (MVE_VORR MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
  def : MVEInstAlias<"vand${vp}." # s # "\t$QdSrc, $QnSrc, $QmSrc",
                     (MVE_VAND MQPR:$QdSrc, MQPR:$QnSrc, MQPR:$QmSrc, vpred_r:$vp)>;
}

class MVE_bit_cmode<string iname, string suffix, bits<4> cmode, dag inOps>
  : MVE_p<(outs MQPR:$Qd), inOps, NoItinerary,
          iname, suffix, "$Qd, $imm", vpred_n, "$Qd = $Qd_src"> {
  bits<8> imm;
  bits<4> Qd;

  let Inst{28} = imm{7};
  let Inst{27-23} = 0b11111;
  let Inst{22} = Qd{3};
  let Inst{21-19} = 0b000;
  let Inst{18-16} = imm{6-4};
  let Inst{15-13} = Qd{2-0};
  let Inst{12} = 0b0;
  let Inst{11-8} = cmode;
  let Inst{7-6} = 0b01;
  let Inst{4} = 0b1;
  let Inst{3-0} = imm{3-0};
}

class MVE_VORR<string suffix, bits<4> cmode, ExpandImm imm_type>
  : MVE_bit_cmode<"vorr", suffix, cmode, (ins MQPR:$Qd_src, imm_type:$imm)> {
  let Inst{5} = 0b0;
}

def MVE_VORRIZ0v4i32  : MVE_VORR<"i32", 0b0001, expzero00>;
def MVE_VORRIZ0v8i16  : MVE_VORR<"i16", 0b1001, expzero00>;
def MVE_VORRIZ8v4i32  : MVE_VORR<"i32", 0b0011, expzero08>;
def MVE_VORRIZ8v8i16  : MVE_VORR<"i16", 0b1011, expzero08>;
def MVE_VORRIZ16v4i32 : MVE_VORR<"i32", 0b0101, expzero16>;
def MVE_VORRIZ24v4i32 : MVE_VORR<"i32", 0b0111, expzero24>;

def MVE_VORNIZ0v4i32 : MVEAsmPseudo<"vorn${vp}.i32\t$Qd, $imm",
    (ins MQPR:$Qd_src, expzero00inv32:$imm, vpred_n:$vp), (outs MQPR:$Qd)>;
def MVE_VORNIZ0v8i16 : MVEAsmPseudo<"vorn${vp}.i16\t$Qd, $imm",
    (ins MQPR:$Qd_src, expzero00inv16:$imm, vpred_n:$vp), (outs MQPR:$Qd)>;
def MVE_VORNIZ8v4i32 : MVEAsmPseudo<"vorn${vp}.i32\t$Qd, $imm",
    (ins MQPR:$Qd_src, expzero08inv32:$imm, vpred_n:$vp), (outs MQPR:$Qd)>;
def MVE_VORNIZ8v8i16 : MVEAsmPseudo<"vorn${vp}.i16\t$Qd, $imm",
    (ins MQPR:$Qd_src, expzero08inv16:$imm, vpred_n:$vp), (outs MQPR:$Qd)>;
def MVE_VORNIZ16v4i32 : MVEAsmPseudo<"vorn${vp}.i32\t$Qd, $imm",
    (ins MQPR:$Qd_src, expzero16inv32:$imm, vpred_n:$vp), (outs MQPR:$Qd)>;
def MVE_VORNIZ24v4i32 : MVEAsmPseudo<"vorn${vp}.i32\t$Qd, $imm",
    (ins MQPR:$Qd_src, expzero24inv32:$imm, vpred_n:$vp), (outs MQPR:$Qd)>;

def MVE_VMOV : MVEInstAlias<"vmov${vp}\t$Qd, $Qm",
    (MVE_VORR MQPR:$Qd, MQPR:$Qm, MQPR:$Qm, vpred_r:$vp)>;

class MVE_VBIC<string suffix, bits<4> cmode, ExpandImm imm_type>
  : MVE_bit_cmode<"vbic", suffix, cmode, (ins MQPR:$Qd_src, imm_type:$imm)> {
  let Inst{5} = 0b1;
}

def MVE_VBICIZ0v4i32  : MVE_VBIC<"i32", 0b0001, expzero00>;
def MVE_VBICIZ0v8i16  : MVE_VBIC<"i16", 0b1001, expzero00>;
def MVE_VBICIZ8v4i32  : MVE_VBIC<"i32", 0b0011, expzero08>;
def MVE_VBICIZ8v8i16  : MVE_VBIC<"i16", 0b1011, expzero08>;
def MVE_VBICIZ16v4i32 : MVE_VBIC<"i32", 0b0101, expzero16>;
def MVE_VBICIZ24v4i32 : MVE_VBIC<"i32", 0b0111, expzero24>;

def MVE_VANDIZ0v4i32 : MVEAsmPseudo<"vand${vp}.i32\t$Qda, $imm",
    (ins MQPR:$Qda_src, expzero00inv32:$imm, vpred_n:$vp), (outs MQPR:$Qda)>;
def MVE_VANDIZ0v8i16 : MVEAsmPseudo<"vand${vp}.i16\t$Qda, $imm",
    (ins MQPR:$Qda_src, expzero00inv16:$imm, vpred_n:$vp), (outs MQPR:$Qda)>;
def MVE_VANDIZ8v4i32 : MVEAsmPseudo<"vand${vp}.i32\t$Qda, $imm",
    (ins MQPR:$Qda_src, expzero08inv32:$imm, vpred_n:$vp), (outs MQPR:$Qda)>;
def MVE_VANDIZ8v8i16 : MVEAsmPseudo<"vand${vp}.i16\t$Qda, $imm",
    (ins MQPR:$Qda_src, expzero08inv16:$imm, vpred_n:$vp), (outs MQPR:$Qda)>;
def MVE_VANDIZ16v4i32 : MVEAsmPseudo<"vand${vp}.i32\t$Qda, $imm",
    (ins MQPR:$Qda_src, expzero16inv32:$imm, vpred_n:$vp), (outs MQPR:$Qda)>;
def MVE_VANDIZ24v4i32 : MVEAsmPseudo<"vand${vp}.i32\t$Qda, $imm",
    (ins MQPR:$Qda_src, expzero24inv32:$imm, vpred_n:$vp), (outs MQPR:$Qda)>;

class MVE_VMOV_lane_direction {
  bit bit_20;
  dag oops;
  dag iops;
  string ops;
  string cstr;
}
def MVE_VMOV_from_lane : MVE_VMOV_lane_direction {
  let bit_20 = 0b1;
  let oops = (outs rGPR:$Rt);
  let iops = (ins MQPR:$Qd);
  let ops = "$Rt, $Qd$Idx";
  let cstr = "";
}
def MVE_VMOV_to_lane : MVE_VMOV_lane_direction {
  let bit_20 = 0b0;
  let oops = (outs MQPR:$Qd);
  let iops = (ins MQPR:$Qd_src, rGPR:$Rt);
  let ops = "$Qd$Idx, $Rt";
  let cstr = "$Qd = $Qd_src";
}

class MVE_VMOV_lane<string suffix, bit U, dag indexop,
                    MVE_VMOV_lane_direction dir>
  : MVE_VMOV_lane_base<dir.oops, !con(dir.iops, indexop), NoItinerary,
                       "vmov", suffix, dir.ops, dir.cstr, []> {
  bits<4> Qd;
  bits<5> Rt;

  let Inst{31-24} = 0b11101110;
  let Inst{23} = U;
  let Inst{20} = dir.bit_20;
  let Inst{19-17} = Qd{2-0};
  let Inst{15-12} = Rt{3-0};
  let Inst{11-8} = 0b1011;
  let Inst{7} = Qd{3};
  let Inst{4-0} = 0b10000;
}

class MVE_VMOV_lane_32<MVE_VMOV_lane_direction dir>
  : MVE_VMOV_lane<"32", 0b0, (ins MVEVectorIndex<4>:$Idx), dir> {
  bits<2> Idx;
  let Inst{22} = 0b0;
  let Inst{6-5} = 0b00;
  let Inst{16} = Idx{1};
  let Inst{21} = Idx{0};

  let Predicates = [HasFPRegsV8_1M];
}

class MVE_VMOV_lane_16<string suffix, bit U, MVE_VMOV_lane_direction dir>
  : MVE_VMOV_lane<suffix, U, (ins MVEVectorIndex<8>:$Idx), dir> {
  bits<3> Idx;
  let Inst{22} = 0b0;
  let Inst{5} = 0b1;
  let Inst{16} = Idx{2};
  let Inst{21} = Idx{1};
  let Inst{6} = Idx{0};
}

class MVE_VMOV_lane_8<string suffix, bit U, MVE_VMOV_lane_direction dir>
  : MVE_VMOV_lane<suffix, U, (ins MVEVectorIndex<16>:$Idx), dir> {
  bits<4> Idx;
  let Inst{22} = 0b1;
  let Inst{16} = Idx{3};
  let Inst{21} = Idx{2};
  let Inst{6} = Idx{1};
  let Inst{5} = Idx{0};
}

def MVE_VMOV_from_lane_32  : MVE_VMOV_lane_32< MVE_VMOV_from_lane>;
def MVE_VMOV_to_lane_32    : MVE_VMOV_lane_32< MVE_VMOV_to_lane>;
def MVE_VMOV_from_lane_s16 : MVE_VMOV_lane_16<"s16", 0b0, MVE_VMOV_from_lane>;
def MVE_VMOV_from_lane_u16 : MVE_VMOV_lane_16<"u16", 0b1, MVE_VMOV_from_lane>;
def MVE_VMOV_to_lane_16    : MVE_VMOV_lane_16< "16", 0b0, MVE_VMOV_to_lane>;
def MVE_VMOV_from_lane_s8  : MVE_VMOV_lane_8 < "s8", 0b0, MVE_VMOV_from_lane>;
def MVE_VMOV_from_lane_u8  : MVE_VMOV_lane_8 < "u8", 0b1, MVE_VMOV_from_lane>;
def MVE_VMOV_to_lane_8     : MVE_VMOV_lane_8 <  "8", 0b0, MVE_VMOV_to_lane>;

// end of mve_bit instructions

class MVE_VPT<string suffix, bits<2> size, dag iops, string asm, list<dag> pattern=[]>
  : MVE_MI<(outs ), iops, NoItinerary, !strconcat("vpt", "${Mk}", ".", suffix), asm, "", pattern> {
  bits<3> fc;
@@ -1143,6 +1143,34 @@ public:
    return isImmediate<1, 33>();
  }

  template<int shift>
  bool isExpImmValue(uint64_t Value) const {
    uint64_t mask = (1 << shift) - 1;
    if ((Value & mask) != 0 || (Value >> shift) > 0xff)
      return false;
    return true;
  }

  template<int shift>
  bool isExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return isExpImmValue<shift>(CE->getValue());
  }

  template<int shift, int size>
  bool isInvertedExpImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    uint64_t OriginalValue = CE->getValue();
    uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
    return isExpImmValue<shift>(InvertedValue);
  }

  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }
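Editor's note: a standalone sketch of what the predicates above accept (this is a re-statement for illustration, not code from the patch; the free functions are hypothetical). An operand is a valid "expanded immediate" for a given byte shift when all bits below the shift are clear and the remaining value fits in eight bits; the inverted form first flips the value within the element size.

#include <cassert>
#include <cstdint>

// Hypothetical free functions mirroring ARMOperand::isExpImmValue /
// isInvertedExpImm above.
template <int shift> bool isExpImmValue(uint64_t Value) {
  uint64_t Mask = (1ULL << shift) - 1;
  return (Value & Mask) == 0 && (Value >> shift) <= 0xff;
}

template <int shift, int size> bool isInvertedExpImmValue(uint64_t Value) {
  uint64_t Inverted = Value ^ ((1ULL << size) - 1); // invert within the element
  return isExpImmValue<shift>(Inverted);
}

int main() {
  assert((isExpImmValue<8>(0x1200)));              // vorr.i32 q0, #0x1200 is encodable
  assert((!isExpImmValue<0>(0x1200)));             // ...but not at byte position 0
  assert((isInvertedExpImmValue<0, 16>(0xff12)));  // vorn.i16 q0, #0xff12 -> vorr.i16 q0, #0xed
  return 0;
}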
@@ -1897,24 +1925,16 @@ public:

  bool isVectorIndex() const { return Kind == k_VectorIndex; }

  bool isVectorIndex8() const {
  template <unsigned NumLanes>
  bool isVectorIndexInRange() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
    return VectorIndex.Val < NumLanes;
  }

  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }

  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }
  bool isVectorIndex64() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 1;
  }
  bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }

  bool isNEONi8splat() const {
    if (!isImm()) return false;
@@ -2961,6 +2981,11 @@ public:
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
@@ -5936,7 +5961,8 @@ StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
      Mnemonic != "sbcs" && Mnemonic != "rscs" &&
      !(hasMVE() &&
        (Mnemonic == "vmine" ||
         Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt"))) {
         Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt" ||
         Mnemonic == "vmvne" || Mnemonic == "vorne"))) {
    unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
@@ -6310,17 +6336,33 @@ bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
  if (!hasMVE() || Operands.size() < 3)
    return true;

  for (auto &Operand : Operands) {
    // We check the larger class QPR instead of just the legal class
    // MQPR, to more accurately report errors when using Q registers
    // outside of the allowed range.
    if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
        (Operand->isReg() &&
         (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
  if (Mnemonic.startswith("vmov") &&
      !(Mnemonic.startswith("vmovl") || Mnemonic.startswith("vmovn") ||
        Mnemonic.startswith("vmovx"))) {
    for (auto &Operand : Operands) {
      if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
          ((*Operand).isReg() &&
           (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
              (*Operand).getReg()) ||
            ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
              (*Operand).getReg())))) {
        return true;
      }
    }
    return false;
  } else {
    for (auto &Operand : Operands) {
      // We check the larger class QPR instead of just the legal class
      // MQPR, to more accurately report errors when using Q registers
      // outside of the allowed range.
      if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
          (Operand->isReg() &&
           (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
              Operand->getReg()))))
        return false;
      return false;
    }
    return true;
  }
  return true;
}

static bool isDataTypeToken(StringRef Tok) {
@@ -6618,6 +6660,21 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                    ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
    Operands.insert(Operands.begin(),
                    ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
  }
  // For vmov instructions, as mentioned earlier, we did not add the vector
  // predication code, since these may contain operands that require
  // special parsing. So now we have to see if they require vector
  // predication and replace the scalar one with the vector predication
  // operand if that is the case.
  else if (Mnemonic == "vmov") {
    if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
      Operands.erase(Operands.begin() + 1);
      SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                         Mnemonic.size() + CarrySetting);
      Operands.insert(Operands.begin() + 1,
                      ARMOperand::CreateVPTPred(
                          ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
    }
  } else if (CanAcceptVPTPredicationCode) {
    // For all other instructions, make sure only one of the two
    // predication operands is left behind, depending on whether we should
@@ -7714,6 +7771,50 @@ bool ARMAsmParser::processInstruction(MCInst &Inst,
  }

  switch (Inst.getOpcode()) {
  case ARM::MVE_VORNIZ0v4i32:
  case ARM::MVE_VORNIZ0v8i16:
  case ARM::MVE_VORNIZ8v4i32:
  case ARM::MVE_VORNIZ8v8i16:
  case ARM::MVE_VORNIZ16v4i32:
  case ARM::MVE_VORNIZ24v4i32:
  case ARM::MVE_VANDIZ0v4i32:
  case ARM::MVE_VANDIZ0v8i16:
  case ARM::MVE_VANDIZ8v4i32:
  case ARM::MVE_VANDIZ8v8i16:
  case ARM::MVE_VANDIZ16v4i32:
  case ARM::MVE_VANDIZ24v4i32: {
    unsigned Opcode;
    bool imm16 = false;
    switch(Inst.getOpcode()) {
    case ARM::MVE_VORNIZ0v4i32: Opcode = ARM::MVE_VORRIZ0v4i32; break;
    case ARM::MVE_VORNIZ0v8i16: Opcode = ARM::MVE_VORRIZ0v8i16; imm16 = true; break;
    case ARM::MVE_VORNIZ8v4i32: Opcode = ARM::MVE_VORRIZ8v4i32; break;
    case ARM::MVE_VORNIZ8v8i16: Opcode = ARM::MVE_VORRIZ8v8i16; imm16 = true; break;
    case ARM::MVE_VORNIZ16v4i32: Opcode = ARM::MVE_VORRIZ16v4i32; break;
    case ARM::MVE_VORNIZ24v4i32: Opcode = ARM::MVE_VORRIZ24v4i32; break;
    case ARM::MVE_VANDIZ0v4i32: Opcode = ARM::MVE_VBICIZ0v4i32; break;
    case ARM::MVE_VANDIZ0v8i16: Opcode = ARM::MVE_VBICIZ0v8i16; imm16 = true; break;
    case ARM::MVE_VANDIZ8v4i32: Opcode = ARM::MVE_VBICIZ8v4i32; break;
    case ARM::MVE_VANDIZ8v8i16: Opcode = ARM::MVE_VBICIZ8v8i16; imm16 = true; break;
    case ARM::MVE_VANDIZ16v4i32: Opcode = ARM::MVE_VBICIZ16v4i32; break;
    case ARM::MVE_VANDIZ24v4i32: Opcode = ARM::MVE_VBICIZ24v4i32; break;
    default: llvm_unreachable("unexpected opcode");
    }

    MCInst TmpInst;
    TmpInst.setOpcode(Opcode);
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(1));

    // invert immediate
    unsigned imm = ~Inst.getOperand(2).getImm() & (imm16 ? 0xffff : 0xffffffff);
    TmpInst.addOperand(MCOperand::createImm(imm));

    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }
  // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
  case ARM::LDRT_POST:
  case ARM::LDRBT_POST: {
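Editor's note: the "invert immediate" step above is easiest to see with concrete numbers. A standalone sketch (not code from the patch; the helper is hypothetical) of the value the VORN/VAND pseudos hand to the underlying VORR/VBIC instruction:

#include <cstdint>
#include <cstdio>

// Hypothetical helper mirroring the conversion above: compute the immediate
// encoded by VORR/VBIC when the programmer wrote a VORN/VAND alias.
static uint32_t invertExpandImm(uint32_t WrittenImm, bool Imm16) {
  return ~WrittenImm & (Imm16 ? 0xffffu : 0xffffffffu);
}

int main() {
  // "vorn.i16 q0, #0xff12" is printed back as "vorr.i16 q0, #0xed",
  // exactly as the mve-bitops.s test below expects.
  std::printf("0x%x\n", invertExpandImm(0xff12, /*Imm16=*/true));      // 0xed
  // "vand.i32 q0, #0xffffff11" becomes "vbic.i32 q0, #0xee".
  std::printf("0x%x\n", invertExpandImm(0xffffff11, /*Imm16=*/false)); // 0xee
  return 0;
}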
@@ -504,6 +504,10 @@ template<bool Writeback>
static DecodeStatus DecodeVSTRVLDR_SYSREG(MCInst &Inst, unsigned Insn,
                                          uint64_t Address,
                                          const void *Decoder);
template <int shift>
static DecodeStatus DecodeExpandedImmOperand(MCInst &Inst, unsigned Val,
                                             uint64_t Address,
                                             const void *Decoder);
static DecodeStatus DecodeMVEOverlappingLongShift(MCInst &Inst, unsigned Insn,
                                                  uint64_t Address,
                                                  const void *Decoder);
@@ -6025,6 +6029,16 @@ static DecodeStatus DecodeVSTRVLDR_SYSREG(MCInst &Inst, unsigned Val,
  return S;
}

template <int shift>
static DecodeStatus DecodeExpandedImmOperand(MCInst &Inst, unsigned Val,
                                             uint64_t Address,
                                             const void *Decoder) {
  Val <<= shift;

  Inst.addOperand(MCOperand::createImm(Val));
  return MCDisassembler::Success;
}

static DecodeStatus DecodeMVEOverlappingLongShift(
    MCInst &Inst, unsigned Insn, uint64_t Address, const void *Decoder) {
  DecodeStatus S = MCDisassembler::Success;
@@ -1620,3 +1620,11 @@ void ARMInstPrinter::printVPTMask(const MCInst *MI, unsigned OpNum,
  }
}

void ARMInstPrinter::printExpandedImmOperand(const MCInst *MI, unsigned OpNum,
                                             const MCSubtargetInfo &STI,
                                             raw_ostream &O) {
  uint32_t Val = MI->getOperand(OpNum).getImm();
  O << markup("<imm:") << "#0x";
  O.write_hex(Val);
  O << markup(">");
}
@@ -252,6 +252,8 @@ public:
                    raw_ostream &O);
  void printVPTMask(const MCInst *MI, unsigned OpNum,
                    const MCSubtargetInfo &STI, raw_ostream &O);
  void printExpandedImmOperand(const MCInst *MI, unsigned OpNum,
                               const MCSubtargetInfo &STI, raw_ostream &O);

private:
  unsigned DefaultAltIdx = ARM::NoRegAltName;
@@ -398,6 +398,14 @@ public:
  unsigned getThumbSRImmOpValue(const MCInst &MI, unsigned Op,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  template <uint8_t shift, bool invert>
  unsigned getExpandedImmOpValue(const MCInst &MI, unsigned Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const {
    static_assert(shift <= 32, "Shift count must be less than or equal to 32.");
    const MCOperand MO = MI.getOperand(Op);
    return (invert ? (MO.getImm() ^ 0xff) : MO.getImm()) >> shift;
  }

  unsigned NEONThumb2DataIPostEncoder(const MCInst &MI,
                                      unsigned EncodedValue,
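Editor's note: for the non-inverted operands, encoding and decoding are simple shifts in opposite directions. A standalone illustration (not the patch's code) of the round trip between the expanded immediate carried on the operand and the 8-bit field stored in the instruction, matching getExpandedImmOpValue above and DecodeExpandedImmOperand earlier:

#include <cassert>
#include <cstdint>

// Hypothetical helpers: the encoder shifts the expanded immediate down into
// the 8-bit field, and the disassembler shifts the field back up.
template <int shift> uint32_t encodeExpandedImm(uint32_t Expanded) {
  return Expanded >> shift;
}
template <int shift> uint32_t decodeExpandedImm(uint32_t Field) {
  return Field << shift;
}

int main() {
  // vorr.i32 q0, #0x1200 uses the shift-8 operand: field 0x12 <-> imm 0x1200.
  assert(encodeExpandedImm<8>(0x1200) == 0x12);
  assert(decodeExpandedImm<8>(0x12) == 0x1200);
  return 0;
}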
test/MC/ARM/mve-bitops.s (new file, 441 lines)
@@ -0,0 +1,441 @@
# RUN: not llvm-mc -triple=thumbv8.1m.main-none-eabi -mattr=+mve -show-encoding < %s \
# RUN:   | FileCheck --check-prefix=CHECK-NOFP %s
# RUN: not llvm-mc -triple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 -show-encoding < %s 2>%t \
# RUN:   | FileCheck --check-prefix=CHECK %s
# RUN: FileCheck --check-prefix=ERROR < %t %s

# CHECK: vorr.i16 q0, #0x12 @ encoding: [0x81,0xef,0x52,0x09]
# CHECK-NOFP: vorr.i16 q0, #0x12 @ encoding: [0x81,0xef,0x52,0x09]
vorr.i16 q0, #0x12

# CHECK: vorr.i32 q0, #0x1200 @ encoding: [0x81,0xef,0x52,0x03]
# CHECK-NOFP: vorr.i32 q0, #0x1200 @ encoding: [0x81,0xef,0x52,0x03]
vorr.i32 q0, #0x1200

# CHECK: vorr.i16 q0, #0xed @ encoding: [0x86,0xff,0x5d,0x09]
# CHECK-NOFP: vorr.i16 q0, #0xed @ encoding: [0x86,0xff,0x5d,0x09]
vorn.i16 q0, #0xff12

# CHECK: vorr.i32 q0, #0xed00 @ encoding: [0x86,0xff,0x5d,0x03]
# CHECK-NOFP: vorr.i32 q0, #0xed00 @ encoding: [0x86,0xff,0x5d,0x03]
vorn.i32 q0, #0xffff12ff

# CHECK: vorr.i32 q0, #0xed0000 @ encoding: [0x86,0xff,0x5d,0x05]
# CHECK-NOFP: vorr.i32 q0, #0xed0000 @ encoding: [0x86,0xff,0x5d,0x05]
vorn.i32 q0, #0xff12ffff

# CHECK: vorr.i32 q0, #0xed000000 @ encoding: [0x86,0xff,0x5d,0x07]
# CHECK-NOFP: vorr.i32 q0, #0xed000000 @ encoding: [0x86,0xff,0x5d,0x07]
vorn.i32 q0, #0x12ffffff

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vorn.i16 q0, #0xed00

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vorn.i16 q0, #0x00ed

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vorn.i32 q0, #0xed000000

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vorn.i32 q0, #0x00ed0000

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vorn.i32 q0, #0x0000ed00

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vorn.i32 q0, #0x000000ed

# CHECK: vbic.i16 q0, #0x22 @ encoding: [0x82,0xef,0x72,0x09]
# CHECK-NOFP: vbic.i16 q0, #0x22 @ encoding: [0x82,0xef,0x72,0x09]
vbic.i16 q0, #0x22

# CHECK: vbic.i32 q0, #0x1100 @ encoding: [0x81,0xef,0x71,0x03]
# CHECK-NOFP: vbic.i32 q0, #0x1100 @ encoding: [0x81,0xef,0x71,0x03]
vbic.i32 q0, #0x1100

# CHECK: vbic.i16 q0, #0xdd @ encoding: [0x85,0xff,0x7d,0x09]
# CHECK-NOFP: vbic.i16 q0, #0xdd @ encoding: [0x85,0xff,0x7d,0x09]
vand.i16 q0, #0xff22

# CHECK: vbic.i16 q0, #0xdd00 @ encoding: [0x85,0xff,0x7d,0x0b]
# CHECK-NOFP: vbic.i16 q0, #0xdd00 @ encoding: [0x85,0xff,0x7d,0x0b]
vand.i16 q0, #0x22ff

# CHECK: vbic.i32 q0, #0xee @ encoding: [0x86,0xff,0x7e,0x01]
# CHECK-NOFP: vbic.i32 q0, #0xee @ encoding: [0x86,0xff,0x7e,0x01]
vand.i32 q0, #0xffffff11

# CHECK: vbic.i32 q0, #0xee00 @ encoding: [0x86,0xff,0x7e,0x03]
# CHECK-NOFP: vbic.i32 q0, #0xee00 @ encoding: [0x86,0xff,0x7e,0x03]
vand.i32 q0, #0xffff11ff

# CHECK: vbic.i32 q0, #0xee0000 @ encoding: [0x86,0xff,0x7e,0x05]
# CHECK-NOFP: vbic.i32 q0, #0xee0000 @ encoding: [0x86,0xff,0x7e,0x05]
vand.i32 q0, #0xff11ffff

# CHECK: vbic.i32 q0, #0xee000000 @ encoding: [0x86,0xff,0x7e,0x07]
# CHECK-NOFP: vbic.i32 q0, #0xee000000 @ encoding: [0x86,0xff,0x7e,0x07]
vand.i32 q0, #0x11ffffff

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vand.i16 q0, #0xed00

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vand.i16 q0, #0x00ed

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vand.i32 q0, #0xed000000

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vand.i32 q0, #0x00ed0000

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vand.i32 q0, #0x0000ed00

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vand.i32 q0, #0x000000ed

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.s8 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.s16 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.s32 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.u8 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.u16 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.u32 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.i8 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.i16 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.i32 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.f16 q0, q1, q7

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
vbic.f32 q0, q1, q7

# CHECK: vrev64.8 q0, q4 @ encoding: [0xb0,0xff,0x48,0x00]
# CHECK-NOFP: vrev64.8 q0, q4 @ encoding: [0xb0,0xff,0x48,0x00]
vrev64.8 q0, q4

# CHECK: vrev64.16 q1, q3 @ encoding: [0xb4,0xff,0x46,0x20]
# CHECK-NOFP: vrev64.16 q1, q3 @ encoding: [0xb4,0xff,0x46,0x20]
vrev64.16 q1, q3

# CHECK: vrev64.32 q0, q2 @ encoding: [0xb8,0xff,0x44,0x00]
# CHECK-NOFP: vrev64.32 q0, q2 @ encoding: [0xb8,0xff,0x44,0x00]
vrev64.32 q0, q2

# CHECK: vrev32.8 q0, q1 @ encoding: [0xb0,0xff,0xc2,0x00]
# CHECK-NOFP: vrev32.8 q0, q1 @ encoding: [0xb0,0xff,0xc2,0x00]
vrev32.8 q0, q1

# CHECK: vrev32.16 q0, q5 @ encoding: [0xb4,0xff,0xca,0x00]
# CHECK-NOFP: vrev32.16 q0, q5 @ encoding: [0xb4,0xff,0xca,0x00]
vrev32.16 q0, q5

# CHECK: vrev16.8 q0, q2 @ encoding: [0xb0,0xff,0x44,0x01]
# CHECK-NOFP: vrev16.8 q0, q2 @ encoding: [0xb0,0xff,0x44,0x01]
vrev16.8 q0, q2

# CHECK: vmvn q0, q2 @ encoding: [0xb0,0xff,0xc4,0x05]
# CHECK-NOFP: vmvn q0, q2 @ encoding: [0xb0,0xff,0xc4,0x05]
vmvn q0, q2

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.s8 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.s16 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.s32 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.u8 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.u16 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.u32 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.i8 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.i16 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.i32 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.f16 q2, q1, q7

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
veor.f32 q2, q1, q7

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.s8 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.s16 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.s32 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.u8 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.u16 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.u32 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.i8 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.i16 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.i32 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.f16 q0, q3, q2

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
vorn.f32 q0, q3, q2

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.s8 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.s16 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.s32 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.u8 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.u16 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.u32 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.i8 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.i16 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.i32 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.f16 q1, q2, q1

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
vorr.f32 q1, q2, q1

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.s8 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.s16 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.s32 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.u8 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.u16 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.u32 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.i8 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.i16 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.i32 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.f16 q0, q2, q0

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
vand.f32 q0, q2, q0

# CHECK: vmov.8 q0[1], r8 @ encoding: [0x40,0xee,0x30,0x8b]
# CHECK-NOFP: vmov.8 q0[1], r8 @ encoding: [0x40,0xee,0x30,0x8b]
vmov.8 q0[1], r8

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vmov.8 q0[16], r8

# CHECK: vmov.16 q0[2], r5 @ encoding: [0x20,0xee,0x30,0x5b]
# CHECK-NOFP: vmov.16 q0[2], r5 @ encoding: [0x20,0xee,0x30,0x5b]
vmov.16 q0[2], r5

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vmov.16 q0[8], r5

# CHECK: vmov.32 q6[3], r11 @ encoding: [0x2d,0xee,0x10,0xbb]
# CHECK-NOFP: vmov.32 q6[3], r11 @ encoding: [0x2d,0xee,0x10,0xbb]
vmov.32 q6[3], r11

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vmov.32 q6[4], r11

# CHECK: vmov.32 r0, q1[0] @ encoding: [0x12,0xee,0x10,0x0b]
# CHECK-NOFP: vmov.32 r0, q1[0] @ encoding: [0x12,0xee,0x10,0x0b]
vmov.32 r0, q1[0]

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vmov.32 r0, q1[4]

# CHECK: vmov.s16 r1, q2[7] @ encoding: [0x35,0xee,0x70,0x1b]
# CHECK-NOFP: vmov.s16 r1, q2[7] @ encoding: [0x35,0xee,0x70,0x1b]
vmov.s16 r1, q2[7]

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vmov.s16 r1, q2[8]

# CHECK: vmov.s8 r0, q4[13] @ encoding: [0x79,0xee,0x30,0x0b]
# CHECK-NOFP: vmov.s8 r0, q4[13] @ encoding: [0x79,0xee,0x30,0x0b]
vmov.s8 r0, q4[13]

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vmov.s8 r0, q4[16]

# CHECK: vmov.u16 r0, q1[4] @ encoding: [0x93,0xee,0x30,0x0b]
# CHECK-NOFP: vmov.u16 r0, q1[4] @ encoding: [0x93,0xee,0x30,0x0b]
vmov.u16 r0, q1[4]

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vmov.u16 r0, q1[8]

# CHECK: vmov.u8 r0, q5[7] @ encoding: [0xfa,0xee,0x70,0x0b]
# CHECK-NOFP: vmov.u8 r0, q5[7] @ encoding: [0xfa,0xee,0x70,0x0b]
vmov.u8 r0, q5[7]

# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction
vmov.u8 r0, q5[16]

vpste
vmvnt q0, q1
vmvne q0, q1
# CHECK: vpste @ encoding: [0x71,0xfe,0x4d,0x8f]
# CHECK-NOFP: vpste @ encoding: [0x71,0xfe,0x4d,0x8f]
# CHECK: vmvnt q0, q1 @ encoding: [0xb0,0xff,0xc2,0x05]
# CHECK-NOFP: vmvnt q0, q1 @ encoding: [0xb0,0xff,0xc2,0x05]
# CHECK: vmvne q0, q1 @ encoding: [0xb0,0xff,0xc2,0x05]
# CHECK-NOFP: vmvne q0, q1 @ encoding: [0xb0,0xff,0xc2,0x05]

vpste
vornt.s8 q0, q1, q2
vorne.s8 q0, q1, q2
# CHECK: vpste @ encoding: [0x71,0xfe,0x4d,0x8f]
# CHECK-NOFP: vpste @ encoding: [0x71,0xfe,0x4d,0x8f]
# CHECK: vornt q0, q1, q2 @ encoding: [0x32,0xef,0x54,0x01]
# CHECK-NOFP: vornt q0, q1, q2 @ encoding: [0x32,0xef,0x54,0x01]
# CHECK: vorne q0, q1, q2 @ encoding: [0x32,0xef,0x54,0x01]
# CHECK-NOFP: vorne q0, q1, q2 @ encoding: [0x32,0xef,0x54,0x01]
test/MC/ARM/mve-vmov-lane.s (new file, 16 lines)
@@ -0,0 +1,16 @@
// RUN: not llvm-mc -triple=thumbv8m.main -mattr=+fp-armv8 -show-encoding < %s 2>%t | FileCheck %s --check-prefix=V80M
// RUN: FileCheck %s < %t --check-prefix=V80M-ERROR
// RUN: llvm-mc -triple=thumbv8.1m.main -mattr=+fp-armv8 -show-encoding < %s 2>%t
// RUN: llvm-mc -triple=thumbv8.1m.main -mattr=+mve -show-encoding < %s 2>%t

// v8.1M added the Q register syntax for this instruction. The v8.1M spec does
// not list the D register syntax as valid, but we accept it as an extension to
// make porting code from v8.0M to v8.1M easier.

vmov.32 r0, d1[0]
// V80M: vmov.32 r0, d1[0] @ encoding: [0x11,0xee,0x10,0x0b]
// V81M: vmov.32 r0, d1[0] @ encoding: [0x11,0xee,0x10,0x0b]

vmov.32 r0, q0[2]
// V80M-ERROR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction requires: armv8.1m.main with FP or MVE
// V81M: vmov.32 r0, q0[2] @ encoding: [0x11,0xee,0x10,0x0b]
test/MC/Disassembler/ARM/mve-bitops.txt (new file, 150 lines)
@@ -0,0 +1,150 @@
# RUN: llvm-mc -disassemble -triple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 -show-encoding %s 2> %t | FileCheck %s
# RUN: FileCheck --check-prefix=ERROR < %t %s
# RUN: not llvm-mc -disassemble -triple=thumbv8.1m.main-none-eabi -show-encoding %s &> %t
# RUN: FileCheck --check-prefix=CHECK-NOMVE < %t %s

# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x12,0xef,0x5e,0x01]

# CHECK: vrev64.8 q0, q4 @ encoding: [0xb0,0xff,0x48,0x00]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0xb0,0xff,0x48,0x00]

# CHECK: vrev64.16 q1, q3 @ encoding: [0xb4,0xff,0x46,0x20]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0xb4,0xff,0x46,0x20]

# CHECK: vrev64.32 q0, q2 @ encoding: [0xb8,0xff,0x44,0x00]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0xb8,0xff,0x44,0x00]

# CHECK: vrev32.8 q0, q1 @ encoding: [0xb0,0xff,0xc2,0x00]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0xb0,0xff,0xc2,0x00]

# CHECK: vrev32.16 q0, q5 @ encoding: [0xb4,0xff,0xca,0x00]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0xb4,0xff,0xca,0x00]

# CHECK: vrev16.8 q0, q2 @ encoding: [0xb0,0xff,0x44,0x01]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0xb0,0xff,0x44,0x01]

# CHECK: vmvn q0, q2 @ encoding: [0xb0,0xff,0xc4,0x05]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0xb0,0xff,0xc4,0x05]

# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x02,0xff,0x5e,0x41]

# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x36,0xef,0x54,0x01]

# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x24,0xef,0x52,0x21]

# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x04,0xef,0x50,0x01]

# CHECK: vorr.i16 q0, #0x12 @ encoding: [0x81,0xef,0x52,0x09]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x81,0xef,0x52,0x09]

# CHECK: vorr.i32 q0, #0x1200 @ encoding: [0x81,0xef,0x52,0x03]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x81,0xef,0x52,0x03]

# CHECK: vbic.i32 q0, #0x35 @ encoding: [0x83,0xef,0x75,0x01]
[0x83,0xef,0x75,0x01]

# CHECK: vbic.i32 q0, #0x3500 @ encoding: [0x83,0xef,0x75,0x03]
[0x83,0xef,0x75,0x03]

# CHECK: vbic.i32 q0, #0x350000 @ encoding: [0x83,0xef,0x75,0x05]
[0x83,0xef,0x75,0x05]

# CHECK: vbic.i32 q0, #0x35000000 @ encoding: [0x83,0xef,0x75,0x07]
[0x83,0xef,0x75,0x07]

# CHECK: vbic.i16 q0, #0x35 @ encoding: [0x83,0xef,0x75,0x09]
[0x83,0xef,0x75,0x09]

# CHECK: vbic.i16 q0, #0x3500 @ encoding: [0x83,0xef,0x75,0x0b]
[0x83,0xef,0x75,0x0b]

# CHECK: vmov.8 q0[1], r8 @ encoding: [0x40,0xee,0x30,0x8b]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x40,0xee,0x30,0x8b]

# CHECK: vmov.16 q0[2], r5 @ encoding: [0x20,0xee,0x30,0x5b]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x20,0xee,0x30,0x5b]

# CHECK: vmov.32 q6[3], r11 @ encoding: [0x2d,0xee,0x10,0xbb]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x2d,0xee,0x10,0xbb]

# CHECK: vmov.32 r0, q1[0] @ encoding: [0x12,0xee,0x10,0x0b]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x12,0xee,0x10,0x0b]

# CHECK: vmov.s16 r1, q2[7] @ encoding: [0x35,0xee,0x70,0x1b]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x35,0xee,0x70,0x1b]

# CHECK: vmov.s8 r0, q4[13] @ encoding: [0x79,0xee,0x30,0x0b]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x79,0xee,0x30,0x0b]

# CHECK: vmov.u16 r0, q1[4] @ encoding: [0x93,0xee,0x30,0x0b]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x93,0xee,0x30,0x0b]

# CHECK: vmov.u8 r0, q5[7] @ encoding: [0xfa,0xee,0x70,0x0b]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0xfa,0xee,0x70,0x0b]

# CHECK: vmov.f16 s7, r8 @ encoding: [0x03,0xee,0x90,0x89]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x03,0xee,0x90,0x89]

# CHECK: vmov.f16 s10, r5 @ encoding: [0x05,0xee,0x10,0x59]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x05,0xee,0x10,0x59]

# CHECK: vmov.f16 s10, sp @ encoding: [0x05,0xee,0x10,0xd9]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x05,0xee,0x10,0xd9]

# CHECK: vmov.f16 s31, r10 @ encoding: [0x0f,0xee,0x90,0xa9]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x0f,0xee,0x90,0xa9]

# CHECK: vmov.f16 r8, s7 @ encoding: [0x13,0xee,0x90,0x89]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x13,0xee,0x90,0x89]

# CHECK: vmov.f16 r5, s10 @ encoding: [0x15,0xee,0x10,0x59]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x15,0xee,0x10,0x59]

# CHECK: vmov.f16 sp, s10 @ encoding: [0x15,0xee,0x10,0xd9]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x15,0xee,0x10,0xd9]

# CHECK: vmov.f16 r10, s31 @ encoding: [0x1f,0xee,0x90,0xa9]
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x1f,0xee,0x90,0xa9]

# ERROR: [[@LINE+2]]:2: warning: potentially undefined instruction encoding
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x05,0xee,0x10,0xf9]

# ERROR: [[@LINE+2]]:2: warning: potentially undefined instruction encoding
# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding
[0x1f,0xee,0x90,0xf9]
test/MC/Disassembler/ARM/mve-vmov-lane.txt (new file, 13 lines)
@@ -0,0 +1,13 @@
# RUN: llvm-mc -triple=thumbv8m.main -mattr=+fp-armv8 -disassemble < %s | FileCheck %s --check-prefix=D_REG
# RUN: llvm-mc -triple=thumbv8a -mattr=+fp-armv8 -disassemble < %s | FileCheck %s --check-prefix=D_REG
# RUN: llvm-mc -triple=thumbv8.1m.main -mattr=+fp-armv8 -disassemble < %s | FileCheck %s --check-prefix=Q_REG
# RUN: llvm-mc -triple=thumbv8.1m.main -mattr=+mve -disassemble < %s | FileCheck %s --check-prefix=Q_REG

# The disassembly for this instruction varies between v8.1M and other
# architectures. In v8.1M (with either scalar floating point, MVE or both), we
# use the Q register syntax, and for all other architectures we use the D
# register syntax.

[0x11,0xee,0x10,0x0b]
# D_REG: vmov.32 r0, d1[0]
# Q_REG: vmov.32 r0, q0[2]