
[RISCV] Store SEW in RISCV vector pseudo instructions in log2 form.

This shrinks the immediates that the isel table needs to emit for these
instructions. I'm hoping this will allow me to change OPC_EmitInteger to
use a better variable-length encoding for representing negative
numbers, similar to what was done a few months ago for OPC_CheckInteger.

The alternative encoding uses fewer bytes for negative numbers, but
increases the number of bytes needed to encode 64, which was a very
common value in the RISCV table due to SEW=64. By storing Log2 of the
SEW instead, that value becomes 6 and is no longer a problem.
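
For context, a minimal standalone sketch of why the log2 form helps.
This is not the matcher-table emitter itself; zigzag() is a
hypothetical stand-in for the kind of sign-aware variable-length
encoding the message alludes to:

    #include <cassert>
    #include <cstdint>

    // SEW in {8,16,32,64} maps to log2 values {3,4,5,6}; this mirrors
    // llvm::Log2_32 from llvm/Support/MathExtras.h.
    unsigned log2SEW(unsigned SEW) {
      assert(SEW && (SEW & (SEW - 1)) == 0 && "SEW must be a power of 2");
      return 31 - __builtin_clz(SEW);
    }

    // A zigzag-style encoding keeps small values of either sign small,
    // at the cost of one extra bit of magnitude for positive values.
    uint64_t zigzag(int64_t V) { return (uint64_t(V) << 1) ^ uint64_t(V >> 63); }

    int main() {
      // With 7 payload bits per VBR byte, zigzag(64) = 128 needs two
      // bytes, while zigzag(log2SEW(64)) = zigzag(6) = 12 fits in one.
      assert(zigzag(64) == 128 && zigzag(log2SEW(64)) == 12);
    }
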
Craig Topper 2021-05-02 12:01:18 -07:00
parent 2b3350003c
commit 3b8bc4cd8f
10 changed files with 290 additions and 287 deletions

View File

@ -125,9 +125,8 @@ static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
}
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
SDNode *Node, unsigned SEWImm, const SDLoc &DL, unsigned CurOp,
bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
MVT *IndexVT) {
SDNode *Node, unsigned SEW, const SDLoc &DL, unsigned CurOp, bool IsMasked,
bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands, MVT *IndexVT) {
SDValue Chain = Node->getOperand(0);
SDValue Glue;
@ -153,8 +152,8 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
Operands.push_back(VL);
MVT XLenVT = Subtarget->getXLenVT();
SDValue SEW = CurDAG->getTargetConstant(SEWImm, DL, XLenVT);
Operands.push_back(SEW);
SDValue SEWOp = CurDAG->getTargetConstant(Log2_32(SEW), DL, XLenVT);
Operands.push_back(SEWOp);
Operands.push_back(Chain); // Chain.
if (Glue)
@ -492,8 +491,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
break;
}
SDValue SEW =
CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT);
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(3), VL);
@ -582,8 +581,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
break;
}
SDValue SEW =
CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT);
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(5), VL);
SDValue MaskedOff = Node->getOperand(1);
@ -814,14 +813,14 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
MVT VT = Node->getSimpleValueType(0);
unsigned ScalarSize = VT.getScalarSizeInBits();
// VLE1 uses an SEW of 8.
unsigned SEWImm = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
unsigned SEW = (IntNo == Intrinsic::riscv_vle1) ? 8 : ScalarSize;
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
if (IsMasked)
Operands.push_back(Node->getOperand(CurOp++));
addVectorLoadStoreOperands(Node, SEWImm, DL, CurOp, IsMasked, IsStrided,
addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
@ -1005,13 +1004,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
MVT VT = Node->getOperand(2)->getSimpleValueType(0);
unsigned ScalarSize = VT.getScalarSizeInBits();
// VSE1 uses an SEW of 8.
unsigned SEWImm = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
unsigned SEW = (IntNo == Intrinsic::riscv_vse1) ? 8 : ScalarSize;
unsigned CurOp = 2;
SmallVector<SDValue, 8> Operands;
Operands.push_back(Node->getOperand(CurOp++)); // Store value.
addVectorLoadStoreOperands(Node, SEWImm, DL, CurOp, IsMasked, IsStrided,
addVectorLoadStoreOperands(Node, SEW, DL, CurOp, IsMasked, IsStrided,
Operands);
RISCVVLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
@ -1157,7 +1156,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
selectVLOp(Node->getOperand(1), VL);
unsigned ScalarSize = VT.getScalarSizeInBits();
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
SDValue SEW = CurDAG->getTargetConstant(Log2_32(ScalarSize), DL, XLenVT);
SDValue Operands[] = {Ld->getBasePtr(),
CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,

View File

@ -6288,9 +6288,9 @@ static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
DebugLoc DL = MI.getDebugLoc();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
unsigned SEW = MI.getOperand(SEWIndex).getImm();
assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
unsigned Log2SEW = MI.getOperand(SEWIndex).getImm();
assert(RISCVVType::isValidSEW(1 << Log2SEW) && "Unexpected SEW");
RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2SEW - 3);
MachineRegisterInfo &MRI = MF.getRegInfo();
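
A quick sanity check that the rewritten ElementWidth computation
matches the old one; plain C++, just illustrating the algebra
log2(SEW / 8) == log2(SEW) - 3:

    #include <cassert>

    int main() {
      for (unsigned Log2SEW = 3; Log2SEW <= 6; ++Log2SEW) {
        unsigned SEW = 1u << Log2SEW;
        // Old form: Log2_32(SEW / 8); new form: Log2SEW - 3. They agree.
        assert(unsigned(__builtin_ctz(SEW / 8)) == Log2SEW - 3);
      }
    }
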

View File

@ -148,6 +148,7 @@ class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
ValueType Vector = Vec;
ValueType Mask = Mas;
int SEW = Sew;
int Log2SEW = shift_amount<Sew>.val;
VReg RegClass = Reg;
LMULInfo LMul = M;
ValueType Scalar = Scal;
@ -257,6 +258,7 @@ class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
// {SEW, VLMul} values set a valid VType to deal with this mask type.
// we assume SEW=8 and set corresponding LMUL.
int SEW = 8;
int Log2SEW = 3;
LMULInfo LMul = M;
string BX = Bx; // Appendix of mask operations.
// The pattern fragment which produces the AVL operand, representing the
@ -2193,7 +2195,7 @@ class VPatMaskUnaryNoMask<string intrinsic_name,
VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX)
(mti.Mask VR:$rs2),
GPR:$vl, mti.SEW)>;
GPR:$vl, mti.Log2SEW)>;
class VPatMaskUnaryMask<string intrinsic_name,
string inst,
@ -2206,7 +2208,7 @@ class VPatMaskUnaryMask<string intrinsic_name,
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
(mti.Mask V0), GPR:$vl, mti.SEW)>;
(mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
class VPatUnaryAnyMask<string intrinsic,
string inst,
@ -2400,11 +2402,11 @@ multiclass VPatUnaryS_M<string intrinsic_name,
def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
(mti.Mask VR:$rs1), VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
GPR:$vl, mti.SEW)>;
GPR:$vl, mti.Log2SEW)>;
def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
(mti.Mask VR:$rs1), (mti.Mask V0), VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
(mti.Mask V0), GPR:$vl, mti.SEW)>;
(mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
}
}
@ -2413,7 +2415,7 @@ multiclass VPatUnaryV_V_AnyMask<string intrinsic, string instruction,
foreach vti = vtilist in {
def : VPatUnaryAnyMask<intrinsic, instruction, "VM",
vti.Vector, vti.Vector, vti.Mask,
vti.SEW, vti.LMul, vti.RegClass,
vti.Log2SEW, vti.LMul, vti.RegClass,
vti.RegClass>;
}
}
@ -2431,9 +2433,9 @@ multiclass VPatUnaryV_M<string intrinsic, string instruction>
{
foreach vti = AllIntegerVectors in {
def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
vti.SEW, vti.LMul, VR>;
vti.Log2SEW, vti.LMul, VR>;
def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
vti.Mask, vti.SEW, vti.LMul, vti.RegClass, VR>;
vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
}
}
@ -2446,10 +2448,10 @@ multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
defvar fti = vtiTofti.Fti;
def : VPatUnaryNoMask<intrinsic, instruction, suffix,
vti.Vector, fti.Vector,
vti.SEW, vti.LMul, fti.RegClass>;
vti.Log2SEW, vti.LMul, fti.RegClass>;
def : VPatUnaryMask<intrinsic, instruction, suffix,
vti.Vector, fti.Vector, vti.Mask,
vti.SEW, vti.LMul, vti.RegClass, fti.RegClass>;
vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
}
}
@ -2458,10 +2460,10 @@ multiclass VPatUnaryV_V<string intrinsic, string instruction,
foreach vti = vtilist in {
def : VPatUnaryNoMask<intrinsic, instruction, "V",
vti.Vector, vti.Vector,
vti.SEW, vti.LMul, vti.RegClass>;
vti.Log2SEW, vti.LMul, vti.RegClass>;
def : VPatUnaryMask<intrinsic, instruction, "V",
vti.Vector, vti.Vector, vti.Mask,
vti.SEW, vti.LMul, vti.RegClass, vti.RegClass>;
vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
}
}
@ -2471,13 +2473,13 @@ multiclass VPatNullaryV<string intrinsic, string instruction>
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
VLOpFrag)),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
(vti.Vector vti.RegClass:$merge),
(vti.Mask V0), VLOpFrag)),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_MASK")
vti.RegClass:$merge, (vti.Mask V0),
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
}
}
@ -2486,7 +2488,7 @@ multiclass VPatNullaryM<string intrinsic, string inst> {
def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
(XLenVT (VLOp (XLenVT (XLenVT GPR:$vl)))))),
(!cast<Instruction>(inst#"_M_"#mti.BX)
GPR:$vl, mti.SEW)>;
GPR:$vl, mti.Log2SEW)>;
}
multiclass VPatBinary<string intrinsic,
@ -2591,7 +2593,7 @@ multiclass VPatBinaryV_VV<string intrinsic, string instruction,
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
vti.Vector, vti.Vector, vti.Vector,vti.Mask,
vti.SEW, vti.RegClass,
vti.Log2SEW, vti.RegClass,
vti.RegClass, vti.RegClass>;
}
@ -2601,7 +2603,7 @@ multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
defvar ivti = GetIntVTypeInfo<vti>.Vti;
defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
vti.SEW, vti.RegClass,
vti.Log2SEW, vti.RegClass,
vti.RegClass, vti.RegClass>;
}
}
@ -2612,14 +2614,14 @@ multiclass VPatBinaryV_VV_INT_EEW<string intrinsic, string instruction,
// emul = lmul * eew / sew
defvar vlmul = vti.LMul;
defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret;
defvar octuple_emul = !srl(!mul(octuple_lmul, eew), shift_amount<vti.SEW>.val);
defvar octuple_emul = !srl(!mul(octuple_lmul, eew), vti.Log2SEW);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emul_str = octuple_to_str<octuple_emul>.ret;
defvar ivti = !cast<VTypeInfo>("VI" # eew # emul_str);
defvar inst = instruction # "_VV_" # vti.LMul.MX # "_" # emul_str;
defm : VPatBinary<intrinsic, inst,
vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
vti.SEW, vti.RegClass,
vti.Log2SEW, vti.RegClass,
vti.RegClass, ivti.RegClass>;
}
}
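
The octuple arithmetic here is how TableGen avoids fractional LMUL
values: 8*LMUL is an integer for every legal LMUL (MF8..M8 become
1..64), and the division by SEW becomes a right shift by Log2SEW. A
standalone sketch of the same computation, with hypothetical variable
names:

    #include <cstdio>

    int main() {
      // EMUL = (LMUL * EEW) / SEW, computed on 8*LMUL to stay integral.
      unsigned OctupleLMUL = 8;       // LMUL = 1
      unsigned EEW = 16, Log2SEW = 5; // SEW = 32
      unsigned OctupleEMUL = (OctupleLMUL * EEW) >> Log2SEW; // 128 >> 5 = 4
      // 4/8 means EMUL = 1/2; patterns are only defined when the octuple
      // value is in [1, 64], i.e. EMUL between 1/8 and 8.
      printf("octuple EMUL = %u\n", OctupleEMUL);
    }
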
@ -2631,7 +2633,7 @@ multiclass VPatBinaryV_VX<string intrinsic, string instruction,
defvar kind = "V"#vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
vti.SEW, vti.RegClass,
vti.Log2SEW, vti.RegClass,
vti.RegClass, vti.ScalarRegClass>;
}
}
@ -2641,7 +2643,7 @@ multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VX_" # vti.LMul.MX,
vti.Vector, vti.Vector, XLenVT, vti.Mask,
vti.SEW, vti.RegClass,
vti.Log2SEW, vti.RegClass,
vti.RegClass, GPR>;
}
@ -2650,7 +2652,7 @@ multiclass VPatBinaryV_VI<string intrinsic, string instruction,
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
vti.Vector, vti.Vector, XLenVT, vti.Mask,
vti.SEW, vti.RegClass,
vti.Log2SEW, vti.RegClass,
vti.RegClass, imm_type>;
}
@ -2658,7 +2660,7 @@ multiclass VPatBinaryM_MM<string intrinsic, string instruction> {
foreach mti = AllMasks in
def : VPatBinaryNoMask<intrinsic, instruction # "_MM_" # mti.LMul.MX,
mti.Mask, mti.Mask, mti.Mask,
mti.SEW, VR, VR>;
mti.Log2SEW, VR, VR>;
}
multiclass VPatBinaryW_VV<string intrinsic, string instruction,
@ -2668,7 +2670,7 @@ multiclass VPatBinaryW_VV<string intrinsic, string instruction,
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction # "_VV_" # Vti.LMul.MX,
Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
Vti.SEW, Wti.RegClass,
Vti.Log2SEW, Wti.RegClass,
Vti.RegClass, Vti.RegClass>;
}
}
@ -2681,7 +2683,7 @@ multiclass VPatBinaryW_VX<string intrinsic, string instruction,
defvar kind = "V"#Vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
Wti.Vector, Vti.Vector, Vti.Scalar, Vti.Mask,
Vti.SEW, Wti.RegClass,
Vti.Log2SEW, Wti.RegClass,
Vti.RegClass, Vti.ScalarRegClass>;
}
}
@ -2693,7 +2695,7 @@ multiclass VPatBinaryW_WV<string intrinsic, string instruction,
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
Vti.SEW, Wti.RegClass,
Vti.Log2SEW, Wti.RegClass,
Wti.RegClass, Vti.RegClass>;
}
}
@ -2706,7 +2708,7 @@ multiclass VPatBinaryW_WX<string intrinsic, string instruction,
defvar kind = "W"#Vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
Wti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
Vti.SEW, Wti.RegClass,
Vti.Log2SEW, Wti.RegClass,
Wti.RegClass, Vti.ScalarRegClass>;
}
}
@ -2718,7 +2720,7 @@ multiclass VPatBinaryV_WV<string intrinsic, string instruction,
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
Vti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
Vti.SEW, Vti.RegClass,
Vti.Log2SEW, Vti.RegClass,
Wti.RegClass, Vti.RegClass>;
}
}
@ -2731,7 +2733,7 @@ multiclass VPatBinaryV_WX<string intrinsic, string instruction,
defvar kind = "W"#Vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
Vti.Vector, Wti.Vector, Vti.Scalar, Vti.Mask,
Vti.SEW, Vti.RegClass,
Vti.Log2SEW, Vti.RegClass,
Wti.RegClass, Vti.ScalarRegClass>;
}
}
@ -2743,7 +2745,7 @@ multiclass VPatBinaryV_WI<string intrinsic, string instruction,
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction # "_WI_" # Vti.LMul.MX,
Vti.Vector, Wti.Vector, XLenVT, Vti.Mask,
Vti.SEW, Vti.RegClass,
Vti.Log2SEW, Vti.RegClass,
Wti.RegClass, uimm5>;
}
}
@ -2755,7 +2757,7 @@ multiclass VPatBinaryV_VM<string intrinsic, string instruction,
defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
!if(CarryOut, vti.Mask, vti.Vector),
vti.Vector, vti.Vector, vti.Mask,
vti.SEW, vti.LMul,
vti.Log2SEW, vti.LMul,
vti.RegClass, vti.RegClass>;
}
@ -2767,7 +2769,7 @@ multiclass VPatBinaryV_XM<string intrinsic, string instruction,
"V"#vti.ScalarSuffix#"M",
!if(CarryOut, vti.Mask, vti.Vector),
vti.Vector, vti.Scalar, vti.Mask,
vti.SEW, vti.LMul,
vti.Log2SEW, vti.LMul,
vti.RegClass, vti.ScalarRegClass>;
}
@ -2777,7 +2779,7 @@ multiclass VPatBinaryV_IM<string intrinsic, string instruction,
defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
!if(CarryOut, vti.Mask, vti.Vector),
vti.Vector, XLenVT, vti.Mask,
vti.SEW, vti.LMul,
vti.Log2SEW, vti.LMul,
vti.RegClass, simm5>;
}
@ -2785,7 +2787,7 @@ multiclass VPatBinaryV_V<string intrinsic, string instruction> {
foreach vti = AllIntegerVectors in
defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
vti.Mask, vti.Vector, vti.Vector,
vti.SEW, vti.LMul,
vti.Log2SEW, vti.LMul,
vti.RegClass, vti.RegClass>;
}
@ -2793,7 +2795,7 @@ multiclass VPatBinaryV_X<string intrinsic, string instruction> {
foreach vti = AllIntegerVectors in
defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
vti.Mask, vti.Vector, XLenVT,
vti.SEW, vti.LMul,
vti.Log2SEW, vti.LMul,
vti.RegClass, GPR>;
}
@ -2801,7 +2803,7 @@ multiclass VPatBinaryV_I<string intrinsic, string instruction> {
foreach vti = AllIntegerVectors in
defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
vti.Mask, vti.Vector, XLenVT,
vti.SEW, vti.LMul,
vti.Log2SEW, vti.LMul,
vti.RegClass, simm5>;
}
@ -2810,7 +2812,7 @@ multiclass VPatBinaryM_VV<string intrinsic, string instruction,
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VV_" # vti.LMul.MX,
vti.Mask, vti.Vector, vti.Vector, vti.Mask,
vti.SEW, VR,
vti.Log2SEW, VR,
vti.RegClass, vti.RegClass>;
}
@ -2819,7 +2821,7 @@ multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
foreach vti = vtilist in
defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
vti.Mask, vti.Vector, vti.Vector, vti.Mask,
vti.SEW, VR,
vti.Log2SEW, VR,
vti.RegClass, vti.RegClass>;
}
@ -2829,7 +2831,7 @@ multiclass VPatBinaryM_VX<string intrinsic, string instruction,
defvar kind = "V"#vti.ScalarSuffix;
defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#vti.LMul.MX,
vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
vti.SEW, VR,
vti.Log2SEW, VR,
vti.RegClass, vti.ScalarRegClass>;
}
}
@ -2839,7 +2841,7 @@ multiclass VPatBinaryM_VI<string intrinsic, string instruction,
foreach vti = vtilist in
defm : VPatBinary<intrinsic, instruction # "_VI_" # vti.LMul.MX,
vti.Mask, vti.Vector, XLenVT, vti.Mask,
vti.SEW, VR,
vti.Log2SEW, VR,
vti.RegClass, simm5>;
}
@ -2927,7 +2929,7 @@ multiclass VPatTernaryV_VV<string intrinsic, string instruction,
foreach vti = vtilist in
defm : VPatTernary<intrinsic, instruction, "VV",
vti.Vector, vti.Vector, vti.Vector, vti.Mask,
vti.SEW, vti.LMul, vti.RegClass,
vti.Log2SEW, vti.LMul, vti.RegClass,
vti.RegClass, vti.RegClass>;
}
@ -2936,7 +2938,7 @@ multiclass VPatTernaryV_VX<string intrinsic, string instruction,
foreach vti = vtilist in
defm : VPatTernary<intrinsic, instruction, "VX",
vti.Vector, vti.Vector, XLenVT, vti.Mask,
vti.SEW, vti.LMul, vti.RegClass,
vti.Log2SEW, vti.LMul, vti.RegClass,
vti.RegClass, GPR>;
}
@ -2946,7 +2948,7 @@ multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
defm : VPatTernary<intrinsic, instruction,
"V"#vti.ScalarSuffix,
vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
vti.SEW, vti.LMul, vti.RegClass,
vti.Log2SEW, vti.LMul, vti.RegClass,
vti.ScalarRegClass, vti.RegClass>;
}
@ -2955,7 +2957,7 @@ multiclass VPatTernaryV_VI<string intrinsic, string instruction,
foreach vti = vtilist in
defm : VPatTernary<intrinsic, instruction, "VI",
vti.Vector, vti.Vector, XLenVT, vti.Mask,
vti.SEW, vti.LMul, vti.RegClass,
vti.Log2SEW, vti.LMul, vti.RegClass,
vti.RegClass, Imm_type>;
}
@ -2966,7 +2968,7 @@ multiclass VPatTernaryW_VV<string intrinsic, string instruction,
defvar wti = vtiToWti.Wti;
defm : VPatTernary<intrinsic, instruction, "VV",
wti.Vector, vti.Vector, vti.Vector,
vti.Mask, vti.SEW, vti.LMul,
vti.Mask, vti.Log2SEW, vti.LMul,
wti.RegClass, vti.RegClass, vti.RegClass>;
}
}
@ -2979,7 +2981,7 @@ multiclass VPatTernaryW_VX<string intrinsic, string instruction,
defm : VPatTernary<intrinsic, instruction,
"V"#vti.ScalarSuffix,
wti.Vector, vti.Scalar, vti.Vector,
vti.Mask, vti.SEW, vti.LMul,
vti.Mask, vti.Log2SEW, vti.LMul,
wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
}
}
@ -3028,7 +3030,7 @@ multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat =
defm : VPatTernary<intrinsic, instruction, "VS",
vectorM1.Vector, vti.Vector,
vectorM1.Vector, vti.Mask,
vti.SEW, vti.LMul,
vti.Log2SEW, vti.LMul,
VR, vti.RegClass, VR>;
}
foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in
@ -3036,7 +3038,7 @@ multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat =
defm : VPatTernary<intrinsic, instruction, "VS",
gvti.VectorM1, gvti.Vector,
gvti.VectorM1, gvti.Mask,
gvti.SEW, gvti.LMul,
gvti.Log2SEW, gvti.LMul,
VR, gvti.RegClass, VR>;
}
}
@ -3050,7 +3052,7 @@ multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat =
defm : VPatTernary<intrinsic, instruction, "VS",
wtiM1.Vector, vti.Vector,
wtiM1.Vector, vti.Mask,
vti.SEW, vti.LMul,
vti.Log2SEW, vti.LMul,
wtiM1.RegClass, vti.RegClass,
wtiM1.RegClass>;
}
@ -3065,7 +3067,7 @@ multiclass VPatConversionVI_VF<string intrinsic,
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
defm : VPatConversion<intrinsic, instruction, "V",
ivti.Vector, fvti.Vector, ivti.Mask, fvti.SEW,
ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
fvti.LMul, ivti.RegClass, fvti.RegClass>;
}
}
@ -3078,7 +3080,7 @@ multiclass VPatConversionVF_VI<string intrinsic,
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
defm : VPatConversion<intrinsic, instruction, "V",
fvti.Vector, ivti.Vector, fvti.Mask, ivti.SEW,
fvti.Vector, ivti.Vector, fvti.Mask, ivti.Log2SEW,
ivti.LMul, fvti.RegClass, ivti.RegClass>;
}
}
@ -3090,7 +3092,7 @@ multiclass VPatConversionWI_VF<string intrinsic, string instruction> {
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
defm : VPatConversion<intrinsic, instruction, "V",
iwti.Vector, fvti.Vector, iwti.Mask, fvti.SEW,
iwti.Vector, fvti.Vector, iwti.Mask, fvti.Log2SEW,
fvti.LMul, iwti.RegClass, fvti.RegClass>;
}
}
@ -3102,7 +3104,7 @@ multiclass VPatConversionWF_VI<string intrinsic, string instruction> {
defvar fwti = vtiToWti.Wti;
defm : VPatConversion<intrinsic, instruction, "V",
fwti.Vector, vti.Vector, fwti.Mask, vti.SEW,
fwti.Vector, vti.Vector, fwti.Mask, vti.Log2SEW,
vti.LMul, fwti.RegClass, vti.RegClass>;
}
}
@ -3114,7 +3116,7 @@ multiclass VPatConversionWF_VF <string intrinsic, string instruction> {
defvar fwti = fvtiToFWti.Wti;
defm : VPatConversion<intrinsic, instruction, "V",
fwti.Vector, fvti.Vector, fwti.Mask, fvti.SEW,
fwti.Vector, fvti.Vector, fwti.Mask, fvti.Log2SEW,
fvti.LMul, fwti.RegClass, fvti.RegClass>;
}
}
@ -3126,7 +3128,7 @@ multiclass VPatConversionVI_WF <string intrinsic, string instruction> {
defvar fwti = vtiToWti.Wti;
defm : VPatConversion<intrinsic, instruction, "W",
vti.Vector, fwti.Vector, vti.Mask, vti.SEW,
vti.Vector, fwti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, fwti.RegClass>;
}
}
@ -3138,7 +3140,7 @@ multiclass VPatConversionVF_WI <string intrinsic, string instruction> {
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
defm : VPatConversion<intrinsic, instruction, "W",
fvti.Vector, iwti.Vector, fvti.Mask, fvti.SEW,
fvti.Vector, iwti.Vector, fvti.Mask, fvti.Log2SEW,
fvti.LMul, fvti.RegClass, iwti.RegClass>;
}
}
@ -3150,7 +3152,7 @@ multiclass VPatConversionVF_WF <string intrinsic, string instruction> {
defvar fwti = fvtiToFWti.Wti;
defm : VPatConversion<intrinsic, instruction, "W",
fvti.Vector, fwti.Vector, fvti.Mask, fvti.SEW,
fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
fvti.LMul, fvti.RegClass, fwti.RegClass>;
}
}
@ -3179,14 +3181,14 @@ multiclass VPatAMOV_WD<string intrinsic,
if !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)) then {
defvar octuple_lmul = octuple_from_str<vti.LMul.MX>.ret;
// Calculate emul = eew * lmul / sew
defvar octuple_emul = !srl(!mul(eew, octuple_lmul), shift_amount<vti.SEW>.val);
defvar octuple_emul = !srl(!mul(eew, octuple_lmul), vti.Log2SEW);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emulMX = octuple_to_str<octuple_emul>.ret;
defvar offsetVti = !cast<VTypeInfo>("VI" # eew # emulMX);
defvar inst_ei = inst # "EI" # eew;
defm : VPatAMOWD<intrinsic, inst_ei,
vti.Vector, offsetVti.Vector,
vti.Mask, vti.SEW, vti.LMul, offsetVti.LMul, offsetVti.RegClass>;
vti.Mask, vti.Log2SEW, vti.LMul, offsetVti.LMul, offsetVti.RegClass>;
}
}
}
@ -3348,7 +3350,7 @@ foreach vti = AllIntegerVectors in {
(!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
vti.RegClass:$rs2,
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs2),
(vti.Vector vti.RegClass:$rs1),
@ -3360,7 +3362,7 @@ foreach vti = AllIntegerVectors in {
vti.RegClass:$rs2,
(vti.Mask V0),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
// Match VSUB with a small immediate to vadd.vi by negating the immediate.
def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
@ -3369,7 +3371,7 @@ foreach vti = AllIntegerVectors in {
(!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(NegImm simm5_plus1:$rs2),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
@ -3381,7 +3383,7 @@ foreach vti = AllIntegerVectors in {
(NegImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
}
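
The NegImm folding above works because simm5_plus1 (assuming it covers
the simm5 range shifted up by one, i.e. [-15, 16]) negates exactly into
the simm5 range [-16, 15] accepted by vadd.vi. A quick check of that
range argument in plain C++:

    #include <cassert>

    int main() {
      // simm5_plus1 matches imm in [-15, 16].
      for (int Imm = -15; Imm <= 16; ++Imm) {
        int Neg = -Imm;                  // what the NegImm transform emits
        assert(Neg >= -16 && Neg <= 15); // always a valid vadd.vi simm5
      }
    }
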
//===----------------------------------------------------------------------===//
@ -3991,7 +3993,7 @@ foreach vti = AllIntegerVectors in {
(!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
@ -4003,7 +4005,7 @@ foreach vti = AllIntegerVectors in {
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
@ -4011,7 +4013,7 @@ foreach vti = AllIntegerVectors in {
(!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
@ -4023,7 +4025,7 @@ foreach vti = AllIntegerVectors in {
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
// Special cases to avoid matching vmsltu.vi 0 (always false) to
// vmsleu.vi -1 (always true). Instead match to vmsne.vv.
@ -4032,7 +4034,7 @@ foreach vti = AllIntegerVectors in {
(!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
vti.RegClass:$rs1,
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar 0),
@ -4044,7 +4046,7 @@ foreach vti = AllIntegerVectors in {
vti.RegClass:$rs1,
(vti.Mask V0),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsge (vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
@ -4052,7 +4054,7 @@ foreach vti = AllIntegerVectors in {
(!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsge_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
@ -4064,7 +4066,7 @@ foreach vti = AllIntegerVectors in {
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
@ -4072,7 +4074,7 @@ foreach vti = AllIntegerVectors in {
(!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
(DecImm simm5_plus1:$rs2),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar simm5_plus1:$rs2),
@ -4084,7 +4086,7 @@ foreach vti = AllIntegerVectors in {
(DecImm simm5_plus1:$rs2),
(vti.Mask V0),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
// Special cases to avoid matching vmsgeu.vi 0 (always true) to
// vmsgtu.vi -1 (always false). Instead match to vmsne.vv.
@ -4093,7 +4095,7 @@ foreach vti = AllIntegerVectors in {
(!cast<Instruction>("PseudoVMSEQ_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
vti.RegClass:$rs1,
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar 0),
@ -4105,7 +4107,7 @@ foreach vti = AllIntegerVectors in {
vti.RegClass:$rs1,
(vti.Mask V0),
GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
}
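
Those special cases are needed because the imm-1 rewrite breaks down at
the boundary: vmsltu.vi x, 0 is always false, but decrementing the
immediate would give vmsleu.vi x, -1, i.e. x <= UINT_MAX, which is
always true (and dually, vmsgeu.vi x, 0 is always true while
vmsgtu.vi x, -1 is always false). Comparing x against itself with
vmsne/vmseq yields the correct constant result. The boundary
arithmetic, spelled out in plain C++:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t X = 7;                        // any element value
      const uint8_t AllOnes = uint8_t(-1);  // what an immediate of -1 becomes
      assert(!(X < 0u));       // vmsltu.vi x, 0 is always false
      assert(X <= AllOnes);    // but vmsleu.vi x, -1 is always true
      assert((X != X) == false); // vmsne.vv x, x: always false, as required
      assert((X == X) == true);  // vmseq.vv x, x: always true (vmsgeu case)
    }
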
//===----------------------------------------------------------------------===//
@ -4167,7 +4169,7 @@ foreach vti = AllVectors in {
def : Pat<(vti.Vector (int_riscv_vmv_v_v (vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVMV_V_V_"#vti.LMul.MX)
$rs1, GPR:$vl, vti.SEW)>;
$rs1, GPR:$vl, vti.Log2SEW)>;
// vmv.v.x/vmv.v.i are handled in RISCInstrVInstrInfoVVLPatterns.td
}
@ -4316,7 +4318,7 @@ foreach fvti = AllFloatVectors in {
def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$rs2),
(fvti.Scalar (fpimm0)),
(fvti.Mask V0), VLOpFrag)),
(instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.SEW)>;
(instr fvti.RegClass:$rs2, 0, (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
}
//===----------------------------------------------------------------------===//
@ -4459,7 +4461,7 @@ defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;
let Predicates = [HasStdExtV] in {
foreach vti = AllIntegerVectors in {
def : Pat<(riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.SEW)>;
(!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
// vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
}
} // Predicates = [HasStdExtV]
@ -4473,7 +4475,7 @@ foreach fvti = AllFloatVectors in {
defvar instr = !cast<Instruction>("PseudoVFMV_"#fvti.ScalarSuffix#"_S_" #
fvti.LMul.MX);
def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
(instr $rs2, fvti.SEW)>;
(instr $rs2, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
(fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
@ -4481,7 +4483,7 @@ foreach fvti = AllFloatVectors in {
fvti.LMul.MX)
(fvti.Vector $rs1),
(fvti.Scalar fvti.ScalarRegClass:$rs2),
GPR:$vl, fvti.SEW)>;
GPR:$vl, fvti.Log2SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]

View File

@ -44,25 +44,27 @@ class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
}
multiclass VPatUSLoadStoreSDNode<ValueType type,
int sew,
int log2sew,
LMULInfo vlmul,
OutPatFrag avl,
VReg reg_class>
VReg reg_class,
int sew = !shl(1, log2sew)>
{
defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
// Load
def : Pat<(type (load BaseAddr:$rs1)),
(load_instr BaseAddr:$rs1, avl, sew)>;
(load_instr BaseAddr:$rs1, avl, log2sew)>;
// Store
def : Pat<(store type:$rs2, BaseAddr:$rs1),
(store_instr reg_class:$rs2, BaseAddr:$rs1, avl, sew)>;
(store_instr reg_class:$rs2, BaseAddr:$rs1, avl, log2sew)>;
}
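
One detail worth noting in the multiclass above: the parameter is now
log2sew, but the instruction names still embed the full element width,
so the defaulted trailing argument sew = !shl(1, log2sew) recovers it
for name construction while the pseudo's operand gets log2sew. Roughly
the following, as a C++ analogue (hypothetical helper, not LLVM API):

    #include <string>

    // Builds e.g. "PseudoVLE32_V_M1" from Log2SEW = 5, MX = "M1".
    std::string loadPseudoName(unsigned Log2SEW, const std::string &MX) {
      unsigned SEW = 1u << Log2SEW; // TableGen: !shl(1, log2sew)
      return "PseudoVLE" + std::to_string(SEW) + "_V_" + MX;
    }
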
multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
int sew,
int log2sew,
LMULInfo vlmul,
VReg reg_class>
VReg reg_class,
int sew = !shl(1, log2sew)>
{
defvar load_instr =
!cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
@ -83,10 +85,10 @@ multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m>
defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#m.BX);
// Load
def : Pat<(m.Mask (load BaseAddr:$rs1)),
(load_instr BaseAddr:$rs1, m.AVL, m.SEW)>;
(load_instr BaseAddr:$rs1, m.AVL, m.Log2SEW)>;
// Store
def : Pat<(store m.Mask:$rs2, BaseAddr:$rs1),
(store_instr VR:$rs2, BaseAddr:$rs1, m.AVL, m.SEW)>;
(store_instr VR:$rs2, BaseAddr:$rs1, m.AVL, m.Log2SEW)>;
}
class VPatBinarySDNode_VV<SDNode vop,
@ -132,10 +134,10 @@ multiclass VPatBinarySDNode_VV_VX<SDNode vop, string instruction_name>
{
foreach vti = AllIntegerVectors in {
def : VPatBinarySDNode_VV<vop, instruction_name,
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
SplatPat, GPR>;
}
@ -146,14 +148,14 @@ multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
{
foreach vti = AllIntegerVectors in {
def : VPatBinarySDNode_VV<vop, instruction_name,
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
SplatPat, GPR>;
def : VPatBinarySDNode_XI<vop, instruction_name, "VI",
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
!cast<ComplexPattern>(SplatPat#_#ImmType),
ImmType>;
@ -182,11 +184,11 @@ class VPatBinarySDNode_VF<SDNode vop,
multiclass VPatBinaryFPSDNode_VV_VF<SDNode vop, string instruction_name> {
foreach vti = AllFloatVectors in {
def : VPatBinarySDNode_VV<vop, instruction_name,
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.AVL, vti.RegClass, vti.RegClass>;
def : VPatBinarySDNode_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
vti.SEW, vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass, vti.RegClass,
vti.ScalarRegClass>;
}
}
@ -198,7 +200,7 @@ multiclass VPatBinaryFPSDNode_R_VF<SDNode vop, string instruction_name> {
(!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1,
(fvti.Scalar fvti.ScalarRegClass:$rs2),
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
}
multiclass VPatIntegerSetCCSDNode_VV<CondCode cc,
@ -211,7 +213,7 @@ multiclass VPatIntegerSetCCSDNode_VV<CondCode cc,
SwapHelper<(instruction),
(instruction vti.RegClass:$rs1),
(instruction vti.RegClass:$rs2),
(instruction vti.AVL, vti.SEW),
(instruction vti.AVL, vti.Log2SEW),
swap>.Value>;
}
}
@ -229,7 +231,7 @@ multiclass VPatIntegerSetCCSDNode_XI<CondCode cc,
SwapHelper<(instruction),
(instruction vti.RegClass:$rs1),
(instruction xop_kind:$rs2),
(instruction vti.AVL, vti.SEW),
(instruction vti.AVL, vti.Log2SEW),
swap>.Value>;
}
}
@ -269,7 +271,7 @@ multiclass VPatIntegerSetCCSDNode_VIPlus1<CondCode cc, string instruction_name,
(vti.Vector (splatpat_kind simm5:$rs2)),
cc)),
(instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
vti.AVL, vti.SEW)>;
vti.AVL, vti.Log2SEW)>;
}
}
@ -281,19 +283,19 @@ multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
(fvti.Vector fvti.RegClass:$rs2),
cc)),
(!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.SEW)>;
fvti.RegClass:$rs1, fvti.RegClass:$rs2, fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (setcc (fvti.Vector fvti.RegClass:$rs1),
(splat_vector fvti.ScalarRegClass:$rs2),
cc)),
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (setcc (splat_vector fvti.ScalarRegClass:$rs2),
(fvti.Vector fvti.RegClass:$rs1),
cc)),
(!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
}
}
@ -305,7 +307,7 @@ multiclass VPatExtendSDNode_V<list<SDNode> ops, string inst_name, string suffix,
foreach op = ops in
def : Pat<(vti.Vector (op (fti.Vector fti.RegClass:$rs2))),
(!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
fti.RegClass:$rs2, fti.AVL, vti.SEW)>;
fti.RegClass:$rs2, fti.AVL, vti.Log2SEW)>;
}
}
@ -314,7 +316,7 @@ multiclass VPatConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
ivti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
ivti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
}
@ -323,7 +325,7 @@ multiclass VPatConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
fvti.RegClass:$rs1, ivti.AVL, ivti.SEW)>;
fvti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
}
}
@ -333,7 +335,7 @@ multiclass VPatWConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
defvar fwti = vtiToWti.Wti;
def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
ivti.RegClass:$rs1, ivti.AVL, ivti.SEW)>;
ivti.RegClass:$rs1, ivti.AVL, ivti.Log2SEW)>;
}
}
@ -343,7 +345,7 @@ multiclass VPatWConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
}
@ -353,7 +355,7 @@ multiclass VPatNConvertI2FPSDNode_V<SDNode vop, string instruction_name> {
defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
iwti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
iwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
}
@ -363,7 +365,7 @@ multiclass VPatNConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
defvar fwti = vtiToWti.Wti;
def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1))),
(!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
fwti.RegClass:$rs1, vti.AVL, vti.SEW)>;
fwti.RegClass:$rs1, vti.AVL, vti.Log2SEW)>;
}
}
@ -376,14 +378,14 @@ let Predicates = [HasStdExtV] in {
// 7.4. Vector Unit-Stride Instructions
foreach vti = !listconcat(FractionalGroupIntegerVectors,
FractionalGroupFloatVectors) in
defm : VPatUSLoadStoreSDNode<vti.Vector, vti.SEW, vti.LMul,
vti.AVL, vti.RegClass>;
defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
vti.AVL, vti.RegClass>;
foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in
defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.SEW, vti.LMul,
vti.RegClass>;
defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
vti.RegClass>;
foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in
defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.SEW, vti.LMul,
vti.RegClass>;
defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
vti.RegClass>;
foreach mti = AllMasks in
defm : VPatUSLoadStoreMaskSDNode<mti>;
@ -396,11 +398,11 @@ foreach vti = AllIntegerVectors in {
def : Pat<(sub (vti.Vector (SplatPat GPR:$rs2)),
(vti.Vector vti.RegClass:$rs1)),
(!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.SEW)>;
vti.RegClass:$rs1, GPR:$rs2, vti.AVL, vti.Log2SEW)>;
def : Pat<(sub (vti.Vector (SplatPat_simm5 simm5:$rs2)),
(vti.Vector vti.RegClass:$rs1)),
(!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.SEW)>;
vti.RegClass:$rs1, simm5:$rs2, vti.AVL, vti.Log2SEW)>;
}
// 12.3. Vector Integer Extension
@ -476,52 +478,52 @@ foreach vti = AllIntegerVectors in {
vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
vti.AVL, vti.SEW)>;
vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1),
vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (vselect (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.Log2SEW)>;
}
// 16.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
(!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
(!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
(!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (vnot (and VR:$rs1, VR:$rs2))),
(!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (vnot (or VR:$rs1, VR:$rs2))),
(!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (vnot (xor VR:$rs1, VR:$rs2))),
(!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (and VR:$rs1, (vnot VR:$rs2))),
(!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (or VR:$rs1, (vnot VR:$rs2))),
(!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.SEW)>;
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
// Handle vnot the same as the vnot.mm pseudoinstruction.
def : Pat<(mti.Mask (vnot VR:$rs)),
(!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
VR:$rs, VR:$rs, mti.AVL, mti.SEW)>;
VR:$rs, VR:$rs, mti.AVL, mti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
@ -547,22 +549,22 @@ foreach fvti = AllFloatVectors in {
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma fvti.RegClass:$rs1, fvti.RegClass:$rd,
(fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
(fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (fneg fvti.RegClass:$rs1), fvti.RegClass:$rd,
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
fvti.RegClass:$rd, fvti.RegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
// The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
// commutable.
@ -570,69 +572,69 @@ foreach fvti = AllFloatVectors in {
fvti.RegClass:$rd, fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFMADD_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
(fneg fvti.RegClass:$rd), (fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (splat_vector fvti.ScalarRegClass:$rs1),
(fneg fvti.RegClass:$rd), fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
// The splat might be negated.
def : Pat<(fvti.Vector (fma (fneg (splat_vector fvti.ScalarRegClass:$rs1)),
fvti.RegClass:$rd, (fneg fvti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFNMADD_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (fma (fneg (splat_vector fvti.ScalarRegClass:$rs1)),
fvti.RegClass:$rd, fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFNMSUB_V" # fvti.ScalarSuffix # "_" # suffix)
fvti.RegClass:$rd, fvti.ScalarRegClass:$rs1, fvti.RegClass:$rs2,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
}
foreach vti = AllFloatVectors in {
// 14.8. Vector Floating-Point Square-Root Instruction
def : Pat<(fsqrt (vti.Vector vti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
vti.RegClass:$rs2, vti.AVL, vti.SEW)>;
vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
// 14.12. Vector Floating-Point Sign-Injection Instructions
def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
(!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.SEW)>;
vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;
// Handle fneg with VFSGNJN using the same input for both operands.
def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
(!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.SEW)>;
vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.SEW)>;
vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
(vti.Vector (splat_vector vti.ScalarRegClass:$rs2)))),
(!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.SEW)>;
vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
(vti.Vector (fneg vti.RegClass:$rs2)))),
(!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.SEW)>;
vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
(vti.Vector (fneg (splat_vector vti.ScalarRegClass:$rs2))))),
(!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.SEW)>;
vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.Log2SEW)>;
}
// 14.11. Vector Floating-Point MIN/MAX Instructions
@ -660,7 +662,7 @@ foreach fvti = AllFloatVectors in {
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
(splat_vector fvti.ScalarRegClass:$rs1),
@ -668,13 +670,13 @@ foreach fvti = AllFloatVectors in {
(!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
VMV0:$vm, fvti.AVL, fvti.SEW)>;
VMV0:$vm, fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm),
(splat_vector (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.SEW)>;
fvti.RegClass:$rs2, 0, VMV0:$vm, fvti.AVL, fvti.Log2SEW)>;
}
// 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
@ -693,7 +695,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
defvar fwti = fvtiToFWti.Wti;
def : Pat<(fwti.Vector (fpextend (fvti.Vector fvti.RegClass:$rs1))),
(!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
fvti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
@ -706,7 +708,7 @@ foreach fvtiToFWti = AllWidenableFloatVectors in {
defvar fwti = fvtiToFWti.Wti;
def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
(!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
fwti.RegClass:$rs1, fvti.AVL, fvti.SEW)>;
fwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]
@ -718,17 +720,17 @@ let Predicates = [HasStdExtV] in {
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (SplatPat GPR:$rs1)),
(!cast<Instruction>("PseudoVMV_V_X_" # vti.LMul.MX)
GPR:$rs1, vti.AVL, vti.SEW)>;
GPR:$rs1, vti.AVL, vti.Log2SEW)>;
def : Pat<(vti.Vector (SplatPat_simm5 simm5:$rs1)),
(!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX)
simm5:$rs1, vti.AVL, vti.SEW)>;
simm5:$rs1, vti.AVL, vti.Log2SEW)>;
}
foreach mti = AllMasks in {
def : Pat<(mti.Mask immAllOnesV),
(!cast<Instruction>("PseudoVMSET_M_"#mti.BX) mti.AVL, mti.SEW)>;
(!cast<Instruction>("PseudoVMSET_M_"#mti.BX) mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask immAllZerosV),
(!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) mti.AVL, mti.SEW)>;
(!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) mti.AVL, mti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
@ -737,11 +739,11 @@ foreach fvti = AllFloatVectors in {
def : Pat<(fvti.Vector (splat_vector fvti.ScalarRegClass:$rs1)),
(!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
(fvti.Scalar fvti.ScalarRegClass:$rs1),
fvti.AVL, fvti.SEW)>;
fvti.AVL, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (splat_vector (fvti.Scalar fpimm0))),
(!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
0, fvti.AVL, fvti.SEW)>;
0, fvti.AVL, fvti.Log2SEW)>;
}
} // Predicates = [HasStdExtV, HasStdExtF]
@ -757,5 +759,5 @@ foreach vti = AllFloatVectors in {
// other index will have been custom-lowered to slide the vector correctly
// into place.
def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
(vmv_f_s_inst vti.RegClass:$rs2, vti.SEW)>;
(vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}

View File

@ -287,10 +287,10 @@ class VPatBinaryVL_XI<SDNode vop,
multiclass VPatBinaryVL_VV_VX<SDNode vop, string instruction_name> {
foreach vti = AllIntegerVectors in {
def : VPatBinaryVL_VV<vop, instruction_name,
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass>;
def : VPatBinaryVL_XI<vop, instruction_name, "VX",
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass,
SplatPat, GPR>;
}
@ -300,14 +300,14 @@ multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name,
Operand ImmType = simm5> {
foreach vti = AllIntegerVectors in {
def : VPatBinaryVL_VV<vop, instruction_name,
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass>;
def : VPatBinaryVL_XI<vop, instruction_name, "VX",
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass,
SplatPat, GPR>;
def : VPatBinaryVL_XI<vop, instruction_name, "VI",
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass,
!cast<ComplexPattern>(SplatPat#_#ImmType),
ImmType>;
@ -336,10 +336,10 @@ class VPatBinaryVL_VF<SDNode vop,
multiclass VPatBinaryFPVL_VV_VF<SDNode vop, string instruction_name> {
foreach vti = AllFloatVectors in {
def : VPatBinaryVL_VV<vop, instruction_name,
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass>;
def : VPatBinaryVL_VF<vop, instruction_name#"_V"#vti.ScalarSuffix,
vti.Vector, vti.Vector, vti.Mask, vti.SEW,
vti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
vti.LMul, vti.RegClass, vti.RegClass,
vti.ScalarRegClass>;
}
@ -353,7 +353,7 @@ multiclass VPatBinaryFPVL_R_VF<SDNode vop, string instruction_name> {
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
GPR:$vl, fvti.SEW)>;
GPR:$vl, fvti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
@ -364,7 +364,7 @@ multiclass VPatIntegerSetCCVL_VV<VTypeInfo vti, string instruction_name,
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
}
// Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
@ -377,7 +377,7 @@ multiclass VPatIntegerSetCCVL_VV_Swappable<VTypeInfo vti, string instruction_nam
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl,
vti.SEW)>;
vti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
@ -387,12 +387,12 @@ multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_nam
(SplatPat (XLenVT GPR:$rs2)), cc,
(vti.Mask true_mask),
VLOpFrag)),
(instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
(instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
(vti.Vector vti.RegClass:$rs1), invcc,
(vti.Mask true_mask),
VLOpFrag)),
(instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
(instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
@ -402,12 +402,12 @@ multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_nam
(SplatPat_simm5 simm5:$rs2), cc,
(vti.Mask true_mask),
VLOpFrag)),
(instruction vti.RegClass:$rs1, XLenVT:$rs2, GPR:$vl, vti.SEW)>;
(instruction vti.RegClass:$rs1, XLenVT:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
(vti.Vector vti.RegClass:$rs1), invcc,
(vti.Mask true_mask),
VLOpFrag)),
(instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.SEW)>;
(instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
}
multiclass VPatIntegerSetCCVL_VIPlus1<VTypeInfo vti, string instruction_name,
@ -418,7 +418,7 @@ multiclass VPatIntegerSetCCVL_VIPlus1<VTypeInfo vti, string instruction_name,
(vti.Mask true_mask),
VLOpFrag)),
(instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
}
multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc,
@ -431,7 +431,7 @@ multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc,
(fvti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(inst_name#"_VV_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.RegClass:$rs2, GPR:$vl, fvti.SEW)>;
fvti.RegClass:$rs1, fvti.RegClass:$rs2, GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (riscv_setcc_vl (fvti.Vector fvti.RegClass:$rs1),
(SplatFPOp fvti.ScalarRegClass:$rs2),
cc,
@ -439,7 +439,7 @@ multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc,
VLOpFrag)),
(!cast<Instruction>(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
GPR:$vl, fvti.SEW)>;
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Mask (riscv_setcc_vl (SplatFPOp fvti.ScalarRegClass:$rs2),
(fvti.Vector fvti.RegClass:$rs1),
cc,
@ -447,7 +447,7 @@ multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc,
VLOpFrag)),
(!cast<Instruction>(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2,
GPR:$vl, fvti.SEW)>;
GPR:$vl, fvti.Log2SEW)>;
}
}
@ -459,7 +459,7 @@ multiclass VPatExtendSDNode_V_VL<SDNode vop, string inst_name, string suffix,
def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2),
true_mask, VLOpFrag)),
(!cast<Instruction>(inst_name#"_"#suffix#"_"#vti.LMul.MX)
fti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
fti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
}
}
@ -470,7 +470,7 @@ multiclass VPatConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> {
(fvti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
fvti.RegClass:$rs1, GPR:$vl, ivti.SEW)>;
fvti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
}
}
@ -481,7 +481,7 @@ multiclass VPatConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
(ivti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
ivti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
ivti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
}
@ -493,7 +493,7 @@ multiclass VPatWConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> {
(fvti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
fvti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
}
@ -505,7 +505,7 @@ multiclass VPatWConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
(ivti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#ivti.LMul.MX)
ivti.RegClass:$rs1, GPR:$vl, ivti.SEW)>;
ivti.RegClass:$rs1, GPR:$vl, ivti.Log2SEW)>;
}
}
@ -517,7 +517,7 @@ multiclass VPatNConvertFP2ISDNode_V_VL<SDNode vop, string instruction_name> {
(fwti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#vti.LMul.MX)
fwti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
fwti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
}
}
@ -529,7 +529,7 @@ multiclass VPatNConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
(iwti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(instruction_name#"_"#fvti.LMul.MX)
iwti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
iwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
}
@ -543,7 +543,7 @@ multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
(vti_m1.Vector (IMPLICIT_DEF)),
(vti.Vector vti.RegClass:$rs1),
(vti_m1.Vector VR:$rs2),
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
}
}
@ -559,21 +559,21 @@ foreach vti = AllVectors in {
defvar store_instr = !cast<Instruction>("PseudoVSE"#vti.SEW#"_V_"#vti.LMul.MX);
// Load
def : Pat<(vti.Vector (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
(load_instr BaseAddr:$rs1, GPR:$vl, vti.SEW)>;
(load_instr BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
// Store
def : Pat<(riscv_vse_vl (vti.Vector vti.RegClass:$rs2), BaseAddr:$rs1,
VLOpFrag),
(store_instr vti.RegClass:$rs2, BaseAddr:$rs1, GPR:$vl, vti.SEW)>;
(store_instr vti.RegClass:$rs2, BaseAddr:$rs1, GPR:$vl, vti.Log2SEW)>;
}
foreach mti = AllMasks in {
defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#mti.BX);
defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#mti.BX);
def : Pat<(mti.Mask (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
(load_instr BaseAddr:$rs1, GPR:$vl, mti.SEW)>;
(load_instr BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
def : Pat<(riscv_vse_vl (mti.Mask VR:$rs2), BaseAddr:$rs1,
VLOpFrag),
(store_instr VR:$rs2, BaseAddr:$rs1, GPR:$vl, mti.SEW)>;
(store_instr VR:$rs2, BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
}
// 12.1. Vector Single-Width Integer Add and Subtract
@ -586,12 +586,12 @@ foreach vti = AllIntegerVectors in {
(vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VX_"# vti.LMul.MX)
vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)),
(vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVRSUB_VI_"# vti.LMul.MX)
vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
}
// 12.3. Vector Integer Extension
@ -626,7 +626,7 @@ foreach vtiTofti = AllFractionableVF2IntVectors in {
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVNSRL_WI_"#fti.LMul.MX)
vti.RegClass:$rs1, 0, GPR:$vl, fti.SEW)>;
vti.RegClass:$rs1, 0, GPR:$vl, fti.Log2SEW)>;
}
// 12.8. Vector Integer Comparison Instructions
@ -689,33 +689,33 @@ foreach vti = AllIntegerVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(SplatPat XLenVT:$rs1),
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW)>;
}
// 12.16. Vector Integer Move Instructions
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_vmv_v_x_vl GPR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMV_V_X_"#vti.LMul.MX)
$rs2, GPR:$vl, vti.SEW)>;
$rs2, GPR:$vl, vti.Log2SEW)>;
defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
def : Pat<(vti.Vector (riscv_vmv_v_x_vl (ImmPat XLenVT:$imm5),
VLOpFrag)),
(!cast<Instruction>("PseudoVMV_V_I_"#vti.LMul.MX)
XLenVT:$imm5, GPR:$vl, vti.SEW)>;
XLenVT:$imm5, GPR:$vl, vti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
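A side note on the unchanged defvar above: the ComplexPattern lookup still builds its name from the raw width ("sew"#vti.SEW#"simm5", e.g. sew64simm5), while the new trailing operand carries the log2 form, so both fields remain in use. The helpers below are hypothetical C++ written only to illustrate that pairing; they are not part of the patch.

#include <cassert>
#include <string>

// Name used to look up the ComplexPattern, as in "sew" # vti.SEW # "simm5".
std::string immPatName(unsigned SEW) {
  return "sew" + std::to_string(SEW) + "simm5";
}
// Value placed in the pseudo's SEW operand, as in vti.Log2SEW.
unsigned sewOperand(unsigned SEW) {
  return __builtin_ctz(SEW); // GCC/Clang builtin; SEW is a power of two
}

int main() {
  assert(immPatName(64) == "sew64simm5"); // the name keeps the raw width
  assert(sewOperand(64) == 6);            // the operand stores its log2
}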
@ -760,7 +760,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFMADD_VV_"# suffix)
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd,
(riscv_fneg_vl vti.RegClass:$rs2,
(vti.Mask true_mask),
@ -769,7 +769,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFMSUB_VV_"# suffix)
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
(vti.Mask true_mask),
VLOpFrag),
@ -781,7 +781,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMADD_VV_"# suffix)
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl vti.RegClass:$rs1,
(vti.Mask true_mask),
VLOpFrag),
@ -790,7 +790,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMSUB_VV_"# suffix)
vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
// The choice of VFMADD here is arbitrary, vfmadd.vf and vfmacc.vf are equally
// commutable.
@ -800,7 +800,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFMADD_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
vti.RegClass:$rd,
(riscv_fneg_vl vti.RegClass:$rs2,
@ -810,7 +810,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFMSUB_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
(riscv_fneg_vl vti.RegClass:$rd,
(vti.Mask true_mask),
@ -822,7 +822,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
(riscv_fneg_vl vti.RegClass:$rd,
(vti.Mask true_mask),
@ -832,7 +832,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
// The splat might be negated.
def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
@ -846,7 +846,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMADD_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_fma_vl (riscv_fneg_vl (SplatFPOp vti.ScalarRegClass:$rs1),
(vti.Mask true_mask),
VLOpFrag),
@ -855,7 +855,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFNMSUB_V" # vti.ScalarSuffix # "_" # suffix)
vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
}
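The fneg placements in the patterns above follow the standard FMA sign-folding identities (vfmsub: rd = rs1*rd - rs2; vfnmadd: rd = -(rs1*rd) - rs2; vfnmsub: rd = -(rs1*rd) + rs2). A small self-contained C++ check, illustrative only:

#include <cassert>
#include <cmath>

int main() {
  double rd = 2.0, rs1 = 3.0, rs2 = 5.0; // exact in double, so == is safe
  assert(std::fma(rs1, rd, -rs2) == rs1 * rd - rs2);     // vfmsub
  assert(std::fma(-rs1, rd, -rs2) == -(rs1 * rd + rs2)); // vfnmadd
  assert(std::fma(-rs1, rd, rs2) == -(rs1 * rd) + rs2);  // vfnmsub
}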
// 14.11. Vector Floating-Point MIN/MAX Instructions
@ -880,24 +880,24 @@ foreach vti = AllFloatVectors in {
def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
// 14.12. Vector Floating-Point Sign-Injection Instructions
def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.Log2SEW)>;
// Handle fneg with VFSGNJN using the same input for both operands.
def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs), (vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.Log2SEW)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
(vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(riscv_fneg_vl vti.RegClass:$rs2,
(vti.Mask true_mask),
@ -905,14 +905,14 @@ foreach vti = AllFloatVectors in {
(vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
(SplatFPOp vti.ScalarRegClass:$rs2),
(vti.Mask true_mask),
VLOpFrag),
(!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"# vti.LMul.MX)
vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
}
foreach fvti = AllFloatVectors in {
@ -925,7 +925,7 @@ foreach fvti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
GPR:$vl, fvti.SEW)>;
GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
(SplatFPOp fvti.ScalarRegClass:$rs1),
@ -934,28 +934,28 @@ foreach fvti = AllFloatVectors in {
(!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
VMV0:$vm, GPR:$vl, fvti.SEW)>;
VMV0:$vm, GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
(SplatFPOp (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2,
VLOpFrag)),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
fvti.RegClass:$rs2, 0, VMV0:$vm, GPR:$vl, fvti.SEW)>;
fvti.RegClass:$rs2, 0, VMV0:$vm, GPR:$vl, fvti.Log2SEW)>;
// 14.16. Vector Floating-Point Move Instruction
// If we're splatting fpimm0, use vmv.v.x vd, x0.
def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
(fvti.Scalar (fpimm0)), VLOpFrag)),
(!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
0, GPR:$vl, fvti.SEW)>;
0, GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
(fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
(!cast<Instruction>("PseudoVFMV_V_" # fvti.ScalarSuffix # "_" #
fvti.LMul.MX)
(fvti.Scalar fvti.ScalarRegClass:$rs2),
GPR:$vl, fvti.SEW)>;
GPR:$vl, fvti.Log2SEW)>;
// 14.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2ISDNode_V_VL<riscv_fp_to_sint_vl, "PseudoVFCVT_RTZ_X_F_V">;
@ -975,7 +975,7 @@ foreach fvti = AllFloatVectors in {
(fvti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX)
fvti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
// 14.19 Narrowing Floating-Point/Integer Type-Convert Instructions
@ -990,13 +990,13 @@ foreach fvti = AllFloatVectors in {
(fwti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
fwti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1),
(fwti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX)
fwti.RegClass:$rs1, GPR:$vl, fvti.SEW)>;
fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>;
}
}
@ -1007,63 +1007,63 @@ let Predicates = [HasStdExtV] in {
foreach mti = AllMasks in {
// 16.1 Vector Mask-Register Logical Instructions
def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)),
(!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.SEW)>;
(!cast<Instruction>("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)),
(!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.SEW)>;
(!cast<Instruction>("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMAND_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMOR_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMXOR_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmand_vl (riscv_vmnot_vl VR:$rs1,
VLOpFrag),
VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMANDNOT_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmor_vl (riscv_vmnot_vl VR:$rs1,
VLOpFrag),
VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMORNOT_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// XOR is associative so we need 2 patterns for VMXNOR.
def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
VLOpFrag),
VR:$rs2, VLOpFrag)),
(!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2,
VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2,
VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMNOR_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2,
VLOpFrag),
VLOpFrag)),
(!cast<Instruction>("PseudoVMXNOR_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// Match the not idiom to the vnot.mm pseudo.
def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)),
(!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
VR:$rs, VR:$rs, GPR:$vl, mti.SEW)>;
VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;
// 16.2 Vector Mask Population Count vpopc
def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVPOPC_M_" # mti.BX)
VR:$rs2, GPR:$vl, mti.SEW)>;
VR:$rs2, GPR:$vl, mti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
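The vmnot.mm idiom matched above is valid because nand(a, a) == not(a) holds bitwise for each mask element; a minimal illustrative check in C++:

#include <cassert>

int main() {
  for (unsigned a = 0; a <= 1; ++a)
    assert(((~(a & a)) & 1u) == ((~a) & 1u)); // vmnand(a, a) acts as vmnot(a)
}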
@ -1077,23 +1077,23 @@ foreach vti = AllIntegerVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
vti.RegClass:$merge,
(vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.SEW)>;
(vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
(vti.Vector vti.RegClass:$rs1),
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(riscv_vrgather_vv_vl
@ -1105,12 +1105,12 @@ foreach vti = AllIntegerVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
vti.Mask:$vm, GPR:$vl, vti.SEW)>;
vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
// emul = lmul * 16 / sew
defvar vlmul = vti.LMul;
defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret;
defvar octuple_emul = !srl(!mul(octuple_lmul, 16), shift_amount<vti.SEW>.val);
defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emul_str = octuple_to_str<octuple_emul>.ret;
defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
@ -1120,7 +1120,7 @@ foreach vti = AllIntegerVectors in {
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(inst)
vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(riscv_vrgatherei16_vv_vl
@ -1132,7 +1132,7 @@ foreach vti = AllIntegerVectors in {
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
vti.Mask:$vm, GPR:$vl, vti.SEW)>;
vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
}
}
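With this change the emul computation shifts by vti.Log2SEW directly rather than going through shift_amount<vti.SEW>. A sketch of the same octuple (eighths of an LMUL) arithmetic in plain C++, illustrative only:

#include <cassert>

// emul = lmul * 16 / sew, computed in eighths: (8 * lmul) * 16 >> log2(sew).
unsigned octupleEMUL(unsigned OctupleLMUL, unsigned Log2SEW) {
  return (OctupleLMUL * 16) >> Log2SEW;
}

int main() {
  // LMUL = 1 (octuple 8) with SEW = 64 (log2 6): 128 >> 6 = 2 eighths,
  // i.e. the i16 index vector uses EMUL = 1/4.
  assert(octupleEMUL(8, 6) == 2);
}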
@ -1147,24 +1147,24 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
vti.RegClass:$merge,
(vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.SEW)>;
(vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
defvar ivti = GetIntVTypeInfo<vti>.Vti;
def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
(ivti.Vector vti.RegClass:$rs1),
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VX_"# vti.LMul.MX)
vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm,
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VI_"# vti.LMul.MX)
vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, uimm5:$imm, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(riscv_vrgather_vv_vl
@ -1176,11 +1176,11 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>("PseudoVRGATHER_VV_"# vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, vti.RegClass:$rs1,
vti.Mask:$vm, GPR:$vl, vti.SEW)>;
vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
defvar vlmul = vti.LMul;
defvar octuple_lmul = octuple_from_str<vlmul.MX>.ret;
defvar octuple_emul = !srl(!mul(octuple_lmul, 16), shift_amount<vti.SEW>.val);
defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emul_str = octuple_to_str<octuple_emul>.ret;
defvar ivti = !cast<VTypeInfo>("VI16" # emul_str);
@ -1190,7 +1190,7 @@ foreach vti = AllFloatVectors in {
(vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>(inst)
vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs2, ivti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(riscv_vrgatherei16_vv_vl
@ -1202,7 +1202,7 @@ foreach vti = AllFloatVectors in {
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs2, ivti.RegClass:$rs1,
vti.Mask:$vm, GPR:$vl, vti.SEW)>;
vti.Mask:$vm, GPR:$vl, vti.Log2SEW)>;
}
}
@ -1235,18 +1235,18 @@ let Predicates = [HasStdExtV] in {
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.SEW)>;
(!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX) GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slide1up_vl (vti.Vector vti.RegClass:$rs1),
GPR:$rs2, (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDE1UP_VX_"#vti.LMul.MX)
vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slide1down_vl (vti.Vector vti.RegClass:$rs1),
GPR:$rs2, (vti.Mask true_mask),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDE1DOWN_VX_"#vti.LMul.MX)
vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.SEW)>;
vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
}
foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
@ -1256,7 +1256,7 @@ foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
@ -1264,7 +1264,7 @@ foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
@ -1272,7 +1272,7 @@ foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
@ -1280,7 +1280,7 @@ foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
GPR:$vl, vti.SEW)>;
GPR:$vl, vti.Log2SEW)>;
}
} // Predicates = [HasStdExtV]
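This encoding is why the MIR tests below now pass 6 where they previously passed 64. A minimal sketch of the round trip, written in plain C++ and assuming only that SEW is a power of two (log2u stands in for llvm::Log2_32):

#include <cassert>

// Stand-in for llvm::Log2_32; assumes V is a nonzero power of two.
unsigned log2u(unsigned V) { return 31 - __builtin_clz(V); } // GCC/Clang builtin

int main() {
  unsigned Sews[] = {8, 16, 32, 64};
  for (unsigned SEW : Sews) {
    unsigned Enc = log2u(SEW); // stored in the pseudo: 8->3, 16->4, 32->5, 64->6
    assert(1u << Enc == SEW);  // the original SEW is recoverable by shifting
  }
}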


@ -27,10 +27,10 @@ body: |
%2:gpr = COPY $x12
%1:gpr = COPY $x11
%0:gpr = COPY $x10
%4:vr = PseudoVLE64_V_M1 %1, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
%5:vr = PseudoVLE64_V_M1 %2, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
%6:vr = PseudoVADD_VV_M1 killed %4, killed %5, %3, 64, implicit $vl, implicit $vtype
PseudoVSE64_V_M1 killed %6, %0, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
%4:vr = PseudoVLE64_V_M1 %1, %3, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
%5:vr = PseudoVLE64_V_M1 %2, %3, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
%6:vr = PseudoVADD_VV_M1 killed %4, killed %5, %3, 6, implicit $vl, implicit $vtype
PseudoVSE64_V_M1 killed %6, %0, %3, 6, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
PseudoRET
...
@ -40,13 +40,13 @@ body: |
# POST-INSERTER: %2:gpr = COPY $x11
# POST-INSERTER: %3:gpr = COPY $x10
# POST-INSERTER: dead %7:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
# POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
# POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
# POST-INSERTER: dead %8:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %1, $noreg, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %1, $noreg, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
# POST-INSERTER: dead %9:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
# POST-INSERTER: %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, $noreg, 64, implicit $vl, implicit $vtype
# POST-INSERTER: %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, $noreg, 6, implicit $vl, implicit $vtype
# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
# POST-INSERTER: PseudoVSE64_V_M1 killed %6, %3, $noreg, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
# POST-INSERTER: PseudoVSE64_V_M1 killed %6, %3, $noreg, 6, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
# CODEGEN: vsetvli a3, a3, e64,m1,ta,mu
# CODEGEN-NEXT: vle64.v v25, (a1)


@ -22,11 +22,11 @@ define void @vadd_vint64m1(
; PRE-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8)
; PRE-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8)
; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 64, implicit $vl, implicit $vtype
; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 6, implicit $vl, implicit $vtype
; PRE-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8)
; POST-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8)
; POST-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8)
; POST-INSERTER: dead %6:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, 64, implicit $vl, implicit $vtype
; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, 6, implicit $vl, implicit $vtype
; POST-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8)


@ -39,7 +39,7 @@ body: |
; CHECK: $x12 = PseudoReadVLENB
; CHECK: $x2 = SUB $x2, killed $x12
; CHECK: dead renamable $x11 = PseudoVSETVLI killed renamable $x11, 88, implicit-def $vl, implicit-def $vtype
; CHECK: renamable $v25 = PseudoVLE64_V_M1 killed renamable $x10, $noreg, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
; CHECK: renamable $v25 = PseudoVLE64_V_M1 killed renamable $x10, $noreg, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
; CHECK: $x11 = PseudoReadVLENB
; CHECK: $x10 = LUI 1048575
; CHECK: $x10 = ADDIW killed $x10, 1824
@ -55,7 +55,7 @@ body: |
; CHECK: PseudoRET
%1:gpr = COPY $x11
%0:gpr = COPY $x10
%2:vr = PseudoVLE64_V_M1 %0, %1, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
%2:vr = PseudoVLE64_V_M1 %0, %1, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
%3:gpr = ADDI %stack.2, 0
VS1R_V killed %2:vr, %3:gpr
PseudoRET


@ -16,7 +16,7 @@ define i64 @test(<vscale x 1 x i64> %0) nounwind {
; CHECK: liveins: $v8
; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v8
; CHECK: dead %2:gpr = PseudoVSETIVLI 1, 88, implicit-def $vl, implicit-def $vtype
; CHECK: PseudoVSE64_V_M1 [[COPY]], %stack.0.a, 1, 64, implicit $vl, implicit $vtype
; CHECK: PseudoVSE64_V_M1 [[COPY]], %stack.0.a, 1, 6, implicit $vl, implicit $vtype
; CHECK: [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load 8 from %ir.a)
; CHECK: $x10 = COPY [[LD]]
; CHECK: PseudoRET implicit $x10


@ -53,7 +53,7 @@ body: |
; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; CHECK: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
; CHECK: dead %5:gpr = PseudoVSETVLI $x0, 91, implicit-def $vl, implicit-def $vtype
; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $noreg, 64, implicit $vl, implicit $vtype :: (load 64 from %ir.a, align 8)
; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $noreg, 6, implicit $vl, implicit $vtype :: (load 64 from %ir.a, align 8)
; CHECK: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
; CHECK: PseudoRET implicit $v8m8
%1:vr = COPY $v0
@ -61,7 +61,7 @@ body: |
$v0 = COPY %1
%3:vrm8 = IMPLICIT_DEF
%4:vrm8nov0 = COPY %3
%2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 64, implicit $vl, implicit $vtype :: (load 64 from %ir.a, align 8)
%2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 6, implicit $vl, implicit $vtype :: (load 64 from %ir.a, align 8)
$v8m8 = COPY %2
PseudoRET implicit $v8m8