Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-22 02:33:06 +01:00)
[RISCV] Define vadd/vsub/vrsub intrinsics and lower to V instructions.
This patch is based on the proposal from Roger Ferrer Ibanez:
http://lists.llvm.org/pipermail/llvm-dev/2020-October/145850.html

Differential Revision: https://reviews.llvm.org/D93013

commit f039b9862f (parent 851d64b940)
@@ -66,3 +66,43 @@ let TargetPrefix = "riscv" in {
 defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
 
 } // TargetPrefix = "riscv"
+
+//===----------------------------------------------------------------------===//
+// Vectors
+
+class RISCVVIntrinsic {
+  // These intrinsics may accept illegal integer values in their llvm_any_ty
+  // operand, so they have to be extended. If set to zero then the intrinsic
+  // does not have any operand that must be extended.
+  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
+  bits<4> ExtendOperand = 0;
+}
+
+let TargetPrefix = "riscv" in {
+  // For destination vector type is the same as first source vector.
+  // Input: (vector_in, vector_in/scalar_in, vl)
+  class RISCVBinaryAAXNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // For destination vector type is the same as first source vector (with mask).
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+  class RISCVBinaryAAXMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                     llvm_anyvector_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 3;
+  }
+
+  multiclass RISCVBinaryAAX {
+    def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
+  }
+
+  defm vadd : RISCVBinaryAAX;
+  defm vsub : RISCVBinaryAAX;
+  defm vrsub : RISCVBinaryAAX;
+} // TargetPrefix = "riscv"
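At the IR level, calls to the new intrinsics follow the operand shapes given in the comments above. A minimal sketch (the type-mangled names and the i64 vl type are illustrative, in the rv64 form the new tests use):

declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
    <vscale x 2 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>,
    <vscale x 2 x i1>, i64)

; Unmasked form: (vector_in, vector_in/scalar_in, vl)
define <vscale x 2 x i32> @vadd_vv(<vscale x 2 x i32> %a,
                                   <vscale x 2 x i32> %b, i64 %vl) {
  %r = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
             <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)
  ret <vscale x 2 x i32> %r
}

; Masked form: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
define <vscale x 2 x i32> @vadd_vv_m(<vscale x 2 x i32> %maskedoff,
                                     <vscale x 2 x i32> %a,
                                     <vscale x 2 x i32> %b,
                                     <vscale x 2 x i1> %mask, i64 %vl) {
  %r = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
             <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> %a,
             <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i64 %vl)
  ret <vscale x 2 x i32> %r
}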
@@ -331,8 +331,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
   setBooleanContents(ZeroOrOneBooleanContent);
 
-  if (Subtarget.hasStdExtV())
+  if (Subtarget.hasStdExtV()) {
     setBooleanVectorContents(ZeroOrOneBooleanContent);
+    // RVV intrinsics may have illegal operands.
+    for (auto VT : {MVT::i8, MVT::i16, MVT::i32})
+      setOperationAction(ISD::INTRINSIC_WO_CHAIN, VT, Custom);
+  }
 
   // Function alignments.
   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
@@ -1002,6 +1006,28 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
   SDLoc DL(Op);
 
+  if (Subtarget.hasStdExtV()) {
+    // Some RVV intrinsics may claim that they want an integer operand to be
+    // extended.
+    if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+            RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
+      if (II->ExtendedOperand) {
+        assert(II->ExtendedOperand < Op.getNumOperands());
+        SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
+        SDValue &ScalarOp = Operands[II->ExtendedOperand];
+        if (ScalarOp.getValueType() == MVT::i8 ||
+            ScalarOp.getValueType() == MVT::i16 ||
+            ScalarOp.getValueType() == MVT::i32) {
+          ScalarOp =
+              DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
+          return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
+                             Operands);
+        }
+      }
+    }
+  }
+
   switch (IntNo) {
   default:
     return SDValue(); // Don't custom lower most intrinsics.
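To see what the ExtendOperand hook buys, a sketch, assuming the scalar-operand mangling used in the new rvv tests: on riscv64 the i8 scalar below has an illegal type, so the lowering above any-extends it to XLenVT (i64) in the SelectionDAG and re-emits the INTRINSIC_WO_CHAIN node; the IR itself is unchanged.

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
    <vscale x 1 x i8>, i8, i64)

define <vscale x 1 x i8> @vadd_vx(<vscale x 1 x i8> %a, i8 %x, i64 %vl) {
  ; %x is the ExtendOperand: it becomes (any_extend i8 -> i64) in the DAG
  ; before instruction selection sees the intrinsic node.
  %r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
             <vscale x 1 x i8> %a, i8 %x, i64 %vl)
  ret <vscale x 1 x i8> %r
}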
@@ -2038,6 +2064,16 @@ static const MCPhysReg ArgFPR64s[] = {
     RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
     RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
 };
+// This is an interim calling convention and it may be changed in the future.
+static const MCPhysReg ArgVRs[] = {
+    RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20,
+    RISCV::V21, RISCV::V22, RISCV::V23
+};
+static const MCPhysReg ArgVRM2s[] = {
+    RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2
+};
+static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4};
+static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8};
 
 // Pass a 2*XLEN argument that has been split into two XLEN values through
 // registers or the stack as necessary.
@@ -2082,7 +2118,8 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
-                     bool IsRet, Type *OrigTy) {
+                     bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
+                     Optional<unsigned> FirstMaskArgument) {
   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
   assert(XLen == 32 || XLen == 64);
   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
@@ -2215,7 +2252,34 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
     Reg = State.AllocateReg(ArgFPR32s);
   else if (ValVT == MVT::f64 && !UseGPRForF64)
     Reg = State.AllocateReg(ArgFPR64s);
-  else
+  else if (ValVT.isScalableVector()) {
+    const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
+    if (RC == &RISCV::VRRegClass) {
+      // Assign the first mask argument to V0.
+      // This is an interim calling convention and it may be changed in the
+      // future.
+      if (FirstMaskArgument.hasValue() &&
+          ValNo == FirstMaskArgument.getValue()) {
+        Reg = State.AllocateReg(RISCV::V0);
+      } else {
+        Reg = State.AllocateReg(ArgVRs);
+      }
+    } else if (RC == &RISCV::VRM2RegClass) {
+      Reg = State.AllocateReg(ArgVRM2s);
+    } else if (RC == &RISCV::VRM4RegClass) {
+      Reg = State.AllocateReg(ArgVRM4s);
+    } else if (RC == &RISCV::VRM8RegClass) {
+      Reg = State.AllocateReg(ArgVRM8s);
+    } else {
+      llvm_unreachable("Unhandled class register for ValueType");
+    }
+    if (!Reg) {
+      LocInfo = CCValAssign::Indirect;
+      // Try using a GPR to pass the address
+      Reg = State.AllocateReg(ArgGPRs);
+      LocVT = XLenVT;
+    }
+  } else
     Reg = State.AllocateReg(ArgGPRs);
   unsigned StackOffset =
       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
@@ -2238,8 +2302,9 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
     return false;
   }
 
-  assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT) &&
-         "Expected an XLenVT at this stage");
+  assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
+          (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) &&
+         "Expected an XLenVT or scalable vector types at this stage");
 
   if (Reg) {
     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -2256,12 +2321,32 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
   return false;
 }
 
+template <typename ArgTy>
+static void preAssignMask(const ArgTy &Args,
+                          Optional<unsigned> &FirstMaskArgument,
+                          CCState &CCInfo) {
+  unsigned NumArgs = Args.size();
+  for (unsigned I = 0; I != NumArgs; ++I) {
+    MVT ArgVT = Args[I].VT;
+    if (!ArgVT.isScalableVector() ||
+        ArgVT.getVectorElementType().SimpleTy != MVT::i1)
+      continue;
+
+    FirstMaskArgument = I;
+    break;
+  }
+}
+
 void RISCVTargetLowering::analyzeInputArgs(
     MachineFunction &MF, CCState &CCInfo,
     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
   unsigned NumArgs = Ins.size();
   FunctionType *FType = MF.getFunction().getFunctionType();
 
+  Optional<unsigned> FirstMaskArgument;
+  if (Subtarget.hasStdExtV())
+    preAssignMask(Ins, FirstMaskArgument, CCInfo);
+
   for (unsigned i = 0; i != NumArgs; ++i) {
     MVT ArgVT = Ins[i].VT;
     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
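A sketch of what preAssignMask gives the interim calling convention: the first scalable vector argument with i1 elements is reserved for v0, and other LMUL=1 vector arguments are drawn from ArgVRs (v16-v23). The register notes in the comments below are the expected assignments under that scheme, not something the IR spells out.

define <vscale x 8 x i8> @masked_args(<vscale x 8 x i1> %mask, ; first i1 vector -> v0
                                      <vscale x 8 x i8> %a,    ; -> v16 (first of ArgVRs)
                                      <vscale x 8 x i8> %b) {  ; -> v17
  ret <vscale x 8 x i8> %a
}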
@@ -2274,7 +2359,8 @@ void RISCVTargetLowering::analyzeInputArgs(
 
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
-                 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
+                 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
+                 FirstMaskArgument)) {
       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                         << EVT(ArgVT).getEVTString() << '\n');
       llvm_unreachable(nullptr);
@@ -2288,6 +2374,10 @@ void RISCVTargetLowering::analyzeOutputArgs(
     CallLoweringInfo *CLI) const {
   unsigned NumArgs = Outs.size();
 
+  Optional<unsigned> FirstMaskArgument;
+  if (Subtarget.hasStdExtV())
+    preAssignMask(Outs, FirstMaskArgument, CCInfo);
+
   for (unsigned i = 0; i != NumArgs; i++) {
     MVT ArgVT = Outs[i].VT;
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
@@ -2295,7 +2385,8 @@ void RISCVTargetLowering::analyzeOutputArgs(
 
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
-                 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
+                 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
+                 FirstMaskArgument)) {
       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                         << EVT(ArgVT).getEVTString() << "\n");
       llvm_unreachable(nullptr);
@@ -2327,31 +2418,13 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
 // The caller is responsible for loading the full value if the argument is
 // passed with CCValAssign::Indirect.
 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
-                                const CCValAssign &VA, const SDLoc &DL) {
+                                const CCValAssign &VA, const SDLoc &DL,
+                                const RISCVTargetLowering &TLI) {
   MachineFunction &MF = DAG.getMachineFunction();
   MachineRegisterInfo &RegInfo = MF.getRegInfo();
   EVT LocVT = VA.getLocVT();
   SDValue Val;
-  const TargetRegisterClass *RC;
-
-  switch (LocVT.getSimpleVT().SimpleTy) {
-  default:
-    llvm_unreachable("Unexpected register type");
-  case MVT::i32:
-  case MVT::i64:
-    RC = &RISCV::GPRRegClass;
-    break;
-  case MVT::f16:
-    RC = &RISCV::FPR16RegClass;
-    break;
-  case MVT::f32:
-    RC = &RISCV::FPR32RegClass;
-    break;
-  case MVT::f64:
-    RC = &RISCV::FPR64RegClass;
-    break;
-  }
-
+  const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
   Register VReg = RegInfo.createVirtualRegister(RC);
   RegInfo.addLiveIn(VA.getLocReg(), VReg);
   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
@@ -2623,7 +2696,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
     else if (VA.isRegLoc())
-      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
+      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
     else
       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
 
@@ -3071,12 +3144,18 @@ bool RISCVTargetLowering::CanLowerReturn(
     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
   SmallVector<CCValAssign, 16> RVLocs;
   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
 
+  Optional<unsigned> FirstMaskArgument;
+  if (Subtarget.hasStdExtV())
+    preAssignMask(Outs, FirstMaskArgument, CCInfo);
+
   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
     MVT VT = Outs[i].VT;
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
-                 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
+                 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
+                 *this, FirstMaskArgument))
       return false;
   }
   return true;
@@ -3673,3 +3752,12 @@ RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                StringRef(RegName) + "\"."));
   return Reg;
 }
+
+namespace llvm {
+namespace RISCVVIntrinsicsTable {
+
+#define GET_RISCVVIntrinsicsTable_IMPL
+#include "RISCVGenSearchableTables.inc"
+
+} // namespace RISCVVIntrinsicsTable
+} // namespace llvm
@@ -87,6 +87,8 @@ public:
   explicit RISCVTargetLowering(const TargetMachine &TM,
                                const RISCVSubtarget &STI);
 
+  const RISCVSubtarget &getSubtarget() const { return Subtarget; }
+
   bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                           MachineFunction &MF,
                           unsigned Intrinsic) const override;
@@ -269,6 +271,20 @@ private:
       const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
       MachineFunction &MF) const;
 };
+
+namespace RISCVVIntrinsicsTable {
+
+struct RISCVVIntrinsicInfo {
+  unsigned int IntrinsicID;
+  unsigned int ExtendedOperand;
+};
+
+using namespace RISCV;
+
+#define GET_RISCVVIntrinsicsTable_DECL
+#include "RISCVGenSearchableTables.inc"
+
+} // end namespace RISCVVIntrinsicsTable
 }
 
 #endif
@@ -98,20 +98,37 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     return;
   }
 
-  // FPR->FPR copies
+  // FPR->FPR copies and VR->VR copies.
   unsigned Opc;
+  bool IsScalableVector = false;
   if (RISCV::FPR16RegClass.contains(DstReg, SrcReg))
     Opc = RISCV::FSGNJ_H;
   else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg))
     Opc = RISCV::FSGNJ_S;
   else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg))
     Opc = RISCV::FSGNJ_D;
-  else
+  else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
+    Opc = RISCV::PseudoVMV1R_V;
+    IsScalableVector = true;
+  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
+    Opc = RISCV::PseudoVMV2R_V;
+    IsScalableVector = true;
+  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
+    Opc = RISCV::PseudoVMV4R_V;
+    IsScalableVector = true;
+  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
+    Opc = RISCV::PseudoVMV8R_V;
+    IsScalableVector = true;
+  } else
     llvm_unreachable("Impossible reg-to-reg copy");
 
-  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
-      .addReg(SrcReg, getKillRegState(KillSrc))
-      .addReg(SrcReg, getKillRegState(KillSrc));
+  if (IsScalableVector)
+    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
+        .addReg(SrcReg, getKillRegState(KillSrc));
+  else
+    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
+        .addReg(SrcReg, getKillRegState(KillSrc))
+        .addReg(SrcReg, getKillRegState(KillSrc));
 }
 
 void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -14,6 +14,23 @@
 ///
 //===----------------------------------------------------------------------===//
 
+// X0 has special meaning for vsetvl/vsetvli.
+//  rd | rs1 |   AVL value | Effect on vl
+//--------------------------------------------------------------
+// !X0 |  X0 |       VLMAX | Set vl to VLMAX
+//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
+def NoX0 : SDNodeXForm<undef,
+[{
+  auto *C = dyn_cast<ConstantSDNode>(N);
+  if (C && C->isNullValue()) {
+    SDLoc DL(N);
+    return SDValue(CurDAG->getMachineNode(RISCV::ADDI, DL, Subtarget->getXLenVT(),
+      CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT()),
+      CurDAG->getTargetConstant(0, DL, Subtarget->getXLenVT())), 0);
+  }
+  return SDValue(N, 0);
+}]>;
+
 //===----------------------------------------------------------------------===//
 // Utilities.
 //===----------------------------------------------------------------------===//
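Why the NoX0 transform matters, as a sketch: a caller can legitimately pass vl = 0, but if instruction selection materialized that constant as X0, the vsetvli encoding in the table above would read rs1 = x0 as "set vl to VLMAX". NoX0 rewrites a literal zero into ADDI reg, x0, 0 so a real register carries the zero. The intrinsic mangling below is illustrative, as in the new tests.

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, i64)

; vl is the literal 0 here; NoX0 keeps it from being selected as register X0.
define <vscale x 1 x i8> @vl_zero(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
  %r = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
             <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i64 0)
  ret <vscale x 1 x i8> %r
}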
@@ -50,6 +67,18 @@ def VLMax : OutPatFrag<(ops), (XLenVT X0)>;
 // List of EEW.
 defvar EEWList = [8, 16, 32, 64];
 
+// We only model FPR32 for V instructions in RISCVInstrInfoV.td.
+// FP16/FP32/FP64 registers are alias each other. Convert FPR16 and FPR64
+// to FPR32 for V instructions is enough.
+class ToFPR32<ValueType type, DAGOperand operand, string name> {
+  dag ret = !cond(!eq(!cast<string>(operand), !cast<string>(FPR64)):
+                      (EXTRACT_SUBREG !dag(type, [FPR64], [name]), sub_32),
+                  !eq(!cast<string>(operand), !cast<string>(FPR16)):
+                      (SUBREG_TO_REG (i16 -1), !dag(type, [FPR16], [name]), sub_16),
+                  !eq(1, 1):
+                      !dag(type, [operand], [name]));
+}
+
 //===----------------------------------------------------------------------===//
 // Vector register and vector group type information.
 //===----------------------------------------------------------------------===//
@@ -103,13 +132,20 @@ defset list<VTypeInfo> AllVectors = {
 
 // This class holds the record of the RISCVVPseudoTable below.
 // This represents the information we need in codegen for each pseudo.
+// The definition should be consistent with `struct PseudoInfo` in
+// RISCVBaseInfo.h.
+class CONST8b<bits<8> val> {
+  bits<8> V = val;
+}
+def InvalidIndex : CONST8b<0x80>;
 class RISCVVPseudo {
   Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
   Instruction BaseInstr;
-  bits<8> VLIndex;
-  bits<8> SEWIndex;
-  bits<8> MergeOpIndex;
+  bits<8> VLIndex = InvalidIndex.V;
+  bits<8> SEWIndex = InvalidIndex.V;
+  bits<8> MergeOpIndex = InvalidIndex.V;
   bits<3> VLMul;
+  bit HasDummyMask = 0;
 }
 
 // The actual table.
@@ -117,11 +153,19 @@ def RISCVVPseudosTable : GenericTable {
   let FilterClass = "RISCVVPseudo";
   let CppTypeName = "PseudoInfo";
   let Fields = [ "Pseudo", "BaseInstr", "VLIndex", "SEWIndex", "MergeOpIndex",
-                 "VLMul" ];
+                 "VLMul", "HasDummyMask" ];
   let PrimaryKey = [ "Pseudo" ];
   let PrimaryKeyName = "getPseudoInfo";
 }
 
+def RISCVVIntrinsicsTable : GenericTable {
+  let FilterClass = "RISCVVIntrinsic";
+  let CppTypeName = "RISCVVIntrinsicInfo";
+  let Fields = ["IntrinsicID", "ExtendOperand"];
+  let PrimaryKey = ["IntrinsicID"];
+  let PrimaryKeyName = "getRISCVVIntrinsicInfo";
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the different pseudo instructions.
 //===----------------------------------------------------------------------===//
@@ -138,27 +182,53 @@ class PseudoToVInst<string PseudoInst> {
                               !subst("Pseudo", "", PseudoInst)))))))));
 }
 
-class VPseudoBinary<VReg RetClass,
-                    VReg Op1Class,
-                    DAGOperand Op2Class> :
+// The destination vector register group for a masked vector instruction cannot
+// overlap the source mask register (v0), unless the destination vector register
+// is being written with a mask value (e.g., comparisons) or the scalar result
+// of a reduction.
+class GetVRegNoV0<VReg VRegClass> {
+  VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
+                 !eq(VRegClass, VRM2) : VRM2NoV0,
+                 !eq(VRegClass, VRM4) : VRM4NoV0,
+                 !eq(VRegClass, VRM8) : VRM8NoV0,
+                 !eq(1, 1) : VRegClass);
+}
+
+class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
+      Pseudo<outs, ins, []>, RISCVVPseudo {
+  let BaseInstr = instr;
+  let VLMul = m.value;
+}
+
+class VPseudoBinaryNoMask<VReg RetClass,
+                          VReg Op1Class,
+                          DAGOperand Op2Class> :
         Pseudo<(outs RetClass:$rd),
                (ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew), []>,
         RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Uses = [VL, VTYPE];
   let VLIndex = 3;
   let SEWIndex = 4;
-  let MergeOpIndex = -1;
+  let HasDummyMask = 1;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
 class VPseudoBinaryMask<VReg RetClass,
                         VReg Op1Class,
                         DAGOperand Op2Class> :
-        Pseudo<(outs RetClass:$rd),
-               (ins RetClass:$merge,
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+               (ins GetVRegNoV0<RetClass>.R:$merge,
                     Op1Class:$rs2, Op2Class:$rs1,
                     VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
         RISCVVPseudo {
   let mayLoad = 0;
   let mayStore = 0;
   let hasSideEffects = 0;
   let usesCustomInserter = 1;
   let Constraints = "$rd = $merge";
   let Uses = [VL, VTYPE];
   let VLIndex = 5;
@@ -171,75 +241,181 @@ multiclass VPseudoBinary<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
                          LMULInfo MInfo> {
-  def "_" # MInfo.MX : VPseudoBinary<RetClass, Op1Class, Op2Class>;
-  def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class>;
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class>;
+  }
 }
 
 multiclass VPseudoBinaryV_VV {
-  let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
   foreach m = MxList.m in
-  {
-    let VLMul = m.value in
     defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m>;
-  }
 }
 
 multiclass VPseudoBinaryV_VX {
-  let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
   foreach m = MxList.m in
-  {
-    let VLMul = m.value in
     defm _VX : VPseudoBinary<m.vrclass, m.vrclass, GPR, m>;
-  }
 }
 
 multiclass VPseudoBinaryV_VI<Operand ImmType = simm5> {
-  let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1 in
   foreach m = MxList.m in
-  {
-    let VLMul = m.value in
     defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m>;
-  }
 }
 
-multiclass VPseudoBinary_VV_VX_VI<Operand ImmType = simm5> {
+multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
   defm "" : VPseudoBinaryV_VV;
   defm "" : VPseudoBinaryV_VX;
   defm "" : VPseudoBinaryV_VI<ImmType>;
 }
 
+multiclass VPseudoBinaryV_VV_VX {
+  defm "" : VPseudoBinaryV_VV;
+  defm "" : VPseudoBinaryV_VX;
+}
+
+multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> {
+  defm "" : VPseudoBinaryV_VX;
+  defm "" : VPseudoBinaryV_VI<ImmType>;
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the different patterns.
 //===----------------------------------------------------------------------===//
+class VPatBinarySDNode<SDNode vop,
+                       string instruction_name,
+                       ValueType result_type,
+                       ValueType op_type,
+                       ValueType mask_type,
+                       int sew,
+                       LMULInfo vlmul,
+                       VReg RetClass,
+                       VReg op_reg_class> :
+    Pat<(result_type (vop
+                     (op_type op_reg_class:$rs1),
+                     (op_type op_reg_class:$rs2))),
+        (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+                     op_reg_class:$rs1,
+                     op_reg_class:$rs2,
+                     VLMax, sew)>;
 
-multiclass pat_vop_binary<SDNode vop,
-                          string instruction_name,
-                          ValueType result_type,
-                          ValueType op_type,
-                          ValueType mask_type,
-                          int sew,
-                          LMULInfo vlmul,
-                          VReg RetClass,
-                          VReg op_reg_class,
-                          bit swap = 0>
+multiclass VPatBinarySDNode<SDNode vop, string instruction_name>
 {
-  defvar instruction = !cast<Instruction>(instruction_name#"_VV_"# vlmul.MX);
-  def : Pat<(result_type (vop
-                          (op_type op_reg_class:$rs1),
-                          (op_type op_reg_class:$rs2))),
-            (instruction op_reg_class:$rs1,
-                         op_reg_class:$rs2,
-                         VLMax, sew)>;
+  foreach vti = AllIntegerVectors in
+    def : VPatBinarySDNode<vop, instruction_name,
+                           vti.Vector, vti.Vector, vti.Mask, vti.SEW,
+                           vti.LMul, vti.RegClass, vti.RegClass>;
 }
 
+class VPatBinaryNoMask<string intrinsic_name,
+                       string inst,
+                       string kind,
+                       ValueType result_type,
+                       ValueType op1_type,
+                       ValueType op2_type,
+                       int sew,
+                       LMULInfo vlmul,
+                       VReg op1_reg_class,
+                       DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (XLenVT GPR:$vl))),
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                   (op1_type op1_reg_class:$rs1),
+                   ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                   (NoX0 GPR:$vl), sew)>;
+
+class VPatBinaryMask<string intrinsic_name,
+                     string inst,
+                     string kind,
+                     ValueType result_type,
+                     ValueType op1_type,
+                     ValueType op2_type,
+                     ValueType mask_type,
+                     int sew,
+                     LMULInfo vlmul,
+                     VReg result_reg_class,
+                     VReg op1_reg_class,
+                     DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0),
+                   (XLenVT GPR:$vl))),
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                   (result_type result_reg_class:$merge),
+                   (op1_type op1_reg_class:$rs1),
+                   ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                   (mask_type V0), (NoX0 GPR:$vl), sew)>;
+
+multiclass VPatBinary<string intrinsic,
+                      string inst,
+                      string kind,
+                      ValueType result_type,
+                      ValueType op1_type,
+                      ValueType op2_type,
+                      ValueType mask_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg result_reg_class,
+                      VReg op1_reg_class,
+                      DAGOperand op2_kind>
+{
+  def : VPatBinaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
+                         sew, vlmul, op1_reg_class, op2_kind>;
+  def : VPatBinaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
+                       mask_type, sew, vlmul, result_reg_class, op1_reg_class,
+                       op2_kind>;
+}
+
-multiclass pat_vop_binary_common<SDNode vop,
-                                 string instruction_name,
-                                 list<VTypeInfo> vtilist>
+multiclass VPatBinaryV_VV<string intrinsic, string instruction,
+                          list<VTypeInfo> vtilist>
 {
   foreach vti = vtilist in
-    defm : pat_vop_binary<vop, instruction_name,
-                          vti.Vector, vti.Vector, vti.Mask, vti.SEW,
-                          vti.LMul, vti.RegClass, vti.RegClass>;
+    defm : VPatBinary<intrinsic, instruction, "VV",
+                      vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass,
+                      vti.RegClass, vti.RegClass>;
 }
+
+multiclass VPatBinaryV_VX<string intrinsic, string instruction,
+                          list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    defm : VPatBinary<intrinsic, instruction, "VX",
+                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass,
+                      vti.RegClass, GPR>;
+}
+
+multiclass VPatBinaryV_VI<string intrinsic, string instruction,
+                          list<VTypeInfo> vtilist, Operand imm_type> {
+  foreach vti = vtilist in
+    defm : VPatBinary<intrinsic, instruction, "VI",
+                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass,
+                      vti.RegClass, imm_type>;
+}
+
+multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
+                                list<VTypeInfo> vtilist>
+{
+  defm "" : VPatBinaryV_VV<intrinsic, instruction, vtilist>;
+  defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
+  defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
+}
+
+multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
+                             list<VTypeInfo> vtilist>
+{
+  defm "" : VPatBinaryV_VV<intrinsic, instruction, vtilist>;
+  defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
+}
+
+multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
+                             list<VTypeInfo> vtilist>
+{
+  defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
+  defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
+}
@@ -248,6 +424,16 @@ multiclass pat_vop_binary_common<SDNode vop,
 
 let Predicates = [HasStdExtV] in {
 
+//===----------------------------------------------------------------------===//
+// Pseudo Instructions for CodeGen
+//===----------------------------------------------------------------------===//
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
+  def PseudoVMV1R_V : VPseudo<VMV1R_V, V_M1, (outs VR:$vd), (ins VR:$vs2)>;
+  def PseudoVMV2R_V : VPseudo<VMV2R_V, V_M2, (outs VRM2:$vd), (ins VRM2:$vs2)>;
+  def PseudoVMV4R_V : VPseudo<VMV4R_V, V_M4, (outs VRM4:$vd), (ins VRM4:$vs2)>;
+  def PseudoVMV8R_V : VPseudo<VMV8R_V, V_M8, (outs VRM8:$vd), (ins VRM8:$vs2)>;
+}
+
 //===----------------------------------------------------------------------===//
 // 6. Configuration-Setting Instructions
 //===----------------------------------------------------------------------===//
@@ -341,6 +527,10 @@ foreach vti = AllVectors in
                             vti.SEW, vti.LMul, vti.RegClass>;
 }
 
+//===----------------------------------------------------------------------===//
+// Pseudo Instructions
+//===----------------------------------------------------------------------===//
+
 //===----------------------------------------------------------------------===//
 // 12. Vector Integer Arithmetic Instructions
 //===----------------------------------------------------------------------===//
@@ -348,11 +538,26 @@ foreach vti = AllVectors in
 //===----------------------------------------------------------------------===//
 // 12.1. Vector Single-Width Integer Add and Subtract
 //===----------------------------------------------------------------------===//
+defm PseudoVADD : VPseudoBinaryV_VV_VX_VI;
+defm PseudoVSUB : VPseudoBinaryV_VV_VX;
+defm PseudoVRSUB : VPseudoBinaryV_VX_VI;
 
-// Pseudo instructions.
-defm PseudoVADD : VPseudoBinary_VV_VX_VI;
+//===----------------------------------------------------------------------===//
+// Patterns.
+//===----------------------------------------------------------------------===//
 
 // Whole-register vector patterns.
-defm "" : pat_vop_binary_common<add, "PseudoVADD", AllIntegerVectors>;
+defm "" : VPatBinarySDNode<add, "PseudoVADD">;
+
+//===----------------------------------------------------------------------===//
+// 12. Vector Integer Arithmetic Instructions
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 12.1. Vector Single-Width Integer Add and Subtract
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
+defm "" : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;
 
 } // Predicates = [HasStdExtV]
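Putting the pieces together for one case (a sketch; the pseudo name follows the naming conventions above, and the exact vsetvli/assembly syntax is best checked against the new vadd tests):

define <vscale x 2 x i32> @demo(<vscale x 2 x i32> %a,
                                <vscale x 2 x i32> %b, i64 %vl) {
  ; The VPatBinary pattern selects PseudoVADD_VV_M1 (VLIndex/SEWIndex drive
  ; the vsetvli insertion, and the dummy mask operand is appended during MC
  ; lowering), emitting roughly:
  ;   vsetvli ..., a0, e32,m1
  ;   vadd.vv v..., v..., v...
  ; The masked intrinsic instead selects PseudoVADD_VV_M1_MASK, which prints
  ; with a v0.t mask suffix.
  %r = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
             <vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)
  ret <vscale x 2 x i32> %r
}
declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
    <vscale x 2 x i32>, <vscale x 2 x i32>, i64)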
@@ -177,9 +177,9 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
     OutMI.addOperand(MCOp);
   }
 
-  // Unmasked pseudo instructions define MergeOpIndex to -1.
-  // Append dummy mask operand to V instructions.
-  if (RVV->getMergeOpIndex() == -1)
+  // Unmasked pseudo instructions need to append dummy mask operand to
+  // V instructions. All V instructions are modeled as the masked version.
+  if (RVV->hasDummyMask())
     OutMI.addOperand(MCOperand::createReg(RISCV::NoRegister));
 
   return true;
@@ -413,14 +413,27 @@ def VRM2 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
              (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2,
                   V18M2, V20M2, V22M2, V24M2, V0M2, V2M2, V4M2, V6M2), 2>;
 
+def VRM2NoV0 : VReg<[vint8m2_t, vint16m2_t, vint32m2_t, vint64m2_t,
+                     vfloat32m2_t, vfloat64m2_t],
+             (add V26M2, V28M2, V30M2, V8M2, V10M2, V12M2, V14M2, V16M2,
+                  V18M2, V20M2, V22M2, V24M2, V2M2, V4M2, V6M2), 2>;
+
 def VRM4 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
                  vfloat32m4_t, vfloat64m4_t],
              (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V0M4, V4M4), 4>;
 
+def VRM4NoV0 : VReg<[vint8m4_t, vint16m4_t, vint32m4_t, vint64m4_t,
+                     vfloat32m4_t, vfloat64m4_t],
+             (add V28M4, V8M4, V12M4, V16M4, V20M4, V24M4, V4M4), 4>;
+
 def VRM8 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
                  vfloat32m8_t, vfloat64m8_t],
              (add V8M8, V16M8, V24M8, V0M8), 8>;
 
+def VRM8NoV0 : VReg<[vint8m8_t, vint16m8_t, vint32m8_t, vint64m8_t,
+                     vfloat32m8_t, vfloat64m8_t],
+             (add V8M8, V16M8, V24M8), 8>;
+
 defvar VMaskVTs = [vbool64_t, vbool32_t, vbool16_t, vbool8_t,
                    vbool4_t, vbool2_t, vbool1_t];
@@ -409,6 +409,10 @@ void printVType(unsigned VType, raw_ostream &OS);
 
 namespace RISCVVPseudosTable {
 
+// The definition should be consistent with `class RISCVVPseudo` in
+// RISCVInstrInfoVPseudos.td.
+static const uint8_t InvalidIndex = 0x80;
+
 struct PseudoInfo {
   unsigned int Pseudo;
   unsigned int BaseInstr;
@@ -416,12 +420,15 @@ struct PseudoInfo {
   uint8_t SEWIndex;
   uint8_t MergeOpIndex;
   uint8_t VLMul;
+  bool HasDummyMask;
 
   int getVLIndex() const { return static_cast<int8_t>(VLIndex); }
 
   int getSEWIndex() const { return static_cast<int8_t>(SEWIndex); }
 
   int getMergeOpIndex() const { return static_cast<int8_t>(MergeOpIndex); }
+
+  bool hasDummyMask() const { return HasDummyMask; }
 };
 
 using namespace RISCV;
New test files (diffs suppressed because they are too large):

  1945 lines  test/CodeGen/RISCV/rvv/vadd-rv32.ll
  2377 lines  test/CodeGen/RISCV/rvv/vadd-rv64.ll
  1225 lines  test/CodeGen/RISCV/rvv/vrsub-rv32.ll
  1497 lines  test/CodeGen/RISCV/rvv/vrsub-rv64.ll
  1441 lines  test/CodeGen/RISCV/rvv/vsub-rv32.ll
  1761 lines  test/CodeGen/RISCV/rvv/vsub-rv64.ll