
[RISCV] Define vsadd/vsaddu/vssub/vssubu intrinsics.

We worked with @rogfer01 from BSC to produce this patch.

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: ShihPo Hung <shihpo.hung@sifive.com>
Co-Authored-by: Monk Chiang <monk.chiang@sifive.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D93366
Monk Chiang 2020-12-17 13:45:52 +08:00 (committed by Hsiangkai Wang)
commit f958745fcd, parent 787313e887
14 changed files with 15136 additions and 1 deletion

View File

@@ -209,6 +209,26 @@ let TargetPrefix = "riscv" in {
     let ExtendOperand = 2;
   }
+  // For Saturating binary operations.
+  // The destination vector type is the same as first source vector.
+  // Input: (vector_in, vector_in/scalar_in, vl)
+  class RISCVSaturatingBinaryAAXNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // For Saturating binary operations with mask.
+  // The destination vector type is the same as first source vector.
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+  class RISCVSaturatingBinaryAAXMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
+    let ExtendOperand = 3;
+  }
+
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -243,6 +263,10 @@ let TargetPrefix = "riscv" in {
   multiclass RISCVBinaryMaskOut {
     def "int_riscv_" # NAME : RISCVBinaryMOut;
   }
+  multiclass RISCVSaturatingBinaryAAX {
+    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
+  }
 
   defm vle : RISCVUSLoad;
   defm vse : RISCVUSStore;
@@ -299,4 +323,9 @@ let TargetPrefix = "riscv" in {
   defm vfadd : RISCVBinaryAAX;
   defm vfsub : RISCVBinaryAAX;
   defm vfrsub : RISCVBinaryAAX;
+
+  defm vsaddu : RISCVSaturatingBinaryAAX;
+  defm vsadd : RISCVSaturatingBinaryAAX;
+  defm vssubu : RISCVSaturatingBinaryAAX;
+  defm vssub : RISCVSaturatingBinaryAAX;
 } // TargetPrefix = "riscv"
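
For orientation, here is a minimal sketch of what these TableGen definitions expand to at the IR level, following the Input: comments above. The element type, LMUL, and name mangling are illustrative assumptions; the exact suffixes depend on how the overloaded llvm_anyvector_ty/llvm_any_ty/llvm_anyint_ty slots are instantiated.

; Unmasked form -- operands are (vector_in, vector_in/scalar_in, vl).
declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, i64)

; Masked form -- operands are (maskedoff, vector_in, vector_in/scalar_in, mask, vl).
declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)

; Example call: saturating signed add of two scalable i32 vectors.
define <vscale x 2 x i32> @example_vsadd(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl) {
  %r = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i64 %vl)
  ret <vscale x 2 x i32> %r
}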

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

@@ -350,8 +350,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     // RVV intrinsics may have illegal operands.
     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
-    if (Subtarget.is64Bit())
+    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
+    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
+    if (Subtarget.is64Bit()) {
       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
+      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
+    }
   }
 
   // Function alignments.
@@ -599,6 +604,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   }
   case ISD::INTRINSIC_WO_CHAIN:
     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+  case ISD::INTRINSIC_W_CHAIN:
+    return LowerINTRINSIC_W_CHAIN(Op, DAG);
   case ISD::BSWAP:
   case ISD::BITREVERSE: {
     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
@@ -1054,6 +1061,36 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   }
 }
 
+SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+  SDLoc DL(Op);
+
+  if (Subtarget.hasStdExtV()) {
+    // Some RVV intrinsics may claim that they want an integer operand to be
+    // extended.
+    if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+            RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
+      if (II->ExtendedOperand) {
+        // The operands start from the second argument in INTRINSIC_W_CHAIN.
+        unsigned ExtendOp = II->ExtendedOperand + 1;
+        assert(ExtendOp < Op.getNumOperands());
+        SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
+        SDValue &ScalarOp = Operands[ExtendOp];
+        if (ScalarOp.getValueType() == MVT::i32 ||
+            ScalarOp.getValueType() == MVT::i16 ||
+            ScalarOp.getValueType() == MVT::i8) {
+          ScalarOp =
+              DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
+          return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
+                             Operands);
+        }
+      }
+    }
+  }
+
+  return SDValue();
+}
+
 // Returns the opcode of the target-specific SDNode that implements the 32-bit
 // form of the given Opcode.
 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
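
To make the extension path concrete, here is a hedged sketch of IR that would hit this lowering on riscv64: the i32 scalar (operand 2 of the intrinsic, so operand 3 of the INTRINSIC_W_CHAIN node) is narrower than XLEN and gets ANY_EXTENDed to i64 before the node is rebuilt. The type mangling is again an assumption.

; riscv64: %x is i32, narrower than XLEN (i64). The custom lowering above
; replaces %x with (any_extend %x) and re-emits the INTRINSIC_W_CHAIN node.
declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)

define <vscale x 2 x i32> @example_vsadd_vx(<vscale x 2 x i32> %a, i32 %x, i64 %vl) {
  %r = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> %a, i32 %x, i64 %vl)
  ret <vscale x 2 x i32> %r
}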

llvm/lib/Target/RISCV/RISCVISelLowering.h

@@ -260,6 +260,7 @@ private:
   SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
 
   bool isEligibleForTailCallOptimization(
       CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

@@ -1295,6 +1295,16 @@ defm PseudoVWMUL : VPseudoBinaryW_VV_VX;
 defm PseudoVWMULU : VPseudoBinaryW_VV_VX;
 defm PseudoVWMULSU : VPseudoBinaryW_VV_VX;
 
+//===----------------------------------------------------------------------===//
+// 13.1. Vector Single-Width Saturating Add and Subtract
+//===----------------------------------------------------------------------===//
+let Defs = [VXSAT], hasSideEffects = 1 in {
+  defm PseudoVSADDU : VPseudoBinaryV_VV_VX_VI;
+  defm PseudoVSADD : VPseudoBinaryV_VV_VX_VI;
+  defm PseudoVSSUBU : VPseudoBinaryV_VV_VX;
+  defm PseudoVSSUB : VPseudoBinaryV_VV_VX;
+}
+
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, HasStdExtF] in {
@@ -1434,6 +1444,14 @@ defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL">;
 defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU">;
 defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU">;
 
+//===----------------------------------------------------------------------===//
+// 13.1. Vector Single-Width Saturating Add and Subtract
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsaddu", "PseudoVSADDU", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsadd", "PseudoVSADD", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vssubu", "PseudoVSSUBU", AllIntegerVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vssub", "PseudoVSSUB", AllIntegerVectors>;
+
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, HasStdExtF] in {
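
Note the asymmetry above: VSADDU/VSADD use VPseudoBinaryV_VV_VX_VI because the ISA provides vsaddu.vi/vsadd.vi, while VSSUBU/VSSUB use VPseudoBinaryV_VV_VX because saturating subtract has no immediate form. The Defs = [VXSAT], hasSideEffects = 1 wrapper models the vxsat CSR write these instructions perform on saturation, which is also why the intrinsics carry IntrHasSideEffects. A rough sketch of the immediate case, with the mangling and the selected instruction both assumptions rather than verified output:

; A constant scalar in the 5-bit simm range should select the vsadd.vi form
; via the _VI patterns above (assumed, not CHECK-verified).
declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)

define <vscale x 2 x i32> @example_vsadd_vi(<vscale x 2 x i32> %a, i64 %vl) {
  %r = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> %a, i32 9, i64 %vl)
  ret <vscale x 2 x i32> %r
}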

llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp

@@ -98,6 +98,7 @@ BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   // V registers for code generation. We handle them manually.
   markSuperRegs(Reserved, RISCV::VL);
   markSuperRegs(Reserved, RISCV::VTYPE);
+  markSuperRegs(Reserved, RISCV::VXSAT);
 
   assert(checkAllSuperRegsMarked(Reserved));
   return Reserved;

llvm/lib/Target/RISCV/RISCVRegisterInfo.td

@@ -379,6 +379,7 @@ let RegAltNameIndices = [ABIRegAltName] in {
   def VTYPE : RISCVReg<0, "vtype", ["vtype"]>;
   def VL : RISCVReg<0, "vl", ["vl"]>;
+  def VXSAT : RISCVReg<0, "vxsat", ["vxsat"]>;
 }
 
 class RegisterTypes<list<ValueType> reg_types> {
(The remaining 8 file diffs are suppressed because they are too large.)