
[RISCV] Implement vsoxseg/vsuxseg intrinsics.

Define vsoxseg/vsuxseg intrinsics and pseudo instructions.
Lower vsoxseg/vsuxseg intrinsics to pseudo instructions in RISCVDAGToDAGISel.

Differential Revision: https://reviews.llvm.org/D94940
Author: Hsiangkai Wang
Date: 2021-01-19 10:47:44 +08:00
Commit: 02776ce1c4 (parent 9ef6b79d87)
4 changed files with 182 additions and 1 deletion


@@ -601,6 +601,26 @@ let TargetPrefix = "riscv" in {
LLVMMatchType<1>]),
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
// For indexed segment store
// Input: (value, pointer, offset, vl)
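// For nf > 1 the single "value" above stands for nf segment values: the first
// operand fixes the element type and the remaining nf-1 values must match it.
// The offset is an index vector whose element type may differ from the data.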
class RISCVISegStore<int nf>
: Intrinsic<[],
!listconcat([llvm_anyvector_ty],
!listsplat(LLVMMatchType<0>, !add(nf, -1)),
[LLVMPointerToElt<0>, llvm_anyvector_ty,
llvm_anyint_ty]),
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
// For indexed segment store with mask
// Input: (value, pointer, offset, mask, vl)
class RISCVISegStoreMask<int nf>
: Intrinsic<[],
!listconcat([llvm_anyvector_ty],
!listsplat(LLVMMatchType<0>, !add(nf, -1)),
[LLVMPointerToElt<0>, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty]),
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
multiclass RISCVUSLoad {
def "int_riscv_" # NAME : RISCVUSLoad;
def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -727,6 +747,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVSSegStore<nf>;
def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
}
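// Expanded by the defm lines below into an unmasked intrinsic plus a "_mask"
// variant per NF, e.g. int_riscv_vsoxseg2 .. int_riscv_vsoxseg8 and their
// "_mask" counterparts (likewise for vsuxseg).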
multiclass RISCVISegStore<int nf> {
def "int_riscv_" # NAME : RISCVISegStore<nf>;
def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
}
defm vle : RISCVUSLoad;
defm vleff : RISCVUSLoadFF;
@@ -1029,6 +1053,8 @@ let TargetPrefix = "riscv" in {
defm vluxseg # nf : RISCVISegLoad<nf>;
defm vsseg # nf : RISCVUSSegStore<nf>;
defm vssseg # nf : RISCVSSegStore<nf>;
defm vsoxseg # nf : RISCVISegStore<nf>;
defm vsuxseg # nf : RISCVISegStore<nf>;
}
} // TargetPrefix = "riscv"


@@ -359,6 +359,67 @@ void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo,
ReplaceNode(Node, Store);
}
void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned IntNo) {
SDLoc DL(Node);
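// The node's operands are: chain, intrinsic ID, NF segment values, base
// pointer, index vector and VL, hence NF = NumOperands - 5.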
unsigned NF = Node->getNumOperands() - 5;
EVT VT = Node->getOperand(2)->getValueType(0);
unsigned ScalarSize = VT.getScalarSizeInBits();
MVT XLenVT = Subtarget->getXLenVT();
RISCVVLMUL LMUL = getLMUL(VT);
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
SDValue Operands[] = {
StoreVal,
Node->getOperand(2 + NF), // Base pointer.
Node->getOperand(3 + NF), // Index.
Node->getOperand(4 + NF), // VL.
SEW,
Node->getOperand(0) // Chain.
};
EVT IndexVT = Node->getOperand(3 + NF)->getValueType(0);
RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
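// The pseudo is looked up by intrinsic ID, index element width (EEW), the
// LMUL of the stored data and the LMUL of the index vector, matching the
// keys attached to each segment-store pseudo via its RISCVZvlsseg record.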
const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
SDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
ReplaceNode(Node, Store);
}
void RISCVDAGToDAGISel::selectVSXSEGMask(SDNode *Node, unsigned IntNo) {
SDLoc DL(Node);
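// Same operand layout as the unmasked form with an extra mask operand
// before VL, hence NF = NumOperands - 6.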
unsigned NF = Node->getNumOperands() - 6;
EVT VT = Node->getOperand(2)->getValueType(0);
unsigned ScalarSize = VT.getScalarSizeInBits();
MVT XLenVT = Subtarget->getXLenVT();
RISCVVLMUL LMUL = getLMUL(VT);
SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
SDValue Operands[] = {
StoreVal,
Node->getOperand(2 + NF), // Base pointer.
Node->getOperand(3 + NF), // Index.
Node->getOperand(4 + NF), // Mask.
Node->getOperand(5 + NF), // VL.
SEW,
Node->getOperand(0) // Chain.
};
EVT IndexVT = Node->getOperand(3 + NF)->getValueType(0);
RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
static_cast<unsigned>(IndexLMUL));
SDNode *Store =
CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
ReplaceNode(Node, Store);
}
void RISCVDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we have already selected.
if (Node->isMachineOpcode()) {
@@ -601,6 +662,40 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
selectVSSEGMask(Node, IntNo, /*IsStrided=*/true);
return;
}
case Intrinsic::riscv_vsoxseg2:
case Intrinsic::riscv_vsoxseg3:
case Intrinsic::riscv_vsoxseg4:
case Intrinsic::riscv_vsoxseg5:
case Intrinsic::riscv_vsoxseg6:
case Intrinsic::riscv_vsoxseg7:
case Intrinsic::riscv_vsoxseg8:
case Intrinsic::riscv_vsuxseg2:
case Intrinsic::riscv_vsuxseg3:
case Intrinsic::riscv_vsuxseg4:
case Intrinsic::riscv_vsuxseg5:
case Intrinsic::riscv_vsuxseg6:
case Intrinsic::riscv_vsuxseg7:
case Intrinsic::riscv_vsuxseg8: {
selectVSXSEG(Node, IntNo);
return;
}
case Intrinsic::riscv_vsoxseg2_mask:
case Intrinsic::riscv_vsoxseg3_mask:
case Intrinsic::riscv_vsoxseg4_mask:
case Intrinsic::riscv_vsoxseg5_mask:
case Intrinsic::riscv_vsoxseg6_mask:
case Intrinsic::riscv_vsoxseg7_mask:
case Intrinsic::riscv_vsoxseg8_mask:
case Intrinsic::riscv_vsuxseg2_mask:
case Intrinsic::riscv_vsuxseg3_mask:
case Intrinsic::riscv_vsuxseg4_mask:
case Intrinsic::riscv_vsuxseg5_mask:
case Intrinsic::riscv_vsuxseg6_mask:
case Intrinsic::riscv_vsuxseg7_mask:
case Intrinsic::riscv_vsuxseg8_mask: {
selectVSXSEGMask(Node, IntNo);
return;
}
}
break;
}


@@ -61,6 +61,8 @@ public:
void selectVLXSEGMask(SDNode *Node, unsigned IntNo);
void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
void selectVSSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
void selectVSXSEG(SDNode *Node, unsigned IntNo);
void selectVSXSEGMask(SDNode *Node, unsigned IntNo);
// Include the pieces autogenerated from the target description.
#include "RISCVGenDAGISel.inc"


@@ -461,7 +461,9 @@ class ToLowerCase<string Upper> {
!subst("VSSEG", "vsseg",
!subst("VSSSEG", "vssseg",
!subst("VLOXSEG", "vloxseg",
!subst("VLUXSEG", "vluxseg", Upper))))));
!subst("VLUXSEG", "vluxseg",
!subst("VSOXSEG", "vsoxseg",
!subst("VSUXSEG", "vsuxseg", Upper))))))));
}
// Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -1172,6 +1174,39 @@ class VPseudoSSegStoreMask<VReg ValClass, bits<11> EEW>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<11> EEW, bits<3> LMUL>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let usesCustomInserter = 1;
let Uses = [VL, VTYPE];
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<11> EEW, bits<3> LMUL>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let usesCustomInserter = 1;
let Uses = [VL, VTYPE];
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
multiclass VPseudoUSLoad {
foreach lmul = MxList.m in {
defvar LInfo = lmul.MX;
@@ -1790,6 +1825,27 @@ multiclass VPseudoSSegStore {
}
}
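// Emits an unmasked and a masked pseudo for each supported combination of
// index EEW, index LMUL, value LMUL and NF, with names of the form
// PseudoVSOXSEG2EI32_V_M1_M1 / PseudoVSOXSEG2EI32_V_M1_M1_MASK
// (index LMUL first, then value LMUL).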
multiclass VPseudoISegStore {
foreach idx_eew = EEWList in { // EEW for index argument.
foreach idx_lmul = MxSet<idx_eew>.m in { // LMUL for index argument.
foreach val_lmul = MxList.m in { // LMUL for the value.
defvar IdxLInfo = idx_lmul.MX;
defvar IdxVreg = idx_lmul.vrclass;
defvar ValLInfo = val_lmul.MX;
let VLMul = val_lmul.value in {
foreach nf = NFSet<val_lmul>.L in {
defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
}
}
}
}
}
}
//===----------------------------------------------------------------------===//
// Helpers to define the intrinsic patterns.
//===----------------------------------------------------------------------===//
@@ -3036,6 +3092,8 @@ defm PseudoVLOXSEG : VPseudoISegLoad;
defm PseudoVLUXSEG : VPseudoISegLoad;
defm PseudoVSSEG : VPseudoUSSegStore;
defm PseudoVSSSEG : VPseudoSSegStore;
defm PseudoVSOXSEG : VPseudoISegStore;
defm PseudoVSUXSEG : VPseudoISegStore;
//===----------------------------------------------------------------------===//
// 8. Vector AMO Operations