From 02776ce1c4cc27a56139b06519348925f3c8c635 Mon Sep 17 00:00:00 2001
From: Hsiangkai Wang <kai.wang@sifive.com>
Date: Tue, 19 Jan 2021 10:47:44 +0800
Subject: [PATCH] [RISCV] Implement vsoxseg/vsuxseg intrinsics.

Define vsoxseg/vsuxseg intrinsics and pseudo instructions. Lower
vsoxseg/vsuxseg intrinsics to pseudo instructions in RISCVDAGToDAGISel.

Differential Revision: https://reviews.llvm.org/D94940
---
 include/llvm/IR/IntrinsicsRISCV.td         | 26 ++++++
 lib/Target/RISCV/RISCVISelDAGToDAG.cpp     | 95 ++++++++++++++++++++++
 lib/Target/RISCV/RISCVISelDAGToDAG.h       |  2 +
 lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 60 +++++++++++++-
 4 files changed, 182 insertions(+), 1 deletion(-)

diff --git a/include/llvm/IR/IntrinsicsRISCV.td b/include/llvm/IR/IntrinsicsRISCV.td
index a9629806e87..f9ed0455fdb 100644
--- a/include/llvm/IR/IntrinsicsRISCV.td
+++ b/include/llvm/IR/IntrinsicsRISCV.td
@@ -601,6 +601,26 @@ let TargetPrefix = "riscv" in {
                              LLVMMatchType<1>]),
                 [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
 
+  // For indexed segment store
+  // Input: (value, pointer, offset, vl)
+  class RISCVISegStore<int nf>
+    : Intrinsic<[],
+                !listconcat([llvm_anyvector_ty],
+                            !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                            [LLVMPointerToElt<0>, llvm_anyvector_ty,
+                             llvm_anyint_ty]),
+                [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For indexed segment store with mask
+  // Input: (value, pointer, offset, mask, vl)
+  class RISCVISegStoreMask<int nf>
+    : Intrinsic<[],
+                !listconcat([llvm_anyvector_ty],
+                            !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                            [LLVMPointerToElt<0>, llvm_anyvector_ty,
+                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                             llvm_anyint_ty]),
+                [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -727,6 +747,10 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVSSegStore<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
   }
+  multiclass RISCVISegStore<int nf> {
+    def "int_riscv_" # NAME : RISCVISegStore<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoadFF;
@@ -1029,6 +1053,8 @@ let TargetPrefix = "riscv" in {
     defm vluxseg # nf : RISCVISegLoad<nf>;
     defm vsseg # nf : RISCVUSSegStore<nf>;
     defm vssseg # nf : RISCVSSegStore<nf>;
+    defm vsoxseg # nf : RISCVISegStore<nf>;
+    defm vsuxseg # nf : RISCVISegStore<nf>;
   }
 
 } // TargetPrefix = "riscv"
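For reference: each vsoxseg<nf>/vsuxseg<nf> intrinsic defined above takes nf
segment values, the base pointer, an index (offset) vector, and vl; the _mask
variants insert a mask before vl. A sketch of the resulting IR declaration for
nf=2 with i32 elements and i32 indices (the type-mangling suffixes are
illustrative; they are derived from the overloaded value and index types):

    declare void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32(
        <vscale x 1 x i32>, <vscale x 1 x i32>, ; nf=2 segment values
        i32*,                                   ; base pointer
        <vscale x 1 x i32>,                     ; index vector
        i32)                                    ; vl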
diff --git a/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 81972d88f63..078ed1b8d8d 100644
--- a/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -359,6 +359,67 @@ void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo,
   ReplaceNode(Node, Store);
 }
 
+void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned IntNo) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumOperands() - 5;
+  EVT VT = Node->getOperand(2)->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
+  SDValue Operands[] = {
+      StoreVal,
+      Node->getOperand(2 + NF), // Base pointer.
+      Node->getOperand(3 + NF), // Index.
+      Node->getOperand(4 + NF), // VL.
+      SEW,
+      Node->getOperand(0) // Chain.
+  };
+
+  EVT IndexVT = Node->getOperand(3 + NF)->getValueType(0);
+  RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(IndexLMUL));
+  SDNode *Store =
+      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
+  ReplaceNode(Node, Store);
+}
+
+void RISCVDAGToDAGISel::selectVSXSEGMask(SDNode *Node, unsigned IntNo) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumOperands() - 6;
+  EVT VT = Node->getOperand(2)->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
+  SDValue Operands[] = {
+      StoreVal,
+      Node->getOperand(2 + NF), // Base pointer.
+      Node->getOperand(3 + NF), // Index.
+      Node->getOperand(4 + NF), // Mask.
+      Node->getOperand(5 + NF), // VL.
+      SEW,
+      Node->getOperand(0) // Chain.
+  };
+
+  EVT IndexVT = Node->getOperand(3 + NF)->getValueType(0);
+  RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(IndexLMUL));
+  SDNode *Store =
+      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
+  ReplaceNode(Node, Store);
+}
+
 void RISCVDAGToDAGISel::Select(SDNode *Node) {
   // If we have a custom node, we have already selected.
   if (Node->isMachineOpcode()) {
@@ -601,6 +662,40 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       selectVSSEGMask(Node, IntNo, /*IsStrided=*/true);
       return;
     }
+    case Intrinsic::riscv_vsoxseg2:
+    case Intrinsic::riscv_vsoxseg3:
+    case Intrinsic::riscv_vsoxseg4:
+    case Intrinsic::riscv_vsoxseg5:
+    case Intrinsic::riscv_vsoxseg6:
+    case Intrinsic::riscv_vsoxseg7:
+    case Intrinsic::riscv_vsoxseg8:
+    case Intrinsic::riscv_vsuxseg2:
+    case Intrinsic::riscv_vsuxseg3:
+    case Intrinsic::riscv_vsuxseg4:
+    case Intrinsic::riscv_vsuxseg5:
+    case Intrinsic::riscv_vsuxseg6:
+    case Intrinsic::riscv_vsuxseg7:
+    case Intrinsic::riscv_vsuxseg8: {
+      selectVSXSEG(Node, IntNo);
+      return;
+    }
+    case Intrinsic::riscv_vsoxseg2_mask:
+    case Intrinsic::riscv_vsoxseg3_mask:
+    case Intrinsic::riscv_vsoxseg4_mask:
+    case Intrinsic::riscv_vsoxseg5_mask:
+    case Intrinsic::riscv_vsoxseg6_mask:
+    case Intrinsic::riscv_vsoxseg7_mask:
+    case Intrinsic::riscv_vsoxseg8_mask:
+    case Intrinsic::riscv_vsuxseg2_mask:
+    case Intrinsic::riscv_vsuxseg3_mask:
+    case Intrinsic::riscv_vsuxseg4_mask:
+    case Intrinsic::riscv_vsuxseg5_mask:
+    case Intrinsic::riscv_vsuxseg6_mask:
+    case Intrinsic::riscv_vsuxseg7_mask:
+    case Intrinsic::riscv_vsuxseg8_mask: {
+      selectVSXSEGMask(Node, IntNo);
+      return;
+    }
     }
     break;
   }
diff --git a/lib/Target/RISCV/RISCVISelDAGToDAG.h b/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 7de4705d482..1ab4ac57a2a 100644
--- a/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -61,6 +61,8 @@ public:
   void selectVLXSEGMask(SDNode *Node, unsigned IntNo);
   void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
   void selectVSSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
+  void selectVSXSEG(SDNode *Node, unsigned IntNo);
+  void selectVSXSEGMask(SDNode *Node, unsigned IntNo);
 
   // Include the pieces autogenerated from the target description.
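The NF (number of fields) computation in the selectors can be read off the
SDNode operand layout of the intrinsic call. For a masked vsoxseg2, for
example:

    operand 0:      chain
    operand 1:      intrinsic ID
    operands 2..3:  the NF segment values
    operand 2 + NF: base pointer
    operand 3 + NF: index vector
    operand 4 + NF: mask
    operand 5 + NF: VL
    => NF = getNumOperands() - 6 = 8 - 6 = 2

The unmasked form has no mask operand, hence NF = getNumOperands() - 5 there.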
 #include "RISCVGenDAGISel.inc"
diff --git a/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 9f244637566..68bfd731e11 100644
--- a/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -461,7 +461,9 @@ class ToLowerCase<string Upper> {
              !subst("VSSEG", "vsseg",
              !subst("VSSSEG", "vssseg",
              !subst("VLOXSEG", "vloxseg",
-             !subst("VLUXSEG", "vluxseg", Upper))))));
+             !subst("VLUXSEG", "vluxseg",
+             !subst("VSOXSEG", "vsoxseg",
+             !subst("VSUXSEG", "vsuxseg", Upper))))))));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -1172,6 +1174,39 @@ class VPseudoSSegStoreMask<VReg ValClass, bits<7> EEW>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
+      Pseudo<(outs),
+             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
+                  GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
+      Pseudo<(outs),
+             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
+                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 multiclass VPseudoUSLoad {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -1790,6 +1825,27 @@ multiclass VPseudoSSegStore {
   }
 }
 
+multiclass VPseudoISegStore {
+  foreach idx_eew = EEWList in {  // EEW for index argument.
+    foreach idx_lmul = MxSet<idx_eew>.m in {  // LMUL for index argument.
+      foreach val_lmul = MxList.m in {  // LMUL for the value.
+        defvar IdxLInfo = idx_lmul.MX;
+        defvar IdxVreg = idx_lmul.vrclass;
+        defvar ValLInfo = val_lmul.MX;
+        let VLMul = val_lmul.value in {
+          foreach nf = NFSet<val_lmul>.L in {
+            defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
+            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
+              VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
+              VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+          }
+        }
+      }
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
@@ -3036,6 +3092,8 @@ defm PseudoVLOXSEG : VPseudoISegLoad;
 defm PseudoVLUXSEG : VPseudoISegLoad;
 defm PseudoVSSEG : VPseudoUSSegStore;
 defm PseudoVSSSEG : VPseudoSSegStore;
+defm PseudoVSOXSEG : VPseudoISegStore;
+defm PseudoVSUXSEG : VPseudoISegStore;
 
 //===----------------------------------------------------------------------===//
 // 8. Vector AMO Operations
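The multiclass yields one pseudo per (nf, index EEW, index LMUL, value LMUL)
combination, named as in the def lines above. For instance (register choices
illustrative), PseudoVSOXSEG2EI16_V_M1_M2 covers an nf=2 ordered indexed
segment store with 16-bit indices at index LMUL=1 and value LMUL=2, which is
ultimately emitted as:

    vsoxseg2ei16.v v8, (a0), v12        # unmasked pseudo
    vsoxseg2ei16.v v8, (a0), v12, v0.t  # _MASK pseudo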