
[RISCV] Define vmclr.m/vmset.m intrinsics.

Define the vmclr.m/vmset.m intrinsics and lower them to vmxor.mm/vmxnor.mm.

Ideally all RVV pseudo instructions could be implemented in the C header,
but these two instructions take no input, so codegen cannot guarantee
that the source register ends up being the same as the destination.

We expand each pseudo v-inst into the corresponding v-inst in the
RISCVExpandPseudoInsts pass.
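
The expansion is correct regardless of the previous contents of vd, because
both replacement instructions compute a constant when their two source
operands are identical: x ^ x is all zeros and ~(x ^ x) is all ones for any
x. A minimal scalar sketch of these identities (illustrative C++ only, not
part of this commit):

#include <cassert>
#include <cstdint>

int main() {
  // Any value standing in for the undefined initial contents of vd.
  uint64_t vd = 0xDEADBEEFCAFEF00Dull;

  // vmclr.m vd => vmxor.mm vd, vd, vd: x ^ x == 0 for any x.
  assert((vd ^ vd) == 0);

  // vmset.m vd => vmxnor.mm vd, vd, vd: ~(x ^ x) == all ones for any x.
  assert(~(vd ^ vd) == UINT64_MAX);

  return 0;
}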

Reviewed By: craig.topper, frasercrmck

Differential Revision: https://reviews.llvm.org/D93849
Zakk Chen 2020-12-27 20:00:33 -08:00
parent 7a9a2b83c6
commit 55452d0592
7 changed files with 481 additions and 3 deletions


@@ -404,6 +404,12 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMMatchType<0>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic;
// Output: (vector)
// Input: (vl)
class RISCVNullaryIntrinsic
: Intrinsic<[llvm_anyvector_ty],
[llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic;
multiclass RISCVUSLoad {
def "int_riscv_" # NAME : RISCVUSLoad;
@@ -701,6 +707,8 @@ let TargetPrefix = "riscv" in {
def int_riscv_vmnor: RISCVBinaryAAANoMask;
def int_riscv_vmornot: RISCVBinaryAAANoMask;
def int_riscv_vmxnor: RISCVBinaryAAANoMask;
def int_riscv_vmclr : RISCVNullaryIntrinsic;
def int_riscv_vmset : RISCVNullaryIntrinsic;
defm vpopc : RISCVMaskUnarySOut;
defm vfirst : RISCVMaskUnarySOut;
@@ -724,9 +732,8 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic;
// Output: (vector)
// Input: (vl)
def int_riscv_vid : Intrinsic<[llvm_anyvector_ty],
[llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic;
def int_riscv_vid : RISCVNullaryIntrinsic;
// Output: (vector)
// Input: (maskedoff, mask, vl)
def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],


@@ -60,6 +60,8 @@ private:
MachineBasicBlock::iterator MBBI,
MachineBasicBlock::iterator &NextMBBI);
bool expandVSetVL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
bool expandVMSET_VMCLR(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, unsigned Opcode);
};
char RISCVExpandPseudo::ID = 0;
@@ -102,6 +104,24 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
case RISCV::PseudoVSETVLI:
return expandVSetVL(MBB, MBBI);
case RISCV::PseudoVMCLR_M_B1:
case RISCV::PseudoVMCLR_M_B2:
case RISCV::PseudoVMCLR_M_B4:
case RISCV::PseudoVMCLR_M_B8:
case RISCV::PseudoVMCLR_M_B16:
case RISCV::PseudoVMCLR_M_B32:
case RISCV::PseudoVMCLR_M_B64:
// vmclr.m vd => vmxor.mm vd, vd, vd
return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXOR_MM);
case RISCV::PseudoVMSET_M_B1:
case RISCV::PseudoVMSET_M_B2:
case RISCV::PseudoVMSET_M_B4:
case RISCV::PseudoVMSET_M_B8:
case RISCV::PseudoVMSET_M_B16:
case RISCV::PseudoVMSET_M_B32:
case RISCV::PseudoVMSET_M_B64:
// vmset.m vd => vmxnor.mm vd, vd, vd
return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXNOR_MM);
}
return false;
@@ -213,6 +233,19 @@ bool RISCVExpandPseudo::expandVSetVL(MachineBasicBlock &MBB,
return true;
}
bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned Opcode) {
DebugLoc DL = MBBI->getDebugLoc();
Register DstReg = MBBI->getOperand(0).getReg();
const MCInstrDesc &Desc = TII->get(Opcode);
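// Use vd as both source operands and mark them Undef: the result of
// vmxor.mm/vmxnor.mm with identical sources does not depend on the
// initial contents of vd.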
BuildMI(MBB, MBBI, DL, Desc, DstReg)
.addReg(DstReg, RegState::Undef)
.addReg(DstReg, RegState::Undef);
MBBI->eraseFromParent(); // The pseudo instruction is gone now.
return true;
}
} // end of anonymous namespace
INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",


@@ -538,6 +538,23 @@ class VPseudoNullaryMask<VReg RegClass>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
// Nullary pseudo instructions. They are expanded in the
// RISCVExpandPseudoInsts pass.
class VPseudoNullaryPseudoM<string BaseInst>
: Pseudo<(outs VR:$rd), (ins GPR:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let usesCustomInserter = 1;
let Uses = [VL, VTYPE];
let VLIndex = 1;
let SEWIndex = 2;
// BaseInstr is not used in the RISCVExpandPseudoInsts pass.
// Just fill in a corresponding real v-inst to pass the TableGen check.
let BaseInstr = !cast<Instruction>(BaseInst);
}
// RetClass could be GPR or VReg.
class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
Pseudo<(outs RetClass:$rd),
@@ -821,6 +838,14 @@ multiclass VPseudoMaskNullaryV {
}
}
multiclass VPseudoNullaryPseudoM <string BaseInst> {
foreach mti = AllMasks in {
let VLMul = mti.LMul.value in {
def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">;
}
}
}
multiclass VPseudoUnaryV_M {
defvar constraint = "@earlyclobber $rd";
foreach m = MxList.m in {
@@ -1464,6 +1489,15 @@ multiclass VPatNullaryV<string intrinsic, string instruction>
}
}
multiclass VPatNullaryM<string intrinsic, string inst> {
foreach mti = AllMasks in
def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
(XLenVT GPR:$vl))),
(!cast<Instruction>(inst#"_M_"#mti.BX)
(NoX0 GPR:$vl), mti.SEW)>;
}
multiclass VPatBinary<string intrinsic,
string inst,
string kind,
@@ -2384,6 +2418,10 @@ defm PseudoVMNOR: VPseudoBinaryM_MM;
defm PseudoVMORNOT: VPseudoBinaryM_MM;
defm PseudoVMXNOR: VPseudoBinaryM_MM;
// Pseudo instructions
defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">;
defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
//===----------------------------------------------------------------------===//
// 16.2. Vector mask population count vpopc
//===----------------------------------------------------------------------===//
@@ -2913,6 +2951,10 @@ defm "" : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
defm "" : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">;
defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
// pseudo instructions
defm "" : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
defm "" : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;
//===----------------------------------------------------------------------===//
// 16.2. Vector mask population count vpopc
//===----------------------------------------------------------------------===//


@@ -0,0 +1,99 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
i32);
define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
i32 %0)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
i32);
define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
i32 %0)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
i32);
define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
i32 %0)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
i32);
define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
i32 %0)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
i32);
define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
i32 %0)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
i32);
define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
i32 %0)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
i32);
define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
i32 %0)
ret <vscale x 64 x i1> %a
}


@@ -0,0 +1,99 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
i64);
define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
i64 %0)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
i64);
define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
i64 %0)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
i64);
define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
i64 %0)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
i64);
define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
i64 %0)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
i64);
define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
i64 %0)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
i64);
define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
i64 %0)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
i64);
define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8
; CHECK: vmclr.m {{v[0-9]+}}
%a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
i64 %0)
ret <vscale x 64 x i1> %a
}


@@ -0,0 +1,99 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
i32);
define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
i32 %0)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
i32);
define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
i32 %0)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
i32);
define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
i32 %0)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
i32);
define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
i32 %0)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
i32);
define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
i32 %0)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
i32);
define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
i32 %0)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
i32);
define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i32 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
i32 %0)
ret <vscale x 64 x i1> %a
}


@@ -0,0 +1,99 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
i64);
define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
i64 %0)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
i64);
define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
i64 %0)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
i64);
define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
i64 %0)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
i64);
define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
i64 %0)
ret <vscale x 8 x i1> %a
}
declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
i64);
define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
i64 %0)
ret <vscale x 16 x i1> %a
}
declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
i64);
define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
i64 %0)
ret <vscale x 32 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
i64);
define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i64 %0) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8
; CHECK: vmset.m {{v[0-9]+}}
%a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
i64 %0)
ret <vscale x 64 x i1> %a
}