Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-26 04:32:44 +01:00
[RISCV] Split vrgather intrinsics into separate vrgather.vv and vrgather.vx intrinsics.
The vrgather.vv instruction uses a vector of indices with the same SEW as operand 0. The vrgather.vx instruction uses a scalar index operand of XLen bits.

By splitting this into two intrinsics we are able to use LLVMMatchType in the definition to avoid specifying the type for the index operand when creating the IR for the intrinsic. For .vv it will match the operand 0 type, and for .vx it will match the type of the vl operand, which we already needed to specify a type for.

I'm considering splitting more intrinsics. This was a somewhat odd one because the .vx form doesn't use the element type; it always uses XLen.

Reviewed By: HsiangKai

Differential Revision: https://reviews.llvm.org/D95979
parent 7065f0a696
commit 29b2411301
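To illustrate the split described in the commit message, here is a minimal sketch of the two unmasked intrinsic declarations as they might appear in IR, assuming a <vscale x 2 x i32> result on RV64 (XLen = i64). The concrete types and the overloaded-name suffixes are illustrative assumptions, not taken from this page:

  ; vrgather.vv: the index operand is a vector with the same SEW as operand 0.
  declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
      <vscale x 2 x i32>,   ; source vector
      <vscale x 2 x i32>,   ; vector of indices, matches the operand 0 type
      i64)                  ; vl

  ; vrgather.vx: the index operand is a scalar of XLen bits (i64 on RV64).
  declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
      <vscale x 2 x i32>,   ; source vector
      i64,                  ; scalar index (XLen)
      i64)                  ; vl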
@@ -231,12 +231,35 @@ let TargetPrefix = "riscv" in {
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first and second source vector.
-  // Input: (vector_in, vector_in, vl)
-  class RISCVBinaryAAAMask
+  // Input: (vector_in, int_vector_in, vl)
+  class RISCVRGatherVVNoMask
         : Intrinsic<[llvm_anyvector_ty],
-                    [LLVMMatchType<0>, LLVMMatchType<0>,
+                    [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
+  // For destination vector type is the same as first and second source vector.
+  // Input: (vector_in, vector_in, int_vector_in, vl)
+  class RISCVRGatherVVMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // For destination vector type is the same as first source vector, and the
+  // second operand is XLen.
+  // Input: (vector_in, xlen_in, vl)
+  class RISCVGatherVXNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+  }
+  // For destination vector type is the same as first source vector (with mask).
+  // Second operand is XLen.
+  // Input: (maskedoff, vector_in, xlen_in, mask, vl)
+  class RISCVGatherVXMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+  }
   // For destination vector type is the same as first source vector.
   // Input: (vector_in, vector_in/scalar_in, vl)
   class RISCVBinaryAAXNoMask
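As a reading aid for the classes above (not part of the change itself): LLVMVectorOfBitcastsToInt<0> turns the index operand into an integer vector with the same element count and element width as the overloaded result type, which is what lets floating-point gathers share the definition. A hypothetical nxv2f32 example on RV64 would look roughly like:

  ; Data elements stay f32; the indices use the bitcast-to-int shape (nxv2i32).
  declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(
      <vscale x 2 x float>, ; source vector (overloaded type 0)
      <vscale x 2 x i32>,   ; indices: LLVMVectorOfBitcastsToInt<0>
      i64)                  ; vl: llvm_anyint_ty (overloaded type 1)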
@@ -688,6 +711,14 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
   }
+  multiclass RISCVRGatherVV {
+    def "int_riscv_" # NAME : RISCVRGatherVVNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMask;
+  }
+  multiclass RISCVRGatherVX {
+    def "int_riscv_" # NAME : RISCVGatherVXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMask;
+  }
   // ABX means the destination type(A) is different from the first source
   // type(B). X means any type for the second source operand.
   multiclass RISCVBinaryABX {
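Each multiclass above emits an unmasked record and a _mask record. For orientation, the masked vrgather.vv variant would correspond to a declaration along these lines (types again chosen only for illustration):

  ; Masked form: (maskedoff, source, index vector, mask, vl).
  declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(
      <vscale x 2 x i32>,   ; maskedoff (merge value)
      <vscale x 2 x i32>,   ; source vector
      <vscale x 2 x i32>,   ; vector of indices
      <vscale x 2 x i1>,    ; mask
      i64)                  ; vl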
@@ -965,10 +996,11 @@ let TargetPrefix = "riscv" in {
   defm vfslide1up : RISCVBinaryAAX;
   defm vfslide1down : RISCVBinaryAAX;
 
-  defm vrgather : RISCVBinaryAAX;
+  defm vrgather_vv : RISCVRGatherVV;
+  defm vrgather_vx : RISCVRGatherVX;
   defm vrgatherei16 : RISCVBinaryAAX;
 
-  def "int_riscv_vcompress" : RISCVBinaryAAAMask;
+  def "int_riscv_vcompress" : RISCVUnaryAAMask;
 
   defm vaaddu : RISCVSaturatingBinaryAAX;
   defm vaadd : RISCVSaturatingBinaryAAX;
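A sketch of call sites for the two new unmasked intrinsics after this change; the function, value names, and type choices are hypothetical and only meant to show how the index operand differs between the two forms:

  define <vscale x 2 x i32> @gather_both(<vscale x 2 x i32> %src,
                                         <vscale x 2 x i32> %idx,
                                         i64 %x, i64 %vl) {
    ; Vector of indices: selected as vrgather.vv.
    %g1 = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
              <vscale x 2 x i32> %src, <vscale x 2 x i32> %idx, i64 %vl)
    ; Single XLen-sized index: selected as vrgather.vx.
    %g2 = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
              <vscale x 2 x i32> %src, i64 %x, i64 %vl)
    ; Return one result just to keep the example well-formed.
    ret <vscale x 2 x i32> %g2
  }

  declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(
      <vscale x 2 x i32>, <vscale x 2 x i32>, i64)
  declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
      <vscale x 2 x i32>, i64, i64)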
@@ -2933,9 +2933,9 @@ multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
 multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
                                     list<VTypeInfo> vtilist, Operand ImmType = simm5>
 {
-  defm "" : VPatBinaryV_VV_INT<intrinsic, instruction, vtilist>;
-  defm "" : VPatBinaryV_VX_INT<intrinsic, instruction, vtilist>;
-  defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
+  defm "" : VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>;
+  defm "" : VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>;
+  defm "" : VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;
 }
 
 multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
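The last pattern above routes the immediate form through the _vx intrinsic, so a call whose index is a small constant, roughly as sketched below, can be matched by the VPatBinaryV_VI patterns and selected as vrgather.vi; the constant and types are illustrative only:

  define <vscale x 2 x i32> @gather_imm(<vscale x 2 x i32> %src, i64 %vl) {
    ; Constant XLen index: eligible for the immediate (vrgather.vi) patterns.
    %g = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
             <vscale x 2 x i32> %src, i64 7, i64 %vl)
    ret <vscale x 2 x i32> %g
  }

  declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(
      <vscale x 2 x i32>, i64, i64)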
File diff suppressed because it is too large
File diff suppressed because it is too large