
[RISCV] Define the vfclass RVV intrinsics

Define the `vfclass` IR intrinsics for the respective V instructions.

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Evandro Menezes <evandro.menezes@sifive.com>

Differential Revision: https://reviews.llvm.org/D94356
Evandro Menezes 2020-12-30 21:51:41 -06:00
parent 0557f33853
commit c0ec25ab1b
4 changed files with 1247 additions and 5 deletions
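
For reference, the unmasked and masked forms of the new intrinsic look as follows in LLVM IR (declarations taken from the tests added by this commit; the note on the result encoding follows the vfclass.v description in the RISC-V V specification):

; Unmasked form. Input: (vector_in, vl).
declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
  <vscale x 1 x half>,
  i32);

; Masked form. Input: (maskedoff, vector_in, mask, vl); elements whose mask
; bit is clear are taken from the maskedoff operand.
declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  i32);

; Each result element has exactly one of bits 0-9 set, indicating (in order)
; -inf, negative normal, negative subnormal, -0, +0, positive subnormal,
; positive normal, +inf, signaling NaN, quiet NaN.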

llvm/include/llvm/IR/IntrinsicsRISCV.td

@@ -299,7 +299,21 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic {
let ExtendOperand = 3;
}
// For FP classify operations.
// Output: (bit mask type output)
// Input: (vector_in, vl)
class RISCVClassifyNoMask
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
[llvm_anyvector_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic;
// For FP classify operations with mask.
// Output: (bit mask type output)
// Input: (maskedoff, vector_in, mask, vl)
class RISCVClassifyMask
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
[LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic;
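// Note (explanatory, not part of this patch): LLVMVectorOfBitcastsToInt<0>
// selects the integer vector type with the same element count and element
// width as overloaded type 0, so classifying, e.g., nxv1f16 produces an
// nxv1i16 result, matching the integer output of vfclass.v.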
// For Saturating binary operations.
// The destination vector type is the same as first source vector.
// Input: (vector_in, vector_in/scalar_in, vl)
@@ -485,6 +499,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVUnaryAANoMask;
def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
}
multiclass RISCVUnaryAB {
def "int_riscv_" # NAME : RISCVUnaryABNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
}
// AAX means the destination type(A) is the same as the first source
// type(A). X means any type for the second source operand.
multiclass RISCVBinaryAAX {
@@ -526,6 +544,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVCompareNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
}
multiclass RISCVClassify {
def "int_riscv_" # NAME : RISCVClassifyNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
}
multiclass RISCVTernaryWide {
def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
@@ -538,10 +560,6 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
}
multiclass RISCVUnaryAB {
def "int_riscv_" # NAME : RISCVUnaryABNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
}
multiclass RISCVMaskUnaryMOut {
def "int_riscv_" # NAME : RISCVUnaryNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
@@ -708,6 +726,8 @@ let TargetPrefix = "riscv" in {
defm vfsgnjn : RISCVBinaryAAX;
defm vfsgnjx : RISCVBinaryAAX;
defm vfclass : RISCVClassify;
defm vfmerge : RISCVBinaryWithV0;
defm vslideup : RISCVTernaryAAAX;

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

@@ -2706,6 +2706,11 @@ defm PseudoVMFLE : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
defm PseudoVMFGT : VPseudoBinaryM_VX</*IsFloat=*/1>;
defm PseudoVMFGE : VPseudoBinaryM_VX</*IsFloat=*/1>;
//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
defm PseudoVFCLASS : VPseudoUnaryV_V;
//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
@@ -2785,6 +2790,7 @@ defm PseudoVFREDMAX : VPseudoReductionV_VS;
//===----------------------------------------------------------------------===//
defm PseudoVFWREDSUM : VPseudoReductionV_VS;
defm PseudoVFWREDOSUM : VPseudoReductionV_VS;
} // Predicates = [HasStdExtV, HasStdExtF]
//===----------------------------------------------------------------------===//
@@ -3347,6 +3353,11 @@ defm "" : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
defm "" : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
defm "" : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
defm "" : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
//===----------------------------------------------------------------------===//
@@ -3441,6 +3452,7 @@ defm "" : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/
//===----------------------------------------------------------------------===//
defm "" : VPatReductionW_VS<"int_riscv_vfwredsum", "PseudoVFWREDSUM", /*IsFloat=*/1>;
defm "" : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;
} // Predicates = [HasStdExtV, HasStdExtF]
//===----------------------------------------------------------------------===//

llvm/test/CodeGen/RISCV/rvv/vfclass-rv32.ll

@@ -0,0 +1,512 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
<vscale x 1 x half>,
i32);
define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x half> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
<vscale x 1 x half> %0,
i32 %1)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x i16> %0,
<vscale x 1 x half> %1,
<vscale x 1 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x half> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
<vscale x 2 x half>,
i32);
define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x half> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
<vscale x 2 x half> %0,
i32 %1)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x i16> %0,
<vscale x 2 x half> %1,
<vscale x 2 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x half> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
<vscale x 4 x half>,
i32);
define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x half> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
<vscale x 4 x half> %0,
i32 %1)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x i16> %0,
<vscale x 4 x half> %1,
<vscale x 4 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x half> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
<vscale x 8 x half>,
i32);
define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x half> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
<vscale x 8 x half> %0,
i32 %1)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu
; CHECK-NEXT: vfclass.v v16, v18, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x i16> %0,
<vscale x 8 x half> %1,
<vscale x 8 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x half> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
<vscale x 16 x half>,
i32);
define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 16 x half> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
<vscale x 16 x half> %0,
i32 %1)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu
; CHECK-NEXT: vfclass.v v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 16 x i16> %0,
<vscale x 16 x half> %1,
<vscale x 16 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x half> %1,
<vscale x 16 x i1> %2,
i32 %3)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
<vscale x 32 x half>,
i32);
define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 32 x half> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
<vscale x 32 x half> %0,
i32 %1)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x half>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfclass.v v16, v8, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 32 x i16> %0,
<vscale x 32 x half> %1,
<vscale x 32 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
<vscale x 32 x i16> %0,
<vscale x 32 x half> %1,
<vscale x 32 x i1> %2,
i32 %3)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
<vscale x 1 x float>,
i32);
define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x float> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
<vscale x 1 x float> %0,
i32 %1)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x i32> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
<vscale x 2 x float>,
i32);
define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x float> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
<vscale x 2 x float> %0,
i32 %1)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x i32> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
<vscale x 4 x float>,
i32);
define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x float> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
<vscale x 4 x float> %0,
i32 %1)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vfclass.v v16, v18, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x i32> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
<vscale x 8 x float>,
i32);
define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x float> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
<vscale x 8 x float> %0,
i32 %1)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vfclass.v v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x i32> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
<vscale x 16 x float>,
i32);
define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 16 x float> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
<vscale x 16 x float> %0,
i32 %1)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x float>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfclass.v v16, v8, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 16 x i32> %0,
<vscale x 16 x float> %1,
<vscale x 16 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
<vscale x 16 x i32> %0,
<vscale x 16 x float> %1,
<vscale x 16 x i1> %2,
i32 %3)
ret <vscale x 16 x i32> %a
}

llvm/test/CodeGen/RISCV/rvv/vfclass-rv64.ll

@@ -0,0 +1,698 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
<vscale x 1 x half>,
i64);
define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x half> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
<vscale x 1 x half> %0,
i64 %1)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x i16> %0,
<vscale x 1 x half> %1,
<vscale x 1 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x half> %1,
<vscale x 1 x i1> %2,
i64 %3)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
<vscale x 2 x half>,
i64);
define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x half> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
<vscale x 2 x half> %0,
i64 %1)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x i16> %0,
<vscale x 2 x half> %1,
<vscale x 2 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x half> %1,
<vscale x 2 x i1> %2,
i64 %3)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
<vscale x 4 x half>,
i64);
define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x half> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
<vscale x 4 x half> %0,
i64 %1)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x i16> %0,
<vscale x 4 x half> %1,
<vscale x 4 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x half> %1,
<vscale x 4 x i1> %2,
i64 %3)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
<vscale x 8 x half>,
i64);
define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x half> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
<vscale x 8 x half> %0,
i64 %1)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu
; CHECK-NEXT: vfclass.v v16, v18, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x i16> %0,
<vscale x 8 x half> %1,
<vscale x 8 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x half> %1,
<vscale x 8 x i1> %2,
i64 %3)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
<vscale x 16 x half>,
i64);
define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 16 x half> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
<vscale x 16 x half> %0,
i64 %1)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu
; CHECK-NEXT: vfclass.v v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 16 x i16> %0,
<vscale x 16 x half> %1,
<vscale x 16 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x half> %1,
<vscale x 16 x i1> %2,
i64 %3)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
<vscale x 32 x half>,
i64);
define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 32 x half> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
<vscale x 32 x half> %0,
i64 %1)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x half>,
<vscale x 32 x i1>,
i64);
define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
; CHECK-NEXT: vfclass.v v16, v8, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 32 x i16> %0,
<vscale x 32 x half> %1,
<vscale x 32 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
<vscale x 32 x i16> %0,
<vscale x 32 x half> %1,
<vscale x 32 x i1> %2,
i64 %3)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
<vscale x 1 x float>,
i64);
define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x float> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
<vscale x 1 x float> %0,
i64 %1)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x i32> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
i64 %3)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
<vscale x 2 x float>,
i64);
define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x float> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
<vscale x 2 x float> %0,
i64 %1)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x i32> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
i64 %3)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
<vscale x 4 x float>,
i64);
define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x float> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
<vscale x 4 x float> %0,
i64 %1)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vfclass.v v16, v18, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x i32> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
i64 %3)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
<vscale x 8 x float>,
i64);
define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x float> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
<vscale x 8 x float> %0,
i64 %1)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vfclass.v v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x i32> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
i64 %3)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
<vscale x 16 x float>,
i64);
define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 16 x float> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
<vscale x 16 x float> %0,
i64 %1)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x float>,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
; CHECK-NEXT: vfclass.v v16, v8, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 16 x i32> %0,
<vscale x 16 x float> %1,
<vscale x 16 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
<vscale x 16 x i32> %0,
<vscale x 16 x float> %1,
<vscale x 16 x i1> %2,
i64 %3)
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
<vscale x 1 x double>,
i64);
define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x double> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
<vscale x 1 x double> %0,
i64 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vfclass.v v16, v17, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x i64> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
i64 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
<vscale x 2 x double>,
i64);
define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x double> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
<vscale x 2 x double> %0,
i64 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vfclass.v v16, v18, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x i64> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
i64 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
<vscale x 4 x double>,
i64);
define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x double> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
<vscale x 4 x double> %0,
i64 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vfclass.v v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x i64> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
i64 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
<vscale x 8 x double>,
i64);
define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vfclass.v v16, v16
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x double> %0,
i64 %1) nounwind {
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
<vscale x 8 x double> %0,
i64 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x double>,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vfclass.v v16, v8, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x i64> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
i64 %3) nounwind {
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
i64 %3)
ret <vscale x 8 x i64> %a
}