//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.
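//
// For example (illustrative only, assuming a plain i32* pointer operand), the
// i32 exchange variant defined below would be referred to in IR as:
//   i32 @llvm.riscv.masked.atomicrmw.xchg.i32.p0i32(i32*, i32, i32, i32 imm)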

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
  // Unsigned min and max don't need the extra operand.
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

  class BitManipGPRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
  class BitManipGPRGPRIntrinsics
      : Intrinsic<[llvm_any_ty],
                  [LLVMMatchType<0>, LLVMMatchType<0>],
                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
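
  // These are overloaded on their GPR type, so (as an illustrative example)
  // the RV64 form of clmul defined below is referred to in IR as
  // @llvm.riscv.clmul.i64.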

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc
  def int_riscv_clmul : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

  // Zbe
  def int_riscv_bcompress : BitManipGPRGPRIntrinsics;
  def int_riscv_bdecompress : BitManipGPRGPRIntrinsics;

  // Zbp
  def int_riscv_grev : BitManipGPRGPRIntrinsics;
  def int_riscv_gorc : BitManipGPRGPRIntrinsics;
  def int_riscv_shfl : BitManipGPRGPRIntrinsics;
  def int_riscv_unshfl : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_n : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_b : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_h : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm_w : BitManipGPRGPRIntrinsics;

  // Zbr
  def int_riscv_crc32_b : BitManipGPRIntrinsics;
  def int_riscv_crc32_h : BitManipGPRIntrinsics;
  def int_riscv_crc32_w : BitManipGPRIntrinsics;
  def int_riscv_crc32_d : BitManipGPRIntrinsics;
  def int_riscv_crc32c_b : BitManipGPRIntrinsics;
  def int_riscv_crc32c_h : BitManipGPRIntrinsics;
  def int_riscv_crc32c_w : BitManipGPRIntrinsics;
  def int_riscv_crc32c_d : BitManipGPRIntrinsics;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

class RISCVVIntrinsic {
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended. If set to zero then the intrinsic
  // does not have any operand that must be extended.
  bits<4> SplatOperand = 0;
}

let TargetPrefix = "riscv" in {
  // We use anyint here, but only XLen is supported.
  def int_riscv_vsetvli : Intrinsic<[llvm_anyint_ty],
                                    /* AVL */ [LLVMMatchType<0>,
                                    /* VSEW */ LLVMMatchType<0>,
                                    /* VLMUL */ LLVMMatchType<0>],
                                    [IntrNoMem, IntrHasSideEffects,
                                     ImmArg<ArgIndex<1>>,
                                     ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                                       /* VSEW */ [LLVMMatchType<0>,
                                       /* VLMUL */ LLVMMatchType<0>],
                                       [IntrNoMem, IntrHasSideEffects,
                                        ImmArg<ArgIndex<0>>,
                                        ImmArg<ArgIndex<1>>]>;
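
  // A minimal usage sketch (illustrative only; the SEW/VLMUL operands are
  // immediates that follow the vtype field encodings, here assuming e32 = 2
  // and m1 = 0):
  //   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 0)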

  // For unit stride load
  // Input: (pointer, vl)
  class RISCVUSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride fault-only-first load
  // Input: (pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFF
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl)
  class RISCVUSLoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFFMask
        : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>, RISCVVIntrinsic;
  // For strided load
  // Input: (pointer, stride, vl)
  class RISCVSLoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl)
  class RISCVSLoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed load
  // Input: (pointer, index, vl)
  class RISCVILoad
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl)
  class RISCVILoadMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMask
        : Intrinsic<[],
                    [llvm_anyvector_ty,
                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the source vector.
  // Input: (vector_in, vl)
  class RISCVUnaryAANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first source vector (with mask).
  // Input: (vector_in, mask, vl)
  class RISCVUnaryAAMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first and second source vectors.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first and second source vectors.
  // Input: (vector_in, int_vector_in, vl)
  class RISCVRGatherVVNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first and second source vectors.
  // Input: (vector_in, vector_in, int_vector_in, vl)
  class RISCVRGatherVVMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // Input: (vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first and second source vectors.
  // Input: (vector_in, vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first source vector, and the
  // second operand is XLen.
  // Input: (vector_in, xlen_in, vl)
  class RISCVGatherVXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
  }
  // The destination vector type is the same as the first source vector (with mask).
  // The second operand is XLen.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl)
  class RISCVGatherVXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
  }
  // The destination vector type is the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // The destination vector type is the same as the first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 3;
  }
  // The destination vector type is the same as the first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is the same as the first source vector (with mask).
  // The second source operand must match the destination type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryAAShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is NOT the same as the first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // The destination vector type is NOT the same as the first source vector (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryABXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector. The
  // second source operand must match the destination type or be an XLen scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is NOT the same as the first source vector (with mask).
  // The second source operand must match the destination type or be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVBinaryABShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For binary operations with V0 as input.
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareNoMask
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMask
        : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 3;
  }
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (vector_in, vl)
  class RISCVClassifyNoMask
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVClassifyMask
        : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let SplatOperand = 3;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
  // For Saturating binary operations with mask.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryAAShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
  // For Saturating binary operations.
  // The destination vector type is NOT the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryABShiftNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as first source vector (with mask).
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVSaturatingBinaryABShiftMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic;
  class RISCVTernaryAAAXNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  class RISCVTernaryAAAXMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic;
  class RISCVTernaryAAXANoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  class RISCVTernaryAAXAMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  class RISCVTernaryWideNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  class RISCVTernaryWideMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let SplatOperand = 2;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source vectors.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source vectors.
  // The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
  class RISCVReductionMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskUnarySOutNoMask
        : Intrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskUnarySOutMask
        : Intrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is NOT the same as the source vector.
  // Input: (vector_in, vl)
  class RISCVUnaryABNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // The destination vector type is NOT the same as the source vector (with mask).
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVUnaryABMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskUnaryMOutMask
        : Intrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For Conversion unary operations.
  // Input: (vector_in, vl)
  class RISCVConversionNoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl)
  class RISCVConversionMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic;
  // For atomic operations without mask
  // Input: (base, index, value, vl)
  class RISCVAMONoMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For atomic operations with mask
  // Input: (base, index, value, mask, vl)
  class RISCVAMOMask
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;

  // For unit stride segment load
  // Input: (pointer, vl)
  class RISCVUSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
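  // As an illustration of the !listconcat/!listsplat recipe above, an nf = 2
  // instantiation returns two vectors of the overloaded type, i.e. its result
  // type list is [llvm_anyvector_ty, LLVMMatchType<0>].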
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl)
  class RISCVUSSegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;

  // For unit stride fault-only-first segment load
  // Input: (pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFF<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    [LLVMPointerToElt<0>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSSegLoadFFMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic;

  // For strided segment load
  // Input: (pointer, offset, vl)
  class RISCVSSegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For strided segment load with mask
  // Input: (maskedoff, pointer, offset, mask, vl)
  class RISCVSSegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;

  // For indexed segment load
  // Input: (pointer, index, vl)
  class RISCVISegLoad<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    [LLVMPointerToElt<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl)
  class RISCVISegLoadMask<int nf>
        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [LLVMPointerToElt<0>,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;

  // For unit stride segment store
  // Input: (value, pointer, vl)
  class RISCVUSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  class RISCVUSSegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;

  // For strided segment store
  // Input: (value, pointer, offset, vl)
  class RISCVSSegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For strided segment store with mask
  // Input: (value, pointer, offset, mask, vl)
  class RISCVSSegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;

  // For indexed segment store
  // Input: (value, pointer, index, vl)
  class RISCVISegStore<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  class RISCVISegStoreMask<int nf>
        : Intrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;

  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
  }
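  // As an illustration, `defm vle : RISCVUSLoad;` further below therefore
  // produces the two records int_riscv_vle and int_riscv_vle_mask.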
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMask;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
  }
  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMask;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMask;
  }
  // AAX means the destination type(A) is the same as the first source
  // type(A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
  }
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount so it
  // must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMask;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMask;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMask;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMask;
  }
  // ABX means the destination type(A) is different from the first source
  // type(B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
  }
  // Like RISCVBinaryABX, but the second operand is used as a shift amount so it
  // must be a vector or an XLen scalar.
  multiclass RISCVBinaryABShift {
    def "int_riscv_" # NAME : RISCVBinaryABShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMask;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMask;
  }
  multiclass RISCVSaturatingBinaryAAShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMask;
  }
  multiclass RISCVSaturatingBinaryABShift {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMask;
  }
  multiclass RISCVTernaryAAAX {
    def "int_riscv_" # NAME : RISCVTernaryAAAXNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMask;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMask;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
  }
  multiclass RISCVMaskUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
  }
  multiclass RISCVMaskUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask;
  }
  multiclass RISCVConversion {
    def "int_riscv_" # NAME : RISCVConversionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
  }
  multiclass RISCVAMO {
    def "int_riscv_" # NAME : RISCVAMONoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVAMOMask;
  }
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMask<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMask<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
  }

  defm vle : RISCVUSLoad;
  defm vleff : RISCVUSLoadFF;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;
  defm vluxei : RISCVILoad;
  defm vloxei : RISCVILoad;
  defm vsoxei : RISCVIStore;
  defm vsuxei : RISCVIStore;

  def int_riscv_vle1 : RISCVUSLoad;
  def int_riscv_vse1 : RISCVUSStore;

  defm vamoswap : RISCVAMO;
  defm vamoadd : RISCVAMO;
  defm vamoxor : RISCVAMO;
  defm vamoand : RISCVAMO;
  defm vamoor : RISCVAMO;
  defm vamomin : RISCVAMO;
  defm vamomax : RISCVAMO;
  defm vamominu : RISCVAMO;
  defm vamomaxu : RISCVAMO;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;

  // [RISCV] Add IR intrinsics for vmsge(u).vv/vx/vi.
  //
  // These instructions don't really exist, but we have ways to emulate them.
  // The .vv form swaps its operands and uses vmsle{u}.vv. The .vi form
  // adjusts the immediate and uses vmsgt{u}.vi when possible. For the .vx
  // form we need one of the multi-instruction sequences from the V extension
  // spec.
  //
  // For unmasked vmsge{u}.vx we use:
  //   vmslt{u}.vx vd, va, x;  vmnand.mm vd, vd, vd
  // When mask and maskedoff are the same value (vmsge{u}.vx v0, va, x, v0.t,
  // i.e. the vd == v0 case), a temporary is required, so we use:
  //   vmslt{u}.vx vt, va, x;  vmandnot.mm vd, vd, vt
  // For the other masked cases we use:
  //   vmslt{u}.vx vd, va, x, v0.t;  vmxor.mm vd, vd, v0
  // We trust register allocation to keep vd in vmslt{u}.vx from being v0,
  // since v0 is still needed by the vmxor.
  //
  // Differential Revision: https://reviews.llvm.org/D100925
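
  // Illustrative note (a sketch, not additional definitions): the two defm
  // lines below follow the same NAME-paste convention as the multiclasses
  // earlier in this file, so each is expected to yield an unmasked and a
  // masked intrinsic record:
  //
  //   int_riscv_vmsgeu, int_riscv_vmsgeu_mask
  //   int_riscv_vmsge,  int_riscv_vmsge_mask
  //
  // RISCVISelDAGToDAG then expands calls to these intrinsics into the
  // vmslt{u}.vx-based sequences described above, since no real vmsge{u}.vx
  // instruction exists.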

  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;

  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu : RISCVTernaryWide;
  defm vwmacc : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAX;
  defm vfsub : RISCVBinaryAAX;
  defm vfrsub : RISCVBinaryAAX;

  defm vfwadd : RISCVBinaryABX;
  defm vfwsub : RISCVBinaryABX;
  defm vfwadd_w : RISCVBinaryAAX;
  defm vfwsub_w : RISCVBinaryAAX;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  def int_riscv_vmerge : RISCVBinaryWithV0;

  def int_riscv_vmv_v_v : Intrinsic<[llvm_anyvector_ty],
                                    [LLVMMatchType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_v_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMVectorElementType<0>, llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMVectorElementType<0>, llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;

  def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                    [llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : Intrinsic<[llvm_anyint_ty],
                                    [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                     llvm_anyint_ty],
                                    [IntrNoMem]>, RISCVVIntrinsic;

  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
                                     [llvm_anyfloat_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;

  defm vfmul : RISCVBinaryAAX;
  defm vfdiv : RISCVBinaryAAX;
  defm vfrdiv : RISCVBinaryAAX;

  defm vfwmul : RISCVBinaryABX;

  defm vfmacc : RISCVTernaryAAXA;
  defm vfnmacc : RISCVTernaryAAXA;
  defm vfmsac : RISCVTernaryAAXA;
  defm vfnmsac : RISCVTernaryAAXA;
  defm vfmadd : RISCVTernaryAAXA;
  defm vfnmadd : RISCVTernaryAAXA;
  defm vfmsub : RISCVTernaryAAXA;
  defm vfnmsub : RISCVTernaryAAXA;

  defm vfwmacc : RISCVTernaryWide;
  defm vfwnmacc : RISCVTernaryWide;
  defm vfwmsac : RISCVTernaryWide;
  defm vfwnmsac : RISCVTernaryWide;

  defm vfsqrt : RISCVUnaryAA;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAA;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RISCVTernaryAAAX;
  defm vslidedown : RISCVTernaryAAAX;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def "int_riscv_vcompress" : RISCVUnaryAAMask;

  defm vaaddu : RISCVSaturatingBinaryAAX;
  defm vaadd : RISCVSaturatingBinaryAAX;
  defm vasubu : RISCVSaturatingBinaryAAX;
  defm vasub : RISCVSaturatingBinaryAAX;

  defm vsmul : RISCVSaturatingBinaryAAX;

  defm vssrl : RISCVSaturatingBinaryAAShift;
  defm vssra : RISCVSaturatingBinaryAAShift;

  defm vnclipu : RISCVSaturatingBinaryABShift;
  defm vnclip : RISCVSaturatingBinaryABShift;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReduction;
  defm vfredsum : RISCVReduction;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredsum : RISCVReduction;
  defm vfwredosum : RISCVReduction;

  def int_riscv_vmand: RISCVBinaryAAANoMask;
  def int_riscv_vmnand: RISCVBinaryAAANoMask;
  def int_riscv_vmandnot: RISCVBinaryAAANoMask;
  def int_riscv_vmxor: RISCVBinaryAAANoMask;
  def int_riscv_vmor: RISCVBinaryAAANoMask;
  def int_riscv_vmnor: RISCVBinaryAAANoMask;
  def int_riscv_vmornot: RISCVBinaryAAANoMask;
  def int_riscv_vmxnor: RISCVBinaryAAANoMask;

  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vpopc : RISCVMaskUnarySOut;
  defm vfirst : RISCVMaskUnarySOut;
  defm vmsbf : RISCVMaskUnaryMOut;
  defm vmsof : RISCVMaskUnaryMOut;
  defm vmsif : RISCVMaskUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversion;
  defm vfcvt_x_f_v : RISCVConversion;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversion;
  defm vfcvt_f_x_v : RISCVConversion;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversion;
  defm vfwcvt_x_f_v : RISCVConversion;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversion;
  defm vfncvt_f_x_w : RISCVConversion;
  defm vfncvt_xu_f_w : RISCVConversion;
  defm vfncvt_x_f_w : RISCVConversion;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversion;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (mask type input, vl)
  def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
                                  [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                   llvm_anyint_ty],
                                  [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl)
  def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty],
                                       [LLVMMatchType<0>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                        llvm_anyint_ty],
                                       [IntrNoMem]>, RISCVVIntrinsic;
  // Output: (vector)
  // Input: (vl)
  def int_riscv_vid : RISCVNullaryIntrinsic;
  // Output: (vector)
  // Input: (maskedoff, mask, vl)
  def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty],
                                     [LLVMMatchType<0>,
                                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                      llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;

  // [RISCV] Implement vlseg intrinsics.
  //
  // For Zvlsseg, we need contiguous vector registers for the values, so we
  // define new register classes for the different combinations of NF (number
  // of fields) and LMUL. For example, when NF = 3 and LMUL = 2, the values
  // are assigned to (V0M2, V2M2, V4M2), (V2M2, V4M2, V6M2), (V4M2, V6M2,
  // V8M2), ...
  //
  // We define the vlseg intrinsics with multiple outputs. There is no way to
  // describe codegen patterns with multiple outputs in the tablegen files, so
  // we do the codegen in RISCVISelDAGToDAG and use EXTRACT_SUBREG to extract
  // each output value. The multiple scalable vector values are put into a
  // struct; this patch depends on support for structs of scalable vectors.
  //
  // Differential Revision: https://reviews.llvm.org/D94229

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
}
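
  // Illustrative expansion (a sketch, not additional definitions) of a single
  // iteration of the loop above, for nf = 2; each defm also produces the
  // corresponding "_mask" intrinsic through the multiclasses defined at the
  // top of this section:
  //
  //   defm vlseg2   : RISCVUSSegLoad<2>;    // int_riscv_vlseg2
  //   defm vlseg2ff : RISCVUSSegLoadFF<2>;  // int_riscv_vlseg2ff
  //   defm vlsseg2  : RISCVSSegLoad<2>;     // int_riscv_vlsseg2
  //   defm vloxseg2 : RISCVISegLoad<2>;     // int_riscv_vloxseg2
  //   defm vluxseg2 : RISCVISegLoad<2>;     // int_riscv_vluxseg2
  //   defm vsseg2   : RISCVUSSegStore<2>;   // int_riscv_vsseg2
  //   defm vssseg2  : RISCVSSegStore<2>;    // int_riscv_vssseg2
  //   defm vsoxseg2 : RISCVISegStore<2>;    // int_riscv_vsoxseg2
  //   defm vsuxseg2 : RISCVISegStore<2>;    // int_riscv_vsuxseg2
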
} // TargetPrefix = "riscv"