//===-- IR/VPIntrinsics.def - Describes llvm.vp.* Intrinsics -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains descriptions of the various Vector Predication intrinsics.
// This is used as a central place for enumerating the different instructions
// and should eventually be the place to put comments about the instructions.
//
//===----------------------------------------------------------------------===//
// NOTE: NO INCLUDE GUARD DESIRED!
// Provide definitions of macros so that users of this file do not have to
// define everything to use it...
//
// Register a VP intrinsic and begin its property scope.
// All VP intrinsic scopes are top level, i.e., it is illegal to place a
// BEGIN_REGISTER_VP_INTRINSIC within a VP intrinsic scope.
// \p VPID The VP intrinsic id.
// \p MASKPOS The mask operand position.
// \p EVLPOS The explicit vector length operand position.
#ifndef BEGIN_REGISTER_VP_INTRINSIC
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS)
#endif
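
// Illustrative sketch (the helper name getVPMaskParamPos is hypothetical, not
// the in-tree consumer): a user of this file defines the macro before
// including it, so that every VP intrinsic expands to one switch case, e.g.
// to look up the mask operand position:
//
//   llvm::Optional<unsigned> getVPMaskParamPos(llvm::Intrinsic::ID ID) {
//     switch (ID) {
//     default:
//       return llvm::None;
//   #define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS) case llvm::Intrinsic::VPID: return MASKPOS;
//   #include "llvm/IR/VPIntrinsics.def"
//     }
//   }
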
// End the property scope of a VP intrinsic.
#ifndef END_REGISTER_VP_INTRINSIC
#define END_REGISTER_VP_INTRINSIC(VPID)
#endif
// Register a new VP SDNode and begin its property scope.
// When the SDNode scope is nested within a VP intrinsic scope, it is
// implicitly registered as the canonical SDNode for this VP intrinsic.
// \p SDOPC The SelectionDAG Node id (e.g. VP_ADD).
// \p LEGALPOS The operand position of the SDNode that is used for legalizing
// this SDNode. This can be `-1`, in which case the return type of
// the SDNode is used.
// \p TDNAME The name of the TableGen definition of this SDNode.
// \p MASKPOS The mask operand position.
// \p EVLPOS The explicit vector length operand position.
#ifndef BEGIN_REGISTER_VP_SDNODE
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS)
#endif
// End the property scope of a new VP SDNode.
#ifndef END_REGISTER_VP_SDNODE
#define END_REGISTER_VP_SDNODE(SDOPC)
#endif
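
// Illustrative sketch (hypothetical helper; assumes the ISD::VP_* opcodes are
// generated elsewhere by a similar include): overriding the SDNode macro
// expands to one case label per VP SDNode, e.g. to test whether an ISD opcode
// is a VP operation:
//
//   bool isVPSDNodeOpcode(unsigned Opcode) {
//     switch (Opcode) {
//     default:
//       return false;
//   #define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS) case llvm::ISD::SDOPC:
//   #include "llvm/IR/VPIntrinsics.def"
//       return true;
//     }
//   }
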
// Helper macros for the common "1:1 - Intrinsic : SDNode" case.
//
// There is one VP intrinsic that maps directly to one SDNode that goes by the
// same name. Since the operands are also the same, we open the property
// scopes for both the VPIntrinsic and the SDNode at once.
//
// \p INTRIN The canonical name (e.g. `vp_add`, which is both the name of the
// intrinsic and the TableGen def of the SDNode).
// \p MASKPOS The mask operand position.
// \p EVLPOS The explicit vector length operand position.
// \p SDOPC The SelectionDAG Node id (e.g. VP_ADD).
// \p LEGALPOS The operand position of the SDNode that is used for legalizing
// this SDNode. This can be `-1`, in which case the return type of
// the SDNode is used.
#define BEGIN_REGISTER_VP(INTRIN, MASKPOS, EVLPOS, SDOPC, LEGALPOS) \
  BEGIN_REGISTER_VP_INTRINSIC(INTRIN, MASKPOS, EVLPOS) \
  BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, INTRIN, MASKPOS, EVLPOS)

#define END_REGISTER_VP(INTRIN, SDOPC) \
  END_REGISTER_VP_INTRINSIC(INTRIN) \
  END_REGISTER_VP_SDNODE(SDOPC)
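
// For example, BEGIN_REGISTER_VP(vp_add, 2, 3, VP_ADD, -1), as emitted by the
// integer helper below for llvm.vp.add, expands to
//
//   BEGIN_REGISTER_VP_INTRINSIC(vp_add, 2, 3)
//   BEGIN_REGISTER_VP_SDNODE(VP_ADD, -1, vp_add, 2, 3)
//
// and the matching END_REGISTER_VP(vp_add, VP_ADD) closes both scopes again.
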
// The following macros attach properties to the scope they are placed in. This
// assigns the property to the VP Intrinsic and/or SDNode that belongs to the
// scope.
//
// Property Macros {
// The intrinsic and/or SDNode has the same function as this LLVM IR Opcode.
// \p OPC The standard IR opcode.
#ifndef HANDLE_VP_TO_OPC
#define HANDLE_VP_TO_OPC(OPC)
#endif
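
// Illustrative sketch (hypothetical helper, not the in-tree consumer): the
// opcode property can be turned into a per-intrinsic lookup by also overriding
// the scope macros, so that intrinsics without an opcode break out of the
// switch:
//
//   unsigned getFunctionalOpcodeFor(llvm::Intrinsic::ID ID) {
//     switch (ID) {
//     default:
//       break;
//   #define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS) case llvm::Intrinsic::VPID:
//   #define HANDLE_VP_TO_OPC(OPC) return llvm::Instruction::OPC;
//   #define END_REGISTER_VP_INTRINSIC(VPID) break;
//   #include "llvm/IR/VPIntrinsics.def"
//     }
//     return 0; // Sentinel: no functional IR opcode registered.
//   }
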
// Whether the intrinsic may have a rounding mode or exception behavior operand
// bundle.
// \p HASROUND '1' if the intrinsic can have a rounding mode operand bundle,
// '0' otherwise.
// \p HASEXCEPT '1' if the intrinsic can have an exception behavior operand
// bundle, '0' otherwise.
// \p INTRINID The constrained fp intrinsic this VP intrinsic corresponds to.
#ifndef HANDLE_VP_TO_CONSTRAINEDFP
#define HANDLE_VP_TO_CONSTRAINEDFP(HASROUND, HASEXCEPT, INTRINID)
#endif
// Map this VP intrinsic to its canonical functional intrinsic.
#ifndef HANDLE_VP_TO_INTRIN
#define HANDLE_VP_TO_INTRIN(ID)
#endif
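
// Illustrative sketch: the same three-macro switch pattern as above, with
//
//   #define HANDLE_VP_TO_INTRIN(ID) return llvm::Intrinsic::ID;
//
// in place of the opcode override, maps e.g. llvm.vp.load to llvm.masked.load.
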
// This VP intrinsic is a memory operation.
// The pointer argument is at POINTERPOS and the data argument is at DATAPOS
// (or None if the operation has no data argument, as for loads and gathers).
#ifndef HANDLE_VP_IS_MEMOP
#define HANDLE_VP_IS_MEMOP(VPID, POINTERPOS, DATAPOS)
#endif
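
// Illustrative sketch (hypothetical helper): since this macro already carries
// the intrinsic id, it can drive a switch on its own, e.g. to recover the
// pointer operand position of a VP memory intrinsic:
//
//   llvm::Optional<unsigned> getVPPointerParamPos(llvm::Intrinsic::ID ID) {
//     switch (ID) {
//     default:
//       return llvm::None;
//   #define HANDLE_VP_IS_MEMOP(VPID, POINTERPOS, DATAPOS) case llvm::Intrinsic::VPID: return POINTERPOS;
//   #include "llvm/IR/VPIntrinsics.def"
//     }
//   }
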
/// } Property Macros
///// Integer Arithmetic {
// Specialized helper macro for integer binary operators (%x, %y, %mask, %evl).
#ifdef HELPER_REGISTER_BINARY_INT_VP
#error "The internal helper macro HELPER_REGISTER_BINARY_INT_VP is already defined!"
#endif
#define HELPER_REGISTER_BINARY_INT_VP(INTRIN, SDOPC, OPC) \
  BEGIN_REGISTER_VP(INTRIN, 2, 3, SDOPC, -1) \
  HANDLE_VP_TO_OPC(OPC) \
  END_REGISTER_VP(INTRIN, SDOPC)
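
// For example, HELPER_REGISTER_BINARY_INT_VP(vp_add, VP_ADD, Add) just below
// expands to the scope
//
//   BEGIN_REGISTER_VP(vp_add, 2, 3, VP_ADD, -1)
//   HANDLE_VP_TO_OPC(Add)
//   END_REGISTER_VP(vp_add, VP_ADD)
//
// i.e. llvm.vp.add carries its mask at operand position 2 and its explicit
// vector length at position 3, maps to the Add IR opcode, and has VP_ADD
// registered as its canonical SDNode.
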
// llvm.vp.add(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_add, VP_ADD, Add)
// llvm.vp.and(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_and, VP_AND, And)
// llvm.vp.ashr(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_ashr, VP_ASHR, AShr)
// llvm.vp.lshr(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_lshr, VP_LSHR, LShr)
// llvm.vp.mul(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_mul, VP_MUL, Mul)
// llvm.vp.or(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_or, VP_OR, Or)
// llvm.vp.sdiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_sdiv, VP_SDIV, SDiv)
// llvm.vp.shl(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_shl, VP_SHL, Shl)
// llvm.vp.srem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_srem, VP_SREM, SRem)
// llvm.vp.sub(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_sub, VP_SUB, Sub)
// llvm.vp.udiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_udiv, VP_UDIV, UDiv)
// llvm.vp.urem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_urem, VP_UREM, URem)
// llvm.vp.xor(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_xor, VP_XOR, Xor)
#undef HELPER_REGISTER_BINARY_INT_VP
///// } Integer Arithmetic
///// Floating-Point Arithmetic {
// Specialized helper macro for floating-point binary operators
// <operation>(%x, %y, %mask, %evl).
#ifdef HELPER_REGISTER_BINARY_FP_VP
#error \
"The internal helper macro HELPER_REGISTER_BINARY_FP_VP is already defined!"
#endif
#define HELPER_REGISTER_BINARY_FP_VP(OPSUFFIX, SDOPC, OPC) \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 2, 3, SDOPC, -1) \
  HANDLE_VP_TO_OPC(OPC) \
  HANDLE_VP_TO_CONSTRAINEDFP(1, 1, experimental_constrained_##OPSUFFIX) \
  END_REGISTER_VP(vp_##OPSUFFIX, SDOPC)
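
// For example, HELPER_REGISTER_BINARY_FP_VP(fadd, VP_FADD, FAdd) just below
// expands to the scope
//
//   BEGIN_REGISTER_VP(vp_fadd, 2, 3, VP_FADD, -1)
//   HANDLE_VP_TO_OPC(FAdd)
//   HANDLE_VP_TO_CONSTRAINEDFP(1, 1, experimental_constrained_fadd)
//   END_REGISTER_VP(vp_fadd, VP_FADD)
//
// which additionally records llvm.experimental.constrained.fadd as the
// constrained-fp counterpart, with both a rounding mode and an exception
// behavior operand bundle available.
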
// llvm.vp.fadd(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fadd, VP_FADD, FAdd)
// llvm.vp.fsub(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fsub, VP_FSUB, FSub)
// llvm.vp.fmul(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fmul, VP_FMUL, FMul)
// llvm.vp.fdiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fdiv, VP_FDIV, FDiv)
// llvm.vp.frem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(frem, VP_FREM, FRem)
#undef HELPER_REGISTER_BINARY_FP_VP
///// } Floating-Point Arithmetic
///// Memory Operations {
// llvm.vp.store(val,ptr,mask,vlen)
BEGIN_REGISTER_VP(vp_store, 2, 3, VP_STORE, 0)
HANDLE_VP_TO_OPC(Store)
HANDLE_VP_TO_INTRIN(masked_store)
HANDLE_VP_IS_MEMOP(vp_store, 1, 0)
END_REGISTER_VP(vp_store, VP_STORE)
// llvm.vp.scatter(val,ptrs,mask,vlen)
BEGIN_REGISTER_VP(vp_scatter, 2, 3, VP_SCATTER, 0)
HANDLE_VP_TO_INTRIN(masked_scatter)
HANDLE_VP_IS_MEMOP(vp_scatter, 1, 0)
END_REGISTER_VP(vp_scatter, VP_SCATTER)
// llvm.vp.load(ptr,mask,vlen)
BEGIN_REGISTER_VP(vp_load, 1, 2, VP_LOAD, -1)
HANDLE_VP_TO_OPC(Load)
HANDLE_VP_TO_INTRIN(masked_load)
HANDLE_VP_IS_MEMOP(vp_load, 0, None)
END_REGISTER_VP(vp_load, VP_LOAD)
// llvm.vp.gather(ptr,mask,vlen)
BEGIN_REGISTER_VP(vp_gather, 1, 2, VP_GATHER, -1)
HANDLE_VP_TO_INTRIN(masked_gather)
HANDLE_VP_IS_MEMOP(vp_gather, 0, None)
END_REGISTER_VP(vp_gather, VP_GATHER)
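
// For illustration (overload suffixes omitted, element types chosen
// arbitrarily), the IR declarations follow the operand positions registered
// above:
//
//   declare void @llvm.vp.store.*(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %mask, i32 %evl)
//   declare void @llvm.vp.scatter.*(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %mask, i32 %evl)
//   declare <4 x i32> @llvm.vp.load.*(<4 x i32>* %ptr, <4 x i1> %mask, i32 %evl)
//   declare <4 x i32> @llvm.vp.gather.*(<4 x i32*> %ptrs, <4 x i1> %mask, i32 %evl)
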
///// } Memory Operations
#undef BEGIN_REGISTER_VP
#undef BEGIN_REGISTER_VP_INTRINSIC
#undef BEGIN_REGISTER_VP_SDNODE
#undef END_REGISTER_VP
#undef END_REGISTER_VP_INTRINSIC
#undef END_REGISTER_VP_SDNODE
#undef HANDLE_VP_TO_OPC
#undef HANDLE_VP_TO_CONSTRAINEDFP
#undef HANDLE_VP_TO_INTRIN
#undef HANDLE_VP_IS_MEMOP