//===- IntrinsicsAMDGPU.td - Defines AMDGPU intrinsics -----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the R600-specific intrinsics.
//
//===----------------------------------------------------------------------===//

// Reads a preloaded 32-bit register; no memory access, safe to speculate.
class AMDGPUReadPreloadRegisterIntrinsic
  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;

// Same as above, but also exposed as a named GCC builtin.
class AMDGPUReadPreloadRegisterIntrinsicNamed<string name>
  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>, GCCBuiltin<name>;

let TargetPrefix = "r600" in {

// Instantiate _x/_y/_z variants of a preload-register read.
multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz {
  def _x : AMDGPUReadPreloadRegisterIntrinsic;
  def _y : AMDGPUReadPreloadRegisterIntrinsic;
  def _z : AMDGPUReadPreloadRegisterIntrinsic;
}

// As above, with a GCC builtin name derived from `prefix` per component.
multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz_named<string prefix> {
  def _x : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_x")>;
  def _y : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_y")>;
  def _z : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_z")>;
}

defm int_r600_read_global_size : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                                 <"__builtin_r600_read_global_size">;
defm int_r600_read_ngroups : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                             <"__builtin_r600_read_ngroups">;
defm int_r600_read_tgid : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                          <"__builtin_r600_read_tgid">;

defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz;

def int_r600_group_barrier : GCCBuiltin<"__builtin_r600_group_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

// AS 7 is PARAM_I_ADDRESS, used for kernel arguments
def int_r600_implicitarg_ptr :
  GCCBuiltin<"__builtin_r600_implicitarg_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_r600_rat_store_typed :
  // 1st parameter: Data
  // 2nd parameter: Index
  // 3rd parameter: Constant RAT ID
  Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>,
  GCCBuiltin<"__builtin_r600_rat_store_typed">;

def int_r600_recipsqrt_ieee : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_recipsqrt_clamped : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_cube : Intrinsic<
  [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
>;

} // End TargetPrefix = "r600"

let TargetPrefix = "amdgcn" in {

//===----------------------------------------------------------------------===//
// ABI Special Intrinsics
//===----------------------------------------------------------------------===//

defm int_amdgcn_workitem_id : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                               <"__builtin_amdgcn_workgroup_id">;
// Address-space-2 (constant) i8* pointers to ABI-provided structures;
// all are pure reads of preloaded state.
def int_amdgcn_dispatch_ptr :
  GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_queue_ptr :
  GCCBuiltin<"__builtin_amdgcn_queue_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_kernarg_segment_ptr :
  GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_implicitarg_ptr :
  GCCBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_groupstaticsize :
  GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
  Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_dispatch_id :
  GCCBuiltin<"__builtin_amdgcn_dispatch_id">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_implicit_buffer_ptr :
  GCCBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
  [IntrNoMem, IntrSpeculatable]>;

// Set EXEC to the 64-bit value given.
// This is always moved to the beginning of the basic block.
def int_amdgcn_init_exec : Intrinsic<[],
  [llvm_i64_ty],      // 64-bit literal constant
  [IntrConvergent]>;

// Set EXEC according to a thread count packed in an SGPR input:
//    thread_count = (input >> bitoffset) & 0x7f;
// This is always moved to the beginning of the basic block.
def int_amdgcn_init_exec_from_input : Intrinsic<[],
  [llvm_i32_ty,       // 32-bit SGPR input
   llvm_i32_ty],      // bit offset of the thread count
  [IntrConvergent]>;

//===----------------------------------------------------------------------===//
// Instruction Intrinsics
//===----------------------------------------------------------------------===//

// The first parameter is s_sendmsg immediate (i16),
// the second one is copied to m0
def int_amdgcn_s_sendmsg : GCCBuiltin<"__builtin_amdgcn_s_sendmsg">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], []>;
def int_amdgcn_s_sendmsghalt : GCCBuiltin<"__builtin_amdgcn_s_sendmsghalt">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], []>;

def int_amdgcn_s_barrier : GCCBuiltin<"__builtin_amdgcn_s_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

def int_amdgcn_wave_barrier : GCCBuiltin<"__builtin_amdgcn_wave_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

// Argument is the s_waitcnt immediate encoding.
def int_amdgcn_s_waitcnt : GCCBuiltin<"__builtin_amdgcn_s_waitcnt">,
  Intrinsic<[], [llvm_i32_ty], []>;

def int_amdgcn_div_scale : Intrinsic<
  // 1st parameter: Numerator
  // 2nd parameter: Denominator
  // 3rd parameter: Constant to select between first and
  //                second. (0 = first, 1 = second).
  [llvm_anyfloat_ty, llvm_i1_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_div_fmas : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_div_fixup : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_trig_preop : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sin : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cos : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_log_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_fmul_legacy : GCCBuiltin<"__builtin_amdgcn_fmul_legacy">,
  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rcp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rcp_legacy : GCCBuiltin<"__builtin_amdgcn_rcp_legacy">,
  Intrinsic<[llvm_float_ty], [llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq_legacy : GCCBuiltin<"__builtin_amdgcn_rsq_legacy">,
  Intrinsic<
  [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_ldexp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_frexp_mant : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_frexp_exp : Intrinsic<
  [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable]
>;

// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
// and always uses rtz, so is not suitable for implementing the OpenCL
// fract function. It should be ok on VI.
def int_amdgcn_fract : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

// Pack two f32 values into v2f16 with round-toward-zero conversion.
def int_amdgcn_cvt_pkrtz : Intrinsic<
  [llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_class : Intrinsic<
  [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_fmed3 : GCCBuiltin<"__builtin_amdgcn_fmed3">,
  Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubeid : GCCBuiltin<"__builtin_amdgcn_cubeid">,
  Intrinsic<[llvm_float_ty],
  [llvm_float_ty, llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubema : GCCBuiltin<"__builtin_amdgcn_cubema">,
  Intrinsic<[llvm_float_ty],
  [llvm_float_ty, llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubesc : GCCBuiltin<"__builtin_amdgcn_cubesc">,
  Intrinsic<[llvm_float_ty],
  [llvm_float_ty, llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">,
  Intrinsic<[llvm_float_ty],
  [llvm_float_ty, llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

// v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
// should be used.
def int_amdgcn_sffbh :
  Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

// Fields should mirror atomicrmw
class AMDGPUAtomicIncIntrin : Intrinsic<[llvm_anyint_ty],
  [llvm_anyptr_ty,
  LLVMMatchType<0>,
  llvm_i32_ty, // ordering
  llvm_i32_ty, // scope
  llvm_i1_ty], // isVolatile
  [IntrArgMemOnly, NoCapture<0>]
>;

def int_amdgcn_atomic_inc : AMDGPUAtomicIncIntrin;
def int_amdgcn_atomic_dec : AMDGPUAtomicIncIntrin;

class AMDGPUImageLoad : Intrinsic <
  [llvm_anyfloat_ty], // vdata(VGPR)
  [llvm_anyint_ty,    // vaddr(VGPR)
   llvm_anyint_ty,    // rsrc(SGPR)
   llvm_i32_ty,       // dmask(imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty,        // slc(imm)
   llvm_i1_ty,        // lwe(imm)
   llvm_i1_ty],       // da(imm)
  [IntrReadMem]>;

def int_amdgcn_image_load : AMDGPUImageLoad;
def int_amdgcn_image_load_mip : AMDGPUImageLoad;
def int_amdgcn_image_getresinfo : AMDGPUImageLoad;

class AMDGPUImageStore : Intrinsic <
  [],
  [llvm_anyfloat_ty,  // vdata(VGPR)
   llvm_anyint_ty,    // vaddr(VGPR)
   llvm_anyint_ty,    // rsrc(SGPR)
   llvm_i32_ty,       // dmask(imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty,        // slc(imm)
   llvm_i1_ty,        // lwe(imm)
   llvm_i1_ty],       // da(imm)
  []>;

def int_amdgcn_image_store : AMDGPUImageStore;
def int_amdgcn_image_store_mip : AMDGPUImageStore;

// Image sample with sampler. The three overloaded types (vdata, vaddr,
// rsrc) select the concrete instruction variant.
class AMDGPUImageSample : Intrinsic <
    [llvm_anyfloat_ty], // vdata(VGPR)
    [llvm_anyfloat_ty,  // vaddr(VGPR)
     llvm_anyint_ty,    // rsrc(SGPR)
     llvm_v4i32_ty,     // sampler(SGPR)
     llvm_i32_ty,       // dmask(imm)
     llvm_i1_ty,        // unorm(imm)
     llvm_i1_ty,        // glc(imm)
     llvm_i1_ty,        // slc(imm)
     llvm_i1_ty,        // lwe(imm)
     llvm_i1_ty],       // da(imm)
     [IntrReadMem]>;

// Basic sample
def int_amdgcn_image_sample : AMDGPUImageSample;
def int_amdgcn_image_sample_cl : AMDGPUImageSample;
def int_amdgcn_image_sample_d : AMDGPUImageSample;
def int_amdgcn_image_sample_d_cl : AMDGPUImageSample;
def int_amdgcn_image_sample_l : AMDGPUImageSample;
def int_amdgcn_image_sample_b : AMDGPUImageSample;
def int_amdgcn_image_sample_b_cl : AMDGPUImageSample;
def int_amdgcn_image_sample_lz : AMDGPUImageSample;
def int_amdgcn_image_sample_cd : AMDGPUImageSample;
def int_amdgcn_image_sample_cd_cl : AMDGPUImageSample;

// Sample with comparison
def int_amdgcn_image_sample_c : AMDGPUImageSample;
def int_amdgcn_image_sample_c_cl : AMDGPUImageSample;
def int_amdgcn_image_sample_c_d : AMDGPUImageSample;
def int_amdgcn_image_sample_c_d_cl : AMDGPUImageSample;
def int_amdgcn_image_sample_c_l : AMDGPUImageSample;
def int_amdgcn_image_sample_c_b : AMDGPUImageSample;
def int_amdgcn_image_sample_c_b_cl : AMDGPUImageSample;
def int_amdgcn_image_sample_c_lz : AMDGPUImageSample;
def int_amdgcn_image_sample_c_cd : AMDGPUImageSample;
def int_amdgcn_image_sample_c_cd_cl : AMDGPUImageSample;

// Sample with offsets
def int_amdgcn_image_sample_o : AMDGPUImageSample;
def int_amdgcn_image_sample_cl_o : AMDGPUImageSample;
def int_amdgcn_image_sample_d_o : AMDGPUImageSample;
def int_amdgcn_image_sample_d_cl_o : AMDGPUImageSample;
def int_amdgcn_image_sample_l_o : AMDGPUImageSample;
def int_amdgcn_image_sample_b_o : AMDGPUImageSample;
def int_amdgcn_image_sample_b_cl_o : AMDGPUImageSample;
def int_amdgcn_image_sample_lz_o : AMDGPUImageSample;
def int_amdgcn_image_sample_cd_o : AMDGPUImageSample;
def int_amdgcn_image_sample_cd_cl_o : AMDGPUImageSample;

// Sample with comparison and offsets
def int_amdgcn_image_sample_c_o : AMDGPUImageSample;
def int_amdgcn_image_sample_c_cl_o : AMDGPUImageSample;
def int_amdgcn_image_sample_c_d_o : AMDGPUImageSample;
def int_amdgcn_image_sample_c_d_cl_o : AMDGPUImageSample;
def int_amdgcn_image_sample_c_l_o : AMDGPUImageSample;
def int_amdgcn_image_sample_c_b_o : AMDGPUImageSample;
def int_amdgcn_image_sample_c_b_cl_o : AMDGPUImageSample;
def int_amdgcn_image_sample_c_lz_o : AMDGPUImageSample;
def int_amdgcn_image_sample_c_cd_o : AMDGPUImageSample;
def int_amdgcn_image_sample_c_cd_cl_o : AMDGPUImageSample;

// Basic gather4
def int_amdgcn_image_gather4 : AMDGPUImageSample;
def int_amdgcn_image_gather4_cl : AMDGPUImageSample;
def int_amdgcn_image_gather4_l : AMDGPUImageSample;
def int_amdgcn_image_gather4_b : AMDGPUImageSample;
def int_amdgcn_image_gather4_b_cl : AMDGPUImageSample;
def int_amdgcn_image_gather4_lz : AMDGPUImageSample;

// Gather4 with comparison
def int_amdgcn_image_gather4_c : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_cl : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_l : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_b : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_b_cl : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_lz : AMDGPUImageSample;

// Gather4 with offsets
def int_amdgcn_image_gather4_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_cl_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_l_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_b_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_b_cl_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_lz_o : AMDGPUImageSample;

// Gather4 with comparison and offsets
def int_amdgcn_image_gather4_c_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_cl_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_l_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_b_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_b_cl_o : AMDGPUImageSample;
def int_amdgcn_image_gather4_c_lz_o : AMDGPUImageSample;

def int_amdgcn_image_getlod : AMDGPUImageSample;

class AMDGPUImageAtomic : Intrinsic <
  [llvm_i32_ty],
  [llvm_i32_ty,       // vdata(VGPR)
   llvm_anyint_ty,    // vaddr(VGPR)
   llvm_v8i32_ty,     // rsrc(SGPR)
   llvm_i1_ty,        // r128(imm)
   llvm_i1_ty,        // da(imm)
   llvm_i1_ty],       // slc(imm)
  []>;

def int_amdgcn_image_atomic_swap : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_add : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_sub : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_smin : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_umin : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_smax : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_umax : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_and : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_or : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_xor : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_inc : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_dec : AMDGPUImageAtomic;

// Compare-and-swap takes an extra cmp operand, so it cannot reuse
// AMDGPUImageAtomic.
def int_amdgcn_image_atomic_cmpswap : Intrinsic <
  [llvm_i32_ty],
  [llvm_i32_ty,       // src(VGPR)
   llvm_i32_ty,       // cmp(VGPR)
   llvm_anyint_ty,    // vaddr(VGPR)
   llvm_v8i32_ty,     // rsrc(SGPR)
   llvm_i1_ty,        // r128(imm)
   llvm_i1_ty,        // da(imm)
   llvm_i1_ty],       // slc(imm)
  []>;

class AMDGPUBufferLoad : Intrinsic <
  [llvm_anyfloat_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrReadMem]>;
def int_amdgcn_buffer_load_format : AMDGPUBufferLoad;
def int_amdgcn_buffer_load : AMDGPUBufferLoad;
|
2016-04-12 23:18:10 +02:00
|
|
|
class AMDGPUBufferStore : Intrinsic <
|
AMDGPU/SI: add llvm.amdgcn.buffer.load/store.format intrinsics
Summary:
They correspond to BUFFER_LOAD/STORE_FORMAT_XYZW and will be used by Mesa
to implement the GL_ARB_shader_image_load_store extension.
The intention is that for llvm.amdgcn.buffer.load.format, LLVM will decide
whether one of the _X/_XY/_XYZ opcodes can be used (similar to image sampling
and loads). However, this is not currently implemented.
For llvm.amdgcn.buffer.store, LLVM cannot decide to use one of the "smaller"
opcodes and therefore the intrinsic is overloaded. Currently, only the v4f32
is actually implemented since GLSL also only has a vec4 variant of the store
instructions, although it's conceivable that Mesa will want to be smarter
about this in the future.
BUFFER_LOAD_FORMAT_XYZW is already exposed via llvm.SI.vs.load.input, which
has a legacy name, pretends not to access memory, and does not capture the
full flexibility of the instruction.
Reviewers: arsenm, tstellarAMD, mareko
Subscribers: arsenm, llvm-commits
Differential Revision: http://reviews.llvm.org/D17277
llvm-svn: 263140
2016-03-10 19:43:50 +01:00
|
|
|
[],
|
2016-04-12 23:18:10 +02:00
|
|
|
[llvm_anyfloat_ty, // vdata(VGPR) -- can currently only select f32, v2f32, v4f32
|
AMDGPU/SI: add llvm.amdgcn.buffer.load/store.format intrinsics
Summary:
They correspond to BUFFER_LOAD/STORE_FORMAT_XYZW and will be used by Mesa
to implement the GL_ARB_shader_image_load_store extension.
The intention is that for llvm.amdgcn.buffer.load.format, LLVM will decide
whether one of the _X/_XY/_XYZ opcodes can be used (similar to image sampling
and loads). However, this is not currently implemented.
For llvm.amdgcn.buffer.store, LLVM cannot decide to use one of the "smaller"
opcodes and therefore the intrinsic is overloaded. Currently, only the v4f32
is actually implemented since GLSL also only has a vec4 variant of the store
instructions, although it's conceivable that Mesa will want to be smarter
about this in the future.
BUFFER_LOAD_FORMAT_XYZW is already exposed via llvm.SI.vs.load.input, which
has a legacy name, pretends not to access memory, and does not capture the
full flexibility of the instruction.
Reviewers: arsenm, tstellarAMD, mareko
Subscribers: arsenm, llvm-commits
Differential Revision: http://reviews.llvm.org/D17277
llvm-svn: 263140
2016-03-10 19:43:50 +01:00
|
|
|
llvm_v4i32_ty, // rsrc(SGPR)
|
|
|
|
llvm_i32_ty, // vindex(VGPR)
|
2016-03-18 17:24:20 +01:00
|
|
|
llvm_i32_ty, // offset(SGPR/VGPR/imm)
|
AMDGPU/SI: add llvm.amdgcn.buffer.load/store.format intrinsics
Summary:
They correspond to BUFFER_LOAD/STORE_FORMAT_XYZW and will be used by Mesa
to implement the GL_ARB_shader_image_load_store extension.
The intention is that for llvm.amdgcn.buffer.load.format, LLVM will decide
whether one of the _X/_XY/_XYZ opcodes can be used (similar to image sampling
and loads). However, this is not currently implemented.
For llvm.amdgcn.buffer.store, LLVM cannot decide to use one of the "smaller"
opcodes and therefore the intrinsic is overloaded. Currently, only the v4f32
is actually implemented since GLSL also only has a vec4 variant of the store
instructions, although it's conceivable that Mesa will want to be smarter
about this in the future.
BUFFER_LOAD_FORMAT_XYZW is already exposed via llvm.SI.vs.load.input, which
has a legacy name, pretends not to access memory, and does not capture the
full flexibility of the instruction.
Reviewers: arsenm, tstellarAMD, mareko
Subscribers: arsenm, llvm-commits
Differential Revision: http://reviews.llvm.org/D17277
llvm-svn: 263140
2016-03-10 19:43:50 +01:00
|
|
|
llvm_i1_ty, // glc(imm)
|
|
|
|
llvm_i1_ty], // slc(imm)
|
2016-04-19 23:58:33 +02:00
|
|
|
[IntrWriteMem]>;
|
2016-04-12 23:18:10 +02:00
|
|
|
// Buffer store intrinsics; both share the AMDGPUBufferStore signature
// defined above (vdata, rsrc, vindex, offset, glc, slc; IntrWriteMem).
def int_amdgcn_buffer_store_format : AMDGPUBufferStore;
def int_amdgcn_buffer_store : AMDGPUBufferStore;
|
2016-03-04 11:39:50 +01:00
|
|
|
|
2016-03-18 17:24:31 +01:00
|
|
|
// Common signature for the buffer atomic intrinsics below. Returns an i32.
// The empty attribute list means these are conservatively modeled as having
// arbitrary side effects (they are never speculated or eliminated).
class AMDGPUBufferAtomic : Intrinsic <
  [llvm_i32_ty],
  [llvm_i32_ty,   // vdata(VGPR)
   llvm_v4i32_ty, // rsrc(SGPR)
   llvm_i32_ty,   // vindex(VGPR)
   llvm_i32_ty,   // offset(SGPR/VGPR/imm)
   llvm_i1_ty],   // slc(imm)
  []>;
|
|
|
|
// Buffer atomic read-modify-write intrinsics, all using the
// AMDGPUBufferAtomic signature above.
def int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_sub : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_and : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_or : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_xor : AMDGPUBufferAtomic;

// Compare-and-swap takes an extra compare operand, so it cannot reuse
// AMDGPUBufferAtomic and gets its own signature.
def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
  [llvm_i32_ty],
  [llvm_i32_ty,   // src(VGPR)
   llvm_i32_ty,   // cmp(VGPR)
   llvm_v4i32_ty, // rsrc(SGPR)
   llvm_i32_ty,   // vindex(VGPR)
   llvm_i32_ty,   // offset(SGPR/VGPR/imm)
   llvm_i1_ty],   // slc(imm)
  []>;
|
|
|
|
|
2017-01-17 08:26:53 +01:00
|
|
|
// Uses that do not set the done bit should set IntrWriteMem on the
// call site.
def int_amdgcn_exp : Intrinsic <[], [
  llvm_i32_ty,       // tgt
  llvm_i32_ty,       // en
  llvm_any_ty,       // src0 (f32 or i32)
  LLVMMatchType<0>,  // src1
  LLVMMatchType<0>,  // src2
  LLVMMatchType<0>,  // src3
  llvm_i1_ty,        // done
  llvm_i1_ty         // vm
  ],
  []
>;

// exp with compr bit set. Takes two packed vector sources (v2f16 or v2i16)
// instead of the four scalar sources of int_amdgcn_exp.
def int_amdgcn_exp_compr : Intrinsic <[], [
  llvm_i32_ty,       // tgt
  llvm_i32_ty,       // en
  llvm_anyvector_ty, // src0 (v2f16 or v2i16)
  LLVMMatchType<0>,  // src1
  llvm_i1_ty,        // done
  llvm_i1_ty],       // vm
  []
>;
|
|
|
|
|
2015-09-24 21:52:21 +02:00
|
|
|
// Cache-control, timing and performance-counter intrinsics. All use an
// empty attribute list so they are kept ordered relative to surrounding
// memory operations and never removed as dead.
def int_amdgcn_buffer_wbinvl1_sc :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
  Intrinsic<[], [], []>;

def int_amdgcn_buffer_wbinvl1 :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_dcache_inv :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv">,
  Intrinsic<[], [], []>;

// Returns a 64-bit counter value.
def int_amdgcn_s_memtime :
  GCCBuiltin<"__builtin_amdgcn_s_memtime">,
  Intrinsic<[llvm_i64_ty], [], []>;

def int_amdgcn_s_sleep :
  GCCBuiltin<"__builtin_amdgcn_s_sleep">,
  Intrinsic<[], [llvm_i32_ty], []> {
}

def int_amdgcn_s_incperflevel :
  GCCBuiltin<"__builtin_amdgcn_s_incperflevel">,
  Intrinsic<[], [llvm_i32_ty], []> {
}

def int_amdgcn_s_decperflevel :
  GCCBuiltin<"__builtin_amdgcn_s_decperflevel">,
  Intrinsic<[], [llvm_i32_ty], []> {
}
|
|
|
|
|
2016-03-10 17:47:15 +01:00
|
|
|
// Reads a hardware register selected by the i32 operand. IntrReadMem (not
// IntrNoMem) because the value read may change between calls.
def int_amdgcn_s_getreg :
  GCCBuiltin<"__builtin_amdgcn_s_getreg">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
  [IntrReadMem, IntrSpeculatable]
>;

// int_amdgcn_s_getpc is provided to allow a specific style of position
// independent code to determine the high part of its address when it is
// known (through convention) that the code and any data of interest does
// not cross a 4Gb address boundary. Use for any other purpose may not
// produce the desired results as optimizations may cause code movement,
// especially as we explicitly use IntrNoMem to allow optimizations.
def int_amdgcn_s_getpc :
  GCCBuiltin<"__builtin_amdgcn_s_getpc">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;
|
2016-06-21 22:46:20 +02:00
|
|
|
|
2016-12-07 00:52:13 +01:00
|
|
|
// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
// param values: 0 = P10, 1 = P20, 2 = P0
def int_amdgcn_interp_mov :
  GCCBuiltin<"__builtin_amdgcn_interp_mov">,
  Intrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable]>;

// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
// This intrinsic reads from lds, but the memory values are constant,
// so it behaves like IntrNoMem.
def int_amdgcn_interp_p1 :
  GCCBuiltin<"__builtin_amdgcn_interp_p1">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable]>;

// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
// See int_amdgcn_interp_p1 for why this is IntrNoMem.
def int_amdgcn_interp_p2 :
  GCCBuiltin<"__builtin_amdgcn_interp_p2">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable]>;
|
2015-12-15 18:02:52 +01:00
|
|
|
|
2016-04-22 06:04:08 +02:00
|
|
|
// Pixel shaders only: whether the current pixel is live (i.e. not a helper
// invocation for derivative computation).
def int_amdgcn_ps_live : Intrinsic <
  [llvm_i1_ty],
  [],
  [IntrNoMem]>;

// mbcnt intrinsics: two i32 operands, i32 result. NOTE(review): operand
// semantics (mask, accumuland) are not visible here -- see ISA docs.
def int_amdgcn_mbcnt_lo :
  GCCBuiltin<"__builtin_amdgcn_mbcnt_lo">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

def int_amdgcn_mbcnt_hi :
  GCCBuiltin<"__builtin_amdgcn_mbcnt_hi">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
|
2016-02-13 01:29:57 +01:00
|
|
|
|
2016-06-22 23:33:49 +02:00
|
|
|
// llvm.amdgcn.ds.swizzle src offset
// IntrConvergent: result depends on which lanes are active.
def int_amdgcn_ds_swizzle :
  GCCBuiltin<"__builtin_amdgcn_ds_swizzle">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// Bitfield extract, overloaded on the integer type (llvm_anyint_ty).
// NOTE(review): the two i32 operands are presumably offset and width --
// confirm against the BFE instruction definitions.
def int_amdgcn_ubfe : Intrinsic<[llvm_anyint_ty],
  [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

// Signed variant; same signature as int_amdgcn_ubfe.
def int_amdgcn_sbfe : Intrinsic<[llvm_anyint_ty],
  [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;
|
|
|
|
|
2016-07-12 20:02:14 +02:00
|
|
|
def int_amdgcn_lerp :
  GCCBuiltin<"__builtin_amdgcn_lerp">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

// SAD (sum of absolute differences) style intrinsics. All are pure
// (IntrNoMem, IntrSpeculatable); they differ only in operand/result widths.
def int_amdgcn_sad_u8 :
  GCCBuiltin<"__builtin_amdgcn_sad_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_msad_u8 :
  GCCBuiltin<"__builtin_amdgcn_msad_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_hi_u8 :
  GCCBuiltin<"__builtin_amdgcn_sad_hi_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_u16 :
  GCCBuiltin<"__builtin_amdgcn_sad_u16">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_qsad_pk_u16_u8 :
  GCCBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mqsad_pk_u16_u8 :
  GCCBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mqsad_u32_u8 :
  GCCBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
  Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

// Packs a float into a byte of the i32 accumulator.
def int_amdgcn_cvt_pk_u8_f32 :
  GCCBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
  Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;
|
2016-08-11 22:34:48 +02:00
|
|
|
|
2016-07-28 18:42:13 +02:00
|
|
|
// Cross-lane compares returning a 64-bit lane mask, overloaded on the
// compared type. NOTE(review): the trailing i32 presumably encodes the
// comparison predicate -- confirm against the codegen lowering.
def int_amdgcn_icmp :
  Intrinsic<[llvm_i64_ty], [llvm_anyint_ty, LLVMMatchType<0>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent]>;

def int_amdgcn_fcmp :
  Intrinsic<[llvm_i64_ty], [llvm_anyfloat_ty, LLVMMatchType<0>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent]>;

def int_amdgcn_readfirstlane :
  GCCBuiltin<"__builtin_amdgcn_readfirstlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// The lane argument must be uniform across the currently active threads of the
// current wave. Otherwise, the result is undefined.
def int_amdgcn_readlane :
  GCCBuiltin<"__builtin_amdgcn_readlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
|
|
|
|
|
2016-02-13 01:29:57 +01:00
|
|
|
//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//

def int_amdgcn_s_dcache_inv_vol :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
  Intrinsic<[], [], []>;

def int_amdgcn_buffer_wbinvl1_vol :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
  Intrinsic<[], [], []>;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
// VI Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.mov.dpp.i32 <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
def int_amdgcn_mov_dpp :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
             llvm_i1_ty], [IntrNoMem, IntrConvergent]>;

def int_amdgcn_s_dcache_wb :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_dcache_wb_vol :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_memrealtime :
  GCCBuiltin<"__builtin_amdgcn_s_memrealtime">,
  Intrinsic<[llvm_i64_ty], [], []>;
|
2016-03-01 18:51:23 +01:00
|
|
|
|
|
|
|
// llvm.amdgcn.ds.permute <index> <src>
def int_amdgcn_ds_permute :
  GCCBuiltin<"__builtin_amdgcn_ds_permute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// llvm.amdgcn.ds.bpermute <index> <src>
def int_amdgcn_ds_bpermute :
  GCCBuiltin<"__builtin_amdgcn_ds_bpermute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
|
|
|
|
|
2017-03-17 21:41:45 +01:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
// Special Intrinsics for backend internal use only. No frontend
// should emit calls to these.
//===----------------------------------------------------------------------===//

// Structurized control flow markers. IntrConvergent keeps them from being
// moved across points where the set of active lanes could change.
def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_i64_ty],
  [llvm_i1_ty], [IntrConvergent]
>;

def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_i64_ty],
  [llvm_i64_ty], [IntrConvergent]
>;

def int_amdgcn_break : Intrinsic<[llvm_i64_ty],
  [llvm_i64_ty], [IntrNoMem, IntrConvergent]
>;

def int_amdgcn_if_break : Intrinsic<[llvm_i64_ty],
  [llvm_i1_ty, llvm_i64_ty], [IntrNoMem, IntrConvergent]
>;

def int_amdgcn_else_break : Intrinsic<[llvm_i64_ty],
  [llvm_i64_ty, llvm_i64_ty], [IntrNoMem, IntrConvergent]
>;

def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
  [llvm_i64_ty], [IntrConvergent]
>;

def int_amdgcn_end_cf : Intrinsic<[], [llvm_i64_ty], [IntrConvergent]>;

// Represent unreachable in a divergent region.
def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>;
|
|
|
|
|
2017-03-17 21:41:45 +01:00
|
|
|
// Emit 2.5 ulp, no denormal division. Should only be inserted by
// pass based on !fpmath metadata.
def int_amdgcn_fdiv_fast : Intrinsic<
  [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;
|
2015-09-24 21:52:21 +02:00
|
|
|
}
|