//===- IntrinsicsAMDGPU.td - Defines AMDGPU intrinsics -----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the R600-specific intrinsics.
//
//===----------------------------------------------------------------------===//

// An intrinsic that reads a preloaded hardware register: no arguments, no
// memory access, returns a single i32. `name` is the matching GCC-style
// builtin exposed by clang.
class AMDGPUReadPreloadRegisterIntrinsic<string name>
  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
    GCCBuiltin<name>;
let TargetPrefix = "r600" in {

// Expands to three read-preload-register intrinsics, one per dimension
// (_x, _y, _z), each bound to the correspondingly suffixed builtin.
multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz<string prefix> {
  def _x : AMDGPUReadPreloadRegisterIntrinsic<!strconcat(prefix, "_x")>;
  def _y : AMDGPUReadPreloadRegisterIntrinsic<!strconcat(prefix, "_y")>;
  def _z : AMDGPUReadPreloadRegisterIntrinsic<!strconcat(prefix, "_z")>;
}

defm int_r600_read_global_size : AMDGPUReadPreloadRegisterIntrinsic_xyz <
                                 "__builtin_r600_read_global_size">;
defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz <
                                "__builtin_r600_read_local_size">;
defm int_r600_read_ngroups : AMDGPUReadPreloadRegisterIntrinsic_xyz <
                             "__builtin_r600_read_ngroups">;
defm int_r600_read_tgid : AMDGPUReadPreloadRegisterIntrinsic_xyz <
                          "__builtin_r600_read_tgid">;
defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz <
                           "__builtin_r600_read_tidig">;

def int_r600_rat_store_typed :
  // 1st parameter: Data
  // 2nd parameter: Index
  // 3rd parameter: Constant RAT ID
  Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>,
  GCCBuiltin<"__builtin_r600_rat_store_typed">;

def int_r600_rsq : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;

def int_r600_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
  "__builtin_r600_read_workdim"
>;

} // End TargetPrefix = "r600"
// FIXME: These should be renamed/moved to r600
let TargetPrefix = "AMDGPU" in {

def int_AMDGPU_ldexp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]
>;

} // End TargetPrefix = "AMDGPU"
let TargetPrefix = "amdgcn" in {

defm int_amdgcn_workitem_id : AMDGPUReadPreloadRegisterIntrinsic_xyz <
  "__builtin_amdgcn_workitem_id">;
defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz <
  "__builtin_amdgcn_workgroup_id">;

def int_amdgcn_s_barrier : GCCBuiltin<"__builtin_amdgcn_s_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

def int_amdgcn_s_waitcnt : Intrinsic<[], [llvm_i32_ty], []>;

def int_amdgcn_div_scale : Intrinsic<
  // 1st parameter: Numerator
  // 2nd parameter: Denominator
  // 3rd parameter: Constant to select between first and
  //                second. (0 = first, 1 = second).
  [llvm_anyfloat_ty, llvm_i1_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem]
>;

def int_amdgcn_div_fmas : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem]
>;

def int_amdgcn_div_fixup : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem]
>;

def int_amdgcn_trig_preop : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]
>;

def int_amdgcn_sin : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;

def int_amdgcn_cos : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;

def int_amdgcn_log_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;

def int_amdgcn_rcp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;

def int_amdgcn_rsq : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;

def int_amdgcn_rsq_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;

def int_amdgcn_ldexp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]
>;

def int_amdgcn_frexp_mant : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;

def int_amdgcn_frexp_exp : Intrinsic<
  [llvm_i32_ty], [llvm_anyfloat_ty], [IntrNoMem]
>;

// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
// and always uses rtz, so is not suitable for implementing the OpenCL
// fract function. It should be ok on VI.
def int_amdgcn_fract : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
>;

def int_amdgcn_class : Intrinsic<
  [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]
>;

def int_amdgcn_cubeid : GCCBuiltin<"__builtin_amdgcn_cubeid">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
>;

def int_amdgcn_cubema : GCCBuiltin<"__builtin_amdgcn_cubema">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
>;

def int_amdgcn_cubesc : GCCBuiltin<"__builtin_amdgcn_cubesc">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
>;

def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
>;

// TODO: Do we want an ordering for these?
def int_amdgcn_atomic_inc : Intrinsic<[llvm_anyint_ty],
  [llvm_anyptr_ty, LLVMMatchType<0>],
  [IntrArgMemOnly, NoCapture<0>]
>;

def int_amdgcn_atomic_dec : Intrinsic<[llvm_anyint_ty],
  [llvm_anyptr_ty, LLVMMatchType<0>],
  [IntrArgMemOnly, NoCapture<0>]
>;

class AMDGPUImageLoad : Intrinsic <
  [llvm_v4f32_ty],    // vdata(VGPR)
  [llvm_anyint_ty,    // vaddr(VGPR)
   llvm_v8i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // dmask(imm)
   llvm_i1_ty,        // r128(imm)
   llvm_i1_ty,        // da(imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrReadMem]>;

def int_amdgcn_image_load : AMDGPUImageLoad;
def int_amdgcn_image_load_mip : AMDGPUImageLoad;

class AMDGPUImageStore : Intrinsic <
  [],
  [llvm_v4f32_ty,     // vdata(VGPR)
   llvm_anyint_ty,    // vaddr(VGPR)
   llvm_v8i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // dmask(imm)
   llvm_i1_ty,        // r128(imm)
   llvm_i1_ty,        // da(imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  []>;

def int_amdgcn_image_store : AMDGPUImageStore;
def int_amdgcn_image_store_mip : AMDGPUImageStore;

class AMDGPUImageAtomic : Intrinsic <
  [llvm_i32_ty],
  [llvm_i32_ty,       // vdata(VGPR)
   llvm_anyint_ty,    // vaddr(VGPR)
   llvm_v8i32_ty,     // rsrc(SGPR)
   llvm_i1_ty,        // r128(imm)
   llvm_i1_ty,        // da(imm)
   llvm_i1_ty],       // slc(imm)
  []>;

def int_amdgcn_image_atomic_swap : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_add : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_sub : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_smin : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_umin : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_smax : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_umax : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_and : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_or : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_xor : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_inc : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_dec : AMDGPUImageAtomic;
def int_amdgcn_image_atomic_cmpswap : Intrinsic <
  [llvm_i32_ty],
  [llvm_i32_ty,       // src(VGPR)
   llvm_i32_ty,       // cmp(VGPR)
   llvm_anyint_ty,    // vaddr(VGPR)
   llvm_v8i32_ty,     // rsrc(SGPR)
   llvm_i1_ty,        // r128(imm)
   llvm_i1_ty,        // da(imm)
   llvm_i1_ty],       // slc(imm)
  []>;

class AMDGPUBufferLoad : Intrinsic <
  [llvm_anyfloat_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrReadMem]>;
def int_amdgcn_buffer_load_format : AMDGPUBufferLoad;
def int_amdgcn_buffer_load : AMDGPUBufferLoad;

class AMDGPUBufferStore : Intrinsic <
  [],
  [llvm_anyfloat_ty,  // vdata(VGPR) -- can currently only select f32, v2f32, v4f32
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrWriteMem]>;
def int_amdgcn_buffer_store_format : AMDGPUBufferStore;
def int_amdgcn_buffer_store : AMDGPUBufferStore;

class AMDGPUBufferAtomic : Intrinsic <
  [llvm_i32_ty],
  [llvm_i32_ty,       // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty],       // slc(imm)
  []>;
def int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_sub : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_and : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_or : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_xor : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
  [llvm_i32_ty],
  [llvm_i32_ty,       // src(VGPR)
   llvm_i32_ty,       // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty],       // slc(imm)
  []>;

def int_amdgcn_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
  "__builtin_amdgcn_read_workdim">;

def int_amdgcn_buffer_wbinvl1_sc :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
  Intrinsic<[], [], []>;

def int_amdgcn_buffer_wbinvl1 :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_dcache_inv :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_memtime :
  GCCBuiltin<"__builtin_amdgcn_s_memtime">,
  Intrinsic<[llvm_i64_ty], [], []>;

def int_amdgcn_s_sleep :
  GCCBuiltin<"__builtin_amdgcn_s_sleep">,
  Intrinsic<[], [llvm_i32_ty], []>;

def int_amdgcn_s_getreg :
  GCCBuiltin<"__builtin_amdgcn_s_getreg">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem]>;

def int_amdgcn_groupstaticsize :
  GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
  Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;

def int_amdgcn_dispatch_ptr :
  GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;

def int_amdgcn_queue_ptr :
  GCCBuiltin<"__builtin_amdgcn_queue_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;

def int_amdgcn_kernarg_segment_ptr :
  GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;

// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
def int_amdgcn_interp_p1 :
  GCCBuiltin<"__builtin_amdgcn_interp_p1">,
  Intrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem]>;  // This intrinsic reads from lds, but the memory
                           // values are constant, so it behaves like IntrNoMem.

// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
def int_amdgcn_interp_p2 :
  GCCBuiltin<"__builtin_amdgcn_interp_p2">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem]>;  // See int_amdgcn_interp_p1 for why this is
                           // IntrNoMem.

// Pixel shaders only: whether the current pixel is live (i.e. not a helper
// invocation for derivative computation).
def int_amdgcn_ps_live : Intrinsic <
  [llvm_i1_ty],
  [],
  [IntrNoMem]>;

def int_amdgcn_mbcnt_lo :
  GCCBuiltin<"__builtin_amdgcn_mbcnt_lo">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

def int_amdgcn_mbcnt_hi :
  GCCBuiltin<"__builtin_amdgcn_mbcnt_hi">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//

def int_amdgcn_s_dcache_inv_vol :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
  Intrinsic<[], [], []>;

def int_amdgcn_buffer_wbinvl1_vol :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
  Intrinsic<[], [], []>;

//===----------------------------------------------------------------------===//
// VI Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.mov.dpp.i32 <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
def int_amdgcn_mov_dpp :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
             llvm_i1_ty], [IntrNoMem, IntrConvergent]>;

def int_amdgcn_s_dcache_wb :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_dcache_wb_vol :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_memrealtime :
  GCCBuiltin<"__builtin_amdgcn_s_memrealtime">,
  Intrinsic<[llvm_i64_ty], [], []>;

// llvm.amdgcn.ds.permute <index> <src>
def int_amdgcn_ds_permute :
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// llvm.amdgcn.ds.bpermute <index> <src>
def int_amdgcn_ds_bpermute :
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

} // End TargetPrefix = "amdgcn"