[AMDGPU] Rename llvm.amdgcn.msaa.load to llvm.amdgcn.msaa.load.x
While the underlying instruction is called image_msaa_load, the resource must be x-component only. Rename the intrinsic for clarity.

Reviewed By: foad

Differential Revision: https://reviews.llvm.org/D97829
parent df37872b97
commit c2913dde5c
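For context, a minimal IR sketch (not part of the commit) of how a caller uses the intrinsic under its new name. The wrapper function @sample_msaa_load_x and its argument values are illustrative; the intrinsic name and signature match the declarations in the updated test below.

; Sketch: the renamed intrinsic as a caller would now use it. The trailing two
; i32 operands are the texture-fail control and cache-policy words (0 = default),
; matching the load_2dmsaa case in the test below.
declare <4 x float> @llvm.amdgcn.image.msaa.load.x.2dmsaa.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32)

define amdgpu_ps <4 x float> @sample_msaa_load_x(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %fragid) {
main_body:
  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.2dmsaa.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
  ret <4 x float> %v
}

The tests below exercise the same rename across every dimension variant and for the tfe/lwe and glc/slc encodings.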
@@ -798,8 +798,8 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {
               "STORE_MIP", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
               [IntrWriteMem, IntrWillReturn], [SDNPMemOperand], 1>;
 
-  defm int_amdgcn_image_msaa_load
-    : AMDGPUImageDimIntrinsicsAll<"MSAA_LOAD", [llvm_any_ty], [], [IntrReadMem],
+  defm int_amdgcn_image_msaa_load_x
+    : AMDGPUImageDimIntrinsicsAll<"MSAA_LOAD_X", [llvm_any_ty], [], [IntrReadMem],
                                   [SDNPMemOperand]>,
       AMDGPUImageDMaskIntrinsic;
 
@@ -1035,7 +1035,7 @@ defm IMAGE_SAMPLE_C_CD_CL_O_G16 : MIMG_Sampler <mimgopc<0xef>, AMDGPUSample_c_cd
 //def IMAGE_SAMPLER : MIMG_NoPattern_ <"image_sampler", 0x0000007f>;
 
 let SubtargetPredicate = HasGFX10_BEncoding in
-defm IMAGE_MSAA_LOAD : MIMG_NoSampler <mimgopc<0x80>, "image_msaa_load", 1, 0, 0, 1>;
+defm IMAGE_MSAA_LOAD_X : MIMG_NoSampler <mimgopc<0x80>, "image_msaa_load", 1, 0, 0, 1>;
 
 defm IMAGE_BVH_INTERSECT_RAY : MIMG_IntersectRay<mimgopc<0xe6>, "image_bvh_intersect_ray", 11, 0>;
 defm IMAGE_BVH_INTERSECT_RAY_a16 : MIMG_IntersectRay<mimgopc<0xe6>, "image_bvh_intersect_ray", 8, 1>;
@@ -4,7 +4,7 @@
 ; GFX10: image_msaa_load v[0:3], v0, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm ;
 define amdgpu_ps <4 x float> @load_1d(<8 x i32> inreg %rsrc, i32 %s) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
   ret <4 x float> %v
 }
 
@@ -12,7 +12,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:4], v{{[0-9]+}}, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm tfe ;
 define amdgpu_ps <4 x float> @load_1d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -23,7 +23,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:4], v{{[0-9]+}}, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm lwe ;
 define amdgpu_ps <4 x float> @load_1d_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
 main_body:
-  %v = call {<4 x float>, i32} @llvm.amdgcn.image.msaa.load.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 2, i32 0)
+  %v = call {<4 x float>, i32} @llvm.amdgcn.image.msaa.load.x.1d.v4f32i32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 2, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -34,7 +34,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v[0:1], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm ;
 define amdgpu_ps <4 x float> @load_2d(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.2d.v4f32.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.2d.v4f32.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
   ret <4 x float> %v
 }
 
@@ -42,7 +42,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D unorm tfe ;
 define amdgpu_ps <4 x float> @load_2d_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2d.v4f32i32.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.2d.v4f32i32.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -53,7 +53,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v[0:2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm ;
 define amdgpu_ps <4 x float> @load_3d(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %r) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.3d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 0, i32 0)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.3d.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 0, i32 0)
   ret <4 x float> %v
 }
 
@@ -61,7 +61,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm tfe lwe ;
 define amdgpu_ps <4 x float> @load_3d_tfe_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %r) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.3d.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 3, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.3d.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %r, <8 x i32> %rsrc, i32 3, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -72,7 +72,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v[0:1], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D_ARRAY unorm ;
 define amdgpu_ps <4 x float> @load_1darray(<8 x i32> inreg %rsrc, i32 %s, i32 %slice) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.1darray.v4f32.i32(i32 15, i32 %s, i32 %slice, <8 x i32> %rsrc, i32 0, i32 0)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.1darray.v4f32.i32(i32 15, i32 %s, i32 %slice, <8 x i32> %rsrc, i32 0, i32 0)
   ret <4 x float> %v
 }
 
@@ -80,7 +80,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D_ARRAY unorm tfe ;
 define amdgpu_ps <4 x float> @load_1darray_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %slice) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.1darray.v4f32i32.i32(i32 15, i32 %s, i32 %slice, <8 x i32> %rsrc, i32 1, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.1darray.v4f32i32.i32(i32 15, i32 %s, i32 %slice, <8 x i32> %rsrc, i32 1, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -91,7 +91,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v[0:2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D_ARRAY unorm ;
 define amdgpu_ps <4 x float> @load_2darray(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.2darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 0, i32 0)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.2darray.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 0, i32 0)
   ret <4 x float> %v
 }
 
@@ -99,7 +99,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D_ARRAY unorm lwe ;
 define amdgpu_ps <4 x float> @load_2darray_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %slice) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2darray.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 2, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.2darray.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, <8 x i32> %rsrc, i32 2, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -110,7 +110,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v[0:2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D_MSAA unorm ;
 define amdgpu_ps <4 x float> @load_2dmsaa(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %fragid) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.2dmsaa.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
   ret <4 x float> %v
 }
 
@@ -118,7 +118,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D_MSAA unorm tfe lwe ;
 define amdgpu_ps <4 x float> @load_2dmsaa_both(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %fragid) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 3, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.2dmsaa.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %fragid, <8 x i32> %rsrc, i32 3, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -129,7 +129,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v[0:3], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm ;
 define amdgpu_ps <4 x float> @load_2darraymsaa(<8 x i32> inreg %rsrc, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.2darraymsaa.v4f32.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
   ret <4 x float> %v
 }
 
@@ -137,7 +137,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:4], v[{{[0-9]+:[0-9]+}}], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_2D_MSAA_ARRAY unorm tfe ;
 define amdgpu_ps <4 x float> @load_2darraymsaa_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s, i32 %t, i32 %slice, i32 %fragid) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.2darraymsaa.v4f32i32.i32(i32 15, i32 %s, i32 %t, i32 %slice, i32 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -148,7 +148,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v{{[0-9]+}}, s[0:7] dmask:0x7 dim:SQ_RSRC_IMG_1D unorm tfe ;
 define amdgpu_ps <4 x float> @load_1d_tfe_V4_dmask3(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.1d.v4f32i32.i32(i32 7, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.1d.v4f32i32.i32(i32 7, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -159,7 +159,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:2], v{{[0-9]+}}, s[0:7] dmask:0x6 dim:SQ_RSRC_IMG_1D unorm tfe ;
 define amdgpu_ps <4 x float> @load_1d_tfe_V4_dmask2(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.1d.v4f32i32.i32(i32 6, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.1d.v4f32i32.i32(i32 6, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -170,7 +170,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:1], v{{[0-9]+}}, s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_1D unorm tfe ;
 define amdgpu_ps <4 x float> @load_1d_tfe_V4_dmask1(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
 main_body:
-  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.1d.v4f32i32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v = call {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.1d.v4f32i32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
   %v.vec = extractvalue {<4 x float>, i32} %v, 0
   %v.err = extractvalue {<4 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -181,7 +181,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:1], v{{[0-9]+}}, s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_1D unorm tfe ;
 define amdgpu_ps <2 x float> @load_1d_tfe_V2_dmask1(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i32 %s) {
 main_body:
-  %v = call {<2 x float>,i32} @llvm.amdgcn.image.msaa.load.1d.v2f32i32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %v = call {<2 x float>,i32} @llvm.amdgcn.image.msaa.load.x.1d.v2f32i32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 1, i32 0)
   %v.vec = extractvalue {<2 x float>, i32} %v, 0
   %v.err = extractvalue {<2 x float>, i32} %v, 1
   store i32 %v.err, i32 addrspace(1)* %out, align 4
@@ -192,7 +192,7 @@ main_body:
 ; GFX10: image_msaa_load v0, v0, s[0:7] dmask:0x8 dim:SQ_RSRC_IMG_1D unorm ;
 define amdgpu_ps float @load_1d_V1(<8 x i32> inreg %rsrc, i32 %s) {
 main_body:
-  %v = call float @llvm.amdgcn.image.msaa.load.1d.f32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %v = call float @llvm.amdgcn.image.msaa.load.x.1d.f32.i32(i32 8, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
   ret float %v
 }
 
@@ -200,7 +200,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:1], v0, s[0:7] dmask:0x9 dim:SQ_RSRC_IMG_1D unorm ;
 define amdgpu_ps <2 x float> @load_1d_V2(<8 x i32> inreg %rsrc, i32 %s) {
 main_body:
-  %v = call <2 x float> @llvm.amdgcn.image.msaa.load.1d.v2f32.i32(i32 9, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %v = call <2 x float> @llvm.amdgcn.image.msaa.load.x.1d.v2f32.i32(i32 9, i32 %s, <8 x i32> %rsrc, i32 0, i32 0)
   ret <2 x float> %v
 }
 
@@ -208,7 +208,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v0, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm glc ;
 define amdgpu_ps <4 x float> @load_1d_glc(<8 x i32> inreg %rsrc, i32 %s) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 1)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 1)
   ret <4 x float> %v
 }
 
@@ -216,7 +216,7 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v0, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm slc ;
 define amdgpu_ps <4 x float> @load_1d_slc(<8 x i32> inreg %rsrc, i32 %s) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 2)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 2)
   ret <4 x float> %v
 }
 
@@ -224,30 +224,30 @@ main_body:
 ; GFX10: image_msaa_load v[0:3], v0, s[0:7] dmask:0xf dim:SQ_RSRC_IMG_1D unorm glc slc ;
 define amdgpu_ps <4 x float> @load_1d_glc_slc(<8 x i32> inreg %rsrc, i32 %s) {
 main_body:
-  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 3)
+  %v = call <4 x float> @llvm.amdgcn.image.msaa.load.x.1d.v4f32.i32(i32 15, i32 %s, <8 x i32> %rsrc, i32 0, i32 3)
   ret <4 x float> %v
 }
 
-declare <4 x float> @llvm.amdgcn.image.msaa.load.1d.v4f32.i32(i32, i32, <8 x i32>, i32, i32) #1
-declare {float,i32} @llvm.amdgcn.image.msaa.load.1d.f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
-declare {<2 x float>,i32} @llvm.amdgcn.image.msaa.load.1d.v2f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
-declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.1d.v4f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
-declare <4 x float> @llvm.amdgcn.image.msaa.load.2d.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
-declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2d.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
-declare <4 x float> @llvm.amdgcn.image.msaa.load.3d.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
-declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.3d.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
-declare <4 x float> @llvm.amdgcn.image.msaa.load.1darray.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
-declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.1darray.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
-declare <4 x float> @llvm.amdgcn.image.msaa.load.2darray.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
-declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2darray.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
-declare <4 x float> @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
-declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
-declare <4 x float> @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
-declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.2darraymsaa.v4f32i32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.msaa.load.x.1d.v4f32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare {float,i32} @llvm.amdgcn.image.msaa.load.x.1d.f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare {<2 x float>,i32} @llvm.amdgcn.image.msaa.load.x.1d.v2f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.1d.v4f32i32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.msaa.load.x.2d.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.2d.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.msaa.load.x.3d.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.3d.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.msaa.load.x.1darray.v4f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.1darray.v4f32i32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.msaa.load.x.2darray.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.2darray.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.msaa.load.x.2dmsaa.v4f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.2dmsaa.v4f32i32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare <4 x float> @llvm.amdgcn.image.msaa.load.x.2darraymsaa.v4f32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
+declare {<4 x float>,i32} @llvm.amdgcn.image.msaa.load.x.2darraymsaa.v4f32i32.i32(i32, i32, i32, i32, i32, <8 x i32>, i32, i32) #1
 
-declare float @llvm.amdgcn.image.msaa.load.1d.f32.i32(i32, i32, <8 x i32>, i32, i32) #1
-declare float @llvm.amdgcn.image.msaa.load.2d.f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
-declare <2 x float> @llvm.amdgcn.image.msaa.load.1d.v2f32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare float @llvm.amdgcn.image.msaa.load.x.1d.f32.i32(i32, i32, <8 x i32>, i32, i32) #1
+declare float @llvm.amdgcn.image.msaa.load.x.2d.f32.i32(i32, i32, i32, <8 x i32>, i32, i32) #1
+declare <2 x float> @llvm.amdgcn.image.msaa.load.x.1d.v2f32.i32(i32, i32, <8 x i32>, i32, i32) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readonly }