
[AMDGPU] Make bfi patterns divergence-aware

This tends to increase code size, but more importantly it reduces vgpr
usage and could avoid costly readfirstlanes if the result needs to be
in an sgpr.

Differential Revision: https://reviews.llvm.org/D88245
Author: Jay Foad
Date: 2020-09-24 16:52:41 +01:00
parent 343d947d8d
commit 905b53ab6b
6 changed files with 118 additions and 93 deletions
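
As an illustration of the intended effect (a minimal sketch, not part of the patch: the kernel and value names below are hypothetical, and the expected selection is taken from the updated bfi_def checks further down), consider a bfi-shaped expression whose operands are all uniform kernel arguments. The root or is not divergent, so the DivergentBinFrag<or> pattern does not match and the expression is expected to stay on the SALU as s_andn2_b32 / s_and_b32 / s_or_b32 rather than selecting v_bfi_b32:

; Hypothetical uniform case; names are illustrative only.
define amdgpu_kernel void @uniform_bfi(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
  %not.x = xor i32 %x, -1      ; ~x
  %rhs = and i32 %z, %not.x    ; z & ~x
  %lhs = and i32 %y, %x        ; y & x
  %bfi = or i32 %lhs, %rhs     ; (y & x) | (z & ~x), uniform root
  store i32 %bfi, i32 addrspace(1)* %out
  ret void
}

When the same expression is built from divergent values (for example, data loaded per lane), the root or is divergent and V_BFI_B32 is still selected as before.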


@@ -1551,18 +1551,17 @@ def : IMad24Pat<V_MAD_I32_I24, 1>;
def : UMad24Pat<V_MAD_U32_U24, 1>;
// BFI patterns
// FIXME: This should only be done for VALU inputs
// Definition from ISA doc:
// (y & x) | (z & ~x)
def : AMDGPUPat <
(or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
(DivergentBinFrag<or> (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
(V_BFI_B32 $x, $y, $z)
>;
// 64-bit version
def : AMDGPUPat <
(or (and i64:$y, i64:$x), (and i64:$z, (not i64:$x))),
(DivergentBinFrag<or> (and i64:$y, i64:$x), (and i64:$z, (not i64:$x))),
(REG_SEQUENCE SReg_64,
(V_BFI_B32 (i32 (EXTRACT_SUBREG SReg_64:$x, sub0)),
(i32 (EXTRACT_SUBREG SReg_64:$y, sub0)),
@@ -1575,13 +1574,13 @@ def : AMDGPUPat <
// SHA-256 Ch function
// z ^ (x & (y ^ z))
def : AMDGPUPat <
(xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
(DivergentBinFrag<xor> i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
(V_BFI_B32 $x, $y, $z)
>;
// 64-bit version
def : AMDGPUPat <
(xor i64:$z, (and i64:$x, (xor i64:$y, i64:$z))),
(DivergentBinFrag<xor> i64:$z, (and i64:$x, (xor i64:$y, i64:$z))),
(REG_SEQUENCE SReg_64,
(V_BFI_B32 (i32 (EXTRACT_SUBREG SReg_64:$x, sub0)),
(i32 (EXTRACT_SUBREG SReg_64:$y, sub0)),
@@ -2305,12 +2304,14 @@ defm : BFEPattern <V_BFE_U32, V_BFE_I32, S_MOV_B32>;
// ((x & z) | (y & (x | z))) -> BFI (XOR x, y), z, y
def : AMDGPUPat <
(or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
(DivergentBinFrag<or> (and i32:$x, i32:$z),
(and i32:$y, (or i32:$x, i32:$z))),
(V_BFI_B32 (V_XOR_B32_e64 i32:$x, i32:$y), i32:$z, i32:$y)
>;
def : AMDGPUPat <
(or (and i64:$x, i64:$z), (and i64:$y, (or i64:$x, i64:$z))),
(DivergentBinFrag<or> (and i64:$x, i64:$z),
(and i64:$y, (or i64:$x, i64:$z))),
(REG_SEQUENCE SReg_64,
(V_BFI_B32 (V_XOR_B32_e64 (i32 (EXTRACT_SUBREG SReg_64:$x, sub0)),
(i32 (EXTRACT_SUBREG SReg_64:$y, sub0))),


@@ -429,6 +429,18 @@ class UniformBinFrag<SDPatternOperator Op> : PatFrag <
let GISelPredicateCode = [{return true;}];
}
class DivergentBinFrag<SDPatternOperator Op> : PatFrag <
(ops node:$src0, node:$src1),
(Op $src0, $src1),
[{ return N->isDivergent(); }]> {
// This check is unnecessary as it's captured by the result register
// bank constraint.
//
// FIXME: Should add a way for the emitter to recognize this is a
// trivially true predicate to eliminate the check.
let GISelPredicateCode = [{return true;}];
}
let Defs = [SCC] in { // Carry out goes to SCC
let isCommutable = 1 in {
def S_ADD_U32 : SOP2_32 <"s_add_u32">;


@@ -8,7 +8,9 @@
; FUNC-LABEL: {{^}}bfi_def:
; R600: BFI_INT
; GCN: v_bfi_b32
; GCN: s_andn2_b32
; GCN: s_and_b32
; GCN: s_or_b32
define amdgpu_kernel void @bfi_def(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
%0 = xor i32 %x, -1
@@ -24,7 +26,9 @@ entry:
; FUNC-LABEL: {{^}}bfi_sha256_ch:
; R600: BFI_INT
; GCN: v_bfi_b32
; GCN: s_xor_b32
; GCN: s_and_b32
; GCN: s_xor_b32
define amdgpu_kernel void @bfi_sha256_ch(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
%0 = xor i32 %y, %z
@@ -40,8 +44,10 @@ entry:
; R600: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], KC0[2].Z, KC0[2].W
; R600: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV\.[XYZW]}}, KC0[3].X, KC0[2].W
; GCN: v_xor_b32_e32 [[DST:v[0-9]+]], {{s[0-9]+, v[0-9]+}}
; GCN: v_bfi_b32 {{v[0-9]+}}, [[DST]], {{s[0-9]+, v[0-9]+}}
; GCN: s_and_b32
; GCN: s_or_b32
; GCN: s_and_b32
; GCN: s_or_b32
define amdgpu_kernel void @bfi_sha256_ma(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
entry:
%0 = and i32 %x, %z
@@ -117,12 +123,9 @@ entry:
; FIXME: Should leave as 64-bit SALU ops
; FUNC-LABEL: {{^}}s_bitselect_i64_pat_0:
; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_bfi_b32
; GCN-DAG: v_bfi_b32
; GCN: s_and_b64
; GCN: s_andn2_b64
; GCN: s_or_b64
define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
%and0 = and i64 %a, %b
%not.a = xor i64 %a, -1
@@ -134,12 +137,9 @@ define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
}
; FUNC-LABEL: {{^}}s_bitselect_i64_pat_1:
; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_bfi_b32
; GCN-DAG: v_bfi_b32
; GCN: s_xor_b64
; GCN: s_and_b64
; GCN: s_xor_b64
define amdgpu_kernel void @s_bitselect_i64_pat_1(i64 %a, i64 %b, i64 %mask) {
%xor.0 = xor i64 %a, %mask
%and = and i64 %xor.0, %b
@@ -151,12 +151,9 @@ define amdgpu_kernel void @s_bitselect_i64_pat_1(i64 %a, i64 %b, i64 %mask) {
}
; FUNC-LABEL: {{^}}s_bitselect_i64_pat_2:
; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_bfi_b32
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_bfi_b32
; GCN: s_xor_b64
; GCN: s_and_b64
; GCN: s_xor_b64
define amdgpu_kernel void @s_bitselect_i64_pat_2(i64 %a, i64 %b, i64 %mask) {
%xor.0 = xor i64 %a, %mask
%and = and i64 %xor.0, %b
@@ -168,12 +165,10 @@ define amdgpu_kernel void @s_bitselect_i64_pat_2(i64 %a, i64 %b, i64 %mask) {
}
; FUNC-LABEL: {{^}}s_bfi_sha256_ma_i64:
; GCN: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_xor_b32
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s
; GCN-DAG: v_xor_b32
; GCN-DAG: v_bfi_b32
; GCN: v_bfi_b32
; GCN: s_and_b64
; GCN: s_or_b64
; GCN: s_and_b64
; GCN: s_or_b64
define amdgpu_kernel void @s_bfi_sha256_ma_i64(i64 %x, i64 %y, i64 %z) {
entry:
%and0 = and i64 %x, %z


@@ -125,10 +125,11 @@ entry:
; GCN-NOT: buffer_
; GCN: s_lshl_b32 [[SEL:s[0-9]+]], s{{[0-9]+}}, 4
; GCN: s_lshl_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], [[SEL]]
; GCN: s_mov_b32 [[K:s[0-9]+]], 0x3c003c00
; GCN: v_mov_b32_e32 [[V:v[0-9]+]], [[K]]
; GCN: v_bfi_b32 v{{[0-9]+}}, s{{[0-9]+}}, [[V]], v{{[0-9]+}}
; GCN: v_bfi_b32 v{{[0-9]+}}, s{{[0-9]+}}, [[V]], v{{[0-9]+}}
; GCN: s_andn2_b64
; GCN: s_mov_b32 s[[KLO:[0-9]+]], 0x3c003c00
; GCN: s_mov_b32 s[[KHI:[0-9]+]], s[[KLO]]
; GCN: s_and_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s{{\[}}[[KLO]]:[[KHI]]]
; GCN: s_or_b64
define amdgpu_kernel void @half4_inselt(<4 x half> addrspace(1)* %out, <4 x half> %vec, i32 %sel) {
entry:
%v = insertelement <4 x half> %vec, half 1.000000e+00, i32 %sel
@@ -142,7 +143,9 @@ entry:
; GCN-NOT: buffer_
; GCN: s_lshl_b32 [[SEL:s[0-9]+]], s{{[0-9]+}}, 4
; GCN: s_lshl_b32 [[V:s[0-9]+]], 0xffff, [[SEL]]
; GCN: v_bfi_b32 v{{[0-9]+}}, [[V]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: s_andn2_b32
; GCN: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x3c003c00
; GCN: s_or_b32
define amdgpu_kernel void @half2_inselt(<2 x half> addrspace(1)* %out, <2 x half> %vec, i32 %sel) {
entry:
%v = insertelement <2 x half> %vec, half 1.000000e+00, i32 %sel
@@ -184,10 +187,11 @@ entry:
; GCN-NOT: v_cndmask_b32
; GCN-NOT: v_movrel
; GCN-NOT: buffer_
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x10001
; GCN: s_lshl_b32 [[SEL:s[0-9]+]], s{{[0-9]+}}, 4
; GCN: s_lshl_b32 [[V:s[0-9]+]], 0xffff, [[SEL]]
; GCN: v_bfi_b32 v{{[0-9]+}}, [[V]], [[K]], v{{[0-9]+}}
; GCN: s_andn2_b32
; GCN: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x10001
; GCN: s_or_b32
define amdgpu_kernel void @short2_inselt(<2 x i16> addrspace(1)* %out, <2 x i16> %vec, i32 %sel) {
entry:
%v = insertelement <2 x i16> %vec, i16 1, i32 %sel
@@ -201,10 +205,11 @@ entry:
; GCN-NOT: buffer_
; GCN: s_lshl_b32 [[SEL:s[0-9]+]], s{{[0-9]+}}, 4
; GCN: s_lshl_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], [[SEL]]
; GCN: s_mov_b32 [[K:s[0-9]+]], 0x10001
; GCN: v_mov_b32_e32 [[V:v[0-9]+]], [[K]]
; GCN: v_bfi_b32 v{{[0-9]+}}, s{{[0-9]+}}, [[V]], v{{[0-9]+}}
; GCN: v_bfi_b32 v{{[0-9]+}}, s{{[0-9]+}}, [[V]], v{{[0-9]+}}
; GCN: s_andn2_b64
; GCN: s_mov_b32 s[[KLO:[0-9]+]], 0x10001
; GCN: s_mov_b32 s[[KHI:[0-9]+]], s[[KLO]]
; GCN: s_and_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s{{\[}}[[KLO]]:[[KHI]]]
; GCN: s_or_b64
define amdgpu_kernel void @short4_inselt(<4 x i16> addrspace(1)* %out, <4 x i16> %vec, i32 %sel) {
entry:
%v = insertelement <4 x i16> %vec, i16 1, i32 %sel


@@ -861,14 +861,15 @@ define amdgpu_kernel void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dword s6, s[4:5], 0x2
; SI-NEXT: s_load_dword s4, s[4:5], 0x3
; SI-NEXT: v_mov_b32_e32 v0, 0x50005
; SI-NEXT: s_mov_b32 s3, 0x100f000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_lshl_b32 s4, s4, 4
; SI-NEXT: s_lshl_b32 s4, 0xffff, s4
; SI-NEXT: v_bfi_b32 v0, s4, v0, v1
; SI-NEXT: s_andn2_b32 s5, s6, s4
; SI-NEXT: s_and_b32 s4, s4, 0x50005
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -877,14 +878,15 @@ define amdgpu_kernel void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s6, s[4:5], 0x8
; VI-NEXT: s_load_dword s4, s[4:5], 0xc
; VI-NEXT: v_mov_b32_e32 v0, 0x50005
; VI-NEXT: s_mov_b32 s3, 0x1100f000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s6
; VI-NEXT: s_lshl_b32 s4, s4, 4
; VI-NEXT: s_lshl_b32 s4, 0xffff, s4
; VI-NEXT: v_bfi_b32 v0, s4, v0, v1
; VI-NEXT: s_andn2_b32 s5, s6, s4
; VI-NEXT: s_and_b32 s4, s4, 0x50005
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%vecins = insertelement <2 x i16> %a, i16 5, i32 %b
@@ -925,18 +927,18 @@ define amdgpu_kernel void @dynamic_insertelement_v3i16(<3 x i16> addrspace(1)* %
; VI-NEXT: s_mov_b32 s3, 0x1100f000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: s_lshl_b32 s8, s4, 4
; VI-NEXT: s_mov_b32 s4, 0xffff
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], s8
; VI-NEXT: s_mov_b32 s8, 0x50005
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_bfi_b32 v0, s5, v0, v1
; VI-NEXT: v_mov_b32_e32 v1, s8
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_bfi_b32 v1, s4, v1, v2
; VI-NEXT: s_mov_b32 s9, s8
; VI-NEXT: s_andn2_b64 s[6:7], s[6:7], s[4:5]
; VI-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
; VI-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
; VI-NEXT: v_mov_b32_e32 v0, s5
; VI-NEXT: buffer_store_short v0, off, s[0:3], 0 offset:4
; VI-NEXT: buffer_store_dword v1, off, s[0:3], 0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%vecins = insertelement <3 x i16> %a, i16 5, i32 %b
store <3 x i16> %vecins, <3 x i16> addrspace(1)* %out, align 8
@@ -949,14 +951,15 @@ define amdgpu_kernel void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %ou
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dword s6, s[4:5], 0xa
; SI-NEXT: s_load_dword s4, s[4:5], 0x13
; SI-NEXT: v_mov_b32_e32 v0, 0x505
; SI-NEXT: s_mov_b32 s3, 0x100f000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_lshl_b32 s4, s4, 3
; SI-NEXT: s_lshl_b32 s4, -1, s4
; SI-NEXT: v_bfi_b32 v0, s4, v0, v1
; SI-NEXT: s_andn2_b32 s5, s6, s4
; SI-NEXT: s_and_b32 s4, s4, 0x505
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -989,17 +992,19 @@ define amdgpu_kernel void @dynamic_insertelement_v3i8(<3 x i8> addrspace(1)* %ou
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dword s6, s[4:5], 0xa
; SI-NEXT: s_load_dword s4, s[4:5], 0x13
; SI-NEXT: v_mov_b32_e32 v0, 0x5050505
; SI-NEXT: s_mov_b32 s3, 0x100f000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_lshl_b32 s4, s4, 3
; SI-NEXT: s_lshl_b32 s4, 0xffff, s4
; SI-NEXT: v_bfi_b32 v0, s4, v0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; SI-NEXT: s_andn2_b32 s5, s6, s4
; SI-NEXT: s_and_b32 s4, s4, 0x5050505
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_lshr_b32 s5, s4, 16
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: buffer_store_byte v1, off, s[0:3], 0 offset:2
; SI-NEXT: v_mov_b32_e32 v0, s5
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0 offset:2
; SI-NEXT: s_endpgm
;
; VI-LABEL: dynamic_insertelement_v3i8:
@@ -1007,17 +1012,19 @@ define amdgpu_kernel void @dynamic_insertelement_v3i8(<3 x i8> addrspace(1)* %ou
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s6, s[4:5], 0x28
; VI-NEXT: s_load_dword s4, s[4:5], 0x4c
; VI-NEXT: v_mov_b32_e32 v0, 0x5050505
; VI-NEXT: s_mov_b32 s3, 0x1100f000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s6
; VI-NEXT: s_lshl_b32 s4, s4, 3
; VI-NEXT: s_lshl_b32 s4, 0xffff, s4
; VI-NEXT: v_bfi_b32 v0, s4, v0, v1
; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; VI-NEXT: s_andn2_b32 s5, s6, s4
; VI-NEXT: s_and_b32 s4, s4, 0x5050505
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_lshr_b32 s5, s4, 16
; VI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-NEXT: buffer_store_byte v1, off, s[0:3], 0 offset:2
; VI-NEXT: v_mov_b32_e32 v0, s5
; VI-NEXT: buffer_store_byte v0, off, s[0:3], 0 offset:2
; VI-NEXT: s_endpgm
%vecins = insertelement <3 x i8> %a, i8 5, i32 %b
store <3 x i8> %vecins, <3 x i8> addrspace(1)* %out, align 4
@@ -1030,14 +1037,15 @@ define amdgpu_kernel void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %ou
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dword s6, s[4:5], 0xa
; SI-NEXT: s_load_dword s4, s[4:5], 0x13
; SI-NEXT: v_mov_b32_e32 v0, 0x5050505
; SI-NEXT: s_mov_b32 s3, 0x100f000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_lshl_b32 s4, s4, 3
; SI-NEXT: s_lshl_b32 s4, 0xffff, s4
; SI-NEXT: v_bfi_b32 v0, s4, v0, v1
; SI-NEXT: s_andn2_b32 s5, s6, s4
; SI-NEXT: s_and_b32 s4, s4, 0x5050505
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -1046,14 +1054,15 @@ define amdgpu_kernel void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %ou
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s6, s[4:5], 0x28
; VI-NEXT: s_load_dword s4, s[4:5], 0x4c
; VI-NEXT: v_mov_b32_e32 v0, 0x5050505
; VI-NEXT: s_mov_b32 s3, 0x1100f000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s6
; VI-NEXT: s_lshl_b32 s4, s4, 3
; VI-NEXT: s_lshl_b32 s4, 0xffff, s4
; VI-NEXT: v_bfi_b32 v0, s4, v0, v1
; VI-NEXT: s_andn2_b32 s5, s6, s4
; VI-NEXT: s_and_b32 s4, s4, 0x5050505
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%vecins = insertelement <4 x i8> %a, i8 5, i32 %b


@@ -1050,17 +1050,18 @@ define amdgpu_kernel void @s_insertelement_v2i16_dynamic(<2 x i16> addrspace(1)*
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x10
; GFX9-NEXT: v_mov_b32_e32 v2, 0x3e703e7
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: s_load_dword s4, s[4:5], 0x0
; GFX9-NEXT: s_load_dword s0, s[4:5], 0x0
; GFX9-NEXT: s_load_dword s2, s[2:3], 0x0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b32 s0, s4, 4
; GFX9-NEXT: s_lshl_b32 s0, s0, 4
; GFX9-NEXT: s_lshl_b32 s0, 0xffff, s0
; GFX9-NEXT: v_mov_b32_e32 v3, s2
; GFX9-NEXT: v_bfi_b32 v2, s0, v2, v3
; GFX9-NEXT: s_andn2_b32 s1, s2, s0
; GFX9-NEXT: s_and_b32 s0, s0, 0x3e703e7
; GFX9-NEXT: s_or_b32 s0, s0, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s0
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
;
@@ -1068,17 +1069,18 @@ define amdgpu_kernel void @s_insertelement_v2i16_dynamic(<2 x i16> addrspace(1)*
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x10
; VI-NEXT: v_mov_b32_e32 v2, 0x3e703e7
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: s_load_dword s4, s[4:5], 0x0
; VI-NEXT: s_load_dword s0, s[4:5], 0x0
; VI-NEXT: s_load_dword s2, s[2:3], 0x0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b32 s0, s4, 4
; VI-NEXT: s_lshl_b32 s0, s0, 4
; VI-NEXT: s_lshl_b32 s0, 0xffff, s0
; VI-NEXT: v_mov_b32_e32 v3, s2
; VI-NEXT: v_bfi_b32 v2, s0, v2, v3
; VI-NEXT: s_andn2_b32 s1, s2, s0
; VI-NEXT: s_and_b32 s0, s0, 0x3e703e7
; VI-NEXT: s_or_b32 s0, s0, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
@@ -1086,17 +1088,18 @@ define amdgpu_kernel void @s_insertelement_v2i16_dynamic(<2 x i16> addrspace(1)*
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x4
; CI-NEXT: v_mov_b32_e32 v2, 0x3e703e7
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: s_load_dword s4, s[4:5], 0x0
; CI-NEXT: s_load_dword s0, s[4:5], 0x0
; CI-NEXT: s_load_dword s2, s[2:3], 0x0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_lshl_b32 s0, s4, 4
; CI-NEXT: s_lshl_b32 s0, s0, 4
; CI-NEXT: s_lshl_b32 s0, 0xffff, s0
; CI-NEXT: v_mov_b32_e32 v3, s2
; CI-NEXT: v_bfi_b32 v2, s0, v2, v3
; CI-NEXT: s_andn2_b32 s1, s2, s0
; CI-NEXT: s_and_b32 s0, s0, 0x3e703e7
; CI-NEXT: s_or_b32 s0, s0, s1
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
%idx = load volatile i32, i32 addrspace(4)* %idx.ptr