# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck %s

# Check that SILoadStoreOptimizer honors memory dependencies between moved
# instructions.
#
# The following IR snippet would usually be optimized by the peephole optimizer.
# However, an equivalent situation can occur with buffer instructions as well.

# CHECK-LABEL: name: mem_dependency
# CHECK: DS_READ2_B32 %0, 0, 1,
# CHECK: DS_WRITE_B32 %0, killed %1, 64,
# CHECK: DS_READ2_B32 %0, 16, 17,
# CHECK: DS_WRITE_B32 killed %0, %5, 0

--- |
  define amdgpu_kernel void @mem_dependency(i32 addrspace(3)* %ptr.0) nounwind {
    %ptr.4 = getelementptr i32, i32 addrspace(3)* %ptr.0, i32 1
    %ptr.64 = getelementptr i32, i32 addrspace(3)* %ptr.0, i32 16
    %1 = load i32, i32 addrspace(3)* %ptr.0
    store i32 %1, i32 addrspace(3)* %ptr.64
    %2 = load i32, i32 addrspace(3)* %ptr.64
    %3 = load i32, i32 addrspace(3)* %ptr.4
    %4 = add i32 %2, %3
    store i32 %4, i32 addrspace(3)* %ptr.0
    ret void
  }

  @lds0 = external dso_local unnamed_addr addrspace(3) global [256 x i32], align 4
  @lds1 = external dso_local unnamed_addr addrspace(3) global [256 x i32], align 4
  @lds2 = external dso_local unnamed_addr addrspace(3) global [256 x i32], align 4
  @lds3 = external dso_local unnamed_addr addrspace(3) global [256 x i32], align 4

  define void @asm_defines_address() #0 {
  bb:
    %tmp1 = load i32, i32 addrspace(3)* getelementptr inbounds ([256 x i32], [256 x i32] addrspace(3)* @lds0, i32 0, i32 0), align 4
    %0 = and i32 %tmp1, 255
    %tmp3 = load i32, i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds1, i32 0, i32 undef), align 4
    %tmp6 = load i32, i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds3, i32 0, i32 undef), align 4
    %tmp7 = tail call i32 asm "v_or_b32 $0, 0, $1", "=v,v"(i32 %tmp6) #1
    %tmp10 = lshr i32 %tmp7, 16
    %tmp11 = and i32 %tmp10, 255
    %tmp12 = getelementptr inbounds [256 x i32], [256 x i32] addrspace(3)* @lds1, i32 0, i32 %tmp11
    %tmp13 = load i32, i32 addrspace(3)* %tmp12, align 4
    %tmp14 = xor i32 %tmp3, %tmp13
    %tmp15 = lshr i32 %tmp14, 8
    %tmp16 = and i32 %tmp15, 16711680
    %tmp19 = lshr i32 %tmp16, 16
    %tmp20 = and i32 %tmp19, 255
    %tmp21 = getelementptr inbounds [256 x i32], [256 x i32] addrspace(3)* @lds1, i32 0, i32 %tmp20
    %tmp22 = load i32, i32 addrspace(3)* %tmp21, align 4
    %tmp24 = load i32, i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds2, i32 0, i32 undef), align 4
    %tmp25 = xor i32 %tmp22, %tmp24
    %tmp26 = and i32 %tmp25, -16777216
    %tmp28 = or i32 %0, %tmp26
    store volatile i32 %tmp28, i32 addrspace(1)* undef
    ret void
  }

  attributes #0 = { convergent nounwind }
  attributes #1 = { convergent nounwind readnone }

  define amdgpu_kernel void @move_waw_hazards() #0 {
    ret void
  }

  define amdgpu_kernel void @merge_mmos(i32 addrspace(1)* %ptr_addr1) { ret void }
  define amdgpu_kernel void @reorder_offsets(i32 addrspace(1)* %reorder_addr1) { ret void }

...
---
name: mem_dependency
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
  - { reg: '$vgpr0', virtual-reg: '%1' }
frameInfo:
  isFrameAddressTaken: false
  isReturnAddressTaken: false
  hasStackMap: false
  hasPatchPoint: false
  stackSize: 0
  offsetAdjustment: 0
  maxAlignment: 0
  adjustsStack: false
  hasCalls: false
  maxCallFrameSize: 0
  hasOpaqueSPAdjustment: false
  hasVAStart: false
  hasMustTailInVarArgFunc: false
body: |
  bb.0:
    liveins: $vgpr0

    %1:vgpr_32 = COPY $vgpr0
    $m0 = S_MOV_B32 -1
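    ; The store below writes %ir.ptr.64, which the DS_READ2_B32 further down
    ; reloads; when the pass pairs the loads at bytes 0 and 4, it must keep
    ; that read-after-write ordering intact (see the CHECK lines above).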
    %2:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from %ir.ptr.0)
    DS_WRITE_B32 %1, killed %2, 64, 0, implicit $m0, implicit $exec :: (store (s32) into %ir.ptr.64)

    ; Make this load unmergeable, to tempt SILoadStoreOptimizer into merging the
    ; other two loads.
    %6:vreg_64 = DS_READ2_B32 %1, 16, 17, 0, implicit $m0, implicit $exec :: (load (s64) from %ir.ptr.64, align 4)
    %3:vgpr_32 = COPY %6.sub0
    %4:vgpr_32 = DS_READ_B32 %1, 4, 0, implicit $m0, implicit $exec :: (load (s32) from %ir.ptr.4)
    %5:vgpr_32 = V_ADD_CO_U32_e32 killed %3, killed %4, implicit-def $vcc, implicit $exec
    DS_WRITE_B32 killed %1, %5, 0, 0, implicit killed $m0, implicit $exec :: (store (s32) into %ir.ptr.0)
    S_ENDPGM 0

...
---
# Make sure the asm def isn't moved after the point where it's used for
# the address.
# CHECK-LABEL: name: asm_defines_address
# CHECK: DS_READ2ST64_B32
# CHECK: DS_READ2ST64_B32
# CHECK: INLINEASM
# CHECK: DS_READ_B32
# CHECK: DS_READ_B32
name: asm_defines_address
tracksRegLiveness: true
registers:
  - { id: 0, class: vgpr_32, preferred-register: '' }
body: |
  bb.0:
    %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    %2:vgpr_32 = DS_READ_B32 %1, 3072, 0, implicit $m0, implicit $exec :: (dereferenceable load (s32) from `i32 addrspace(3)* getelementptr inbounds ([256 x i32], [256 x i32] addrspace(3)* @lds0, i32 0, i32 0)`, addrspace 3)
    %3:vgpr_32 = DS_READ_B32 %1, 2048, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds1, i32 0, i32 undef)`, addrspace 3)
    %4:vgpr_32 = DS_READ_B32 %1, 1024, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds3, i32 0, i32 undef)`, addrspace 3)
    INLINEASM &"v_or_b32 $0, 0, $1", 32, 327690, def %0, 327689, %4
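    ; %0 is defined by the INLINEASM above and is the address of the following
    ; load; the pass must not sink the asm def past that use.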
    %5:vgpr_32 = DS_READ_B32 %0, 2048, 0, implicit $m0, implicit $exec :: (load (s32) from %ir.tmp12, addrspace 3)
    %6:vgpr_32 = DS_READ_B32 %5, 2048, 0, implicit $m0, implicit $exec :: (load (s32) from %ir.tmp21, addrspace 3)
    %7:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds2, i32 0, i32 undef)`, addrspace 3)
    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit %6, implicit %7

...
---
# Make sure write-after-write hazards are correctly detected and the
# instructions are moved accordingly.
# CHECK-LABEL: name: move_waw_hazards
# CHECK: S_AND_B64
# CHECK: S_CMP_EQ_U32
name: move_waw_hazards
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1

    %3:sgpr_64 = COPY $sgpr0_sgpr1
    %6:sreg_32_xm0_xexec = S_MOV_B32 0
    %7:sreg_32_xm0 = S_MOV_B32 0
    %8:sreg_64_xexec = REG_SEQUENCE killed %6, %subreg.sub0, %7, %subreg.sub1
    %9:sgpr_128 = S_LOAD_DWORDX4_IMM killed %8, 0, 0 :: (invariant load (s128), addrspace 6)
    %31:sreg_64_xexec = S_BUFFER_LOAD_DWORDX2_IMM %9, 0, 0 :: (dereferenceable invariant load (s32))
    %10:sreg_32_xm0_xexec = COPY %31.sub0
    %11:sreg_32_xm0_xexec = COPY killed %31.sub1
    %12:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %9, 2, 0 :: (dereferenceable invariant load (s32))
    %13:sreg_64 = V_CMP_NE_U32_e64 killed %11, 0, implicit $exec
    %15:sreg_64 = V_CMP_NE_U32_e64 killed %12, 0, implicit $exec
    %17:sreg_64_xexec = S_AND_B64 killed %13, killed %15, implicit-def dead $scc
    S_CMP_EQ_U32 killed %10, 0, implicit-def $scc
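    ; Pairing this load with %12 moves the intervening instructions; S_AND_B64
    ; and S_CMP_EQ_U32 both write $scc (a write-after-write hazard), so their
    ; relative order, checked above, must be preserved.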
    %18:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %9, 3, 0 :: (dereferenceable invariant load (s32))
    S_ENDPGM 0
...
---
# CHECK-LABEL: merge_mmos
# CHECK: S_BUFFER_LOAD_DWORDX2_IMM %0, 0, 0 :: (dereferenceable invariant load (s64), align 4)
# CHECK: BUFFER_LOAD_DWORDX2_OFFSET %0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 4)
# CHECK: BUFFER_STORE_DWORDX2_OFFSET_exact killed %{{[0-9]+}}, %0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s64), align 4)
# CHECK: BUFFER_LOAD_DWORDX2_OFFSET %0, 0, 64, 0, 0, 0, implicit $exec :: (dereferenceable load (s64) from %ir.ptr_addr1 + 64, align 4
# CHECK: BUFFER_STORE_DWORDX2_OFFSET_exact killed %{{[0-9]+}}, %0, 0, 64, 0, 0, 0, implicit $exec :: (dereferenceable store (s64) into %ir.ptr_addr1 + 64, align 4
name: merge_mmos
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1_sgpr2_sgpr3

    %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
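    ; The adjacent dword accesses below (offsets 0/4 and 64/68) should each
    ; merge into one 64-bit access whose MMO covers both halves, per the
    ; CHECK lines above.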
    %1:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0, 0, 0 :: (dereferenceable invariant load (s32))
    %2:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM %0, 1, 0 :: (dereferenceable invariant load (s32))
    %3:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32))
    %4:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %0, 0, 4, 0, 0, 0, implicit $exec :: (dereferenceable load (s32))
    BUFFER_STORE_DWORD_OFFSET_exact %3, %0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s32))
    BUFFER_STORE_DWORD_OFFSET_exact %4, %0, 0, 4, 0, 0, 0, implicit $exec :: (dereferenceable store (s32))
    %5:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %0, 0, 64, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr_addr1 + 64)
    %6:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %0, 0, 68, 0, 0, 0, implicit $exec :: (dereferenceable load (s32) from %ir.ptr_addr1 + 68)
    BUFFER_STORE_DWORD_OFFSET_exact %5, %0, 0, 64, 0, 0, 0, implicit $exec :: (dereferenceable store (s32) into %ir.ptr_addr1 + 64)
    BUFFER_STORE_DWORD_OFFSET_exact %6, %0, 0, 68, 0, 0, 0, implicit $exec :: (dereferenceable store (s32) into %ir.ptr_addr1 + 68)

    S_ENDPGM 0

...
---
# CHECK-LABEL: reorder_offsets
# CHECK-DAG: BUFFER_STORE_DWORDX2_OFFSET_exact killed %{{[0-9]+}}, %0, 0, 16, 0, 0, 0, implicit $exec :: (dereferenceable store (s64) into %ir.reorder_addr1 + 16, align 4, addrspace 1)
# CHECK-DAG: BUFFER_STORE_DWORDX4_OFFSET_exact killed %{{[0-9]+}}, %0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s128) into %ir.reorder_addr1, align 4, addrspace 1)

name: reorder_offsets
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $sgpr0_sgpr1_sgpr2_sgpr3

    %0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
    %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
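    ; The store to offset 0 arrives last; the pass should still merge offsets
    ; 0-12 into one s128 store and 16/20 into one s64 store (CHECK-DAGs above).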
    BUFFER_STORE_DWORD_OFFSET_exact %1, %0, 0, 4, 0, 0, 0, implicit $exec :: (dereferenceable store (s32) into %ir.reorder_addr1 + 4)
    BUFFER_STORE_DWORD_OFFSET_exact %1, %0, 0, 8, 0, 0, 0, implicit $exec :: (dereferenceable store (s32) into %ir.reorder_addr1 + 8)
    BUFFER_STORE_DWORD_OFFSET_exact %1, %0, 0, 12, 0, 0, 0, implicit $exec :: (dereferenceable store (s32) into %ir.reorder_addr1 + 12)
    BUFFER_STORE_DWORD_OFFSET_exact %1, %0, 0, 16, 0, 0, 0, implicit $exec :: (dereferenceable store (s32) into %ir.reorder_addr1 + 16)
    BUFFER_STORE_DWORD_OFFSET_exact %1, %0, 0, 20, 0, 0, 0, implicit $exec :: (dereferenceable store (s32) into %ir.reorder_addr1 + 20)
    BUFFER_STORE_DWORD_OFFSET_exact %1, %0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s32) into %ir.reorder_addr1)
    S_ENDPGM 0

...