llvm-mirror/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll
Nirav Dave afe2eccae3 In visitSTORE, always use FindBetterChain, rather than only when UseAA is enabled.
Retrying after a fix: load-store factoring through token factors has
been removed in favor of improved token factor operand pruning.

Simplify Consecutive Merge Store Candidate Search

Now that address aliasing is much less conservative, push through a
simplified store-merging search which only checks for parallel stores
through the chain subgraph. This is cleaner, as it separates the
non-interfering loads/stores from the store-merging logic.

When merging stores, search up the chain through a single load, and
find all possible stores by looking down through a load and a
TokenFactor to all stores visited. This improves the quality of the
output SelectionDAG and generally the output CodeGen (with some
exceptions).
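
As a sketch of the kind of pattern this enables (a hypothetical
example, not taken from the patch itself): two parallel stores that
reach the root through a TokenFactor can now be found as merge
candidates and combined into a single wider store:

    define void @merge_adjacent_stores(i32* %p) {
      %p1 = getelementptr inbounds i32, i32* %p, i64 1
      store i32 0, i32* %p, align 8   ; parallel stores along the chain...
      store i32 0, i32* %p1, align 4  ; ...may now merge into one i64 store
      ret void
    }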

Additional Minor Changes:

   1. Finishes removing unused AliasLoad code
   2. Unifies the chain aggregation in the merged stores across
      code paths
   3. Re-adds the Store node to the worklist after calling
      SimplifyDemandedBits.
   4. Increases GatherAllAliasesMaxDepth from 6 to 18. That number is
      arbitrary, but seemed sufficient to not cause regressions in
      tests.

This finishes the change Matt Arsenault started in r246307, building
on jyknight's original patch.

Many tests required changes because memory operations are now
reorderable. Some tests that relied on the previous ordering were
changed to use volatile memory operations.
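
For example (a sketch mirroring the pattern used in the AMDGPU tests
below), making the intervening store volatile pins the surrounding
loads in place:

    store volatile i32 99, i32 addrspace(1)* %gptr, align 4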

Noteworthy tests:

    CodeGen/AArch64/argument-blocks.ll -
      It's not entirely clear what the test_varargs_stackalign test is
      supposed to be asserting, but the new code looks right.

    CodeGen/AArch64/arm64-memset-inline.ll -
    CodeGen/AArch64/arm64-stur.ll -
    CodeGen/ARM/memset-inline.ll -

      The backend now generates *worse* code due to store merging
      succeeding, as we do not emit a 16-byte constant-zero store
      efficiently.

    CodeGen/AArch64/merge-store.ll -
      Improved, but there still seems to be an extraneous vector insert
      from an element to itself?

    CodeGen/PowerPC/ppc64-align-long-double.ll -
      Worse code emitted in this case, due to the improved store->load
      forwarding.

    CodeGen/X86/dag-merge-fast-accesses.ll -
    CodeGen/X86/MergeConsecutiveStores.ll -
    CodeGen/X86/stores-merging.ll -
    CodeGen/Mips/load-store-left-right.ll -
      Restored correct merging of non-aligned stores

    CodeGen/AMDGPU/promote-alloca-stored-pointer-value.ll -
      Improved. Correctly merges buffer_store_dword calls

    CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll -
      Improved. Sidesteps loading a stored value and
      merges two stores

    CodeGen/X86/pr18023.ll -
      This test has been removed, as it was asserting incorrect
      behavior. Non-volatile stores *CAN* be moved past volatile loads,
      and now are.
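
      (Illustrative sketch, not from the removed test: given

          %v = load volatile i32, i32* %p
          store i32 1, i32* %q

       the non-volatile store to %q may be scheduled before the volatile
       load when %p and %q do not alias.)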

    CodeGen/X86/vector-idiv.ll -
    CodeGen/X86/vector-lzcnt-128.ll -
      It's basically impossible to tell what these tests are actually
      testing, but it looks like the code got better due to the memory
      operations being recognized as non-aliasing.

    CodeGen/X86/win32-eh.ll -
      Both loads of the securitycookie are now merged.

Reviewers: arsenm, hfinkel, tstellarAMD, jyknight, nhaehnle

Subscribers: wdng, nhaehnle, nemanjai, arsenm, weimingz, niravd, RKSimon, aemerson, qcolombet, dsanders, resistor, tstellarAMD, t.p.northover, spatel

Differential Revision: https://reviews.llvm.org/D14834

llvm-svn: 289659
2016-12-14 15:44:26 +00:00


; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -check-prefix=FUNC -check-prefix=GCN -check-prefix=CI %s
declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.SI.tbuffer.store.v4i32(<16 x i8>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.amdgcn.s.barrier() #1
declare i32 @llvm.amdgcn.workitem.id.x() #2
@stored_lds_ptr = addrspace(3) global i32 addrspace(3)* undef, align 4
@stored_constant_ptr = addrspace(3) global i32 addrspace(2)* undef, align 8
@stored_global_ptr = addrspace(3) global i32 addrspace(1)* undef, align 8
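; These tests check that accesses to trivially disjoint memory (distinct
; address spaces, or provably distinct offsets from a noalias base) may
; be reordered and combined, while volatile accesses and barriers still
; enforce ordering.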
; FUNC-LABEL: @reorder_local_load_global_store_local_load
; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:1 offset1:3
; CI: buffer_store_dword
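; The global store cannot alias the LDS loads, so the second load is
; moved past the store and the pair combines into one ds_read2_b32.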
define void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
%ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
%ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
%tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
store i32 99, i32 addrspace(1)* %gptr, align 4
%tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
%add = add nsw i32 %tmp1, %tmp2
store i32 %add, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: @no_reorder_local_load_volatile_global_store_local_load
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: buffer_store_dword
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
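; The volatile global store keeps its position, so the two LDS reads
; stay as separate instructions on either side of the buffer_store_dword.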
define void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
%ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
%ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
%tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
store volatile i32 99, i32 addrspace(1)* %gptr, align 4
%tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
%add = add nsw i32 %tmp1, %tmp2
store i32 %add, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: @no_reorder_barrier_local_load_global_store_local_load
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
; CI: buffer_store_dword
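; The s_barrier prevents the two LDS reads from being combined into a
; single ds_read2_b32.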
define void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
%ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
%ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
%tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
store i32 99, i32 addrspace(1)* %gptr, align 4
call void @llvm.amdgcn.s.barrier() #1
%tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
%add = add nsw i32 %tmp1, %tmp2
store i32 %add, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: @reorder_constant_load_global_store_constant_load
; CI-DAG: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
; CI: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}
; CI: buffer_store_dword
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x1
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x3
; CI: buffer_store_dword
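; Constant-address loads cannot alias the global store, so the two
; s_load_dwords are free to move relative to the buffer_store_dword.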
define void @reorder_constant_load_global_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
%ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
%ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 3
%tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4
store i32 99, i32 addrspace(1)* %gptr, align 4
%tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4
%add = add nsw i32 %tmp1, %tmp2
store i32 %add, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: @reorder_constant_load_local_store_constant_load
; CI: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
; CI: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x1
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x3
; CI: ds_write_b32
; CI: buffer_store_dword
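; The constant loads cannot alias the LDS store, so both s_load_dwords
; are hoisted above the ds_write_b32.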
define void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr) #0 {
%ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
%ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
%ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 3
%tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4
store i32 99, i32 addrspace(3)* %lptr, align 4
%tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4
%add = add nsw i32 %tmp1, %tmp2
store i32 %add, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: @reorder_smrd_load_local_store_smrd_load
; CI: s_load_dword
; CI: s_load_dword
; CI: s_load_dword
; CI: ds_write_b32
; CI: buffer_store_dword
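; Both SMRD loads from the incoming constant pointer may be scheduled
; before the store through the noalias LDS pointer.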
define void @reorder_smrd_load_local_store_smrd_load(i32 addrspace(1)* %out, i32 addrspace(3)* noalias %lptr, i32 addrspace(2)* %ptr0) #0 {
%ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
%ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
%tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4
store i32 99, i32 addrspace(3)* %lptr, align 4
%tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4
%add = add nsw i32 %tmp1, %tmp2
store i32 %add, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: @reorder_global_load_local_store_global_load
; CI: ds_write_b32
; CI: buffer_load_dword
; CI: buffer_load_dword
; CI: buffer_store_dword
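; The global loads cannot alias the LDS store, so the ds_write_b32 can
; issue first, followed by both buffer_load_dwords.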
define void @reorder_global_load_local_store_global_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr, i32 addrspace(1)* %ptr0) #0 {
%ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 1
%ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 3
%tmp1 = load i32, i32 addrspace(1)* %ptr1, align 4
store i32 99, i32 addrspace(3)* %lptr, align 4
%tmp2 = load i32, i32 addrspace(1)* %ptr2, align 4
%add = add nsw i32 %tmp1, %tmp2
store i32 %add, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: @reorder_local_offsets
; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:100 offset1:102
; CI-DAG: ds_write2_b32 {{v[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:3 offset1:100
; CI-DAG: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:408
; CI: buffer_store_dword
; CI: s_endpgm
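; Offsets 3, 100, and 102 from the noalias LDS pointer are provably
; disjoint, so the loads combine into a ds_read2_b32 and two of the
; stores into a ds_write2_b32.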
define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 {
%ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
%ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 100
%ptr3 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 102
store i32 123, i32 addrspace(3)* %ptr1, align 4
%tmp1 = load i32, i32 addrspace(3)* %ptr2, align 4
%tmp2 = load i32, i32 addrspace(3)* %ptr3, align 4
store i32 123, i32 addrspace(3)* %ptr2, align 4
%tmp3 = load i32, i32 addrspace(3)* %ptr1, align 4
store i32 789, i32 addrspace(3)* %ptr3, align 4
%add.0 = add nsw i32 %tmp2, %tmp1
%add.1 = add nsw i32 %add.0, %tmp3
store i32 %add.1, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: @reorder_global_offsets
; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
; CI: buffer_store_dword
; CI: s_endpgm
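; The same pattern in the global address space: disjoint offsets from a
; noalias base let the loads and stores be reordered and batched.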
define void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 {
%ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
%ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 100
%ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 102
store i32 123, i32 addrspace(1)* %ptr1, align 4
%tmp1 = load i32, i32 addrspace(1)* %ptr2, align 4
%tmp2 = load i32, i32 addrspace(1)* %ptr3, align 4
store i32 123, i32 addrspace(1)* %ptr2, align 4
%tmp3 = load i32, i32 addrspace(1)* %ptr1, align 4
store i32 789, i32 addrspace(1)* %ptr3, align 4
%add.0 = add nsw i32 %tmp2, %tmp1
%add.1 = add nsw i32 %add.0, %tmp3
store i32 %add.1, i32 addrspace(1)* %out, align 4
ret void
}
; FUNC-LABEL: {{^}}reorder_global_offsets_addr64_soffset0:
; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:12{{$}}
; GCN-NEXT: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:28{{$}}
; GCN-NEXT: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:44{{$}}
; GCN: v_mov_b32
; GCN: v_mov_b32
; GCN: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64{{$}}
; GCN-NEXT: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:20{{$}}
; GCN: v_add_i32
; GCN: v_add_i32
; GCN: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:36{{$}}
; GCN-NEXT: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} 0 addr64 offset:52{{$}}
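; With a per-thread (addr64) base address, the constant offsets select
; provably disjoint dwords, so the three loads issue back to back before
; the dependent stores.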
define void @reorder_global_offsets_addr64_soffset0(i32 addrspace(1)* noalias nocapture %ptr.base) #0 {
%id = call i32 @llvm.amdgcn.workitem.id.x()
%id.ext = sext i32 %id to i64
%ptr0 = getelementptr inbounds i32, i32 addrspace(1)* %ptr.base, i64 %id.ext
%ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
%ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 5
%ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 7
%ptr4 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 9
%ptr5 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 11
%ptr6 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 13
store i32 789, i32 addrspace(1)* %ptr0, align 4
%tmp1 = load i32, i32 addrspace(1)* %ptr1, align 4
store i32 123, i32 addrspace(1)* %ptr2, align 4
%tmp2 = load i32, i32 addrspace(1)* %ptr3, align 4
%add.0 = add nsw i32 %tmp1, %tmp2
store i32 %add.0, i32 addrspace(1)* %ptr4, align 4
%tmp3 = load i32, i32 addrspace(1)* %ptr5, align 4
%add.1 = add nsw i32 %add.0, %tmp3
store i32 %add.1, i32 addrspace(1)* %ptr6, align 4
ret void
}
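; The tbuffer test below is disabled: its X-prefixed check lines are not
; enabled by the RUN line and the function body is commented out.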
; XFUNC-LABEL: @reorder_local_load_tbuffer_store_local_load
; XCI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}}, 0x4
; XCI: TBUFFER_STORE_FORMAT
; XCI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}}, 0x8
; define amdgpu_vs void @reorder_local_load_tbuffer_store_local_load(i32 addrspace(1)* %out, i32 %a1, i32 %vaddr) #0 {
; %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
; %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
; %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
; %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
; %vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
; call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
; i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 1, i32 0, i32 1,
; i32 1, i32 0)
; %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
; %add = add nsw i32 %tmp1, %tmp2
; store i32 %add, i32 addrspace(1)* %out, align 4
; ret void
; }
attributes #0 = { nounwind }
attributes #1 = { nounwind convergent }
attributes #2 = { nounwind readnone }