llvm-mirror/test/CodeGen/AMDGPU/smrd_vmem_war.ll
Commit 4033a61f5d by cdevadas: Resubmit: [AMDGPU] Invert the handling of skip insertion.
The current implementation of skip insertion (SIInsertSkip) makes it a
mandatory pass required for correctness. Initially, the idea was to
have an optional pass. This patch instead inserts the s_cbranch_execz
upfront, during SILowerControlFlow, to skip over sections of code when
no lanes are active. Later, SIRemoveShortExecBranches removes the skips
over short branches, unless the skipped code has side effects and the
skip branch is really necessary.
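
For illustration, a minimal sketch (hypothetical GCN assembly, not
output from this patch) of the shape SILowerControlFlow gives a
guarded region:

    s_and_saveexec_b64 s[0:1], vcc    ; mask off lanes taking the other path
    s_cbranch_execz .LBB0_2           ; no lanes left active: skip the region
    ; ... conditionally executed code ...
  .LBB0_2:
    s_or_b64 exec, exec, s[0:1]       ; restore the saved exec mask

SIRemoveShortExecBranches can then drop the s_cbranch_execz when the
guarded region is short and free of side effects.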

This new pass will replace the handling of skip insertion in the
existing SIInsertSkip pass.

Differential revision: https://reviews.llvm.org/D68092
2020-01-22 13:18:32 +09:00

; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s -check-prefix=GCN
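; This test guards the SMRD-vs-VMEM write-after-read hazard: the scalar
; (SMRD) load from %arg and the later vector (VMEM) store to the same
; address must be separated by s_waitcnt lgkmcnt(0), so the store cannot
; complete before the scalar read has returned its data.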
; GCN-LABEL: ; %bb.0:
; GCN: s_load_dword s{{[0-9]+}}, s{{\[}}[[ADDR_LO:[0-9]+]]{{\:}}[[ADDR_HI:[0-9]+]]{{\]}}, 0x0
; GCN: s_waitcnt lgkmcnt(0)
; GCN: global_store_dword v{{\[}}[[ADDR_LO]]{{\:}}[[ADDR_HI]]{{\]}}, v{{[0-9]+}}, off
define amdgpu_kernel void @zot(i32 addrspace(1)* nocapture %arg, i64 addrspace(1)* nocapture %arg1) {
bb:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = icmp eq i32 %tmp, 0
  br i1 %tmp2, label %bb3, label %bb8

bb3:                                              ; preds = %bb
  %tmp4 = load i32, i32 addrspace(1)* %arg, align 4
  store i32 0, i32 addrspace(1)* %arg, align 4
  %tmp5 = zext i32 %tmp4 to i64
  %tmp6 = load i64, i64 addrspace(1)* %arg1, align 8
  %tmp7 = add i64 %tmp6, %tmp5
  store i64 %tmp7, i64 addrspace(1)* %arg1, align 8
  br label %bb8

bb8:                                              ; preds = %bb3, %bb
  ret void
}
; Function Attrs: nounwind readnone speculatable
declare i32 @llvm.amdgcn.workitem.id.x() #0
attributes #0 = { nounwind readnone speculatable }
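
To run just this test with llvm-lit (assuming a configured build tree
in build/; adjust the paths to your checkout):

    build/bin/llvm-lit -v test/CodeGen/AMDGPU/smrd_vmem_war.ll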