llvm-mirror/test/CodeGen/AMDGPU/bug-sdag-scheduler-cycle.ll
Nicolai Hähnle 7d05776a25 SelectionDAG: Fix bug in ClusterNeighboringLoads
Summary:
The ClusterNeighboringLoads method attempts to find loads that can legally be
clustered by looking for loads that consume the same chain glue token.

However, the old code looks at _all_ users of values produced by the
chain node -- including uses of the loaded/returned value of volatile
loads or atomics. This could create circular dependencies, which then
caused scheduling to fail.

With this change, we filter users by getResNo, i.e. by which result
(SDValue) of the node they use, to ensure that we only look at users of
the chain glue token.

This appears to be a rather old bug, which is perhaps surprising.
However, the test case is actually quite fragile (i.e., it is hidden
by fairly small changes), and the test _must_ use volatile loads for
the bug to manifest.

Reviewers: arsenm, bogner, craig.topper, foad

Subscribers: MatzeB, jvesely, wdng, hiraditya, javed.absar, jfb, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D74253
2020-02-12 09:12:55 +01:00

; RUN: llc < %s -mtriple=amdgcn--amdpal -mcpu=gfx1010 -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK

; This used to cause a circular chain dependency during
; SelectionDAG instruction scheduling.

; CHECK-LABEL: {{^}}_amdgpu_gs_main:
; CHECK: ds_read_b32
; CHECK: ds_read_b32
; CHECK: ds_read_b32
; CHECK: ds_read_b32
define amdgpu_gs float @_amdgpu_gs_main(i8 addrspace(3)* %arg0, i8 addrspace(3)* %arg1, i8 addrspace(3)* %arg2) #0 {
  %tmp0 = bitcast i8 addrspace(3)* %arg0 to i32 addrspace(3)* addrspace(3)*
  %tmp = load volatile i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* %tmp0, align 4
  %tmp3 = load volatile i32, i32 addrspace(3)* %tmp, align 4
  %tmp4a = bitcast i8 addrspace(3)* %arg1 to i32 addrspace(3)*
  %tmp4 = load volatile i32, i32 addrspace(3)* %tmp4a, align 4
  %tmp7a = getelementptr i32, i32 addrspace(3)* %tmp, i32 8
  %tmp8 = load volatile i32, i32 addrspace(3)* %tmp7a, align 4
  %tmp9 = add i32 %tmp3, %tmp8
  %tmp10 = add i32 %tmp9, %tmp4
  %tmp14 = bitcast i32 %tmp10 to float
  ret float %tmp14
}