//===- AMDGPUUnifyDivergentExitNodes.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a variant of the UnifyFunctionExitNodes pass. Rather than ensuring
// there is at most one ret and one unreachable instruction, it ensures there
// is at most one divergent exiting block.
//
// StructurizeCFG can't deal with multi-exit regions formed by branches to
// multiple return nodes. It is not desirable to structurize regions with
// uniform branches, so unifying those into the same return block as divergent
// branches would inhibit the use of scalar branching. It still can't deal with
// the case where one branch goes to return, and one to unreachable. Replace
// unreachable in this case with a return.
//
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
|
2017-10-17 23:27:42 +02:00
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/StringRef.h"
|
2018-08-30 16:21:36 +02:00
|
|
|
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
|
2017-03-24 20:52:05 +01:00
|
|
|
#include "llvm/Analysis/PostDominators.h"
|
|
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
|
|
|
#include "llvm/IR/BasicBlock.h"
|
|
|
|
#include "llvm/IR/CFG.h"
|
2017-10-17 23:27:42 +02:00
|
|
|
#include "llvm/IR/Constants.h"
|
2017-03-24 20:52:05 +01:00
|
|
|
#include "llvm/IR/Function.h"
|
2017-10-17 23:27:42 +02:00
|
|
|
#include "llvm/IR/InstrTypes.h"
|
2017-03-24 20:52:05 +01:00
|
|
|
#include "llvm/IR/Instructions.h"
|
2017-10-17 23:27:42 +02:00
|
|
|
#include "llvm/IR/Intrinsics.h"
|
#include "llvm/IR/IRBuilder.h"
|
2017-03-24 20:52:05 +01:00
|
|
|
#include "llvm/IR/Type.h"
|
#include "llvm/InitializePasses.h"
|
2017-10-17 23:27:42 +02:00
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
2017-03-24 20:52:05 +01:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2018-03-28 19:44:36 +02:00
|
|
|
#include "llvm/Transforms/Utils.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-13 22:15:01 +01:00
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
2017-10-17 23:27:42 +02:00
|
|
|
|
2017-03-24 20:52:05 +01:00
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "amdgpu-unify-divergent-exit-nodes"
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
class AMDGPUUnifyDivergentExitNodes : public FunctionPass {
|
|
|
|
public:
|
|
|
|
static char ID; // Pass identification, replacement for typeid
|
2017-10-17 23:27:42 +02:00
|
|
|
|
2017-03-24 20:52:05 +01:00
|
|
|
AMDGPUUnifyDivergentExitNodes() : FunctionPass(ID) {
|
|
|
|
initializeAMDGPUUnifyDivergentExitNodesPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
|
|
|
// We can preserve non-critical-edgeness when we unify function exit nodes
|
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override;
|
|
|
|
bool runOnFunction(Function &F) override;
|
|
|
|
};
|
|
|
|
|
2017-10-17 23:27:42 +02:00
|
|
|
} // end anonymous namespace
|
2017-03-24 20:52:05 +01:00
|
|
|
|
|
|
|
char AMDGPUUnifyDivergentExitNodes::ID = 0;
|
2017-10-17 23:27:42 +02:00
|
|
|
|
|
|
|
char &llvm::AMDGPUUnifyDivergentExitNodesID = AMDGPUUnifyDivergentExitNodes::ID;
|
|
|
|
|
2017-03-24 20:52:05 +01:00
|
|
|
INITIALIZE_PASS_BEGIN(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
|
|
|
|
"Unify divergent function exit nodes", false, false)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
|
2018-08-30 16:21:36 +02:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
|
2017-03-24 20:52:05 +01:00
|
|
|
INITIALIZE_PASS_END(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
|
|
|
|
"Unify divergent function exit nodes", false, false)
|
|
|
|
|
|
|
|
void AMDGPUUnifyDivergentExitNodes::getAnalysisUsage(AnalysisUsage &AU) const{
|
|
|
|
// TODO: Preserve dominator tree.
|
|
|
|
AU.addRequired<PostDominatorTreeWrapperPass>();
|
|
|
|
|
2018-08-30 16:21:36 +02:00
|
|
|
AU.addRequired<LegacyDivergenceAnalysis>();
|
2017-03-24 20:52:05 +01:00
|
|
|
|
|
|
|
// No divergent values are changed, only blocks and branch edges.
|
2018-08-30 16:21:36 +02:00
|
|
|
AU.addPreserved<LegacyDivergenceAnalysis>();
|
2017-03-24 20:52:05 +01:00
|
|
|
|
|
|
|
// We preserve the non-critical-edgeness property
|
|
|
|
AU.addPreservedID(BreakCriticalEdgesID);
|
|
|
|
|
|
|
|
// This is a cluster of orthogonal Transforms
|
|
|
|
AU.addPreservedID(LowerSwitchID);
|
|
|
|
FunctionPass::getAnalysisUsage(AU);
|
|
|
|
|
|
|
|
AU.addRequired<TargetTransformInfoWrapperPass>();
|
|
|
|
}

/// \returns true if \p BB is reachable through only uniform branches.
/// XXX - Is there a more efficient way to find this?
static bool isUniformlyReached(const LegacyDivergenceAnalysis &DA,
                               BasicBlock &BB) {
  SmallVector<BasicBlock *, 8> Stack;
  SmallPtrSet<BasicBlock *, 8> Visited;

  for (BasicBlock *Pred : predecessors(&BB))
    Stack.push_back(Pred);

  while (!Stack.empty()) {
    BasicBlock *Top = Stack.pop_back_val();
    if (!DA.isUniform(Top->getTerminator()))
      return false;

    for (BasicBlock *Pred : predecessors(Top)) {
      if (Visited.insert(Pred).second)
        Stack.push_back(Pred);
    }
  }

  return true;
}
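
/// Clear the "done" flag on every export intrinsic in \p F. The flag is the
/// i1 operand just before the valid-mask operand: argument 6 of
/// llvm.amdgcn.exp (tgt, en, src0..src3, done, vm) and argument 4 of
/// llvm.amdgcn.exp.compr (tgt, en, src0, src1, done, vm).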
static void removeDoneExport(Function &F) {
  ConstantInt *BoolFalse = ConstantInt::getFalse(F.getContext());
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (IntrinsicInst *Intrin = llvm::dyn_cast<IntrinsicInst>(&I)) {
        if (Intrin->getIntrinsicID() == Intrinsic::amdgcn_exp) {
          Intrin->setArgOperand(6, BoolFalse); // done
        } else if (Intrin->getIntrinsicID() == Intrinsic::amdgcn_exp_compr) {
          Intrin->setArgOperand(4, BoolFalse); // done
        }
      }
    }
  }
}
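
/// Unify every block in \p ReturningBlocks into a single new return block
/// named \p Name. If \p InsertExport is set, clear the "done" bit on existing
/// exports and emit a null export before the unified return, as pixel shaders
/// require. \returns the newly created block.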
static BasicBlock *unifyReturnBlockSet(Function &F,
                                       ArrayRef<BasicBlock *> ReturningBlocks,
                                       bool InsertExport,
                                       const TargetTransformInfo &TTI,
                                       StringRef Name) {
  // We need to insert a new basic block into the function, add a PHI node (if
  // the function returns values), and convert all of the return instructions
  // into unconditional branches.
  BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(), Name, &F);
  IRBuilder<> B(NewRetBlock);

  if (InsertExport) {
    // Ensure that there's only one "done" export in the shader by removing the
    // "done" bit set on the original final export. More than one "done" export
    // can lead to undefined behavior.
    removeDoneExport(F);

    Value *Undef = UndefValue::get(B.getFloatTy());
    B.CreateIntrinsic(Intrinsic::amdgcn_exp, { B.getFloatTy() },
                      {
                        B.getInt32(9), // target, SQ_EXP_NULL
                        B.getInt32(0), // enabled channels
                        Undef, Undef, Undef, Undef, // values
                        B.getTrue(), // done
                        B.getTrue(), // valid mask
                      });
  }

  PHINode *PN = nullptr;
  if (F.getReturnType()->isVoidTy()) {
    B.CreateRetVoid();
  } else {
    // If the function doesn't return void, add a PHI node to the block...
    PN = B.CreatePHI(F.getReturnType(), ReturningBlocks.size(),
                     "UnifiedRetVal");
    assert(!InsertExport);
    B.CreateRet(PN);
  }

  // Loop over all of the blocks, replacing the return instruction with an
  // unconditional branch.
  for (BasicBlock *BB : ReturningBlocks) {
    // Add an incoming element to the PHI node for every return instruction
    // that is merging into this new block...
    if (PN)
      PN->addIncoming(BB->getTerminator()->getOperand(0), BB);

    // Remove and delete the return inst.
    BB->getTerminator()->eraseFromParent();
    BranchInst::Create(NewRetBlock, BB);
  }

  for (BasicBlock *BB : ReturningBlocks) {
    // Cleanup possible branch to unconditional branch to the return.
    simplifyCFG(BB, TTI, {2});
  }

  return NewRetBlock;
}

bool AMDGPUUnifyDivergentExitNodes::runOnFunction(Function &F) {
  auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();

  // If there's only one exit, we don't need to do anything, unless this is a
  // pixel shader and that exit is an infinite loop, since we still have to
  // insert an export in that case.
  if (PDT.root_size() <= 1 && F.getCallingConv() != CallingConv::AMDGPU_PS)
    return false;

  LegacyDivergenceAnalysis &DA = getAnalysis<LegacyDivergenceAnalysis>();

  // Loop over all of the blocks in a function, tracking all of the blocks that
  // return.
  SmallVector<BasicBlock *, 4> ReturningBlocks;
  SmallVector<BasicBlock *, 4> UniformlyReachedRetBlocks;
  SmallVector<BasicBlock *, 4> UnreachableBlocks;

  // Dummy return block for infinite loop.
  BasicBlock *DummyReturnBB = nullptr;

  bool InsertExport = false;

  bool Changed = false;
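  // Note: besides blocks terminated by a return or unreachable, the roots of
  // the post-dominator tree can also include blocks inside infinite loops
  // (reverse-unreachable code), which is why a plain branch terminator may
  // show up below.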
  for (BasicBlock *BB : PDT.roots()) {
    if (isa<ReturnInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        ReturningBlocks.push_back(BB);
      else
        UniformlyReachedRetBlocks.push_back(BB);
    } else if (isa<UnreachableInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        UnreachableBlocks.push_back(BB);
    } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {

      ConstantInt *BoolTrue = ConstantInt::getTrue(F.getContext());
      if (DummyReturnBB == nullptr) {
        DummyReturnBB = BasicBlock::Create(F.getContext(),
                                           "DummyReturnBlock", &F);
        Type *RetTy = F.getReturnType();
        Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);

        // For pixel shaders, the producer guarantees that an export is
        // executed before each return instruction. However, if there is an
        // infinite loop and we insert a return ourselves, we need to uphold
        // that guarantee by inserting a null export. This can happen e.g. in
        // an infinite loop with kill instructions, which is supposed to
        // terminate. However, we don't need to do this if there is a non-void
        // return value, since then there is an epilog afterwards which will
        // still export.
        //
        // Note: In the case where only some threads enter the infinite loop,
        // this can result in the null export happening redundantly after the
        // original exports. However, the last "real" export happens after all
        // the threads that didn't enter an infinite loop converged, which
        // means that the only extra threads to execute the null export are
        // threads that entered the infinite loop, and they only could've
        // exited through being killed, which sets their exec bit to 0.
        // Therefore, unless there's an actual infinite loop, which can have
        // invalid results, or there's a kill after the last export, which we
        // assume the frontend won't do, this export will have the same exec
        // mask as the last "real" export, and therefore the valid mask will be
        // overwritten with the same value and will still be correct. Also,
        // even though this forces an extra unnecessary export wait, we assume
        // that this happens rarely enough in practice that we don't have to
        // worry about performance.
        if (F.getCallingConv() == CallingConv::AMDGPU_PS &&
            RetTy->isVoidTy()) {
          InsertExport = true;
        }

        ReturnInst::Create(F.getContext(), RetVal, DummyReturnBB);
        ReturningBlocks.push_back(DummyReturnBB);
      }

      if (BI->isUnconditional()) {
        BasicBlock *LoopHeaderBB = BI->getSuccessor(0);
        BI->eraseFromParent(); // Delete the unconditional branch.
        // Add a new conditional branch with a dummy edge to the return block.
        BranchInst::Create(LoopHeaderBB, DummyReturnBB, BoolTrue, BB);
      } else { // Conditional branch.
        // Create a new transition block to hold the conditional branch.
        BasicBlock *TransitionBB = BB->splitBasicBlock(BI, "TransitionBlock");

        // Create a branch that will always branch to the transition block and
        // references DummyReturnBB.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(TransitionBB, DummyReturnBB, BoolTrue, BB);
      }
      Changed = true;
    }
  }

  if (!UnreachableBlocks.empty()) {
    BasicBlock *UnreachableBlock = nullptr;

    if (UnreachableBlocks.size() == 1) {
      UnreachableBlock = UnreachableBlocks.front();
    } else {
      UnreachableBlock = BasicBlock::Create(F.getContext(),
                                            "UnifiedUnreachableBlock", &F);
      new UnreachableInst(F.getContext(), UnreachableBlock);

      for (BasicBlock *BB : UnreachableBlocks) {
        // Remove and delete the unreachable inst.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(UnreachableBlock, BB);
      }
      Changed = true;
    }

    if (!ReturningBlocks.empty()) {
      // Don't create a new unreachable inst if we have a return. The
      // structurizer/annotator can't handle the multiple exits.

      Type *RetTy = F.getReturnType();
      Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);
      // Remove and delete the unreachable inst.
      UnreachableBlock->getTerminator()->eraseFromParent();

      Function *UnreachableIntrin =
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::amdgcn_unreachable);

      // Insert a call to an intrinsic tracking that this is an unreachable
      // point, in case we want to kill the active lanes or something later.
      CallInst::Create(UnreachableIntrin, {}, "", UnreachableBlock);

      // Don't create a scalar trap. We would only want to trap if this code
      // was really reached, but a scalar trap would happen even if no lanes
      // actually reached here.
      ReturnInst::Create(F.getContext(), RetVal, UnreachableBlock);
      ReturningBlocks.push_back(UnreachableBlock);
      Changed = true;
    }
  }

  // Now handle return blocks.
  if (ReturningBlocks.empty())
    return Changed; // No blocks return

  if (ReturningBlocks.size() == 1 && !InsertExport)
    return Changed; // Already has a single return block

  const TargetTransformInfo &TTI
    = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  // Unify returning blocks. If we are going to insert the export, it is also
  // necessary to include blocks that are uniformly reached, because in
  // addition to inserting the export the "done" bits on existing exports will
  // be cleared and we do not want to end up with the normal export in a
  // non-unified, uniformly reached block with the "done" bit cleared.
  auto BlocksToUnify = std::move(ReturningBlocks);
  if (InsertExport) {
    BlocksToUnify.insert(BlocksToUnify.end(), UniformlyReachedRetBlocks.begin(),
                         UniformlyReachedRetBlocks.end());
  }

  unifyReturnBlockSet(F, BlocksToUnify, InsertExport, TTI,
                      "UnifiedReturnBlock");
  return true;
}