
[DA][TTI][AMDGPU] Add option to select GPUDA with TTI

Summary: Enable the new divergence analysis by default for AMDGPU.

Reviewers: rampitec, nhaehnle, arsenm

Subscribers: kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73049
Austin Kerbow 2020-01-20 07:25:20 -08:00
parent 30dce9dfef
commit 4a9e67110b
8 changed files with 31 additions and 6 deletions
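The change wires a new TargetTransformInfo (TTI) hook into the legacy divergence pass's choice of implementation. Condensed, and purely as a sketch (isReducible below abbreviates the reducible-CFG check the pass performs; it is not a real function name):

// Sketch of the selection logic after this commit (illustrative, not a
// verbatim excerpt from the hunks below):
//   UseGPUDA                        -- pre-existing -use-gpu-divergence-analysis flag
//   TTI.useGPUDivergenceAnalysis()  -- new target hook added here
bool runNewGPUDA = (UseGPUDA || TTI.useGPUDivergenceAnalysis())
                   && isReducible(F); // GPUDivergenceAnalysis needs a reducible CFG

For AMDGPU the hook returns true by default, so the new analysis becomes the default there while every other target keeps the legacy behavior.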

include/llvm/Analysis/LegacyDivergenceAnalysis.h

@@ -54,7 +54,8 @@ public:
 private:
   // Whether analysis should be performed by GPUDivergenceAnalysis.
-  bool shouldUseGPUDivergenceAnalysis(const Function &F) const;
+  bool shouldUseGPUDivergenceAnalysis(const Function &F,
+                                      const TargetTransformInfo &TTI) const;

   // (optional) handle to new DivergenceAnalysis
   std::unique_ptr<GPUDivergenceAnalysis> gpuDA;

include/llvm/Analysis/TargetTransformInfo.h

@@ -342,6 +342,10 @@ public:
   /// branches.
   bool hasBranchDivergence() const;

+  /// Return true if the target prefers to use GPU divergence analysis to
+  /// replace the legacy version.
+  bool useGPUDivergenceAnalysis() const;
+
   /// Returns whether V is a source of divergence.
   ///
   /// This function provides the target-dependent information for
@@ -1198,6 +1202,7 @@ public:
   virtual int
   getUserCost(const User *U, ArrayRef<const Value *> Operands) = 0;
   virtual bool hasBranchDivergence() = 0;
+  virtual bool useGPUDivergenceAnalysis() = 0;
   virtual bool isSourceOfDivergence(const Value *V) = 0;
   virtual bool isAlwaysUniform(const Value *V) = 0;
   virtual unsigned getFlatAddressSpace() = 0;
@@ -1452,6 +1457,7 @@ public:
     return Impl.getUserCost(U, Operands);
   }
   bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
+  bool useGPUDivergenceAnalysis() override { return Impl.useGPUDivergenceAnalysis(); }
   bool isSourceOfDivergence(const Value *V) override {
     return Impl.isSourceOfDivergence(V);
   }
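The three hunks above add the hook at each layer of TTI's usual dispatch pattern: the public wrapper, the pure-virtual Concept interface, and the Model template that forwards to a concrete implementation. Roughly, the call chain is:

// Illustrative call chain for the new hook:
//   TargetTransformInfo::useGPUDivergenceAnalysis()   // public API
//     -> Concept::useGPUDivergenceAnalysis()          // pure virtual
//       -> Model<T>::useGPUDivergenceAnalysis()       // override shown above
//         -> Impl.useGPUDivergenceAnalysis()          // e.g. GCNTTIImpl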

include/llvm/Analysis/TargetTransformInfoImpl.h

@@ -152,6 +152,8 @@ public:
   bool hasBranchDivergence() { return false; }

+  bool useGPUDivergenceAnalysis() { return false; }
+
   bool isSourceOfDivergence(const Value *V) { return false; }

   bool isAlwaysUniform(const Value *V) { return false; }

include/llvm/CodeGen/BasicTTIImpl.h

@@ -207,6 +207,8 @@ public:
   bool hasBranchDivergence() { return false; }

+  bool useGPUDivergenceAnalysis() { return false; }
+
   bool isSourceOfDivergence(const Value *V) { return false; }

   bool isAlwaysUniform(const Value *V) { return false; }
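Both default implementations return false, so targets that do not override the hook keep the legacy analysis. A minimal, hypothetical opt-in might look like this (MyGPUTTIImpl is invented for illustration; real targets derive from BasicTTIImplBase with considerably more boilerplate):

// Hypothetical target TTI that opts in to the new divergence analysis.
class MyGPUTTIImpl : public BasicTTIImplBase<MyGPUTTIImpl> {
public:
  bool hasBranchDivergence() { return true; }
  // Returning true makes LegacyDivergenceAnalysis construct a
  // GPUDivergenceAnalysis for this target's reducible functions.
  bool useGPUDivergenceAnalysis() const { return true; }
};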

lib/Analysis/LegacyDivergenceAnalysis.cpp

@@ -301,14 +301,13 @@ FunctionPass *llvm::createLegacyDivergenceAnalysisPass() {
 void LegacyDivergenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<DominatorTreeWrapperPass>();
   AU.addRequired<PostDominatorTreeWrapperPass>();
-  if (UseGPUDA)
-    AU.addRequired<LoopInfoWrapperPass>();
+  AU.addRequired<LoopInfoWrapperPass>();
   AU.setPreservesAll();
 }

 bool LegacyDivergenceAnalysis::shouldUseGPUDivergenceAnalysis(
-    const Function &F) const {
-  if (!UseGPUDA)
+    const Function &F, const TargetTransformInfo &TTI) const {
+  if (!(UseGPUDA || TTI.useGPUDivergenceAnalysis()))
     return false;

   // GPUDivergenceAnalysis requires a reducible CFG.
@@ -337,7 +336,7 @@ bool LegacyDivergenceAnalysis::runOnFunction(Function &F) {
   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
   auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();

-  if (shouldUseGPUDivergenceAnalysis(F)) {
+  if (shouldUseGPUDivergenceAnalysis(F, TTI)) {
     // run the new GPU divergence analysis
     auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
     gpuDA = std::make_unique<GPUDivergenceAnalysis>(F, DT, PDT, LI, TTI);
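Two details are worth noting in these hunks. LoopInfo is now required unconditionally, because whether the GPU analysis runs depends on TTI and is only known inside runOnFunction, too late for getAnalysisUsage to decide. And the gate becomes an OR of the generic flag and the target hook; assuming a reducible CFG, the outcome is:

// Which analysis runs after this change (reducible CFG assumed):
//   UseGPUDA   TTI.useGPUDivergenceAnalysis()   analysis used
//   false      false                            legacy divergence analysis
//   true       (any)                            GPUDivergenceAnalysis
//   (any)      true                             GPUDivergenceAnalysis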

lib/Analysis/TargetTransformInfo.cpp

@@ -212,6 +212,10 @@ bool TargetTransformInfo::hasBranchDivergence() const {
   return TTIImpl->hasBranchDivergence();
 }

+bool TargetTransformInfo::useGPUDivergenceAnalysis() const {
+  return TTIImpl->useGPUDivergenceAnalysis();
+}
+
 bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
   return TTIImpl->isSourceOfDivergence(V);
 }

lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp

@@ -69,6 +69,11 @@ static cl::opt<unsigned> UnrollThresholdIf(
   cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
   cl::init(150), cl::Hidden);

+static cl::opt<bool> UseLegacyDA(
+  "amdgpu-use-legacy-divergence-analysis",
+  cl::desc("Enable legacy divergence analysis for AMDGPU"),
+  cl::init(false), cl::Hidden);
+
 static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                               unsigned Depth = 0) {
   const Instruction *I = dyn_cast<Instruction>(Cond);
@@ -601,6 +606,11 @@ static bool isArgPassedInSGPR(const Argument *A) {
   }
 }

+/// \returns true if the new GPU divergence analysis is enabled.
+bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
+  return !UseLegacyDA;
+}
+
 /// \returns true if the result of the value could potentially be
 /// different across workitems in a wavefront.
 bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
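Since UseLegacyDA defaults to false, GCNTTIImpl::useGPUDivergenceAnalysis() returns true by default; that single line is what flips AMDGPU to the new analysis, and the flag is the opt-out:

// Default:  UseLegacyDA == false  =>  hook returns true, so AMDGPU uses
//           GPUDivergenceAnalysis with no flags at all.
// Opt-out:  -amdgpu-use-legacy-divergence-analysis makes the hook return
//           false; the legacy analysis then runs unless the generic
//           -use-gpu-divergence-analysis flag independently selects the
//           new one.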

lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h

@@ -136,6 +136,7 @@ public:
       HasFP32Denormals(ST->hasFP32Denormals(F)) { }

   bool hasBranchDivergence() { return true; }
+  bool useGPUDivergenceAnalysis() const;

   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP);