llvm-mirror/include/llvm/Analysis/LegacyDivergenceAnalysis.h
Austin Kerbow 0fa8b03aac Resubmit: [DA][TTI][AMDGPU] Add option to select GPUDA with TTI
Summary:
Enable the new divergence analysis by default for AMDGPU.

Resubmit with test updates since GPUDA was causing failures on Windows.

Reviewers: rampitec, nhaehnle, arsenm, thakis

Subscribers: kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73315
2020-01-24 10:39:40 -08:00


//===- llvm/Analysis/LegacyDivergenceAnalysis.h - KernelDivergence Analysis -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The kernel divergence analysis is an LLVM pass which can be used to find out
// if a branch instruction in a GPU program (kernel) is divergent or not. It
// can help branch optimizations such as jump threading and loop unswitching
// to make better decisions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LEGACY_DIVERGENCE_ANALYSIS_H
#define LLVM_ANALYSIS_LEGACY_DIVERGENCE_ANALYSIS_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Pass.h"

namespace llvm {
class Value;
class Function;
class Use;
class TargetTransformInfo;
class GPUDivergenceAnalysis;
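
/// A minimal usage sketch: a client FunctionPass under the legacy pass manager
/// can require this analysis and ask whether a branch condition is divergent.
/// The client pass name MyGPUOptPass is hypothetical; addRequired,
/// getAnalysis, dyn_cast, isConditional and getCondition are existing
/// legacy-PM/IR APIs.
///
/// \code
///   void MyGPUOptPass::getAnalysisUsage(AnalysisUsage &AU) const {
///     AU.addRequired<LegacyDivergenceAnalysis>();
///     AU.setPreservesAll();
///   }
///
///   bool MyGPUOptPass::runOnFunction(Function &F) {
///     const auto &DA = getAnalysis<LegacyDivergenceAnalysis>();
///     for (BasicBlock &BB : F)
///       if (auto *BI = dyn_cast<BranchInst>(BB.getTerminator()))
///         if (BI->isConditional() && DA.isDivergent(BI->getCondition()))
///           ; // The branch may diverge across lanes; avoid speculating here.
///     return false;
///   }
/// \endcode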
class LegacyDivergenceAnalysis : public FunctionPass {
public:
  static char ID;

  LegacyDivergenceAnalysis();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnFunction(Function &F) override;

  // Print all divergent branches in the function.
  void print(raw_ostream &OS, const Module *) const override;

  // Returns true if V is divergent at its definition.
  bool isDivergent(const Value *V) const;

  // Returns true if U is divergent. Uses of a uniform value can be divergent.
  bool isDivergentUse(const Use *U) const;

  // Returns true if V is uniform/non-divergent.
  bool isUniform(const Value *V) const { return !isDivergent(V); }

  // Returns true if U is uniform/non-divergent. Uses of a uniform value can be
  // divergent.
  bool isUniformUse(const Use *U) const { return !isDivergentUse(U); }

  // Keep the analysis results up to date by removing an erased value.
  void removeValue(const Value *V) { DivergentValues.erase(V); }

private:
  // Whether analysis should be performed by GPUDivergenceAnalysis.
  bool shouldUseGPUDivergenceAnalysis(const Function &F,
                                      const TargetTransformInfo &TTI) const;

  // (optional) handle to new DivergenceAnalysis
  std::unique_ptr<GPUDivergenceAnalysis> gpuDA;

  // Stores all divergent values.
  DenseSet<const Value *> DivergentValues;

  // Stores divergent uses of possibly uniform values.
  DenseSet<const Use *> DivergentUses;
};
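
// Illustrative sketch of how these members fit together (an assumption about
// the dispatch in LegacyDivergenceAnalysis.cpp, not a definition made by this
// header): when a target opts in via TargetTransformInfo, queries are
// forwarded to the GPUDivergenceAnalysis held in gpuDA; otherwise the legacy
// DivergentValues set is consulted.
//
//   bool LegacyDivergenceAnalysis::isDivergent(const Value *V) const {
//     if (gpuDA)                         // new analysis selected via TTI
//       return gpuDA->isDivergent(*V);   // assumed GPUDivergenceAnalysis API
//     return DivergentValues.count(V);   // legacy per-value result set
//   }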
} // End llvm namespace

#endif // LLVM_ANALYSIS_LEGACY_DIVERGENCE_ANALYSIS_H