//===- GVN.h - Eliminate redundant values and loads -------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the interface for LLVM's Global Value Numbering pass,
/// which eliminates fully redundant instructions. It also does somewhat ad-hoc
/// PRE and dead load elimination.
///
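/// As a minimal, non-normative usage sketch, a client of the new pass manager
/// would typically schedule this pass as follows (F and FAM are assumed to
/// exist in the caller):
/// \code
///   FunctionPassManager FPM;
///   FPM.addPass(GVN());
///   PreservedAnalyses PA = FPM.run(F, FAM);
/// \endcode
///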
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_GVN_H
#define LLVM_TRANSFORMS_SCALAR_GVN_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class IntrinsicInst;
class OptimizationRemarkEmitter;

/// A private "module" namespace for types and utilities used by GVN. These
/// are implementation details and should not be used by clients.
namespace gvn LLVM_LIBRARY_VISIBILITY {
struct AvailableValue;
struct AvailableValueInBlock;
class GVNLegacyPass;
}

/// The core GVN pass object.
///
/// FIXME: We should have a good summary of the GVN algorithm implemented by
/// this particular pass here.
class GVN : public PassInfoMixin<GVN> {
public:
  /// \brief Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  /// This removes the specified instruction from
  /// our various maps and marks it for deletion.
  void markInstructionForDeletion(Instruction *I) {
    VN.erase(I);
    InstrsToErase.push_back(I);
  }

  DominatorTree &getDominatorTree() const { return *DT; }
  AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
  MemoryDependenceResults &getMemDep() const { return *MD; }

  struct Expression;

  /// This class holds the mapping between values and value numbers. It is used
  /// as an efficient mechanism to determine the expression-wise equivalence of
  /// two values.
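  ///
  /// A minimal usage sketch (hypothetical caller; A and B are assumed to be
  /// Value pointers already in scope):
  /// \code
  ///   GVN::ValueTable VT;
  ///   uint32_t NA = VT.lookupOrAdd(A);
  ///   uint32_t NB = VT.lookupOrAdd(B);
  ///   if (NA == NB) {
  ///     // A and B are considered to compute the same value.
  ///   }
  /// \endcode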
  class ValueTable {
    DenseMap<Value *, uint32_t> valueNumbering;
    DenseMap<Expression, uint32_t> expressionNumbering;

    // Expressions is the vector of Expression. ExprIdx maps a value number to
    // the index of its Expression in Expressions. We use a vector instead of
    // a DenseMap because filling it is faster, which gives slightly better
    // compile time.
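    // A minimal illustrative sketch (hypothetical, not a member function) of
    // how the two containers relate: for a value number Num that refers to an
    // expression, the expression is recovered as
    //   Expression &E = Expressions[ExprIdx[Num]];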
    uint32_t nextExprNumber;
    std::vector<Expression> Expressions;
    std::vector<uint32_t> ExprIdx;
    // Value number to PHINode mapping. Used for phi-translate in scalarpre.
    DenseMap<uint32_t, PHINode *> NumberingPhi;
    // Cache for phi-translate in scalarpre.
    typedef DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>
        PhiTranslateMap;
    PhiTranslateMap PhiTranslateTable;

    AliasAnalysis *AA;
    MemoryDependenceResults *MD;
    DominatorTree *DT;

    uint32_t nextValueNumber;

    Expression createExpr(Instruction *I);
    Expression createCmpExpr(unsigned Opcode, CmpInst::Predicate Predicate,
                             Value *LHS, Value *RHS);
    Expression createExtractvalueExpr(ExtractValueInst *EI);
    uint32_t lookupOrAddCall(CallInst *C);
    uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
                              uint32_t Num, GVN &Gvn);
    std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
    bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);

  public:
    ValueTable();
    ValueTable(const ValueTable &Arg);
    ValueTable(ValueTable &&Arg);
    ~ValueTable();

    uint32_t lookupOrAdd(Value *V);
    uint32_t lookup(Value *V, bool Verify = true) const;
    uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
                            Value *LHS, Value *RHS);
    uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
                          uint32_t Num, GVN &Gvn);
    void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock);
    bool exists(Value *V) const;
    void add(Value *V, uint32_t num);
    void clear();
    void erase(Value *v);
    void setAliasAnalysis(AliasAnalysis *A) { AA = A; }
    AliasAnalysis *getAliasAnalysis() const { return AA; }
    void setMemDep(MemoryDependenceResults *M) { MD = M; }
    void setDomTree(DominatorTree *D) { DT = D; }
    uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
    void verifyRemoved(const Value *) const;
  };

private:
  friend class gvn::GVNLegacyPass;
  friend struct DenseMapInfo<Expression>;

  MemoryDependenceResults *MD;
  DominatorTree *DT;
  const TargetLibraryInfo *TLI;
  AssumptionCache *AC;
  SetVector<BasicBlock *> DeadBlocks;
  OptimizationRemarkEmitter *ORE;

  ValueTable VN;

  /// A mapping from value numbers to lists of Value*'s that
  /// have that value number. Use findLeader to query it.
  struct LeaderTableEntry {
    Value *Val;
    const BasicBlock *BB;
    LeaderTableEntry *Next;
  };
  DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
  BumpPtrAllocator TableAllocator;

  // Block-local map of equivalent values to their leader; it does not
  // propagate to any successors. Entries added mid-block are applied
  // to the remaining instructions in the block.
  SmallMapVector<llvm::Value *, llvm::Constant *, 4> ReplaceWithConstMap;
  SmallVector<Instruction *, 8> InstrsToErase;
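
  // A minimal sketch of how ReplaceWithConstMap (above) is intended to be
  // consumed (hypothetical loop; I is assumed to be an instruction later in
  // the same block):
  //   for (Use &Op : I->operands())
  //     if (Constant *C = ReplaceWithConstMap.lookup(Op))
  //       Op.set(C);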

  // Map a block to its reverse-postorder traversal number. It is used to
  // find back edges easily.
  DenseMap<const BasicBlock *, uint32_t> BlockRPONumber;

  typedef SmallVector<NonLocalDepResult, 64> LoadDepVect;
  typedef SmallVector<gvn::AvailableValueInBlock, 64> AvailValInBlkVect;
  typedef SmallVector<BasicBlock *, 64> UnavailBlkVect;

  bool runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
               const TargetLibraryInfo &RunTLI, AAResults &RunAA,
               MemoryDependenceResults *RunMD, LoopInfo *LI,
               OptimizationRemarkEmitter *ORE);

  /// Push a new Value onto the LeaderTable list for its value number.
  void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
    LeaderTableEntry &Curr = LeaderTable[N];
    if (!Curr.Val) {
      Curr.Val = V;
      Curr.BB = BB;
      return;
    }

    LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
    Node->Val = V;
    Node->BB = BB;
    Node->Next = Curr.Next;
    Curr.Next = Node;
  }
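
  // A minimal sketch of how the leader table is typically consulted
  // (hypothetical caller, not the pass's actual control flow; I is assumed to
  // be the instruction currently being processed):
  //   uint32_t Num = VN.lookupOrAdd(I);
  //   if (Value *Leader = findLeader(I->getParent(), Num)) {
  //     I->replaceAllUsesWith(Leader);            // I is fully redundant.
  //     markInstructionForDeletion(I);
  //   } else {
  //     addToLeaderTable(Num, I, I->getParent()); // First occurrence wins.
  //   }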

  /// Scan the list of values corresponding to a given
  /// value number, and remove the given instruction if encountered.
  void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
    LeaderTableEntry *Prev = nullptr;
    LeaderTableEntry *Curr = &LeaderTable[N];

    while (Curr && (Curr->Val != I || Curr->BB != BB)) {
      Prev = Curr;
      Curr = Curr->Next;
    }

    if (!Curr)
      return;

    if (Prev) {
      Prev->Next = Curr->Next;
    } else {
      if (!Curr->Next) {
        Curr->Val = nullptr;
        Curr->BB = nullptr;
      } else {
        LeaderTableEntry *Next = Curr->Next;
        Curr->Val = Next->Val;
        Curr->BB = Next->BB;
        Curr->Next = Next->Next;
      }
    }
  }

  // List of critical edges to be split between iterations.
  SmallVector<std::pair<TerminatorInst *, unsigned>, 4> toSplit;

  // Helper functions for redundant load elimination.
  bool processLoad(LoadInst *L);
  bool processNonLocalLoad(LoadInst *L);
  bool processAssumeIntrinsic(IntrinsicInst *II);

  /// Given a local dependency (Def or Clobber) determine if a value is
  /// available for the load. Returns true if a value is known to be
  /// available and populates Res. Returns false otherwise.
  bool AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
                               Value *Address, gvn::AvailableValue &Res);

  /// Given a list of non-local dependencies, determine if a value is
  /// available for the load in each specified block. If it is, add it to
  /// ValuesPerBlock. If not, add it to UnavailableBlocks.
  void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                               AvailValInBlkVect &ValuesPerBlock,
                               UnavailBlkVect &UnavailableBlocks);
  bool PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                      UnavailBlkVect &UnavailableBlocks);

  // Other helper routines
  bool processInstruction(Instruction *I);
  bool processBlock(BasicBlock *BB);
  void dump(DenseMap<uint32_t, Value *> &d) const;
  bool iterateOnFunction(Function &F);
  bool performPRE(Function &F);
  bool performScalarPRE(Instruction *I);
  bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
                                 BasicBlock *Curr, unsigned int ValNo);
  Value *findLeader(const BasicBlock *BB, uint32_t num);
  void cleanupGlobalSets();
  void verifyRemoved(const Instruction *I) const;
  bool splitCriticalEdges();
  BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
  bool replaceOperandsWithConsts(Instruction *I) const;
  bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
                         bool DominatesByEdge);
  bool processFoldableCondBr(BranchInst *BI);
  void addDeadBlock(BasicBlock *BB);
  void assignValNumForDeadCode();
  void assignBlockRPONumber(Function &F);
};

/// Create a legacy GVN pass. This also allows parameterizing whether or not
/// loads are eliminated by the pass.
FunctionPass *createGVNPass(bool NoLoads = false);
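
// A minimal usage sketch under the legacy pass manager (hypothetical caller;
// M and F are assumed to exist). Passing /*NoLoads=*/true requests that load
// elimination be skipped:
//   legacy::FunctionPassManager FPM(&M);
//   FPM.add(createGVNPass(/*NoLoads=*/true));
//   FPM.run(F);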

/// \brief A simple and fast domtree-based GVN pass to hoist common expressions
/// from sibling branches.
struct GVNHoistPass : PassInfoMixin<GVNHoistPass> {
  /// \brief Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// \brief Uses an "inverted" value numbering to decide the similarity of
/// expressions and sinks similar expressions into successors.
struct GVNSinkPass : PassInfoMixin<GVNSinkPass> {
  /// \brief Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

}

#endif