
DataLayout is mandatory, update the API to reflect it with references.

Summary:
Now that the DataLayout is a mandatory part of the module, let's start
cleaning the codebase. This patch is a first attempt at doing that.

This patch is not exactly NFC: for instance, some places were passing
a nullptr instead of the DataLayout, possibly just because the
DataLayout argument had a default value in many functions of the API.
Even though it is not purely NFC, there is no change in the validation.

I turned as many DataLayout pointers as possible into references; this
helped in figuring out all the places where a nullptr could come up.

I initially had a local version of this patch broken into over 30
independent commits, but some later commits were cleaning the API and
touching parts of the code modified in the previous commits, so it
seemed cleaner to land it without the intermediate states.
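
For illustration only, a minimal sketch of what the change means for a
caller, using SimplifyInstruction from the diff below. The call itself is
hypothetical caller code, not a line from this patch; the variables I, DL,
TLI, DT and AC are assumed to already be in scope:

  // Before: the DataLayout argument was an optional pointer defaulting to nullptr.
  //   Value *V = SimplifyInstruction(I);        // DataLayout silently omitted
  //   Value *V = SimplifyInstruction(I, &DL);   // or passed explicitly by pointer
  // After: the DataLayout is a mandatory reference and can no longer be omitted;
  // the remaining analyses (TLI, DT, AC) keep their nullptr defaults.
  Value *V = SimplifyInstruction(I, DL, TLI, DT, AC);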

Test Plan:

Reviewers: echristo

Subscribers: llvm-commits

From: Mehdi Amini <mehdi.amini@apple.com>
llvm-svn: 231740
Mehdi Amini 2015-03-10 02:37:25 +00:00
commit f88efe5f8a (parent 1c5044f9e7)
138 changed files with 2479 additions and 2877 deletions
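
Several of the classes touched below (for example PHITransAddr, TargetFolder,
and ObjectSizeOffsetVisitor) follow the same pattern: a possibly-null
"const DataLayout *DL" member becomes a "const DataLayout &DL" reference, so
the "no DataLayout available" branches disappear. A hedged sketch of that
pattern, using a hypothetical class that is not part of this patch:

  #include "llvm/IR/DataLayout.h"
  #include <cstdint>

  // Hypothetical example, not from this commit: the class stores a reference
  // instead of a pointer, so callers must provide a DataLayout and the member
  // never needs a null check.
  class ExampleSizeQuery {
    const llvm::DataLayout &DL;   // was: const DataLayout *DL;
  public:
    ExampleSizeQuery(const llvm::DataLayout &DL) : DL(DL) {}
    uint64_t allocSize(llvm::Type *Ty) const {
      return DL.getTypeAllocSize(Ty);   // no 'if (DL)' guard needed any more
    }
  };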

View File

@ -84,11 +84,6 @@ public:
/// know the sizes of the potential memory references.
static uint64_t const UnknownSize = ~UINT64_C(0);
/// getDataLayout - Return a pointer to the current DataLayout object, or
/// null if no DataLayout object is available.
///
const DataLayout *getDataLayout() const { return DL; }
/// getTargetLibraryInfo - Return a pointer to the current TargetLibraryInfo
/// object, or null if no TargetLibraryInfo object is available.
///

View File

@ -36,16 +36,15 @@ namespace llvm {
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *ConstantFoldInstruction(Instruction *I,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr);
Constant *ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// result is returned, if not, null is returned.
Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI =nullptr);
Constant *
ConstantFoldConstantExpression(const ConstantExpr *CE, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified operands. If successful, the constant result is returned, if not,
@ -53,19 +52,19 @@ Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
/// fold instructions like loads and stores, which have no constant expression
/// form.
///
Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr);
Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
/// returns a constant expression of the specified operands.
///
Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *LHS, Constant *RHS,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI=nullptr);
Constant *
ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS,
Constant *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
/// instruction with the specified operands and indices. The constant result is
@ -76,8 +75,7 @@ Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
Constant *ConstantFoldLoadFromConstPtr(Constant *C,
const DataLayout *TD = nullptr);
Constant *ConstantFoldLoadFromConstPtr(Constant *C, const DataLayout &DL);
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the

View File

@ -122,7 +122,6 @@ class IVUsers : public LoopPass {
LoopInfo *LI;
DominatorTree *DT;
ScalarEvolution *SE;
const DataLayout *DL;
SmallPtrSet<Instruction*,16> Processed;
/// IVUses - A list of all tracked IV uses of induction variable expressions

View File

@ -49,7 +49,7 @@ namespace llvm {
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -58,7 +58,7 @@ namespace llvm {
/// SimplifySubInst - Given operands for a Sub, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -67,7 +67,7 @@ namespace llvm {
/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -76,7 +76,7 @@ namespace llvm {
/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -85,7 +85,7 @@ namespace llvm {
/// Given operands for an FMul, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -93,7 +93,7 @@ namespace llvm {
/// SimplifyMulInst - Given operands for a Mul, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -101,8 +101,7 @@ namespace llvm {
/// SimplifySDivInst - Given operands for an SDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySDivInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -110,8 +109,7 @@ namespace llvm {
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyUDivInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -120,7 +118,7 @@ namespace llvm {
/// SimplifyFDivInst - Given operands for an FDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -128,8 +126,7 @@ namespace llvm {
/// SimplifySRemInst - Given operands for an SRem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySRemInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -137,8 +134,7 @@ namespace llvm {
/// SimplifyURemInst - Given operands for a URem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyURemInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -147,7 +143,7 @@ namespace llvm {
/// SimplifyFRemInst - Given operands for an FRem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -156,7 +152,7 @@ namespace llvm {
/// SimplifyShlInst - Given operands for a Shl, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -165,7 +161,7 @@ namespace llvm {
/// SimplifyLShrInst - Given operands for a LShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -174,7 +170,7 @@ namespace llvm {
/// SimplifyAShrInst - Given operands for a AShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -182,7 +178,7 @@ namespace llvm {
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -190,7 +186,7 @@ namespace llvm {
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -198,7 +194,7 @@ namespace llvm {
/// SimplifyXorInst - Given operands for a Xor, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -207,7 +203,7 @@ namespace llvm {
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -216,7 +212,7 @@ namespace llvm {
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -225,7 +221,7 @@ namespace llvm {
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -233,7 +229,7 @@ namespace llvm {
/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD = nullptr,
Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -242,8 +238,7 @@ namespace llvm {
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
/// can fold the result. If not, this returns null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const DataLayout *TD = nullptr,
ArrayRef<unsigned> Idxs, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -251,7 +246,7 @@ namespace llvm {
/// SimplifyTruncInst - Given operands for an TruncInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD = nullptr,
Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -263,7 +258,7 @@ namespace llvm {
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -272,7 +267,7 @@ namespace llvm {
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -282,8 +277,7 @@ namespace llvm {
/// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
/// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp.
Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const FastMathFlags &FMF,
const DataLayout *TD = nullptr,
const FastMathFlags &FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -294,7 +288,7 @@ namespace llvm {
///
/// If this call could not be simplified returns null.
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const DataLayout *TD = nullptr,
User::op_iterator ArgEnd, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -304,8 +298,7 @@ namespace llvm {
/// result.
///
/// If this call could not be simplified returns null.
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args,
const DataLayout *TD = nullptr,
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -313,7 +306,7 @@ namespace llvm {
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
Value *SimplifyInstruction(Instruction *I, const DataLayout *TD = nullptr,
Value *SimplifyInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);
@ -327,7 +320,6 @@ namespace llvm {
///
/// The function returns true if any simplifications were performed.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);
@ -339,7 +331,6 @@ namespace llvm {
/// of the users impacted. It returns true if any simplifications were
/// performed.
bool recursivelySimplifyInstruction(Instruction *I,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);

View File

@ -29,7 +29,6 @@ namespace llvm {
/// This pass computes, caches, and vends lazy value constraint information.
class LazyValueInfo : public FunctionPass {
AssumptionCache *AC;
const DataLayout *DL;
class TargetLibraryInfo *TLI;
DominatorTree *DT;
void *PImpl;

View File

@ -27,8 +27,7 @@ class MDNode;
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
unsigned Align,
const DataLayout *TD = nullptr);
unsigned Align);
/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
/// the instruction before ScanFrom) checking to see if we have the value at

View File

@ -150,7 +150,7 @@ public:
SmallVector<unsigned, 2> AliasSetId;
};
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const DataLayout *DL,
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const DataLayout &DL,
const TargetLibraryInfo *TLI, AliasAnalysis *AA,
DominatorTree *DT, const ValueToValueMap &Strides);
@ -209,7 +209,7 @@ private:
RuntimePointerCheck PtrRtCheck;
Loop *TheLoop;
ScalarEvolution *SE;
const DataLayout *DL;
const DataLayout &DL;
const TargetLibraryInfo *TLI;
AliasAnalysis *AA;
DominatorTree *DT;
@ -280,7 +280,6 @@ private:
// The used analysis passes.
ScalarEvolution *SE;
const DataLayout *DL;
const TargetLibraryInfo *TLI;
AliasAnalysis *AA;
DominatorTree *DT;

View File

@ -101,11 +101,10 @@ Type *getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI);
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
Value *getMallocArraySize(CallInst *CI, const DataLayout *DL,
Value *getMallocArraySize(CallInst *CI, const DataLayout &DL,
const TargetLibraryInfo *TLI,
bool LookThroughSExt = false);
//===----------------------------------------------------------------------===//
// calloc Call Utility Functions.
//
@ -141,11 +140,9 @@ static inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) {
/// underlying object pointed to by Ptr.
/// If RoundToAlign is true, then Size is rounded up to the aligment of allocas,
/// byval arguments, and global variables.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *DL,
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
const TargetLibraryInfo *TLI, bool RoundToAlign = false);
typedef std::pair<APInt, APInt> SizeOffsetType;
/// \brief Evaluate the size and offset of an object pointed to by a Value*
@ -153,7 +150,7 @@ typedef std::pair<APInt, APInt> SizeOffsetType;
class ObjectSizeOffsetVisitor
: public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
const DataLayout *DL;
const DataLayout &DL;
const TargetLibraryInfo *TLI;
bool RoundToAlign;
unsigned IntTyBits;
@ -167,7 +164,7 @@ class ObjectSizeOffsetVisitor
}
public:
ObjectSizeOffsetVisitor(const DataLayout *DL, const TargetLibraryInfo *TLI,
ObjectSizeOffsetVisitor(const DataLayout &DL, const TargetLibraryInfo *TLI,
LLVMContext &Context, bool RoundToAlign = false);
SizeOffsetType compute(Value *V);
@ -216,7 +213,7 @@ class ObjectSizeOffsetEvaluator
typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
typedef SmallPtrSet<const Value*, 8> PtrSetTy;
const DataLayout *DL;
const DataLayout &DL;
const TargetLibraryInfo *TLI;
LLVMContext &Context;
BuilderTy Builder;
@ -232,7 +229,7 @@ class ObjectSizeOffsetEvaluator
SizeOffsetEvalType compute_(Value *V);
public:
ObjectSizeOffsetEvaluator(const DataLayout *DL, const TargetLibraryInfo *TLI,
ObjectSizeOffsetEvaluator(const DataLayout &DL, const TargetLibraryInfo *TLI,
LLVMContext &Context, bool RoundToAlign = false);
SizeOffsetEvalType compute(Value *V);

View File

@ -29,7 +29,6 @@ namespace llvm {
class CallSite;
class AliasAnalysis;
class AssumptionCache;
class DataLayout;
class MemoryDependenceAnalysis;
class PredIteratorCache;
class DominatorTree;
@ -324,7 +323,6 @@ namespace llvm {
/// Current AA implementation, just a cache.
AliasAnalysis *AA;
const DataLayout *DL;
DominatorTree *DT;
AssumptionCache *AC;
std::unique_ptr<PredIteratorCache> PredCache;
@ -421,8 +419,7 @@ namespace llvm {
static unsigned getLoadLoadClobberFullWidthSize(const Value *MemLocBase,
int64_t MemLocOffs,
unsigned MemLocSize,
const LoadInst *LI,
const DataLayout &DL);
const LoadInst *LI);
private:
MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,

View File

@ -36,9 +36,9 @@ namespace llvm {
class PHITransAddr {
/// Addr - The actual address we're analyzing.
Value *Addr;
/// The DataLayout we are playing with if known, otherwise null.
const DataLayout *DL;
/// The DataLayout we are playing with.
const DataLayout &DL;
/// TLI - The target library info if known, otherwise null.
const TargetLibraryInfo *TLI;
@ -49,7 +49,7 @@ class PHITransAddr {
/// InstInputs - The inputs for our symbolic address.
SmallVector<Instruction*, 4> InstInputs;
public:
PHITransAddr(Value *addr, const DataLayout *DL, AssumptionCache *AC)
PHITransAddr(Value *addr, const DataLayout &DL, AssumptionCache *AC)
: Addr(addr), DL(DL), TLI(nullptr), AC(AC) {
// If the address is an instruction, the whole thing is considered an input.
if (Instruction *I = dyn_cast<Instruction>(Addr))

View File

@ -232,10 +232,6 @@ namespace llvm {
///
LoopInfo *LI;
/// The DataLayout information for the target we are targeting.
///
const DataLayout *DL;
/// TLI - The target library information for the target we are targeting.
///
TargetLibraryInfo *TLI;

View File

@ -36,6 +36,7 @@ namespace llvm {
/// memory.
class SCEVExpander : public SCEVVisitor<SCEVExpander, Value*> {
ScalarEvolution &SE;
const DataLayout &DL;
// New instructions receive a name to identifies them with the current pass.
const char* IVName;
@ -91,10 +92,11 @@ namespace llvm {
public:
/// SCEVExpander - Construct a SCEVExpander in "canonical" mode.
explicit SCEVExpander(ScalarEvolution &se, const char *name)
: SE(se), IVName(name), IVIncInsertLoop(nullptr), IVIncInsertPos(nullptr),
CanonicalMode(true), LSRMode(false),
Builder(se.getContext(), TargetFolder(se.DL)) {
explicit SCEVExpander(ScalarEvolution &se, const DataLayout &DL,
const char *name)
: SE(se), DL(DL), IVName(name), IVIncInsertLoop(nullptr),
IVIncInsertPos(nullptr), CanonicalMode(true), LSRMode(false),
Builder(se.getContext(), TargetFolder(DL)) {
#ifndef NDEBUG
DebugType = "";
#endif

View File

@ -30,7 +30,7 @@ class DataLayout;
/// TargetFolder - Create constants with target dependent folding.
class TargetFolder {
const DataLayout *DL;
const DataLayout &DL;
/// Fold - Fold the constant using target specific information.
Constant *Fold(Constant *C) const {
@ -41,7 +41,7 @@ class TargetFolder {
}
public:
explicit TargetFolder(const DataLayout *DL) : DL(DL) {}
explicit TargetFolder(const DataLayout &DL) : DL(DL) {}
//===--------------------------------------------------------------------===//
// Binary Operators

View File

@ -33,12 +33,12 @@ namespace llvm {
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
const DataLayout *TD = nullptr, unsigned Depth = 0,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@ -50,7 +50,7 @@ namespace llvm {
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around computeKnownBits.
void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout *TD = nullptr, unsigned Depth = 0,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@ -60,7 +60,8 @@ namespace llvm {
/// element is known to be a power of two when defined. Supports values with
/// integer or pointer type and vectors of integers. If 'OrZero' is set then
/// returns true if the given value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero = false, unsigned Depth = 0,
bool isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL,
bool OrZero = false, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@ -69,8 +70,8 @@ namespace llvm {
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
bool isKnownNonZero(Value *V, const DataLayout *TD = nullptr,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@ -79,13 +80,12 @@ namespace llvm {
/// zero for bits that V cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
/// type, and vectors of integers. In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(Value *V, const APInt &Mask,
const DataLayout *TD = nullptr, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@ -97,7 +97,7 @@ namespace llvm {
///
/// 'Op' must have a scalar integer type.
///
unsigned ComputeNumSignBits(Value *Op, const DataLayout *TD = nullptr,
unsigned ComputeNumSignBits(Value *Op, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@ -142,11 +142,12 @@ namespace llvm {
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
const DataLayout *TD);
const DataLayout &DL);
static inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
const DataLayout *TD) {
return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
const DataLayout &DL) {
return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset,
DL);
}
/// getConstantStringInfo - This function computes the length of a
@ -167,21 +168,19 @@ namespace llvm {
/// being addressed. Note that the returned value has pointer type if the
/// specified value does. If the MaxLookup value is non-zero, it limits the
/// number of instructions to be stripped off.
Value *GetUnderlyingObject(Value *V, const DataLayout *TD = nullptr,
Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
unsigned MaxLookup = 6);
static inline const Value *
GetUnderlyingObject(const Value *V, const DataLayout *TD = nullptr,
unsigned MaxLookup = 6) {
return GetUnderlyingObject(const_cast<Value *>(V), TD, MaxLookup);
static inline const Value *GetUnderlyingObject(const Value *V,
const DataLayout &DL,
unsigned MaxLookup = 6) {
return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
}
/// GetUnderlyingObjects - This method is similar to GetUnderlyingObject
/// except that it can look through phi and select instructions and return
/// multiple objects.
void GetUnderlyingObjects(Value *V,
SmallVectorImpl<Value *> &Objects,
const DataLayout *TD = nullptr,
unsigned MaxLookup = 6);
void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
const DataLayout &DL, unsigned MaxLookup = 6);
/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
@ -205,8 +204,7 @@ namespace llvm {
/// the correct dominance relationships for the operands and users hold.
/// However, this method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Value *V,
const DataLayout *TD = nullptr);
bool isSafeToSpeculativelyExecute(const Value *V);
/// isKnownNonNull - Return true if this pointer couldn't possibly be null by
/// its definition. This returns true for allocas, non-extern-weak globals
@ -217,17 +215,16 @@ namespace llvm {
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
const DataLayout *DL = nullptr,
const DominatorTree *DT = nullptr);
enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
const DataLayout *DL,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
const DataLayout *DL,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);

View File

@ -351,8 +351,8 @@ public:
}
/// \brief Fetch the type representing a pointer to an integer value.
IntegerType* getIntPtrTy(const DataLayout *DL, unsigned AddrSpace = 0) {
return DL->getIntPtrType(Context, AddrSpace);
IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
return DL.getIntPtrType(Context, AddrSpace);
}
//===--------------------------------------------------------------------===//
@ -1595,7 +1595,7 @@ public:
"trying to create an alignment assumption on a non-pointer?");
PointerType *PtrTy = cast<PointerType>(PtrValue->getType());
Type *IntPtrTy = getIntPtrTy(&DL, PtrTy->getAddressSpace());
Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
Value *Mask = ConstantInt::get(IntPtrTy,

View File

@ -570,10 +570,9 @@ public:
/// This ensures that any pointer<->integer cast has enough bits in the
/// integer and any other cast is a bitcast.
static bool isBitOrNoopPointerCastable(
Type *SrcTy, ///< The Type from which the value should be cast.
Type *DestTy, ///< The Type to which the value should be cast.
const DataLayout *Layout = 0 ///< Optional DataLayout.
);
Type *SrcTy, ///< The Type from which the value should be cast.
Type *DestTy, ///< The Type to which the value should be cast.
const DataLayout &DL);
/// Returns the opcode necessary to cast Val into Ty using usual casting
/// rules.
@ -621,9 +620,9 @@ public:
) const;
/// @brief Determine if this cast is a no-op cast.
bool isNoopCast(
const DataLayout *DL ///< DataLayout to get the Int Ptr type from.
) const;
///
/// \param DL is the DataLayout to get the Int Ptr type from.
bool isNoopCast(const DataLayout &DL) const;
/// Determine how a pair of casts can be eliminated, if they can be at all.
/// This is a helper function for both CastInst and ConstantExpr.

View File

@ -450,7 +450,7 @@ public:
///
/// Test if this value is always a pointer to allocated and suitably aligned
/// memory for a simple load or store.
bool isDereferenceablePointer(const DataLayout *DL = nullptr) const;
bool isDereferenceablePointer(const DataLayout &DL) const;
/// \brief Translate PHI node to its predecessor from the given basic block.
///

View File

@ -160,7 +160,7 @@ protected:
public:
const TargetMachine &getTargetMachine() const { return TM; }
const DataLayout *getDataLayout() const { return DL; }
const DataLayout *getDataLayout() const { return TM.getDataLayout(); }
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
@ -1639,7 +1639,6 @@ public:
private:
const TargetMachine &TM;
const DataLayout *DL;
/// True if this is a little endian target.
bool IsLittleEndian;

View File

@ -54,7 +54,7 @@ struct BitSetInfo {
bool containsGlobalOffset(uint64_t Offset) const;
bool containsValue(const DataLayout *DL,
bool containsValue(const DataLayout &DL,
const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout,
Value *V, uint64_t COffset = 0) const;
};

View File

@ -28,52 +28,50 @@ namespace llvm {
/// EmitStrLen - Emit a call to the strlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, and the
/// return value has 'intptr_t' type.
Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);
/// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, MaxLen must
/// be of size_t type, and the return value has 'intptr_t' type.
Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);
/// EmitStrChr - Emit a call to the strchr function to the builder, for the
/// specified pointer and character. Ptr is required to be some pointer type,
/// and the return value has 'i8*' type.
Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const DataLayout *TD,
Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
const TargetLibraryInfo *TLI);
/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strcpy");
const TargetLibraryInfo *TLI, StringRef Name = "strcpy");
/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
/// specified pointer arguments and length.
Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strncpy");
const TargetLibraryInfo *TLI, StringRef Name = "strncpy");
/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers.
Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
IRBuilder<> &B, const DataLayout *TD,
IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);
/// EmitMemCmp - Emit a call to the memcmp function.
Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
/// (e.g. 'floor'). This function is known to take a single of type matching
@ -93,28 +91,26 @@ namespace llvm {
/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer.
Value *EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);
/// EmitPutS - Emit a call to the puts function. This assumes that Str is
/// some pointer.
Value *EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetLibraryInfo *TLI);
/// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
/// an i32, and File is a pointer to FILE.
Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const TargetLibraryInfo *TLI);
/// EmitFPutS - Emit a call to the puts function. Str is required to be a
/// pointer and File is a pointer to FILE.
Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const DataLayout *TD,
Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI);
/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);
}
#endif

View File

@ -164,7 +164,6 @@ void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = nullptr,
const DataLayout *DL = nullptr,
CloningDirector *Director = nullptr);
@ -184,7 +183,6 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = nullptr,
const DataLayout *DL = nullptr,
Instruction *TheCall = nullptr);
/// InlineFunctionInfo - This class captures the data input to the

View File

@ -88,7 +88,7 @@ bool RecursivelyDeleteDeadPHINode(PHINode *PN,
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = nullptr,
bool SimplifyInstructionsInBlock(BasicBlock *BB,
const TargetLibraryInfo *TLI = nullptr);
//===----------------------------------------------------------------------===//
@ -106,8 +106,7 @@ bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = nullptr,
///
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
DataLayout *TD = nullptr);
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred);
/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
/// predecessor is known to have one successor (BB!). Eliminate the edge
@ -137,8 +136,7 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// the basic block that was pointed to.
///
bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
unsigned BonusInstThreshold, const DataLayout *TD = nullptr,
AssumptionCache *AC = nullptr);
unsigned BonusInstThreshold, AssumptionCache *AC = nullptr);
/// FlatternCFG - This function is used to flatten a CFG. For
/// example, it uses parallel-and and parallel-or mode to collapse
@ -150,8 +148,7 @@ bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
/// and if a predecessor branches to us and one of our successors, fold the
/// setcc into the predecessor and use logical operations to pick the right
/// destination.
bool FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL = nullptr,
unsigned BonusInstThreshold = 1);
bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
/// DemoteRegToStack - This function takes a virtual register computed by an
/// Instruction and replaces it with a slot in the stack frame, allocated via
@ -173,18 +170,17 @@ AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DataLayout *TD = nullptr,
AssumptionCache *AC = nullptr,
const DataLayout &DL,
const Instruction *CxtI = nullptr,
AssumptionCache *AC = nullptr,
const DominatorTree *DT = nullptr);
/// getKnownAlignment - Try to infer an alignment for the specified pointer.
static inline unsigned getKnownAlignment(Value *V,
const DataLayout *TD = nullptr,
AssumptionCache *AC = nullptr,
static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
const Instruction *CxtI = nullptr,
AssumptionCache *AC = nullptr,
const DominatorTree *DT = nullptr) {
return getOrEnforceKnownAlignment(V, 0, TD, AC, CxtI, DT);
return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
}
/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
@ -192,11 +188,11 @@ static inline unsigned getKnownAlignment(Value *V,
/// in the base pointer). Return the result as a signed integer of intptr size.
/// When NoAssumptions is true, no assumptions about index computation not
/// overflowing is made.
template<typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
template <typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
bool NoAssumptions = false) {
GEPOperator *GEPOp = cast<GEPOperator>(GEP);
Type *IntPtrTy = TD.getIntPtrType(GEP->getType());
Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
Value *Result = Constant::getNullValue(IntPtrTy);
// If the GEP is inbounds, we know that none of the addressing operations will
@ -211,7 +207,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
++i, ++GTI) {
Value *Op = *i;
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
if (Constant *OpC = dyn_cast<Constant>(Op)) {
if (OpC->isZeroValue())
continue;
@ -222,7 +218,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
OpC = OpC->getSplatValue();
uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
Size = TD.getStructLayout(STy)->getElementOffset(OpValue);
Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
if (Size)
Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),

View File

@ -52,7 +52,6 @@ BasicBlock *InsertPreheaderForLoop(Loop *L, Pass *P);
/// passed into it.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
AliasAnalysis *AA = nullptr, ScalarEvolution *SE = nullptr,
const DataLayout *DL = nullptr,
AssumptionCache *AC = nullptr);
/// \brief Put loop into LCSSA form.
@ -85,13 +84,13 @@ bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
/// dominated by the specified block, and that are in the current loop) in
/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
/// uses before definitions, allowing us to sink a loop body in one pass without
/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
/// instructions of the loop and loop safety information as arguments.
/// It returns changed status.
/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
/// instructions of the loop and loop safety information as arguments.
/// It returns changed status.
bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
const DataLayout *, TargetLibraryInfo *, Loop *,
AliasSetTracker *, LICMSafetyInfo *);
TargetLibraryInfo *, Loop *, AliasSetTracker *,
LICMSafetyInfo *);
/// \brief Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in depth
@ -101,8 +100,8 @@ bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
/// TargetLibraryInfo, Loop, AliasSet information for all instructions of the
/// loop and loop safety information as arguments. It returns changed status.
bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
const DataLayout *, TargetLibraryInfo *, Loop *,
AliasSetTracker *, LICMSafetyInfo *);
TargetLibraryInfo *, Loop *, AliasSetTracker *,
LICMSafetyInfo *);
/// \brief Try to promote memory values to scalars by sinking stores out of
/// the loop and moving loads to before the loop. We do this by looping over

View File

@ -37,12 +37,11 @@ class Function;
/// is unknown) by passing true for OnlyLowerUnknownSize.
class FortifiedLibCallSimplifier {
private:
const DataLayout *DL;
const TargetLibraryInfo *TLI;
bool OnlyLowerUnknownSize;
public:
FortifiedLibCallSimplifier(const DataLayout *DL, const TargetLibraryInfo *TLI,
FortifiedLibCallSimplifier(const TargetLibraryInfo *TLI,
bool OnlyLowerUnknownSize = false);
/// \brief Take the given call instruction and return a more
@ -72,7 +71,7 @@ private:
class LibCallSimplifier {
private:
FortifiedLibCallSimplifier FortifiedSimplifier;
const DataLayout *DL;
const DataLayout &DL;
const TargetLibraryInfo *TLI;
bool UnsafeFPShrink;
function_ref<void(Instruction *, Value *)> Replacer;
@ -87,7 +86,7 @@ private:
void replaceAllUsesWith(Instruction *I, Value *With);
public:
LibCallSimplifier(const DataLayout *TD, const TargetLibraryInfo *TLI,
LibCallSimplifier(const DataLayout &DL, const TargetLibraryInfo *TLI,
function_ref<void(Instruction *, Value *)> Replacer =
&replaceAllUsesWithDefault);

View File

@ -407,9 +407,10 @@ AliasAnalysis::ModRefResult
AliasAnalysis::callCapturesBefore(const Instruction *I,
const AliasAnalysis::Location &MemLoc,
DominatorTree *DT) {
if (!DT || !DL) return AliasAnalysis::ModRef;
if (!DT)
return AliasAnalysis::ModRef;
const Value *Object = GetUnderlyingObject(MemLoc.Ptr, DL);
const Value *Object = GetUnderlyingObject(MemLoc.Ptr, *DL);
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return AliasAnalysis::ModRef;

View File

@ -103,7 +103,7 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
const TargetLibraryInfo &TLI,
bool RoundToAlign = false) {
uint64_t Size;
if (getObjectSize(V, Size, &DL, &TLI, RoundToAlign))
if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
return Size;
return AliasAnalysis::UnknownSize;
}
@ -221,7 +221,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
case Instruction::Or:
// X|C == X+C if all the bits in C are unset in X. Otherwise we can't
// analyze it.
if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &DL, 0, AC,
if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
BOp, DT))
break;
// FALL THROUGH.
@ -292,7 +292,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
SmallVectorImpl<VariableGEPIndex> &VarIndices,
bool &MaxLookupReached, const DataLayout *DL,
bool &MaxLookupReached, const DataLayout &DL,
AssumptionCache *AC, DominatorTree *DT) {
// Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = MaxLookupSearchDepth;
@ -341,16 +341,6 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
return V;
// If we are lacking DataLayout information, we can't compute the offets of
// elements computed by GEPs. However, we can handle bitcast equivalent
// GEPs.
if (!DL) {
if (!GEPOp->hasAllZeroIndices())
return V;
V = GEPOp->getOperand(0);
continue;
}
unsigned AS = GEPOp->getPointerAddressSpace();
// Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
gep_type_iterator GTI = gep_type_begin(GEPOp);
@ -363,30 +353,30 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0) continue;
BaseOffs += DL->getStructLayout(STy)->getElementOffset(FieldNo);
BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
continue;
}
// For an array/pointer, add the element offset, explicitly scaled.
if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
if (CIdx->isZero()) continue;
BaseOffs += DL->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
continue;
}
uint64_t Scale = DL->getTypeAllocSize(*GTI);
uint64_t Scale = DL.getTypeAllocSize(*GTI);
ExtensionKind Extension = EK_NotExtended;
// If the integer type is smaller than the pointer size, it is implicitly
// sign extended to pointer size.
unsigned Width = Index->getType()->getIntegerBitWidth();
if (DL->getPointerSizeInBits(AS) > Width)
if (DL.getPointerSizeInBits(AS) > Width)
Extension = EK_SignExt;
// Use GetLinearExpression to decompose the index into a C1*V+C2 form.
APInt IndexScale(Width, 0), IndexOffset(Width, 0);
Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
*DL, 0, AC, DT);
Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension, DL,
0, AC, DT);
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
@ -408,7 +398,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// Make sure that we have a scale that makes sense for this target's
// pointer size.
if (unsigned ShiftBits = 64 - DL->getPointerSizeInBits(AS)) {
if (unsigned ShiftBits = 64 - DL.getPointerSizeInBits(AS)) {
Scale <<= ShiftBits;
Scale = (int64_t)Scale >> ShiftBits;
}
@ -610,7 +600,7 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
SmallVector<const Value *, 16> Worklist;
Worklist.push_back(Loc.Ptr);
do {
const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL);
if (!Visited.insert(V).second) {
Visited.clear();
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
@ -828,7 +818,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL);
// If this is a tail call and Loc.Ptr points to a stack location, we know that
// the tail call cannot access or modify the local stack.
@ -1045,10 +1035,10 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
GEP2MaxLookupReached, DL, AC2, DT);
GEP2MaxLookupReached, *DL, AC2, DT);
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
GEP1MaxLookupReached, DL, AC1, DT);
GEP1MaxLookupReached, *DL, AC1, DT);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
@ -1077,14 +1067,14 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// about the relation of the resulting pointer.
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
GEP1MaxLookupReached, DL, AC1, DT);
GEP1MaxLookupReached, *DL, AC1, DT);
int64_t GEP2BaseOffset;
bool GEP2MaxLookupReached;
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
GEP2MaxLookupReached, DL, AC2, DT);
GEP2MaxLookupReached, *DL, AC2, DT);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
@ -1134,7 +1124,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
GEP1MaxLookupReached, DL, AC1, DT);
GEP1MaxLookupReached, *DL, AC1, DT);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
@ -1203,7 +1193,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *V = GEP1VariableIndices[i].V;
bool SignKnownZero, SignKnownOne;
ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, *DL,
0, AC1, nullptr, DT);
// Zero-extension widens the variable, and so forces the sign
@ -1412,8 +1402,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
return NoAlias; // Scalars cannot alias each other
// Figure out what objects these things are pointing to if we can.
const Value *O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
const Value *O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth);
const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth);
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.

View File

@ -50,8 +50,7 @@ using namespace llvm;
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
const DataLayout &TD) {
static Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
// Catch the obvious splat cases.
if (C->isNullValue() && !DestTy->isX86_MMXTy())
return Constant::getNullValue(DestTy);
@ -84,11 +83,11 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
// Now that we know that the input value is a vector of integers, just shift
// and insert them into our result.
unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
unsigned BitShift = DL.getTypeAllocSizeInBits(SrcEltTy);
APInt Result(IT->getBitWidth(), 0);
for (unsigned i = 0; i != NumSrcElts; ++i) {
Result <<= BitShift;
if (TD.isLittleEndian())
if (DL.isLittleEndian())
Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
else
Result |= CDV->getElementAsInteger(i);
@ -106,7 +105,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
// vector so the code below can handle it uniformly.
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
Constant *Ops = C; // don't take the address of C!
return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
}
// If this is a bitcast from constant vector -> vector, fold it.
@ -138,7 +137,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
Type *DestIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
C = FoldBitCast(C, DestIVTy, DL);
// Finally, IR can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
@ -162,7 +161,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
// of the same size, and that their #elements is not the same. Do the
// conversion here, which depends on whether the input or output has
// more elements.
bool isLittleEndian = TD.isLittleEndian();
bool isLittleEndian = DL.isLittleEndian();
SmallVector<Constant*, 32> Result;
if (NumDstElt < NumSrcElt) {
@ -198,7 +197,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = TD.getTypeSizeInBits(DstEltTy);
unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
@ -235,10 +234,10 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
APInt &Offset, const DataLayout &TD) {
APInt &Offset, const DataLayout &DL) {
// Trivial case, constant is the global.
if ((GV = dyn_cast<GlobalValue>(C))) {
unsigned BitWidth = TD.getPointerTypeSizeInBits(GV->getType());
unsigned BitWidth = DL.getPointerTypeSizeInBits(GV->getType());
Offset = APInt(BitWidth, 0);
return true;
}
@ -251,22 +250,22 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
if (CE->getOpcode() == Instruction::PtrToInt ||
CE->getOpcode() == Instruction::BitCast ||
CE->getOpcode() == Instruction::AddrSpaceCast)
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);
// i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
GEPOperator *GEP = dyn_cast<GEPOperator>(CE);
if (!GEP)
return false;
unsigned BitWidth = TD.getPointerTypeSizeInBits(GEP->getType());
unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
APInt TmpOffset(BitWidth, 0);
// If the base isn't a global+constant, we aren't either.
if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, TD))
if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
return false;
// Otherwise, add any offset that our operands provide.
if (!GEP->accumulateConstantOffset(TD, TmpOffset))
if (!GEP->accumulateConstantOffset(DL, TmpOffset))
return false;
Offset = TmpOffset;
@ -276,11 +275,11 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. TD is the target data.
/// the CurPtr buffer. DL is the DataLayout.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
unsigned char *CurPtr, unsigned BytesLeft,
const DataLayout &TD) {
assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
const DataLayout &DL) {
assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
"Out of range access");
// If this element is zero or undefined, we can just return since *CurPtr is
@ -298,7 +297,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
int n = ByteOffset;
if (!TD.isLittleEndian())
if (!DL.isLittleEndian())
n = IntBytes - n - 1;
CurPtr[i] = (unsigned char)(Val >> (n * 8));
++ByteOffset;
@ -308,22 +307,22 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isDoubleTy()) {
C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
}
if (CFP->getType()->isFloatTy()){
C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), TD);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
}
if (CFP->getType()->isHalfTy()){
C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), TD);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
}
return false;
}
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = TD.getStructLayout(CS->getType());
const StructLayout *SL = DL.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
uint64_t CurEltOffset = SL->getElementOffset(Index);
ByteOffset -= CurEltOffset;
@ -331,11 +330,11 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
while (1) {
// If the element access is to the element itself and not to tail padding,
// read the bytes from the element.
uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType());
uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
if (ByteOffset < EltSize &&
!ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
BytesLeft, TD))
BytesLeft, DL))
return false;
++Index;
@ -362,7 +361,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
isa<ConstantDataSequential>(C)) {
Type *EltTy = C->getType()->getSequentialElementType();
uint64_t EltSize = TD.getTypeAllocSize(EltTy);
uint64_t EltSize = DL.getTypeAllocSize(EltTy);
uint64_t Index = ByteOffset / EltSize;
uint64_t Offset = ByteOffset - Index * EltSize;
uint64_t NumElts;
@ -373,7 +372,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
for (; Index != NumElts; ++Index) {
if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
BytesLeft, TD))
BytesLeft, DL))
return false;
uint64_t BytesWritten = EltSize - Offset;
@ -390,9 +389,9 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType())) {
CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
BytesLeft, TD);
BytesLeft, DL);
}
}
@ -401,7 +400,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
const DataLayout &TD) {
const DataLayout &DL) {
PointerType *PTy = cast<PointerType>(C->getType());
Type *LoadTy = PTy->getElementType();
IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
@ -423,14 +422,13 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
MapTy = Type::getInt64PtrTy(C->getContext(), AS);
else if (LoadTy->isVectorTy()) {
MapTy = PointerType::getIntNPtrTy(C->getContext(),
TD.getTypeAllocSizeInBits(LoadTy),
AS);
DL.getTypeAllocSizeInBits(LoadTy), AS);
} else
return nullptr;
C = FoldBitCast(C, MapTy, TD);
if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD))
return FoldBitCast(Res, LoadTy, TD);
C = FoldBitCast(C, MapTy, DL);
if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, DL))
return FoldBitCast(Res, LoadTy, DL);
return nullptr;
}
@ -440,7 +438,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
GlobalValue *GVal;
APInt Offset;
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, DL))
return nullptr;
GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
@ -455,16 +453,16 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
// If we're not accessing anything in this constant, the result is undefined.
if (Offset.getZExtValue() >=
TD.getTypeAllocSize(GV->getInitializer()->getType()))
DL.getTypeAllocSize(GV->getInitializer()->getType()))
return UndefValue::get(IntType);
unsigned char RawBytes[32] = {0};
if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
BytesLoaded, TD))
BytesLoaded, DL))
return nullptr;
APInt ResultVal = APInt(IntType->getBitWidth(), 0);
if (TD.isLittleEndian()) {
if (DL.isLittleEndian()) {
ResultVal = RawBytes[BytesLoaded - 1];
for (unsigned i = 1; i != BytesLoaded; ++i) {
ResultVal <<= 8;
@ -482,9 +480,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
}
static Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE,
const DataLayout *DL) {
if (!DL)
return nullptr;
const DataLayout &DL) {
auto *DestPtrTy = dyn_cast<PointerType>(CE->getType());
if (!DestPtrTy)
return nullptr;
@ -499,7 +495,7 @@ static Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE,
// If the type sizes are the same and a cast is legal, just directly
// cast the constant.
if (DL->getTypeSizeInBits(DestTy) == DL->getTypeSizeInBits(SrcTy)) {
if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
Instruction::CastOps Cast = Instruction::BitCast;
// If we are going from a pointer to int or vice versa, we spell the cast
// differently.
@ -530,7 +526,7 @@ static Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE,
/// Return the value that a load from C would produce if it is constant and
/// determinable. If this is not determinable, return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
const DataLayout *TD) {
const DataLayout &DL) {
// First, try the easy cases:
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
@ -552,13 +548,13 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
}
if (CE->getOpcode() == Instruction::BitCast)
if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, TD))
if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, DL))
return LoadedC;
// Instead of loading constant c string, use corresponding integer value
// directly if string length is small enough.
StringRef Str;
if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) {
if (getConstantStringInfo(CE, Str) && !Str.empty()) {
unsigned StrLen = Str.size();
Type *Ty = cast<PointerType>(CE->getType())->getElementType();
unsigned NumBits = Ty->getPrimitiveSizeInBits();
@ -568,7 +564,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
(isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
APInt StrVal(NumBits, 0);
APInt SingleChar(NumBits, 0);
if (TD->isLittleEndian()) {
if (DL.isLittleEndian()) {
for (signed i = StrLen-1; i >= 0; i--) {
SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
StrVal = (StrVal << 8) | SingleChar;
@ -593,7 +589,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
if (GlobalVariable *GV =
dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
Type *ResTy = cast<PointerType>(C->getType())->getElementType();
if (GV->getInitializer()->isNullValue())
@ -604,16 +600,15 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
}
// Try hard to fold loads from bitcasted strange and non-type-safe things.
if (TD)
return FoldReinterpretLoadFromConstPtr(CE, *TD);
return nullptr;
return FoldReinterpretLoadFromConstPtr(CE, DL);
}
static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
static Constant *ConstantFoldLoadInst(const LoadInst *LI,
const DataLayout &DL) {
if (LI->isVolatile()) return nullptr;
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, TD);
return ConstantFoldLoadFromConstPtr(C, DL);
return nullptr;
}
@ -623,16 +618,16 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
Constant *Op1, const DataLayout *DL){
Constant *Op1,
const DataLayout &DL) {
// SROA
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
// Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
// bits.
if (Opc == Instruction::And && DL) {
unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()->getScalarType());
if (Opc == Instruction::And) {
unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
computeKnownBits(Op0, KnownZero0, KnownOne0, DL);
@ -655,14 +650,13 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
// constant. This happens frequently when iterating over a global array.
if (Opc == Instruction::Sub && DL) {
if (Opc == Instruction::Sub) {
GlobalValue *GV1, *GV2;
APInt Offs1, Offs2;
if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *DL))
if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *DL) &&
GV1 == GV2) {
unsigned OpSize = DL->getTypeSizeInBits(Op0->getType());
if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
// (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
// PtrToInt may change the bitwidth so we have convert to the right size
@ -677,13 +671,10 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
Type *ResultTy, const DataLayout *TD,
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops, Type *ResultTy,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
if (!TD)
return nullptr;
Type *IntPtrTy = TD->getIntPtrType(ResultTy);
Type *IntPtrTy = DL.getIntPtrType(ResultTy);
bool Any = false;
SmallVector<Constant*, 32> NewIdxs;
@ -708,7 +699,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
Constant *C = ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
}
@ -733,14 +724,14 @@ static Constant* StripPtrCastKeepAS(Constant* Ptr) {
/// If we can symbolically evaluate the GEP constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
Type *ResultTy, const DataLayout *TD,
Type *ResultTy, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
Constant *Ptr = Ops[0];
if (!TD || !Ptr->getType()->getPointerElementType()->isSized() ||
if (!Ptr->getType()->getPointerElementType()->isSized() ||
!Ptr->getType()->isPointerTy())
return nullptr;
Type *IntPtrTy = TD->getIntPtrType(Ptr->getType());
Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
Type *ResultElementTy = ResultTy->getPointerElementType();
// If this is a constant expr gep that is effectively computing an
@ -760,19 +751,19 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
Res = ConstantExpr::getSub(Res, CE->getOperand(1));
Res = ConstantExpr::getIntToPtr(Res, ResultTy);
if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
Res = ConstantFoldConstantExpression(ResCE, TD, TLI);
Res = ConstantFoldConstantExpression(ResCE, DL, TLI);
return Res;
}
}
return nullptr;
}
unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
APInt Offset =
APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
makeArrayRef((Value *const*)
Ops.data() + 1,
Ops.size() - 1)));
APInt(BitWidth,
DL.getIndexedOffset(
Ptr->getType(),
makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
Ptr = StripPtrCastKeepAS(Ptr);
// If this is a GEP of a GEP, fold it all into a single GEP.
@ -790,8 +781,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
break;
Ptr = cast<Constant>(GEP->getOperand(0));
Offset += APInt(BitWidth,
TD->getIndexedOffset(Ptr->getType(), NestedOps));
Offset += APInt(BitWidth, DL.getIndexedOffset(Ptr->getType(), NestedOps));
Ptr = StripPtrCastKeepAS(Ptr);
}
@ -831,7 +821,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
}
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
APInt ElemSize(BitWidth, DL.getTypeAllocSize(ATy->getElementType()));
if (ElemSize == 0)
// The element size is 0. This may be [0 x Ty]*, so just use a zero
// index for this level and proceed to the next level to see if it can
@ -850,7 +840,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// can't re-form this GEP in a regular form, so bail out. The pointer
// operand likely went through casts that are necessary to make the GEP
// sensible.
const StructLayout &SL = *TD->getStructLayout(STy);
const StructLayout &SL = *DL.getStructLayout(STy);
if (Offset.uge(SL.getSizeInBytes()))
break;
@ -882,7 +872,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// If we ended up indexing a member with a type that doesn't match
// the type of what the original indices indexed, add a cast.
if (Ty != ResultElementTy)
C = FoldBitCast(C, ResultTy, *TD);
C = FoldBitCast(C, ResultTy, DL);
return C;
}
@ -898,8 +888,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I,
const DataLayout *TD,
Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
@ -919,7 +908,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
return nullptr;
// Fold the PHI's operands.
if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
C = ConstantFoldConstantExpression(NewC, TD, TLI);
C = ConstantFoldConstantExpression(NewC, DL, TLI);
// If the incoming value is a different constant to
// the one we saw previously, then give up.
if (CommonValue && C != CommonValue)
@ -942,17 +931,17 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
// Fold the Instruction's operands.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
Op = ConstantFoldConstantExpression(NewCE, TD, TLI);
Op = ConstantFoldConstantExpression(NewCE, DL, TLI);
Ops.push_back(Op);
}
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
TD, TLI);
DL, TLI);
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);
return ConstantFoldLoadInst(LI, DL);
if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I)) {
return ConstantExpr::getInsertValue(
@ -967,11 +956,11 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
EVI->getIndices());
}
return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI);
return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, DL, TLI);
}
static Constant *
ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout *TD,
ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout &DL,
const TargetLibraryInfo *TLI,
SmallPtrSetImpl<ConstantExpr *> &FoldedOps) {
SmallVector<Constant *, 8> Ops;
@ -982,25 +971,25 @@ ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout *TD,
// a ConstantExpr, we don't have to process it again.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) {
if (FoldedOps.insert(NewCE).second)
NewC = ConstantFoldConstantExpressionImpl(NewCE, TD, TLI, FoldedOps);
NewC = ConstantFoldConstantExpressionImpl(NewCE, DL, TLI, FoldedOps);
}
Ops.push_back(NewC);
}
if (CE->isCompare())
return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
TD, TLI);
return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI);
DL, TLI);
return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, DL, TLI);
}
/// Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
const DataLayout *TD,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
SmallPtrSet<ConstantExpr *, 4> FoldedOps;
return ConstantFoldConstantExpressionImpl(CE, TD, TLI, FoldedOps);
return ConstantFoldConstantExpressionImpl(CE, DL, TLI, FoldedOps);
}
/// Attempt to constant fold an instruction with the
@ -1015,12 +1004,12 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
const DataLayout *TD,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1])) {
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], DL))
return C;
}
@ -1040,10 +1029,10 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
// If the input is a inttoptr, eliminate the pair. This requires knowing
// the width of a pointer, so it can't be done in ConstantExpr::getCast.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
if (TD && CE->getOpcode() == Instruction::IntToPtr) {
if (CE->getOpcode() == Instruction::IntToPtr) {
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getScalarSizeInBits();
unsigned PtrWidth = TD->getPointerTypeSizeInBits(CE->getType());
unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
if (PtrWidth < InWidth) {
Constant *Mask =
ConstantInt::get(CE->getContext(),
@ -1061,15 +1050,15 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
// This requires knowing the width of a pointer, so it can't be done in
// ConstantExpr::getCast.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
if (TD && CE->getOpcode() == Instruction::PtrToInt) {
if (CE->getOpcode() == Instruction::PtrToInt) {
Constant *SrcPtr = CE->getOperand(0);
unsigned SrcPtrSize = TD->getPointerTypeSizeInBits(SrcPtr->getType());
unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
if (MidIntSize >= SrcPtrSize) {
unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
if (SrcAS == DestTy->getPointerAddressSpace())
return FoldBitCast(CE->getOperand(0), DestTy, *TD);
return FoldBitCast(CE->getOperand(0), DestTy, DL);
}
}
}
@ -1087,9 +1076,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
case Instruction::AddrSpaceCast:
return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
case Instruction::BitCast:
if (TD)
return FoldBitCast(Ops[0], DestTy, *TD);
return ConstantExpr::getBitCast(Ops[0], DestTy);
return FoldBitCast(Ops[0], DestTy, DL);
case Instruction::Select:
return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
case Instruction::ExtractElement:
@ -1099,9 +1086,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr:
if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI))
if (Constant *C = CastGEPIndices(Ops, DestTy, DL, TLI))
return C;
if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, DL, TLI))
return C;
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
@ -1113,43 +1100,44 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
/// returns a constant expression of the specified operands.
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *Ops0, Constant *Ops1,
const DataLayout *TD,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null
// fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
// fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
//
// ConstantExpr::getCompare cannot do this, because it doesn't have TD
// FIXME: The following comment is out of date, as the DataLayout is available here now.
// ConstantExpr::getCompare cannot do this, because it doesn't have DL
// around to know if bit truncation is happening.
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
if (TD && Ops1->isNullValue()) {
if (Ops1->isNullValue()) {
if (CE0->getOpcode() == Instruction::IntToPtr) {
Type *IntPtrTy = TD->getIntPtrType(CE0->getType());
Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
IntPtrTy, false);
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
}
// Only do this transformation if the int is intptrty in size, otherwise
// there is a truncation or extension that we aren't modeling.
if (CE0->getOpcode() == Instruction::PtrToInt) {
Type *IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
if (CE0->getType() == IntPtrTy) {
Constant *C = CE0->getOperand(0);
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
}
}
}
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
if (CE0->getOpcode() == CE1->getOpcode()) {
if (CE0->getOpcode() == Instruction::IntToPtr) {
Type *IntPtrTy = TD->getIntPtrType(CE0->getType());
Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
@ -1157,20 +1145,17 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
IntPtrTy, false);
Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
IntPtrTy, false);
return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
}
// Only do this transformation if the int is intptrty in size, otherwise
// there is a truncation or extension that we aren't modeling.
if (CE0->getOpcode() == Instruction::PtrToInt) {
Type *IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
if (CE0->getType() == IntPtrTy &&
CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
return ConstantFoldCompareInstOperands(Predicate,
CE0->getOperand(0),
CE1->getOperand(0),
TD,
TLI);
return ConstantFoldCompareInstOperands(
Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
}
}
}
@ -1180,16 +1165,14 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
// icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
Constant *LHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
TD, TLI);
Constant *RHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
TD, TLI);
Constant *LHS = ConstantFoldCompareInstOperands(
Predicate, CE0->getOperand(0), Ops1, DL, TLI);
Constant *RHS = ConstantFoldCompareInstOperands(
Predicate, CE0->getOperand(1), Ops1, DL, TLI);
unsigned OpC =
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
Constant *Ops[] = { LHS, RHS };
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, DL, TLI);
}
}
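With ConstantFoldInstruction, ConstantFoldConstantExpression and ConstantFoldCompareInstOperands all taking a DataLayout reference, call sites no longer thread a maybe-null pointer through. A minimal post-patch call-site sketch (illustrative, not part of the commit; I is an Instruction* and TLI an optional TargetLibraryInfo* assumed to be in scope):

    const llvm::DataLayout &DL = I->getModule()->getDataLayout();
    if (llvm::Constant *C = llvm::ConstantFoldInstruction(I, DL, TLI))
      I->replaceAllUsesWith(C);  // the fold no longer depends on DL being non-null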

View File

@ -60,6 +60,7 @@
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@ -624,14 +625,12 @@ void Dependence::dump(raw_ostream &OS) const {
OS << "!\n";
}
static
AliasAnalysis::AliasResult underlyingObjectsAlias(AliasAnalysis *AA,
const Value *A,
const Value *B) {
const Value *AObj = GetUnderlyingObject(A);
const Value *BObj = GetUnderlyingObject(B);
static AliasAnalysis::AliasResult underlyingObjectsAlias(AliasAnalysis *AA,
const DataLayout &DL,
const Value *A,
const Value *B) {
const Value *AObj = GetUnderlyingObject(A, DL);
const Value *BObj = GetUnderlyingObject(B, DL);
return AA->alias(AObj, AA->getTypeStoreSize(AObj->getType()),
BObj, AA->getTypeStoreSize(BObj->getType()));
}
@ -3313,7 +3312,8 @@ DependenceAnalysis::depends(Instruction *Src, Instruction *Dst,
Value *SrcPtr = getPointerOperand(Src);
Value *DstPtr = getPointerOperand(Dst);
switch (underlyingObjectsAlias(AA, DstPtr, SrcPtr)) {
switch (underlyingObjectsAlias(AA, F->getParent()->getDataLayout(), DstPtr,
SrcPtr)) {
case AliasAnalysis::MayAlias:
case AliasAnalysis::PartialAlias:
// cannot analyse objects if we don't understand their aliasing.
@ -3757,8 +3757,8 @@ const SCEV *DependenceAnalysis::getSplitIteration(const Dependence &Dep,
assert(isLoadOrStore(Dst));
Value *SrcPtr = getPointerOperand(Src);
Value *DstPtr = getPointerOperand(Dst);
assert(underlyingObjectsAlias(AA, DstPtr, SrcPtr) ==
AliasAnalysis::MustAlias);
assert(underlyingObjectsAlias(AA, F->getParent()->getDataLayout(), DstPtr,
SrcPtr) == AliasAnalysis::MustAlias);
// establish loop nesting levels
establishNestingLevels(Src, Dst);
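underlyingObjectsAlias now takes the layout explicitly, and both call sites obtain it from the function's module. The resulting pattern, sketched for illustration (AA, F, SrcPtr and DstPtr as in the surrounding code):

    const llvm::DataLayout &DL = F->getParent()->getDataLayout();
    auto Result = underlyingObjectsAlias(AA, DL, DstPtr, SrcPtr);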

View File

@ -322,7 +322,8 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
continue;
// Check the value being stored.
Value *Ptr = GetUnderlyingObject(SI->getOperand(0));
Value *Ptr = GetUnderlyingObject(SI->getOperand(0),
GV->getParent()->getDataLayout());
if (!isAllocLikeFn(Ptr, TLI))
return false; // Too hard to analyze.
@ -481,8 +482,8 @@ AliasAnalysis::AliasResult
GlobalsModRef::alias(const Location &LocA,
const Location &LocB) {
// Get the base object these pointers point to.
const Value *UV1 = GetUnderlyingObject(LocA.Ptr);
const Value *UV2 = GetUnderlyingObject(LocB.Ptr);
const Value *UV1 = GetUnderlyingObject(LocA.Ptr, *DL);
const Value *UV2 = GetUnderlyingObject(LocB.Ptr, *DL);
// If either of the underlying values is a global, they may be non-addr-taken
// globals, which we can answer queries about.
@ -540,8 +541,9 @@ GlobalsModRef::getModRefInfo(ImmutableCallSite CS,
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
if (const GlobalValue *GV =
dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr)))
dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr, DL)))
if (GV->hasLocalLinkage())
if (const Function *F = CS.getCalledFunction())
if (NonAddressTakenGlobals.count(GV))

View File

@ -45,9 +45,6 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
typedef InstVisitor<CallAnalyzer, bool> Base;
friend class InstVisitor<CallAnalyzer, bool>;
// DataLayout if available, or null.
const DataLayout *const DL;
/// The TargetTransformInfo available for this compilation.
const TargetTransformInfo &TTI;
@ -145,9 +142,9 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
bool visitUnreachableInst(UnreachableInst &I);
public:
CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
AssumptionCacheTracker *ACT, Function &Callee, int Threshold)
: DL(DL), TTI(TTI), ACT(ACT), F(Callee), Threshold(Threshold), Cost(0),
CallAnalyzer(const TargetTransformInfo &TTI, AssumptionCacheTracker *ACT,
Function &Callee, int Threshold)
: TTI(TTI), ACT(ACT), F(Callee), Threshold(Threshold), Cost(0),
IsCallerRecursive(false), IsRecursiveCall(false),
ExposesReturnsTwice(false), HasDynamicAlloca(false),
ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
@ -244,10 +241,8 @@ bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
if (!DL)
return false;
unsigned IntPtrWidth = DL->getPointerSizeInBits();
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned IntPtrWidth = DL.getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());
for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@ -263,12 +258,12 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = DL->getStructLayout(STy);
const StructLayout *SL = DL.getStructLayout(STy);
Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
continue;
}
APInt TypeSize(IntPtrWidth, DL->getTypeAllocSize(GTI.getIndexedType()));
APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
}
return true;
@ -289,9 +284,9 @@ bool CallAnalyzer::visitAlloca(AllocaInst &I) {
// Accumulate the allocated size.
if (I.isStaticAlloca()) {
const DataLayout &DL = F.getParent()->getDataLayout();
Type *Ty = I.getAllocatedType();
AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
Ty->getPrimitiveSizeInBits());
AllocatedSize += DL.getTypeAllocSize(Ty);
}
// We will happily inline static alloca instructions.
@ -327,7 +322,7 @@ bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
// Try to fold GEPs of constant-offset call site argument pointers. This
// requires target data and inbounds GEPs.
if (DL && I.isInBounds()) {
if (I.isInBounds()) {
// Check if we have a base + offset for the pointer.
Value *Ptr = I.getPointerOperand();
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
@ -409,7 +404,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
// Track base/offset pairs when converted to a plain integer provided the
// integer is large enough to represent the pointer.
unsigned IntegerSize = I.getType()->getScalarSizeInBits();
const DataLayout &DL = I.getModule()->getDataLayout();
const DataLayout &DL = F.getParent()->getDataLayout();
if (IntegerSize >= DL.getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset
= ConstantOffsetPtrs.lookup(I.getOperand(0));
@ -447,7 +442,7 @@ bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
// modifications provided the integer is not too large.
Value *Op = I.getOperand(0);
unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
const DataLayout &DL = I.getModule()->getDataLayout();
const DataLayout &DL = F.getParent()->getDataLayout();
if (IntegerSize <= DL.getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
if (BaseAndOffset.first)
@ -485,12 +480,14 @@ bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
Constant *COp = dyn_cast<Constant>(Operand);
if (!COp)
COp = SimplifiedValues.lookup(Operand);
if (COp)
if (COp) {
const DataLayout &DL = F.getParent()->getDataLayout();
if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
COp, DL)) {
SimplifiedValues[&I] = C;
return true;
}
}
// Disable any SROA on the argument to arbitrary unary operators.
disableSROA(Operand);
@ -595,6 +592,7 @@ bool CallAnalyzer::visitSub(BinaryOperator &I) {
bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
const DataLayout &DL = F.getParent()->getDataLayout();
if (!isa<Constant>(LHS))
if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
LHS = SimpleLHS;
@ -788,7 +786,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
// during devirtualization and so we want to give it a hefty bonus for
// inlining, but cap that bonus in the event that inlining wouldn't pan
// out. Pretend to inline the function, with a custom threshold.
CallAnalyzer CA(DL, TTI, ACT, *F, InlineConstants::IndirectCallThreshold);
CallAnalyzer CA(TTI, ACT, *F, InlineConstants::IndirectCallThreshold);
if (CA.analyzeCall(CS)) {
// We were able to inline the indirect call! Subtract the cost from the
// bonus we want to apply, but don't go below zero.
@ -976,10 +974,11 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
if (!DL || !V->getType()->isPointerTy())
if (!V->getType()->isPointerTy())
return nullptr;
unsigned IntPtrWidth = DL->getPointerSizeInBits();
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned IntPtrWidth = DL.getPointerSizeInBits();
APInt Offset = APInt::getNullValue(IntPtrWidth);
// Even though we don't look through PHI nodes, we could be called on an
@ -1003,7 +1002,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(V).second);
Type *IntPtrTy = DL->getIntPtrType(V->getContext());
Type *IntPtrTy = DL.getIntPtrType(V->getContext());
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
@ -1034,16 +1033,17 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
assert(NumVectorInstructions == 0);
FiftyPercentVectorBonus = Threshold;
TenPercentVectorBonus = Threshold / 2;
const DataLayout &DL = F.getParent()->getDataLayout();
// Give out bonuses per argument, as the instructions setting them up will
// be gone after inlining.
for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
if (DL && CS.isByValArgument(I)) {
if (CS.isByValArgument(I)) {
// We approximate the number of loads and stores needed by dividing the
// size of the byval type by the target's pointer size.
PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
unsigned TypeSize = DL->getTypeSizeInBits(PTy->getElementType());
unsigned PointerSize = DL->getPointerSizeInBits();
unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
unsigned PointerSize = DL.getPointerSizeInBits();
// Ceiling division.
unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
@ -1333,8 +1333,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
<< "...\n");
CallAnalyzer CA(&Callee->getParent()->getDataLayout(), TTIWP->getTTI(*Callee),
ACT, *Callee, Threshold);
CallAnalyzer CA(TTIWP->getTTI(*Callee), ACT, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
DEBUG(CA.dump());
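CallAnalyzer no longer stores a DataLayout pointer at all; each method that needs layout information fetches the now-mandatory layout from the analyzed function's module. Sketch of the recurring pattern (illustrative; F is the Function member of the analyzer):

    const llvm::DataLayout &DL = F.getParent()->getDataLayout();
    unsigned PtrBits = DL.getPointerSizeInBits();        // no null check needed anymore
    llvm::Type *IntPtrTy = DL.getIntPtrType(F.getContext());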

View File

@ -114,6 +114,8 @@ static bool isSimplifiedLoopNest(BasicBlock *BB, const DominatorTree *DT,
/// return true. Otherwise, return false.
bool IVUsers::AddUsersImpl(Instruction *I,
SmallPtrSetImpl<Loop*> &SimpleLoopNests) {
const DataLayout &DL = I->getModule()->getDataLayout();
// Add this IV user to the Processed set before returning false to ensure that
// all IV users are members of the set. See IVUsers::isIVUserOrOperand.
if (!Processed.insert(I).second)
@ -125,14 +127,14 @@ bool IVUsers::AddUsersImpl(Instruction *I,
// IVUsers is used by LSR which assumes that all SCEV expressions are safe to
// pass to SCEVExpander. Expressions are not safe to expand if they represent
// operations that are not safe to speculate, namely integer division.
if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, DL))
if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I))
return false;
// LSR is not APInt clean, do not touch integers bigger than 64-bits.
// Also avoid creating IVs of non-native types. For example, we don't want a
// 64-bit IV in 32-bit code just because the loop has one 64-bit cast.
uint64_t Width = SE->getTypeSizeInBits(I->getType());
if (Width > 64 || (DL && !DL->isLegalInteger(Width)))
if (Width > 64 || !DL.isLegalInteger(Width))
return false;
// Get the symbolic expression for this instruction.
@ -254,7 +256,6 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
DL = &L->getHeader()->getModule()->getDataLayout();
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for

View File

@ -45,13 +45,13 @@ STATISTIC(NumReassoc, "Number of reassociations");
namespace {
struct Query {
const DataLayout *DL;
const DataLayout &DL;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;
AssumptionCache *AC;
const Instruction *CxtI;
Query(const DataLayout *DL, const TargetLibraryInfo *tli,
Query(const DataLayout &DL, const TargetLibraryInfo *tli,
const DominatorTree *dt, AssumptionCache *ac = nullptr,
const Instruction *cxti = nullptr)
: DL(DL), TLI(tli), DT(dt), AC(ac), CxtI(cxti) {}
@ -584,7 +584,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout *DL, const TargetLibraryInfo *TLI,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
@ -601,17 +601,11 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
static Constant *stripAndComputeConstantOffsets(const DataLayout *DL,
Value *&V,
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
bool AllowNonInbounds = false) {
assert(V->getType()->getScalarType()->isPointerTy());
// Without DataLayout, just be conservative for now. Theoretically, more could
// be done in this case.
if (!DL)
return ConstantInt::get(IntegerType::get(V->getContext(), 64), 0);
Type *IntPtrTy = DL->getIntPtrType(V->getType())->getScalarType();
Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
// Even though we don't look through PHI nodes, we could be called on an
@ -621,7 +615,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout *DL,
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
if ((!AllowNonInbounds && !GEP->isInBounds()) ||
!GEP->accumulateConstantOffset(*DL, Offset))
!GEP->accumulateConstantOffset(DL, Offset))
break;
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
@ -646,8 +640,8 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout *DL,
/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
static Constant *computePointerDifference(const DataLayout *DL,
Value *LHS, Value *RHS) {
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
Value *RHS) {
Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
@ -783,7 +777,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout *DL, const TargetLibraryInfo *TLI,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
@ -962,7 +956,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
}
Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -971,7 +965,7 @@ Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
}
Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -980,7 +974,7 @@ Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
}
Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -988,7 +982,7 @@ Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
RecursionLimit);
}
Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *DL,
Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1092,7 +1086,7 @@ static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
return nullptr;
}
Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1110,7 +1104,7 @@ static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
return nullptr;
}
Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1138,7 +1132,7 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
}
Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1217,7 +1211,7 @@ static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
return nullptr;
}
Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1235,7 +1229,7 @@ static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
return nullptr;
}
Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *DL,
Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1263,7 +1257,7 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
}
Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1387,7 +1381,7 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout *DL, const TargetLibraryInfo *TLI,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
@ -1411,7 +1405,7 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1445,7 +1439,7 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1596,9 +1590,11 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
// A & (-A) = A if A is a power of two or zero.
if (match(Op0, m_Neg(m_Specific(Op1))) ||
match(Op1, m_Neg(m_Specific(Op0)))) {
if (isKnownToBeAPowerOfTwo(Op0, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
Q.DT))
return Op0;
if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
Q.DT))
return Op1;
}
@ -1643,7 +1639,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
return nullptr;
}
Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *DL,
Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1831,7 +1827,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
return nullptr;
}
Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *DL,
Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1888,7 +1884,7 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
return nullptr;
}
Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *DL,
Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -1948,10 +1944,10 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
// If the C and C++ standards are ever made sufficiently restrictive in this
// area, it may be possible to update LLVM's semantics accordingly and reinstate
// this optimization.
static Constant *computePointerICmp(const DataLayout *DL,
static Constant *computePointerICmp(const DataLayout &DL,
const TargetLibraryInfo *TLI,
CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
CmpInst::Predicate Pred, Value *LHS,
Value *RHS) {
// First, skip past any trivial no-ops.
LHS = LHS->stripPointerCasts();
RHS = RHS->stripPointerCasts();
@ -2369,8 +2365,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
if (MaxRecurse && Q.DL && isa<PtrToIntInst>(LI) &&
Q.DL->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
if (MaxRecurse && isa<PtrToIntInst>(LI) &&
Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
@ -3024,7 +3020,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
Instruction *CxtI) {
@ -3140,7 +3136,7 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -3235,7 +3231,7 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -3269,10 +3265,10 @@ static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
return Ops[0];
Type *Ty = PtrTy->getElementType();
if (Q.DL && Ty->isSized()) {
if (Ty->isSized()) {
Value *P;
uint64_t C;
uint64_t TyAllocSize = Q.DL->getTypeAllocSize(Ty);
uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
// getelementptr P, N -> P if P points to a type of zero size.
if (TyAllocSize == 0)
return Ops[0];
@ -3280,7 +3276,7 @@ static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
// The following transforms are only safe if the ptrtoint cast
// doesn't truncate the pointers.
if (Ops[1]->getType()->getScalarSizeInBits() ==
Q.DL->getPointerSizeInBits(AS)) {
Q.DL.getPointerSizeInBits(AS)) {
auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
if (match(P, m_Zero()))
return Constant::getNullValue(GEPTy);
@ -3325,7 +3321,7 @@ static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}
Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *DL,
Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -3362,7 +3358,7 @@ static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
}
Value *llvm::SimplifyInsertValueInst(
Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout *DL,
Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout &DL,
const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query(DL, TLI, DT, AC, CxtI),
@ -3410,7 +3406,7 @@ static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
return nullptr;
}
Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *DL,
Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -3507,7 +3503,7 @@ static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const DataLayout *DL, const TargetLibraryInfo *TLI,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyBinOp(Opcode, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
@ -3515,7 +3511,7 @@ Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const FastMathFlags &FMF, const DataLayout *DL,
const FastMathFlags &FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
@ -3533,7 +3529,7 @@ static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout *DL, const TargetLibraryInfo *TLI,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyCmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
@ -3609,7 +3605,7 @@ static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
}
Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const DataLayout *DL,
User::op_iterator ArgEnd, const DataLayout &DL,
const TargetLibraryInfo *TLI, const DominatorTree *DT,
AssumptionCache *AC, const Instruction *CxtI) {
return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT, AC, CxtI),
@ -3617,7 +3613,7 @@ Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
}
Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
const DataLayout *DL, const TargetLibraryInfo *TLI,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyCall(V, Args.begin(), Args.end(),
@ -3626,7 +3622,7 @@ Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *DL,
Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC) {
Value *Result;
@ -3774,12 +3770,12 @@ Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *DL,
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
bool Simplified = false;
SmallSetVector<Instruction *, 8> Worklist;
const DataLayout &DL = I->getModule()->getDataLayout();
// If we have an explicit value to collapse to, do that round of the
// simplification loop by hand initially.
@ -3827,19 +3823,18 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
return Simplified;
}
bool llvm::recursivelySimplifyInstruction(Instruction *I, const DataLayout *DL,
bool llvm::recursivelySimplifyInstruction(Instruction *I,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
return replaceAndRecursivelySimplifyImpl(I, nullptr, DL, TLI, DT, AC);
return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
assert(SimpleV && "Must provide a simplified value.");
return replaceAndRecursivelySimplifyImpl(I, SimpleV, DL, TLI, DT, AC);
return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
}
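
For illustration, a minimal sketch (not from this patch) of how a caller uses the updated SimplifyInstruction signature above: the DataLayout is now obtained by reference from the enclosing Module instead of being an optional, possibly-null pointer. The helper name trySimplify is hypothetical.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"

// Hypothetical caller: simplify a single instruction with the module's layout.
static bool trySimplify(llvm::Instruction *I) {
  // DataLayout is mandatory now, so fetch it by reference from the Module.
  const llvm::DataLayout &DL = I->getModule()->getDataLayout();
  if (llvm::Value *V =
          llvm::SimplifyInstruction(I, DL, /*TLI=*/nullptr, /*DT=*/nullptr,
                                    /*AC=*/nullptr)) {
    I->replaceAllUsesWith(V); // the optional analyses are simply omitted
    return true;
  }
  return false;
}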

View File

@ -191,7 +191,7 @@ public:
/// Merge the specified lattice value into this one, updating this
/// one and returning true if anything changed.
bool mergeIn(const LVILatticeVal &RHS) {
bool mergeIn(const LVILatticeVal &RHS, const DataLayout &DL) {
if (RHS.isUndefined() || isOverdefined()) return false;
if (RHS.isOverdefined()) return markOverdefined();
@ -215,11 +215,9 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
// FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getConstant(),
RHS.getNotConstant())))
if (ConstantInt *Res =
dyn_cast<ConstantInt>(ConstantFoldCompareInstOperands(
CmpInst::ICMP_NE, getConstant(), RHS.getNotConstant(), DL)))
if (Res->isOne())
return markNotConstant(RHS.getNotConstant());
@ -241,11 +239,9 @@ public:
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
// FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getNotConstant(),
RHS.getConstant())))
if (ConstantInt *Res =
dyn_cast<ConstantInt>(ConstantFoldCompareInstOperands(
CmpInst::ICMP_NE, getNotConstant(), RHS.getConstant(), DL)))
if (Res->isOne())
return false;
@ -353,13 +349,10 @@ namespace {
return true;
}
/// A pointer to the cache of @llvm.assume calls.
AssumptionCache *AC;
/// An optional DL pointer.
const DataLayout *DL;
/// An optional DT pointer.
DominatorTree *DT;
AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
const DataLayout &DL; ///< A mandatory DataLayout
DominatorTree *DT; ///< An optional DT pointer.
friend struct LVIValueHandle;
void insertResult(Value *Val, BasicBlock *BB, const LVILatticeVal &Result) {
@ -425,7 +418,7 @@ namespace {
OverDefinedCache.clear();
}
LazyValueInfoCache(AssumptionCache *AC, const DataLayout *DL = nullptr,
LazyValueInfoCache(AssumptionCache *AC, const DataLayout &DL,
DominatorTree *DT = nullptr)
: AC(AC), DL(DL), DT(DT) {}
};
@ -578,11 +571,13 @@ bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
return L->getPointerAddressSpace() == 0 &&
GetUnderlyingObject(L->getPointerOperand()) == Ptr;
GetUnderlyingObject(L->getPointerOperand(),
L->getModule()->getDataLayout()) == Ptr;
}
if (StoreInst *S = dyn_cast<StoreInst>(I)) {
return S->getPointerAddressSpace() == 0 &&
GetUnderlyingObject(S->getPointerOperand()) == Ptr;
GetUnderlyingObject(S->getPointerOperand(),
S->getModule()->getDataLayout()) == Ptr;
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (MI->isVolatile()) return false;
@ -592,11 +587,13 @@ static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (!Len || Len->isZero()) return false;
if (MI->getDestAddressSpace() == 0)
if (GetUnderlyingObject(MI->getRawDest()) == Ptr)
if (GetUnderlyingObject(MI->getRawDest(),
MI->getModule()->getDataLayout()) == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
if (MTI->getSourceAddressSpace() == 0)
if (GetUnderlyingObject(MTI->getRawSource()) == Ptr)
if (GetUnderlyingObject(MTI->getRawSource(),
MTI->getModule()->getDataLayout()) == Ptr)
return true;
}
return false;
@ -613,10 +610,11 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
if (isKnownNonNull(Val)) {
NotNull = true;
} else {
Value *UnderlyingVal = GetUnderlyingObject(Val);
const DataLayout &DL = BB->getModule()->getDataLayout();
Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
// If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
// inside InstructionDereferencesPointer either.
if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, nullptr, 1)) {
if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1)) {
for (Instruction &I : *BB) {
if (InstructionDereferencesPointer(&I, UnderlyingVal)) {
NotNull = true;
@ -650,7 +648,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
if (EdgesMissing)
continue;
Result.mergeIn(EdgeResult);
Result.mergeIn(EdgeResult, DL);
// If we hit overdefined, exit early. The BlockVals entry is already set
// to overdefined.
@ -695,7 +693,7 @@ bool LazyValueInfoCache::solveBlockValuePHINode(LVILatticeVal &BBLV,
if (EdgesMissing)
continue;
Result.mergeIn(EdgeResult);
Result.mergeIn(EdgeResult, DL);
// If we hit overdefined, exit early. The BlockVals entry is already set
// to overdefined.
@ -734,7 +732,7 @@ void LazyValueInfoCache::mergeAssumeBlockValueConstantRange(Value *Val,
if (!AssumeVH)
continue;
auto *I = cast<CallInst>(AssumeVH);
if (!isValidAssumeForContext(I, BBI, DL, DT))
if (!isValidAssumeForContext(I, BBI, DT))
continue;
Value *C = I->getArgOperand(0);
@ -744,7 +742,7 @@ void LazyValueInfoCache::mergeAssumeBlockValueConstantRange(Value *Val,
if (BBLV.isOverdefined())
BBLV = Result;
else
BBLV.mergeIn(Result);
BBLV.mergeIn(Result, DL);
}
}
}
@ -1103,26 +1101,27 @@ void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
/// This lazily constructs the LazyValueInfoCache.
static LazyValueInfoCache &getCache(void *&PImpl, AssumptionCache *AC,
const DataLayout *DL = nullptr,
const DataLayout *DL,
DominatorTree *DT = nullptr) {
if (!PImpl)
PImpl = new LazyValueInfoCache(AC, DL, DT);
if (!PImpl) {
assert(DL && "getCache() called with a null DataLayout");
PImpl = new LazyValueInfoCache(AC, *DL, DT);
}
return *static_cast<LazyValueInfoCache*>(PImpl);
}
bool LazyValueInfo::runOnFunction(Function &F) {
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
const DataLayout &DL = F.getParent()->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
if (PImpl)
getCache(PImpl, AC, DL, DT).clear();
getCache(PImpl, AC, &DL, DT).clear();
// Fully lazy.
return false;
@ -1137,15 +1136,16 @@ void LazyValueInfo::getAnalysisUsage(AnalysisUsage &AU) const {
void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
delete &getCache(PImpl, AC);
delete &getCache(PImpl, AC, nullptr);
PImpl = nullptr;
}
}
Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
Instruction *CxtI) {
const DataLayout &DL = BB->getModule()->getDataLayout();
LVILatticeVal Result =
getCache(PImpl, AC, DL, DT).getValueInBlock(V, BB, CxtI);
getCache(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
if (Result.isConstant())
return Result.getConstant();
@ -1162,8 +1162,9 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
BasicBlock *ToBB,
Instruction *CxtI) {
const DataLayout &DL = FromBB->getModule()->getDataLayout();
LVILatticeVal Result =
getCache(PImpl, AC, DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
getCache(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
if (Result.isConstant())
return Result.getConstant();
@ -1175,9 +1176,10 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
return nullptr;
}
static LazyValueInfo::Tristate
getPredicateResult(unsigned Pred, Constant *C, LVILatticeVal &Result,
const DataLayout *DL, TargetLibraryInfo *TLI) {
static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
LVILatticeVal &Result,
const DataLayout &DL,
TargetLibraryInfo *TLI) {
// If we know the value is a constant, evaluate the conditional.
Constant *Res = nullptr;
@ -1248,8 +1250,9 @@ LazyValueInfo::Tristate
LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
BasicBlock *FromBB, BasicBlock *ToBB,
Instruction *CxtI) {
const DataLayout &DL = FromBB->getModule()->getDataLayout();
LVILatticeVal Result =
getCache(PImpl, AC, DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
getCache(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
return getPredicateResult(Pred, C, Result, DL, TLI);
}
@ -1257,18 +1260,23 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
LazyValueInfo::Tristate
LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
Instruction *CxtI) {
LVILatticeVal Result = getCache(PImpl, AC, DL, DT).getValueAt(V, CxtI);
const DataLayout &DL = CxtI->getModule()->getDataLayout();
LVILatticeVal Result = getCache(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
return getPredicateResult(Pred, C, Result, DL, TLI);
}
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
BasicBlock *NewSucc) {
if (PImpl)
getCache(PImpl, AC, DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
if (PImpl) {
const DataLayout &DL = PredBB->getModule()->getDataLayout();
getCache(PImpl, AC, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
}
}
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
if (PImpl)
getCache(PImpl, AC, DL, DT).eraseBlock(BB);
if (PImpl) {
const DataLayout &DL = BB->getModule()->getDataLayout();
getCache(PImpl, AC, &DL, DT).eraseBlock(BB);
}
}
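
With the changes above, LazyValueInfo's cache resolves the DataLayout lazily from the block or instruction being queried, so callers no longer thread a layout through at all. A hedged sketch (not from this patch) of a typical edge query; the helper edgeImpliesEqual and its arguments are hypothetical.

#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/InstrTypes.h"

// Hypothetical client: the cache pulls the layout from
// FromBB->getModule()->getDataLayout() internally, as shown above.
static bool edgeImpliesEqual(llvm::LazyValueInfo &LVI, llvm::Value *V,
                             llvm::Constant *C, llvm::BasicBlock *FromBB,
                             llvm::BasicBlock *ToBB) {
  return LVI.getPredicateOnEdge(llvm::CmpInst::ICMP_EQ, V, C, FromBB, ToBB) ==
         llvm::LazyValueInfo::True;
}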

View File

@ -98,8 +98,8 @@ namespace {
void visitInsertElementInst(InsertElementInst &I);
void visitUnreachableInst(UnreachableInst &I);
Value *findValue(Value *V, bool OffsetOk) const;
Value *findValueImpl(Value *V, bool OffsetOk,
Value *findValue(Value *V, const DataLayout &DL, bool OffsetOk) const;
Value *findValueImpl(Value *V, const DataLayout &DL, bool OffsetOk,
SmallPtrSetImpl<Value *> &Visited) const;
public:
@ -107,7 +107,6 @@ namespace {
AliasAnalysis *AA;
AssumptionCache *AC;
DominatorTree *DT;
const DataLayout *DL;
TargetLibraryInfo *TLI;
std::string Messages;
@ -175,7 +174,6 @@ bool Lint::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
visit(F);
dbgs() << MessagesStr.str();
@ -195,11 +193,13 @@ void Lint::visitFunction(Function &F) {
void Lint::visitCallSite(CallSite CS) {
Instruction &I = *CS.getInstruction();
Value *Callee = CS.getCalledValue();
const DataLayout &DL = CS->getModule()->getDataLayout();
visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
0, nullptr, MemRef::Callee);
if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
if (Function *F = dyn_cast<Function>(findValue(Callee, DL,
/*OffsetOk=*/false))) {
Assert(CS.getCallingConv() == F->getCallingConv(),
"Undefined behavior: Caller and callee calling convention differ",
&I);
@ -248,8 +248,8 @@ void Lint::visitCallSite(CallSite CS) {
Type *Ty =
cast<PointerType>(Formal->getType())->getElementType();
visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
DL ? DL->getABITypeAlignment(Ty) : 0,
Ty, MemRef::Read | MemRef::Write);
DL.getABITypeAlignment(Ty), Ty,
MemRef::Read | MemRef::Write);
}
}
}
@ -258,9 +258,10 @@ void Lint::visitCallSite(CallSite CS) {
if (CS.isCall() && cast<CallInst>(CS.getInstruction())->isTailCall())
for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
AI != AE; ++AI) {
Value *Obj = findValue(*AI, /*OffsetOk=*/true);
Value *Obj = findValue(*AI, DL, /*OffsetOk=*/true);
Assert(!isa<AllocaInst>(Obj),
"Undefined behavior: Call with \"tail\" keyword references alloca",
"Undefined behavior: Call with \"tail\" keyword references "
"alloca",
&I);
}
@ -286,8 +287,8 @@ void Lint::visitCallSite(CallSite CS) {
// overlap is not distinguished from the case where nothing is known.
uint64_t Size = 0;
if (const ConstantInt *Len =
dyn_cast<ConstantInt>(findValue(MCI->getLength(),
/*OffsetOk=*/false)))
dyn_cast<ConstantInt>(findValue(MCI->getLength(), DL,
/*OffsetOk=*/false)))
if (Len->getValue().isIntN(32))
Size = Len->getValue().getZExtValue();
Assert(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
@ -365,7 +366,8 @@ void Lint::visitReturnInst(ReturnInst &I) {
"Unusual: Return statement in function with noreturn attribute", &I);
if (Value *V = I.getReturnValue()) {
Value *Obj = findValue(V, /*OffsetOk=*/true);
Value *Obj =
findValue(V, F->getParent()->getDataLayout(), /*OffsetOk=*/true);
Assert(!isa<AllocaInst>(Obj), "Unusual: Returning alloca value", &I);
}
}
@ -380,7 +382,8 @@ void Lint::visitMemoryReference(Instruction &I,
if (Size == 0)
return;
Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
Value *UnderlyingObject =
findValue(Ptr, I.getModule()->getDataLayout(), /*OffsetOk=*/true);
Assert(!isa<ConstantPointerNull>(UnderlyingObject),
"Undefined behavior: Null pointer dereference", &I);
Assert(!isa<UndefValue>(UnderlyingObject),
@ -419,6 +422,7 @@ void Lint::visitMemoryReference(Instruction &I,
// Check for buffer overflows and misalignment.
// Only handles memory references that read/write something simple like an
// alloca instruction or a global variable.
auto &DL = I.getModule()->getDataLayout();
int64_t Offset = 0;
if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {
// OK, so the access is to a constant offset from Ptr. Check that Ptr is
@ -429,21 +433,21 @@ void Lint::visitMemoryReference(Instruction &I,
if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
Type *ATy = AI->getAllocatedType();
if (DL && !AI->isArrayAllocation() && ATy->isSized())
BaseSize = DL->getTypeAllocSize(ATy);
if (!AI->isArrayAllocation() && ATy->isSized())
BaseSize = DL.getTypeAllocSize(ATy);
BaseAlign = AI->getAlignment();
if (DL && BaseAlign == 0 && ATy->isSized())
BaseAlign = DL->getABITypeAlignment(ATy);
if (BaseAlign == 0 && ATy->isSized())
BaseAlign = DL.getABITypeAlignment(ATy);
} else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
// If the global may be defined differently in another compilation unit
// then don't warn about funky memory accesses.
if (GV->hasDefinitiveInitializer()) {
Type *GTy = GV->getType()->getElementType();
if (DL && GTy->isSized())
BaseSize = DL->getTypeAllocSize(GTy);
if (GTy->isSized())
BaseSize = DL.getTypeAllocSize(GTy);
BaseAlign = GV->getAlignment();
if (DL && BaseAlign == 0 && GTy->isSized())
BaseAlign = DL->getABITypeAlignment(GTy);
if (BaseAlign == 0 && GTy->isSized())
BaseAlign = DL.getABITypeAlignment(GTy);
}
}
@ -456,8 +460,8 @@ void Lint::visitMemoryReference(Instruction &I,
// Accesses that say that the memory is more aligned than it is are not
// defined.
if (DL && Align == 0 && Ty && Ty->isSized())
Align = DL->getABITypeAlignment(Ty);
if (Align == 0 && Ty && Ty->isSized())
Align = DL.getABITypeAlignment(Ty);
Assert(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
"Undefined behavior: Memory reference address is misaligned", &I);
}
@ -487,22 +491,23 @@ void Lint::visitSub(BinaryOperator &I) {
}
void Lint::visitLShr(BinaryOperator &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(
findValue(I.getOperand(1), I.getModule()->getDataLayout(),
/*OffsetOk=*/false)))
Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
"Undefined result: Shift count out of range", &I);
}
void Lint::visitAShr(BinaryOperator &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(
I.getOperand(1), I.getModule()->getDataLayout(), /*OffsetOk=*/false)))
Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
"Undefined result: Shift count out of range", &I);
}
void Lint::visitShl(BinaryOperator &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(
I.getOperand(1), I.getModule()->getDataLayout(), /*OffsetOk=*/false)))
Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
"Undefined result: Shift count out of range", &I);
}
@ -688,7 +693,7 @@ void Lint::visitEHEndCatch(IntrinsicInst *II) {
II);
}
static bool isZero(Value *V, const DataLayout *DL, DominatorTree *DT,
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
AssumptionCache *AC) {
// Assume undef could be zero.
if (isa<UndefValue>(V))
@ -729,22 +734,22 @@ static bool isZero(Value *V, const DataLayout *DL, DominatorTree *DT,
}
void Lint::visitSDiv(BinaryOperator &I) {
Assert(!isZero(I.getOperand(1), DL, DT, AC),
Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}
void Lint::visitUDiv(BinaryOperator &I) {
Assert(!isZero(I.getOperand(1), DL, DT, AC),
Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}
void Lint::visitSRem(BinaryOperator &I) {
Assert(!isZero(I.getOperand(1), DL, DT, AC),
Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}
void Lint::visitURem(BinaryOperator &I) {
Assert(!isZero(I.getOperand(1), DL, DT, AC),
Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}
@ -771,17 +776,17 @@ void Lint::visitIndirectBrInst(IndirectBrInst &I) {
}
void Lint::visitExtractElementInst(ExtractElementInst &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
/*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(
findValue(I.getIndexOperand(), I.getModule()->getDataLayout(),
/*OffsetOk=*/false)))
Assert(CI->getValue().ult(I.getVectorOperandType()->getNumElements()),
"Undefined result: extractelement index out of range", &I);
}
void Lint::visitInsertElementInst(InsertElementInst &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getOperand(2),
/*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(
findValue(I.getOperand(2), I.getModule()->getDataLayout(),
/*OffsetOk=*/false)))
Assert(CI->getValue().ult(I.getType()->getNumElements()),
"Undefined result: insertelement index out of range", &I);
}
@ -802,13 +807,13 @@ void Lint::visitUnreachableInst(UnreachableInst &I) {
/// Most analysis passes don't require this logic, because instcombine
/// will simplify most of these kinds of things away. But it's a goal of
/// this Lint pass to be useful even on non-optimized IR.
Value *Lint::findValue(Value *V, bool OffsetOk) const {
Value *Lint::findValue(Value *V, const DataLayout &DL, bool OffsetOk) const {
SmallPtrSet<Value *, 4> Visited;
return findValueImpl(V, OffsetOk, Visited);
return findValueImpl(V, DL, OffsetOk, Visited);
}
/// findValueImpl - Implementation helper for findValue.
Value *Lint::findValueImpl(Value *V, bool OffsetOk,
Value *Lint::findValueImpl(Value *V, const DataLayout &DL, bool OffsetOk,
SmallPtrSetImpl<Value *> &Visited) const {
// Detect self-referential values.
if (!Visited.insert(V).second)
@ -829,7 +834,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
break;
if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(),
BB, BBI, 6, AA))
return findValueImpl(U, OffsetOk, Visited);
return findValueImpl(U, DL, OffsetOk, Visited);
if (BBI != BB->begin()) break;
BB = BB->getUniquePredecessor();
if (!BB) break;
@ -838,40 +843,38 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
} else if (PHINode *PN = dyn_cast<PHINode>(V)) {
if (Value *W = PN->hasConstantValue())
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
} else if (CastInst *CI = dyn_cast<CastInst>(V)) {
if (CI->isNoopCast(DL))
return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
return findValueImpl(CI->getOperand(0), DL, OffsetOk, Visited);
} else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
Ex->getIndices()))
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
// Same as above, but for ConstantExpr instead of Instruction.
if (Instruction::isCast(CE->getOpcode())) {
if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
CE->getOperand(0)->getType(),
CE->getType(),
DL ? DL->getIntPtrType(V->getType()) :
Type::getInt64Ty(V->getContext())))
return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
CE->getOperand(0)->getType(), CE->getType(),
DL.getIntPtrType(V->getType())))
return findValueImpl(CE->getOperand(0), DL, OffsetOk, Visited);
} else if (CE->getOpcode() == Instruction::ExtractValue) {
ArrayRef<unsigned> Indices = CE->getIndices();
if (Value *W = FindInsertedValue(CE->getOperand(0), Indices))
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
}
}
// As a last resort, try SimplifyInstruction or constant folding.
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT, AC))
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
}
return V;
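
The Lint changes above repeat one idiom: instead of a cached, possibly-null DL member, the layout is looked up from the instruction's module at the point of use. A hedged sketch (not from this patch) of that idiom in isolation; baseAlignOf is a hypothetical helper mirroring visitMemoryReference.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

// Hypothetical helper: base alignment of an alloca, with no "DL may be null"
// guards because the layout is always available from the Module.
static unsigned baseAlignOf(llvm::AllocaInst *AI) {
  const llvm::DataLayout &DL = AI->getModule()->getDataLayout();
  llvm::Type *ATy = AI->getAllocatedType();
  unsigned Align = AI->getAlignment();
  if (Align == 0 && ATy->isSized())
    Align = DL.getABITypeAlignment(ATy); // fall back to the ABI alignment
  return Align;
}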

View File

@ -63,7 +63,8 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
unsigned Align, const DataLayout *DL) {
unsigned Align) {
const DataLayout &DL = ScanFrom->getModule()->getDataLayout();
int64_t ByteOffset = 0;
Value *Base = V;
Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);
@ -88,19 +89,19 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
}
PointerType *AddrTy = cast<PointerType>(V->getType());
uint64_t LoadSize = DL ? DL->getTypeStoreSize(AddrTy->getElementType()) : 0;
uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType());
// If we found a base allocated type from either an alloca or global variable,
// try to see if we are definitively within the allocated region. We need to
// know the size of the base type and the loaded type to do anything in this
// case, so only try this when we have the DataLayout available.
if (BaseType && BaseType->isSized() && DL) {
// case.
if (BaseType && BaseType->isSized()) {
if (BaseAlign == 0)
BaseAlign = DL->getPrefTypeAlignment(BaseType);
BaseAlign = DL.getPrefTypeAlignment(BaseType);
if (Align <= BaseAlign) {
// Check if the load is within the bounds of the underlying object.
if (ByteOffset + LoadSize <= DL->getTypeAllocSize(BaseType) &&
if (ByteOffset + LoadSize <= DL.getTypeAllocSize(BaseType) &&
(Align == 0 || (ByteOffset % Align) == 0))
return true;
}
@ -134,16 +135,13 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
else
continue;
// Handle trivial cases even w/o DataLayout or other work.
// Handle trivial cases.
if (AccessedPtr == V)
return true;
if (!DL)
continue;
auto *AccessedTy = cast<PointerType>(AccessedPtr->getType());
if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
LoadSize <= DL->getTypeStoreSize(AccessedTy->getElementType()))
LoadSize <= DL.getTypeStoreSize(AccessedTy->getElementType()))
return true;
}
return false;
@ -177,8 +175,6 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
// Try to get the DataLayout for this module. This may be null, in which case
// the optimizations will be limited.
const DataLayout &DL = ScanBB->getModule()->getDataLayout();
// Try to get the store size for the type.
@ -207,7 +203,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
if (AreEquivalentAddressValues(
LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, &DL)) {
CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
if (AATags)
LI->getAAMetadata(*AATags);
return LI;
@ -220,7 +216,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// those cases are unlikely.)
if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
AccessTy, &DL)) {
AccessTy, DL)) {
if (AATags)
SI->getAAMetadata(*AATags);
return SI->getOperand(0);
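
A hedged sketch (not from this patch) of calling the simplified isSafeToLoadUnconditionally above; the explicit DataLayout argument is gone because the function now derives it from ScanFrom's module. The helper canSpeculateLoad is hypothetical.

#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"

// Hypothetical speculation check for hoisting a load to InsertPt.
static bool canSpeculateLoad(llvm::LoadInst *LI, llvm::Instruction *InsertPt) {
  return llvm::isSafeToLoadUnconditionally(LI->getPointerOperand(), InsertPt,
                                           LI->getAlignment());
}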

View File

@ -168,8 +168,8 @@ public:
/// \brief Set of potential dependent memory accesses.
typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
AccessAnalysis(const DataLayout *Dl, AliasAnalysis *AA, DepCandidates &DA) :
DL(Dl), AST(*AA), DepCands(DA), IsRTCheckNeeded(false) {}
AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, DepCandidates &DA)
: DL(Dl), AST(*AA), DepCands(DA), IsRTCheckNeeded(false) {}
/// \brief Register a load and whether it is only read from.
void addLoad(AliasAnalysis::Location &Loc, bool IsReadOnly) {
@ -217,14 +217,14 @@ private:
/// Set of all accesses.
PtrAccessSet Accesses;
const DataLayout &DL;
/// Set of accesses that need a further dependence check.
MemAccessInfoSet CheckDeps;
/// Set of pointers that are read only.
SmallPtrSet<Value*, 16> ReadOnlyPtr;
const DataLayout *DL;
/// An alias set tracker to partition the access set by underlying object and
//intrinsic property (such as TBAA metadata).
AliasSetTracker AST;
@ -252,8 +252,8 @@ static bool hasComputableBounds(ScalarEvolution *SE,
/// \brief Check the stride of the pointer and ensure that it does not wrap in
/// the address space.
static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
const Loop *Lp, const ValueToValueMap &StridesMap);
static int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
const ValueToValueMap &StridesMap);
bool AccessAnalysis::canCheckPtrAtRT(
LoopAccessInfo::RuntimePointerCheck &RtCheck, unsigned &NumComparisons,
@ -289,10 +289,10 @@ bool AccessAnalysis::canCheckPtrAtRT(
++NumReadPtrChecks;
if (hasComputableBounds(SE, StridesMap, Ptr) &&
// When we run after a failing dependency check we have to make sure we
// don't have wrapping pointers.
// When we run after a failing dependency check we have to make sure
// we don't have wrapping pointers.
(!ShouldCheckStride ||
isStridedPtr(SE, DL, Ptr, TheLoop, StridesMap) == 1)) {
isStridedPtr(SE, Ptr, TheLoop, StridesMap) == 1)) {
// The id of the dependence set.
unsigned DepId;
@ -498,8 +498,8 @@ public:
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;
MemoryDepChecker(ScalarEvolution *Se, const DataLayout *Dl, const Loop *L)
: SE(Se), DL(Dl), InnermostLoop(L), AccessIdx(0),
MemoryDepChecker(ScalarEvolution *Se, const Loop *L)
: SE(Se), InnermostLoop(L), AccessIdx(0),
ShouldRetryWithRuntimeCheck(false) {}
/// \brief Register the location (instructions are given increasing numbers)
@ -536,7 +536,6 @@ public:
private:
ScalarEvolution *SE;
const DataLayout *DL;
const Loop *InnermostLoop;
/// \brief Maps access locations (ptr, read/write) to program order.
@ -585,8 +584,8 @@ static bool isInBoundsGep(Value *Ptr) {
}
/// \brief Check whether the access through \p Ptr has a constant stride.
static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
const Loop *Lp, const ValueToValueMap &StridesMap) {
static int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
const ValueToValueMap &StridesMap) {
const Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Unexpected non-ptr");
@ -640,7 +639,8 @@ static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
return 0;
}
int64_t Size = DL->getTypeAllocSize(PtrTy->getElementType());
auto &DL = Lp->getHeader()->getModule()->getDataLayout();
int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
const APInt &APStepVal = C->getValue()->getValue();
// Huge step value - give up.
@ -726,8 +726,8 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, APtr);
const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, BPtr);
int StrideAPtr = isStridedPtr(SE, DL, APtr, InnermostLoop, Strides);
int StrideBPtr = isStridedPtr(SE, DL, BPtr, InnermostLoop, Strides);
int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides);
int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides);
const SCEV *Src = AScev;
const SCEV *Sink = BScev;
@ -768,7 +768,8 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
Type *ATy = APtr->getType()->getPointerElementType();
Type *BTy = BPtr->getType()->getPointerElementType();
unsigned TypeByteSize = DL->getTypeAllocSize(ATy);
auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
unsigned TypeByteSize = DL.getTypeAllocSize(ATy);
// Negative distances are not plausible dependencies.
const APInt &Val = C->getValue()->getValue();
@ -939,7 +940,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
PtrRtCheck.Need = false;
const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
MemoryDepChecker DepChecker(SE, DL, TheLoop);
MemoryDepChecker DepChecker(SE, TheLoop);
// For each block.
for (Loop::block_iterator bb = TheLoop->block_begin(),
@ -1009,7 +1010,8 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
}
AccessAnalysis::DepCandidates DependentAccesses;
AccessAnalysis Accesses(DL, AA, DependentAccesses);
AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
AA, DependentAccesses);
// Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
// multiple times on the same object. If the ptr is accessed twice, once
@ -1068,8 +1070,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
// read a few words, modify, and write a few words, and some of the
// words may be written to the same address.
bool IsReadOnlyPtr = false;
if (Seen.insert(Ptr).second ||
!isStridedPtr(SE, DL, Ptr, TheLoop, Strides)) {
if (Seen.insert(Ptr).second || !isStridedPtr(SE, Ptr, TheLoop, Strides)) {
++NumReads;
IsReadOnlyPtr = true;
}
@ -1223,7 +1224,7 @@ LoopAccessInfo::addRuntimeCheck(Instruction *Loc) const {
SmallVector<TrackingVH<Value> , 2> Ends;
LLVMContext &Ctx = Loc->getContext();
SCEVExpander Exp(*SE, "induction");
SCEVExpander Exp(*SE, DL, "induction");
Instruction *FirstInst = nullptr;
for (unsigned i = 0; i < NumPointers; ++i) {
@ -1298,7 +1299,7 @@ LoopAccessInfo::addRuntimeCheck(Instruction *Loc) const {
}
LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI, AliasAnalysis *AA,
DominatorTree *DT,
const ValueToValueMap &Strides)
@ -1336,6 +1337,7 @@ LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
#endif
if (!LAI) {
const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, Strides);
#ifndef NDEBUG
LAI->NumSymbolicStrides = Strides.size();
@ -1360,7 +1362,6 @@ void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
bool LoopAccessAnalysis::runOnFunction(Function &F) {
SE = &getAnalysis<ScalarEvolution>();
DL = &F.getParent()->getDataLayout();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
AA = &getAnalysis<AliasAnalysis>();
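
The LoopAccessAnalysis hunks above repeatedly reach the layout through the loop header. A hedged sketch (not from this patch) of that lookup, assuming a Loop obtained from LoopInfo; layoutFor is a hypothetical helper.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"

// Hypothetical helper: all blocks of a loop live in one Module, so the header
// is a convenient anchor for the now-mandatory DataLayout.
static const llvm::DataLayout &layoutFor(const llvm::Loop *Lp) {
  return Lp->getHeader()->getModule()->getDataLayout();
}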

View File

@ -53,7 +53,7 @@ bool MemDerefPrinter::runOnFunction(Function &F) {
for (auto &I: inst_range(F)) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
Value *PO = LI->getPointerOperand();
if (PO->isDereferenceablePointer(&DL))
if (PO->isDereferenceablePointer(DL))
Vec.push_back(PO);
}
}
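
Value::isDereferenceablePointer now takes the layout by reference, as used above. A hedged sketch (not from this patch) of an equivalent standalone check; loadsKnownDereferenceable is a hypothetical helper.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

// Hypothetical check: is the pointer operand of this load known dereferenceable?
static bool loadsKnownDereferenceable(llvm::LoadInst *LI) {
  const llvm::DataLayout &DL = LI->getModule()->getDataLayout();
  return LI->getPointerOperand()->isDereferenceablePointer(DL);
}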

View File

@ -206,7 +206,7 @@ const CallInst *llvm::extractMallocCall(const Value *I,
return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : nullptr;
}
static Value *computeArraySize(const CallInst *CI, const DataLayout *DL,
static Value *computeArraySize(const CallInst *CI, const DataLayout &DL,
const TargetLibraryInfo *TLI,
bool LookThroughSExt = false) {
if (!CI)
@ -214,12 +214,12 @@ static Value *computeArraySize(const CallInst *CI, const DataLayout *DL,
// The size of the malloc's result type must be known to determine array size.
Type *T = getMallocAllocatedType(CI, TLI);
if (!T || !T->isSized() || !DL)
if (!T || !T->isSized())
return nullptr;
unsigned ElementSize = DL->getTypeAllocSize(T);
unsigned ElementSize = DL.getTypeAllocSize(T);
if (StructType *ST = dyn_cast<StructType>(T))
ElementSize = DL->getStructLayout(ST)->getSizeInBytes();
ElementSize = DL.getStructLayout(ST)->getSizeInBytes();
// If malloc call's arg can be determined to be a multiple of ElementSize,
// return the multiple. Otherwise, return NULL.
@ -280,7 +280,7 @@ Type *llvm::getMallocAllocatedType(const CallInst *CI,
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *DL,
Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout &DL,
const TargetLibraryInfo *TLI,
bool LookThroughSExt) {
assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
@ -350,11 +350,8 @@ const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the aligment of allocas,
/// byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *DL,
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
const TargetLibraryInfo *TLI, bool RoundToAlign) {
if (!DL)
return false;
ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), RoundToAlign);
SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
if (!Visitor.bothKnown(Data))
@ -382,17 +379,17 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
return Size;
}
ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *DL,
ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
const TargetLibraryInfo *TLI,
LLVMContext &Context,
bool RoundToAlign)
: DL(DL), TLI(TLI), RoundToAlign(RoundToAlign) {
: DL(DL), TLI(TLI), RoundToAlign(RoundToAlign) {
// Pointer size must be rechecked for each object visited since it could have
// a different address space.
}
SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
IntTyBits = DL->getPointerTypeSizeInBits(V->getType());
IntTyBits = DL.getPointerTypeSizeInBits(V->getType());
Zero = APInt::getNullValue(IntTyBits);
V = V->stripPointerCasts();
@ -432,7 +429,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
if (!I.getAllocatedType()->isSized())
return unknown();
APInt Size(IntTyBits, DL->getTypeAllocSize(I.getAllocatedType()));
APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
if (!I.isArrayAllocation())
return std::make_pair(align(Size, I.getAlignment()), Zero);
@ -451,7 +448,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
return unknown();
}
PointerType *PT = cast<PointerType>(A.getType());
APInt Size(IntTyBits, DL->getTypeAllocSize(PT->getElementType()));
APInt Size(IntTyBits, DL.getTypeAllocSize(PT->getElementType()));
return std::make_pair(align(Size, A.getParamAlignment()), Zero);
}
@ -524,7 +521,7 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
SizeOffsetType PtrData = compute(GEP.getPointerOperand());
APInt Offset(IntTyBits, 0);
if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(*DL, Offset))
if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
return unknown();
return std::make_pair(PtrData.first, PtrData.second + Offset);
@ -540,7 +537,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV){
if (!GV.hasDefinitiveInitializer())
return unknown();
APInt Size(IntTyBits, DL->getTypeAllocSize(GV.getType()->getElementType()));
APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getType()->getElementType()));
return std::make_pair(align(Size, GV.getAlignment()), Zero);
}
@ -576,19 +573,18 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
return unknown();
}
ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *DL,
const TargetLibraryInfo *TLI,
LLVMContext &Context,
bool RoundToAlign)
: DL(DL), TLI(TLI), Context(Context), Builder(Context, TargetFolder(DL)),
RoundToAlign(RoundToAlign) {
ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
bool RoundToAlign)
: DL(DL), TLI(TLI), Context(Context), Builder(Context, TargetFolder(DL)),
RoundToAlign(RoundToAlign) {
// IntTy and Zero must be set for each compute() since the address space may
// be different for later objects.
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
// XXX - Are vectors of pointers possible here?
IntTy = cast<IntegerType>(DL->getIntPtrType(V->getType()));
IntTy = cast<IntegerType>(DL.getIntPtrType(V->getType()));
Zero = ConstantInt::get(IntTy, 0);
SizeOffsetEvalType Result = compute_(V);
@ -670,7 +666,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
assert(I.isArrayAllocation());
Value *ArraySize = I.getArraySize();
Value *Size = ConstantInt::get(ArraySize->getType(),
DL->getTypeAllocSize(I.getAllocatedType()));
DL.getTypeAllocSize(I.getAllocatedType()));
Size = Builder.CreateMul(Size, ArraySize);
return std::make_pair(Size, Zero);
}
@ -722,7 +718,7 @@ ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
if (!bothKnown(PtrData))
return unknown();
Value *Offset = EmitGEPOffset(&Builder, *DL, &GEP, /*NoAssumptions=*/true);
Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
Offset = Builder.CreateAdd(PtrData.second, Offset);
return std::make_pair(PtrData.first, Offset);
}
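
A hedged sketch (not from this patch) of calling the updated getObjectSize above: the layout is a required reference, so the old "return false if no DataLayout" escape hatch disappears. The helper objectSizeInBytes and the null TLI are assumptions for the example.

#include <cstdint>
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"

// Hypothetical query: static size in bytes of the object behind Ptr, if known.
static bool objectSizeInBytes(const llvm::Value *Ptr,
                              const llvm::Instruction *Ctx, uint64_t &Size) {
  const llvm::DataLayout &DL = Ctx->getModule()->getDataLayout();
  return llvm::getObjectSize(Ptr, Size, DL, /*TLI=*/nullptr);
}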

View File

@ -93,7 +93,6 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DL = &F.getParent()->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
@ -262,22 +261,17 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
///
/// MemLocBase, MemLocOffset are lazily computed here the first time the
/// base/offs of memloc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
const Value *&MemLocBase,
int64_t &MemLocOffs,
const LoadInst *LI,
const DataLayout *DL) {
// If we have no target data, we can't do this.
if (!DL) return false;
static bool isLoadLoadClobberIfExtendedToFullWidth(
const AliasAnalysis::Location &MemLoc, const Value *&MemLocBase,
int64_t &MemLocOffs, const LoadInst *LI) {
const DataLayout &DL = LI->getModule()->getDataLayout();
// If we haven't already computed the base/offset of MemLoc, do so now.
if (!MemLocBase)
MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);
unsigned Size = MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
LI, *DL);
unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
MemLocBase, MemLocOffs, MemLoc.Size, LI);
return Size != 0;
}
@ -288,10 +282,9 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use. If not, this returns zero.
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
unsigned MemLocSize, const LoadInst *LI,
const DataLayout &DL) {
unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
const LoadInst *LI) {
// We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
@ -300,10 +293,12 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
return 0;
const DataLayout &DL = LI->getModule()->getDataLayout();
// Get the base of this load.
int64_t LIOffs = 0;
const Value *LIBase =
GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);
GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);
// If the two pointers are not based on the same pointer, we can't tell that
// they are related.
@ -420,6 +415,8 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
isInvariantLoad = true;
}
const DataLayout &DL = BB->getModule()->getDataLayout();
// Walk backwards through the basic block, looking for dependencies.
while (ScanIt != BB->begin()) {
Instruction *Inst = --ScanIt;
@ -504,12 +501,12 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// location is 1 byte at P+1). If so, return it as a load/load
// clobber result, allowing the client to decide to widen the load if
// it wants to.
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
if (LI->getAlignment() * 8 > ITy->getPrimitiveSizeInBits() &&
isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
MemLocOffset, LI, DL))
MemLocOffset, LI))
return MemDepResult::getClobber(Inst);
}
continue;
}
@ -922,8 +919,7 @@ getNonLocalPointerDependency(Instruction *QueryInst,
const_cast<Value *>(Loc.Ptr)));
return;
}
const DataLayout &DL = FromBB->getModule()->getDataLayout();
PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AC);
// This is the set of blocks we've inspected, and the pointer we consider in

View File

@ -3130,39 +3130,23 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
}
const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
// If we have DataLayout, we can bypass creating a target-independent
// We can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (DL)
return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
assert(Ty == IntTy && "Effective SCEV type doesn't match");
return getTruncateOrZeroExtend(getSCEV(C), Ty);
return getConstant(IntTy,
F->getParent()->getDataLayout().getTypeAllocSize(AllocTy));
}
const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
StructType *STy,
unsigned FieldNo) {
// If we have DataLayout, we can bypass creating a target-independent
// We can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (DL) {
return getConstant(IntTy,
DL->getStructLayout(STy)->getElementOffset(FieldNo));
}
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
return getConstant(
IntTy,
F->getParent()->getDataLayout().getStructLayout(STy)->getElementOffset(
FieldNo));
}
const SCEV *ScalarEvolution::getUnknown(Value *V) {
@ -3204,19 +3188,7 @@ bool ScalarEvolution::isSCEVable(Type *Ty) const {
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
// If we have a DataLayout, use it!
if (DL)
return DL->getTypeSizeInBits(Ty);
// Integer types have fixed sizes.
if (Ty->isIntegerTy())
return Ty->getPrimitiveSizeInBits();
// The only other support type is pointer. Without DataLayout, conservatively
// assume pointers are 64-bit.
assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
return 64;
return F->getParent()->getDataLayout().getTypeSizeInBits(Ty);
}
/// getEffectiveSCEVType - Return a type with the same bitwidth as
@ -3232,12 +3204,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
// The only other support type is pointer.
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
if (DL)
return DL->getIntPtrType(Ty);
// Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext());
return F->getParent()->getDataLayout().getIntPtrType(Ty);
}
const SCEV *ScalarEvolution::getCouldNotCompute() {
@ -3701,7 +3668,8 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
if (Value *V = SimplifyInstruction(PN, DL, TLI, DT, AC))
if (Value *V =
SimplifyInstruction(PN, F->getParent()->getDataLayout(), TLI, DT, AC))
if (LI->replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
@ -3833,7 +3801,8 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
// For a SCEVUnknown, ask ValueTracking.
unsigned BitWidth = getTypeSizeInBits(U->getType());
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, AC, nullptr, DT);
computeKnownBits(U->getValue(), Zeros, Ones,
F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
return Zeros.countTrailingOnes();
}
@ -4063,7 +4032,7 @@ ScalarEvolution::getRange(const SCEV *S,
// Split here to avoid paying the compile-time cost of calling both
// computeKnownBits and ComputeNumSignBits. This restriction can be lifted
// if needed.
const DataLayout &DL = F->getParent()->getDataLayout();
if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
// For a SCEVUnknown, ask ValueTracking.
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
@ -4074,13 +4043,11 @@ ScalarEvolution::getRange(const SCEV *S,
} else {
assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
"generalize as needed!");
if (U->getValue()->getType()->isIntegerTy() || DL) {
unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT);
if (NS > 1)
ConservativeResult = ConservativeResult.intersectWith(ConstantRange(
APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
}
unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT);
if (NS > 1)
ConservativeResult = ConservativeResult.intersectWith(
ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
}
return setRange(U, SignHint, ConservativeResult);
@ -4185,8 +4152,8 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
unsigned TZ = A.countTrailingZeros();
unsigned BitWidth = A.getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL, 0, AC,
nullptr, DT);
computeKnownBits(U->getOperand(0), KnownZero, KnownOne,
F->getParent()->getDataLayout(), 0, AC, nullptr, DT);
APInt EffectiveMask =
APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
@ -5413,7 +5380,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
@ -5502,6 +5469,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
unsigned NumIterations = BEs.getZExtValue(); // must be in range
unsigned IterationNum = 0;
const DataLayout &DL = F->getParent()->getDataLayout();
for (; ; ++IterationNum) {
if (IterationNum == NumIterations)
return RetVal = CurrentIterVals[PN]; // Got exit value!
@ -5509,8 +5477,8 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
// Compute the value of the PHIs for the next iteration.
// EvaluateExpression adds non-phi values to the CurrentIterVals map.
DenseMap<Instruction *, Constant *> NextIterVals;
Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
TLI);
Constant *NextPHI =
EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
if (!NextPHI)
return nullptr; // Couldn't evaluate!
NextIterVals[PN] = NextPHI;
@ -5586,12 +5554,11 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
// Okay, we find a PHI node that defines the trip count of this loop. Execute
// the loop symbolically to determine when the condition gets a value of
// "ExitWhen".
unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
const DataLayout &DL = F->getParent()->getDataLayout();
for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
ConstantInt *CondVal =
dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
DL, TLI));
ConstantInt *CondVal = dyn_cast_or_null<ConstantInt>(
EvaluateExpression(Cond, L, CurrentIterVals, DL, TLI));
// Couldn't symbolically evaluate.
if (!CondVal) return getCouldNotCompute();
@ -5824,16 +5791,16 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// Check to see if getSCEVAtScope actually made an improvement.
if (MadeImprovement) {
Constant *C = nullptr;
const DataLayout &DL = F->getParent()->getDataLayout();
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(),
Operands[0], Operands[1], DL,
TLI);
C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
Operands[1], DL, TLI);
else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!LI->isVolatile())
C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
} else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
Operands, DL, TLI);
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands,
DL, TLI);
if (!C) return V;
return getSCEV(C);
}
@ -7966,7 +7933,6 @@ bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
return false;
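
After the ScalarEvolution changes above, type sizes always come from the module's DataLayout; the old "assume pointers are 64-bit" fallback is gone. A hedged sketch (not from this patch) of the equivalent direct query; typeSizeInBits is a hypothetical free function.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

// Hypothetical equivalent of getTypeSizeInBits after the patch: no integer vs.
// pointer special cases, just ask the layout attached to the module.
static uint64_t typeSizeInBits(const llvm::Function &F, llvm::Type *Ty) {
  return F.getParent()->getDataLayout().getTypeSizeInBits(Ty);
}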

View File

@ -204,11 +204,9 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
const SCEV *&Remainder,
const SCEV *Factor,
ScalarEvolution &SE,
const DataLayout *DL) {
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
const SCEV *Factor, ScalarEvolution &SE,
const DataLayout &DL) {
// Everything is divisible by one.
if (Factor->isOne())
return true;
@ -248,35 +246,17 @@ static bool FactorOutConstant(const SCEV *&S,
// In a Mul, check if there is a constant operand which is a multiple
// of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
if (DL) {
// With DataLayout, the size is known. Check if there is a constant
// operand which is a multiple of the given factor. If so, we can
// factor it.
const SCEVConstant *FC = cast<SCEVConstant>(Factor);
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[0] =
SE.getConstant(C->getValue()->getValue().sdiv(
FC->getValue()->getValue()));
S = SE.getMulExpr(NewMulOps);
return true;
}
} else {
// Without DataLayout, check if Factor can be factored out of any of the
// Mul's operands. If so, we can just remove it.
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i);
const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) &&
Remainder->isZero()) {
SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[i] = SOp;
S = SE.getMulExpr(NewMulOps);
return true;
}
// Size is known, check if there is a constant operand which is a multiple
// of the given factor. If so, we can factor it.
const SCEVConstant *FC = cast<SCEVConstant>(Factor);
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[0] = SE.getConstant(
C->getValue()->getValue().sdiv(FC->getValue()->getValue()));
S = SE.getMulExpr(NewMulOps);
return true;
}
}
}
// In an AddRec, check if both start and step are divisible.
@ -402,9 +382,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// without the other.
SplitAddRecs(Ops, Ty, SE);
Type *IntPtrTy = SE.DL
? SE.DL->getIntPtrType(PTy)
: Type::getInt64Ty(PTy->getContext());
Type *IntPtrTy = DL.getIntPtrType(PTy);
// Descend down the pointer's type and attempt to convert the other
// operands into GEP indices, at each level. The first index in a GEP
@ -422,7 +400,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
const SCEV *Op = Ops[i];
const SCEV *Remainder = SE.getConstant(Ty, 0);
if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) {
if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
// Op now has ElSize factored out.
ScaledOps.push_back(Op);
if (!Remainder->isZero())
@ -456,43 +434,25 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
bool FoundFieldNo = false;
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
if (SE.DL) {
// With DataLayout, field offsets are known. See if a constant offset
// falls within any of the struct fields.
if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
if (SE.getTypeSizeInBits(C->getType()) <= 64) {
const StructLayout &SL = *SE.DL->getStructLayout(STy);
uint64_t FullOffset = C->getValue()->getZExtValue();
if (FullOffset < SL.getSizeInBytes()) {
unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
GepIndices.push_back(
ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
ElTy = STy->getTypeAtIndex(ElIdx);
Ops[0] =
// Field offsets are known. See if a constant offset falls within any of
// the struct fields.
if (Ops.empty())
break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
if (SE.getTypeSizeInBits(C->getType()) <= 64) {
const StructLayout &SL = *DL.getStructLayout(STy);
uint64_t FullOffset = C->getValue()->getZExtValue();
if (FullOffset < SL.getSizeInBytes()) {
unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
GepIndices.push_back(
ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
ElTy = STy->getTypeAtIndex(ElIdx);
Ops[0] =
SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
AnyNonZeroIndices = true;
FoundFieldNo = true;
}
AnyNonZeroIndices = true;
FoundFieldNo = true;
}
} else {
// Without DataLayout, just check for an offsetof expression of the
// appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
Type *CTy;
Constant *FieldNo;
if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
GepIndices.push_back(FieldNo);
ElTy =
STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
Ops[i] = SE.getConstant(Ty, 0);
AnyNonZeroIndices = true;
FoundFieldNo = true;
break;
}
}
}
}
// If no struct field offsets were found, tentatively assume that
// field zero was selected (since the zero offset would obviously
// be folded away).
@ -1746,7 +1706,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
// Fold constant phis. They may be congruent to other constant phis and
// would confuse the logic below that expects proper IVs.
if (Value *V = SimplifyInstruction(Phi, SE.DL, SE.TLI, SE.DT, SE.AC)) {
if (Value *V = SimplifyInstruction(Phi, DL, SE.TLI, SE.DT, SE.AC)) {
Phi->replaceAllUsesWith(V);
DeadInsts.push_back(Phi);
++NumElim;
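
For reference, a minimal sketch of what a caller of the updated SimplifyInstruction looks like once the DataLayout is always reachable from the module; the helper name and surrounding code are illustrative, not taken from this patch:

// Illustrative sketch (not part of the patch): with the DataLayout mandatory,
// a pass fetches it from the instruction's module and passes it by reference.
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static bool foldTrivialPHI(PHINode *Phi) {
  const DataLayout &DL = Phi->getModule()->getDataLayout();
  if (Value *V = SimplifyInstruction(Phi, DL)) { // TLI/DT/AC default to null
    Phi->replaceAllUsesWith(V);
    Phi->eraseFromParent();
    return true;
  }
  return false;
}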

File diff suppressed because it is too large.


@ -77,11 +77,11 @@ static gcp_map_type &getGCMap(void *&P) {
/// getGVAlignmentLog2 - Return the alignment to use for the specified global
/// value in log2 form. This rounds up to the preferred alignment if possible
/// and legal.
static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &TD,
static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &DL,
unsigned InBits = 0) {
unsigned NumBits = 0;
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
NumBits = TD.getPreferredAlignmentLog(GVar);
NumBits = DL.getPreferredAlignmentLog(GVar);
// If InBits is specified, round it to it.
if (InBits > NumBits)
@ -1642,8 +1642,7 @@ const MCExpr *AsmPrinter::lowerConstant(const Constant *CV) {
// If the code isn't optimized, there may be outstanding folding
// opportunities. Attempt to fold the expression using DataLayout as a
// last resort before giving up.
if (Constant *C = ConstantFoldConstantExpression(
CE, TM.getDataLayout()))
if (Constant *C = ConstantFoldConstantExpression(CE, *TM.getDataLayout()))
if (C != CE)
return lowerConstant(C);
@ -2187,7 +2186,7 @@ static void emitGlobalConstantImpl(const Constant *CV, AsmPrinter &AP,
// If the constant expression's size is greater than 64-bits, then we have
// to emit the value in chunks. Try to constant fold the value and emit it
// that way.
Constant *New = ConstantFoldConstantExpression(CE, DL);
Constant *New = ConstantFoldConstantExpression(CE, *DL);
if (New && New != CE)
return emitGlobalConstantImpl(New, AP);
}


@ -1244,7 +1244,6 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
WeakVH IterHandle(CurInstIterator);
replaceAndRecursivelySimplify(CI, RetVal,
TLI ? TLI->getDataLayout() : nullptr,
TLInfo, ModifiedDT ? nullptr : DT);
// If the iterator instruction was recursively deleted, start over at the
@ -1287,15 +1286,11 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
// From here on out we're working with named functions.
if (!CI->getCalledFunction()) return false;
// We'll need DataLayout from here on out.
const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr;
if (!TD) return false;
// Lower all default uses of _chk calls. This is very similar
// to what InstCombineCalls does, but here we are only lowering calls
// to fortified library functions (e.g. __memcpy_chk) that have the default
// "don't know" as the objectsize. Anything else should be left alone.
FortifiedLibCallSimplifier Simplifier(TD, TLInfo, true);
FortifiedLibCallSimplifier Simplifier(TLInfo, true);
if (Value *V = Simplifier.optimizeCall(CI)) {
CI->replaceAllUsesWith(V);
CI->eraseFromParent();
@ -4196,8 +4191,8 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I, bool& ModifiedDT) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
// to introduce PHI nodes too late to be cleaned up. If we detect such a
// trivial PHI, go ahead and zap it here.
if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : nullptr,
TLInfo, DT)) {
const DataLayout &DL = I->getModule()->getDataLayout();
if (Value *V = SimplifyInstruction(P, DL, TLInfo, DT)) {
P->replaceAllUsesWith(V);
P->eraseFromParent();
++NumPHIsElim;


@ -168,7 +168,7 @@ size_t DwarfEHPrepare::pruneUnreachableResumes(
BasicBlock *BB = RI->getParent();
new UnreachableInst(Ctx, RI);
RI->eraseFromParent();
SimplifyCFG(BB, TTI, 1, TLI->getDataLayout());
SimplifyCFG(BB, TTI, 1);
}
}
Resumes.resize(ResumesLeft);


@ -903,16 +903,16 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
// DataLayout.
if (isa<PointerType>(A->getType()))
A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
const_cast<Constant*>(A), TD);
const_cast<Constant *>(A), *TD);
else if (A->getType() != IntTy)
A = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
const_cast<Constant*>(A), TD);
const_cast<Constant *>(A), *TD);
if (isa<PointerType>(B->getType()))
B = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
const_cast<Constant*>(B), TD);
const_cast<Constant *>(B), *TD);
else if (B->getType() != IntTy)
B = ConstantFoldInstOperands(Instruction::BitCast, IntTy,
const_cast<Constant*>(B), TD);
const_cast<Constant *>(B), *TD);
return A == B;
}


@ -96,14 +96,15 @@ static const Value *getUnderlyingObjectFromInt(const Value *V) {
/// getUnderlyingObjects - This is a wrapper around GetUnderlyingObjects
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static void getUnderlyingObjects(const Value *V,
SmallVectorImpl<Value *> &Objects) {
SmallVectorImpl<Value *> &Objects,
const DataLayout &DL) {
SmallPtrSet<const Value *, 16> Visited;
SmallVector<const Value *, 4> Working(1, V);
do {
V = Working.pop_back_val();
SmallVector<Value *, 4> Objs;
GetUnderlyingObjects(const_cast<Value *>(V), Objs);
GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
I != IE; ++I) {
@ -132,7 +133,8 @@ UnderlyingObjectsVector;
/// object, return the Value for that object.
static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
const MachineFrameInfo *MFI,
UnderlyingObjectsVector &Objects) {
UnderlyingObjectsVector &Objects,
const DataLayout &DL) {
if (!MI->hasOneMemOperand() ||
(!(*MI->memoperands_begin())->getValue() &&
!(*MI->memoperands_begin())->getPseudoValue()) ||
@ -156,7 +158,7 @@ static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
return;
SmallVector<Value *, 4> Objs;
getUnderlyingObjects(V, Objs);
getUnderlyingObjects(V, Objs, DL);
for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
I != IE; ++I) {
@ -468,7 +470,8 @@ static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
// This MI might have either incomplete info, or known to be unsafe
// to deal with (i.e. volatile object).
static inline bool isUnsafeMemoryObject(MachineInstr *MI,
const MachineFrameInfo *MFI) {
const MachineFrameInfo *MFI,
const DataLayout &DL) {
if (!MI || MI->memoperands_empty())
return true;
// We purposefully do no check for hasOneMemOperand() here
@ -491,7 +494,7 @@ static inline bool isUnsafeMemoryObject(MachineInstr *MI,
return true;
SmallVector<Value *, 4> Objs;
getUnderlyingObjects(V, Objs);
getUnderlyingObjects(V, Objs, DL);
for (SmallVectorImpl<Value *>::iterator I = Objs.begin(),
IE = Objs.end(); I != IE; ++I) {
// Does this pointer refer to a distinct and identifiable object?
@ -508,7 +511,7 @@ static inline bool isUnsafeMemoryObject(MachineInstr *MI,
/// these two MIs be reordered during scheduling from memory dependency
/// point of view.
static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
MachineInstr *MIa,
const DataLayout &DL, MachineInstr *MIa,
MachineInstr *MIb) {
const MachineFunction *MF = MIa->getParent()->getParent();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
@ -527,7 +530,7 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
return true;
if (isUnsafeMemoryObject(MIa, MFI) || isUnsafeMemoryObject(MIb, MFI))
if (isUnsafeMemoryObject(MIa, MFI, DL) || isUnsafeMemoryObject(MIb, MFI, DL))
return true;
// If we are dealing with two "normal" loads, we do not need an edge
@ -579,10 +582,10 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
/// This recursive function iterates over chain deps of SUb looking for
/// "latest" node that needs a chain edge to SUa.
static unsigned
iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
SUnit *SUa, SUnit *SUb, SUnit *ExitSU, unsigned *Depth,
SmallPtrSetImpl<const SUnit*> &Visited) {
static unsigned iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
const DataLayout &DL, SUnit *SUa, SUnit *SUb,
SUnit *ExitSU, unsigned *Depth,
SmallPtrSetImpl<const SUnit *> &Visited) {
if (!SUa || !SUb || SUb == ExitSU)
return *Depth;
@ -607,7 +610,7 @@ iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
// add that edge to the predecessors chain of SUb,
// and stop descending.
if (*Depth > 200 ||
MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) {
MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) {
SUb->addPred(SDep(SUa, SDep::MayAliasMem));
return *Depth;
}
@ -617,7 +620,7 @@ iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
for (SUnit::const_succ_iterator I = SUb->Succs.begin(), E = SUb->Succs.end();
I != E; ++I)
if (I->isNormalMemoryOrBarrier())
iterateChainSucc (AA, MFI, SUa, I->getSUnit(), ExitSU, Depth, Visited);
iterateChainSucc(AA, MFI, DL, SUa, I->getSUnit(), ExitSU, Depth, Visited);
return *Depth;
}
@ -626,7 +629,8 @@ iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
/// checks whether SU can be aliasing any node dominated
/// by it.
static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
SUnit *SU, SUnit *ExitSU, std::set<SUnit *> &CheckList,
const DataLayout &DL, SUnit *SU, SUnit *ExitSU,
std::set<SUnit *> &CheckList,
unsigned LatencyToLoad) {
if (!SU)
return;
@ -638,7 +642,7 @@ static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
I != IE; ++I) {
if (SU == *I)
continue;
if (MIsNeedChainEdge(AA, MFI, SU->getInstr(), (*I)->getInstr())) {
if (MIsNeedChainEdge(AA, MFI, DL, SU->getInstr(), (*I)->getInstr())) {
SDep Dep(SU, SDep::MayAliasMem);
Dep.setLatency(((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0);
(*I)->addPred(Dep);
@ -649,22 +653,22 @@ static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
for (SUnit::const_succ_iterator J = (*I)->Succs.begin(),
JE = (*I)->Succs.end(); J != JE; ++J)
if (J->isNormalMemoryOrBarrier())
iterateChainSucc (AA, MFI, SU, J->getSUnit(),
ExitSU, &Depth, Visited);
iterateChainSucc(AA, MFI, DL, SU, J->getSUnit(), ExitSU, &Depth,
Visited);
}
}
/// Check whether two objects need a chain edge, if so, add it
/// otherwise remember the rejected SU.
static inline
void addChainDependency (AliasAnalysis *AA, const MachineFrameInfo *MFI,
SUnit *SUa, SUnit *SUb,
std::set<SUnit *> &RejectList,
unsigned TrueMemOrderLatency = 0,
bool isNormalMemory = false) {
static inline void addChainDependency(AliasAnalysis *AA,
const MachineFrameInfo *MFI,
const DataLayout &DL, SUnit *SUa,
SUnit *SUb, std::set<SUnit *> &RejectList,
unsigned TrueMemOrderLatency = 0,
bool isNormalMemory = false) {
// If this is a false dependency,
// do not add the edge, but remember the rejected node.
if (MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) {
if (MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) {
SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier);
Dep.setLatency(TrueMemOrderLatency);
SUb->addPred(Dep);
@ -883,7 +887,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
BarrierChain = SU;
// This is a barrier event that acts as a pivotal node in the DAG,
// so it is safe to clear list of exposed nodes.
adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
TrueMemOrderLatency);
RejectMemNodes.clear();
NonAliasMemDefs.clear();
@ -896,25 +900,27 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
unsigned ChainLatency = 0;
if (AliasChain->getInstr()->mayLoad())
ChainLatency = TrueMemOrderLatency;
addChainDependency(AAForDep, MFI, SU, AliasChain, RejectMemNodes,
ChainLatency);
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
RejectMemNodes, ChainLatency);
}
AliasChain = SU;
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
addChainDependency(AAForDep, MFI, SU, PendingLoads[k], RejectMemNodes,
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
PendingLoads[k], RejectMemNodes,
TrueMemOrderLatency);
for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes);
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes);
}
for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes,
TrueMemOrderLatency);
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes, TrueMemOrderLatency);
}
adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
TrueMemOrderLatency);
PendingLoads.clear();
AliasMemDefs.clear();
@ -928,7 +934,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
BarrierChain->addPred(SDep(SU, SDep::Barrier));
UnderlyingObjectsVector Objs;
getUnderlyingObjectsForInstr(MI, MFI, Objs);
getUnderlyingObjectsForInstr(MI, MFI, Objs, *TM.getDataLayout());
if (Objs.empty()) {
// Treat all other stores conservatively.
@ -952,8 +958,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
if (I != IE) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes,
0, true);
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes, 0, true);
// If we're not using AA, then we only need one store per object.
if (!AAForDep)
@ -977,7 +983,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
((ThisMayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
if (J != JE) {
for (unsigned i = 0, e = J->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, SU, J->second[i], RejectMemNodes,
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
J->second[i], RejectMemNodes,
TrueMemOrderLatency, true);
J->second.clear();
}
@ -986,13 +993,15 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// Add dependencies from all the PendingLoads, i.e. loads
// with no underlying object.
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
addChainDependency(AAForDep, MFI, SU, PendingLoads[k], RejectMemNodes,
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
PendingLoads[k], RejectMemNodes,
TrueMemOrderLatency);
// Add dependence on alias chain, if needed.
if (AliasChain)
addChainDependency(AAForDep, MFI, SU, AliasChain, RejectMemNodes);
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
RejectMemNodes);
}
adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU, RejectMemNodes,
TrueMemOrderLatency);
} else if (MI->mayLoad()) {
bool MayAlias = true;
@ -1000,7 +1009,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
// Invariant load, no chain dependencies needed!
} else {
UnderlyingObjectsVector Objs;
getUnderlyingObjectsForInstr(MI, MFI, Objs);
getUnderlyingObjectsForInstr(MI, MFI, Objs, *TM.getDataLayout());
if (Objs.empty()) {
// A load with no underlying object. Depend on all
@ -1008,8 +1017,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
for (MapVector<ValueType, std::vector<SUnit *> >::iterator I =
AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, SU, I->second[i],
RejectMemNodes);
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes);
PendingLoads.push_back(SU);
MayAlias = true;
@ -1032,18 +1041,20 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
if (I != IE)
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
addChainDependency(AAForDep, MFI, SU, I->second[i],
RejectMemNodes, 0, true);
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU,
I->second[i], RejectMemNodes, 0, true);
if (ThisMayAlias)
AliasMemUses[V].push_back(SU);
else
NonAliasMemUses[V].push_back(SU);
}
if (MayAlias)
adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes, /*Latency=*/0);
adjustChainDeps(AA, MFI, *TM.getDataLayout(), SU, &ExitSU,
RejectMemNodes, /*Latency=*/0);
// Add dependencies on alias and barrier chains, if needed.
if (MayAlias && AliasChain)
addChainDependency(AAForDep, MFI, SU, AliasChain, RejectMemNodes);
addChainDependency(AAForDep, MFI, *TM.getDataLayout(), SU, AliasChain,
RejectMemNodes);
if (BarrierChain)
BarrierChain->addPred(SDep(SU, SDep::Barrier));
}


@ -6648,8 +6648,8 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
unsigned PtrWidth = TLI->getPointerTypeSizeInBits(GV->getType());
APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
llvm::computeKnownBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
TLI->getDataLayout());
llvm::computeKnownBits(const_cast<GlobalValue *>(GV), KnownZero, KnownOne,
*TLI->getDataLayout());
unsigned AlignBits = KnownZero.countTrailingOnes();
unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
if (Align)


@ -5381,7 +5381,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return nullptr;
SmallVector<Value *, 4> Allocas;
GetUnderlyingObjects(I.getArgOperand(1), Allocas, DL);
GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL);
for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
E = Allocas.end(); Object != E; ++Object) {
@ -5649,9 +5649,8 @@ static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
PointerType::getUnqual(LoadTy));
if (const Constant *LoadCst =
ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
Builder.DL))
if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
const_cast<Constant *>(LoadInput), *Builder.DL))
return Builder.getValue(LoadCst);
}


@ -733,12 +733,11 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
}
/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm)
: TM(tm), DL(TM.getDataLayout()) {
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
initActions();
// Perform these initializations only once.
IsLittleEndian = DL->isLittleEndian();
IsLittleEndian = getDataLayout()->isLittleEndian();
MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
= MaxStoresPerMemmoveOptSize = 4;
@ -897,7 +896,7 @@ MVT TargetLoweringBase::getPointerTy(uint32_t AS) const {
}
unsigned TargetLoweringBase::getPointerSizeInBits(uint32_t AS) const {
return DL->getPointerSizeInBits(AS);
return getDataLayout()->getPointerSizeInBits(AS);
}
unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
@ -906,7 +905,7 @@ unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
}
MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const {
return MVT::getIntegerVT(8*DL->getPointerSize(0));
return MVT::getIntegerVT(8 * getDataLayout()->getPointerSize(0));
}
EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
@ -1540,7 +1539,7 @@ void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
return DL->getABITypeAlignment(Ty);
return getDataLayout()->getABITypeAlignment(Ty);
}
//===----------------------------------------------------------------------===//


@ -454,10 +454,9 @@ bool WinEHPrepare::outlineHandler(HandlerType CatchOrCleanup, Function *SrcFn,
BasicBlock::iterator II = LPad;
CloneAndPruneIntoFromInst(
Handler, SrcFn, ++II, VMap,
/*ModuleLevelChanges=*/false, Returns, "", &InlinedFunctionInfo,
&SrcFn->getParent()->getDataLayout(), Director.get());
CloneAndPruneIntoFromInst(Handler, SrcFn, ++II, VMap,
/*ModuleLevelChanges=*/false, Returns, "",
&InlinedFunctionInfo, Director.get());
// Move all the instructions in the first cloned block into our entry block.
BasicBlock *FirstClonedBB = std::next(Function::iterator(Entry));


@ -2048,21 +2048,15 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const {
return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}
bool CastInst::isNoopCast(const DataLayout *DL) const {
if (!DL) {
// Assume maximum pointer size.
return isNoopCast(Type::getInt64Ty(getContext()));
}
bool CastInst::isNoopCast(const DataLayout &DL) const {
Type *PtrOpTy = nullptr;
if (getOpcode() == Instruction::PtrToInt)
PtrOpTy = getOperand(0)->getType();
else if (getOpcode() == Instruction::IntToPtr)
PtrOpTy = getType();
Type *IntPtrTy = PtrOpTy
? DL->getIntPtrType(PtrOpTy)
: DL->getIntPtrType(getContext(), 0);
Type *IntPtrTy =
PtrOpTy ? DL.getIntPtrType(PtrOpTy) : DL.getIntPtrType(getContext(), 0);
return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}
@ -2616,13 +2610,13 @@ bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
}
bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
const DataLayout *DL) {
const DataLayout &DL) {
if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
return DL && IntTy->getBitWidth() == DL->getPointerTypeSizeInBits(PtrTy);
return IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy);
if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
return DL && IntTy->getBitWidth() == DL->getPointerTypeSizeInBits(PtrTy);
return IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy);
return isBitCastable(SrcTy, DestTy);
}
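
With CastInst::isNoopCast taking a reference, the old "assume 64-bit pointers when DL is null" fallback disappears. A small illustrative sketch, assuming the cast instruction is attached to a module (the helper name is invented):

// Illustrative sketch (not part of the patch): no null check is needed before
// asking whether a cast is a no-op under the target layout.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static bool castFoldsAway(const CastInst *CI) {
  const DataLayout &DL = CI->getModule()->getDataLayout();
  return CI->isNoopCast(DL); // e.g. ptrtoint to an integer of pointer width
}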


@ -73,7 +73,7 @@ static bool hasByteCountSuffix(CallingConv::ID CC) {
/// Microsoft fastcall and stdcall functions require a suffix on their name
/// indicating the number of words of arguments they take.
static void addByteCountSuffix(raw_ostream &OS, const Function *F,
const DataLayout &TD) {
const DataLayout &DL) {
// Calculate arguments size total.
unsigned ArgWords = 0;
for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
@ -83,8 +83,8 @@ static void addByteCountSuffix(raw_ostream &OS, const Function *F,
if (AI->hasByValOrInAllocaAttr())
Ty = cast<PointerType>(Ty)->getElementType();
// Size should be aligned to pointer size.
unsigned PtrSize = TD.getPointerSize();
ArgWords += RoundUpToAlignment(TD.getTypeAllocSize(Ty), PtrSize);
unsigned PtrSize = DL.getPointerSize();
ArgWords += RoundUpToAlignment(DL.getTypeAllocSize(Ty), PtrSize);
}
OS << '@' << ArgWords;


@ -482,7 +482,7 @@ Value *Value::stripInBoundsOffsets() {
///
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
static bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
SmallPtrSetImpl<const Value *> &Visited) {
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
@ -497,17 +497,14 @@ static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
// to a type of smaller size (or the same size), and the alignment
// is at least as large as for the resulting pointer type, then
// we can look through the bitcast.
if (DL)
if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
Type *STy = BC->getSrcTy()->getPointerElementType(),
*DTy = BC->getDestTy()->getPointerElementType();
if (STy->isSized() && DTy->isSized() &&
(DL->getTypeStoreSize(STy) >=
DL->getTypeStoreSize(DTy)) &&
(DL->getABITypeAlignment(STy) >=
DL->getABITypeAlignment(DTy)))
return isDereferenceablePointer(BC->getOperand(0), DL, Visited);
}
if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
Type *STy = BC->getSrcTy()->getPointerElementType(),
*DTy = BC->getDestTy()->getPointerElementType();
if (STy->isSized() && DTy->isSized() &&
(DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
(DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
return isDereferenceablePointer(BC->getOperand(0), DL, Visited);
}
// Global variables which can't collapse to null are ok.
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
@ -520,7 +517,7 @@ static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
return true;
else if (uint64_t Bytes = A->getDereferenceableBytes()) {
Type *Ty = V->getType()->getPointerElementType();
if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
if (Ty->isSized() && DL.getTypeStoreSize(Ty) <= Bytes)
return true;
}
@ -532,7 +529,7 @@ static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
if (ImmutableCallSite CS = V) {
if (uint64_t Bytes = CS.getDereferenceableBytes(0)) {
Type *Ty = V->getType()->getPointerElementType();
if (Ty->isSized() && DL && DL->getTypeStoreSize(Ty) <= Bytes)
if (Ty->isSized() && DL.getTypeStoreSize(Ty) <= Bytes)
return true;
}
}
@ -586,15 +583,15 @@ static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
return false;
}
bool Value::isDereferenceablePointer(const DataLayout *DL) const {
bool Value::isDereferenceablePointer(const DataLayout &DL) const {
// When dereferenceability information is provided by a dereferenceable
// attribute, we know exactly how many bytes are dereferenceable. If we can
// determine the exact offset to the attributed variable, we can use that
// information here.
Type *Ty = getType()->getPointerElementType();
if (Ty->isSized() && DL) {
APInt Offset(DL->getTypeStoreSizeInBits(getType()), 0);
const Value *BV = stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
if (Ty->isSized()) {
APInt Offset(DL.getTypeStoreSizeInBits(getType()), 0);
const Value *BV = stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
APInt DerefBytes(Offset.getBitWidth(), 0);
if (const Argument *A = dyn_cast<Argument>(BV))
@ -603,7 +600,7 @@ bool Value::isDereferenceablePointer(const DataLayout *DL) const {
DerefBytes = CS.getDereferenceableBytes(0);
if (DerefBytes.getBoolValue() && Offset.isNonNegative()) {
if (DerefBytes.uge(Offset + DL->getTypeStoreSize(Ty)))
if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
return true;
}
}
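
A hedged sketch of how a client of Value::isDereferenceablePointer might look after this change; the helper is hypothetical and only demonstrates fetching the layout from the load's parent module:

// Illustrative sketch (not part of the patch): speculation checks now take the
// layout by reference instead of tolerating a null pointer.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static bool loadOperandIsDereferenceable(const LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return LI->getPointerOperand()->isDereferenceablePointer(DL);
}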


@ -764,9 +764,7 @@ bool ModuleLinker::shouldLinkFromSource(bool &LinkFromSrc,
return false;
}
// FIXME: Make datalayout mandatory and just use getDataLayout().
DataLayout DL(Dest.getParent());
const DataLayout &DL = Dest.getParent()->getDataLayout();
uint64_t DestSize = DL.getTypeAllocSize(Dest.getType()->getElementType());
uint64_t SrcSize = DL.getTypeAllocSize(Src.getType()->getElementType());
LinkFromSrc = SrcSize > DestSize;


@ -140,7 +140,7 @@ namespace {
/// memory instruction can be moved to a delay slot.
class MemDefsUses : public InspectMemInstr {
public:
MemDefsUses(const MachineFrameInfo *MFI);
MemDefsUses(const DataLayout &DL, const MachineFrameInfo *MFI);
private:
typedef PointerUnion<const Value *, const PseudoSourceValue *> ValueType;
@ -158,6 +158,7 @@ namespace {
const MachineFrameInfo *MFI;
SmallPtrSet<ValueType, 4> Uses, Defs;
const DataLayout &DL;
/// Flags indicating whether loads or stores with no underlying objects have
/// been seen.
@ -427,9 +428,9 @@ bool LoadFromStackOrConst::hasHazard_(const MachineInstr &MI) {
return true;
}
MemDefsUses::MemDefsUses(const MachineFrameInfo *MFI_)
: InspectMemInstr(false), MFI(MFI_), SeenNoObjLoad(false),
SeenNoObjStore(false) {}
MemDefsUses::MemDefsUses(const DataLayout &DL, const MachineFrameInfo *MFI_)
: InspectMemInstr(false), MFI(MFI_), DL(DL), SeenNoObjLoad(false),
SeenNoObjStore(false) {}
bool MemDefsUses::hasHazard_(const MachineInstr &MI) {
bool HasHazard = false;
@ -482,7 +483,7 @@ getUnderlyingObjects(const MachineInstr &MI,
const Value *V = (*MI.memoperands_begin())->getValue();
SmallVector<Value *, 4> Objs;
GetUnderlyingObjects(const_cast<Value *>(V), Objs);
GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), E = Objs.end();
I != E; ++I) {
@ -688,7 +689,7 @@ bool Filler::searchBackward(MachineBasicBlock &MBB, Iter Slot) const {
return false;
RegDefsUses RegDU(*MBB.getParent()->getSubtarget().getRegisterInfo());
MemDefsUses MemDU(MBB.getParent()->getFrameInfo());
MemDefsUses MemDU(*TM.getDataLayout(), MBB.getParent()->getFrameInfo());
ReverseIter Filler;
RegDU.init(*Slot);
@ -754,7 +755,7 @@ bool Filler::searchSuccBBs(MachineBasicBlock &MBB, Iter Slot) const {
IM.reset(new LoadFromStackOrConst());
} else {
const MachineFrameInfo *MFI = MBB.getParent()->getFrameInfo();
IM.reset(new MemDefsUses(MFI));
IM.reset(new MemDefsUses(*TM.getDataLayout(), MFI));
}
if (!searchRange(MBB, SuccBB->begin(), SuccBB->end(), RegDU, *IM, Slot,


@ -1788,7 +1788,7 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
break;
} else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
if (const ConstantInt *constInt = dyn_cast<ConstantInt>(
ConstantFoldConstantExpression(Cexpr, TD))) {
ConstantFoldConstantExpression(Cexpr, *TD))) {
int int32 = (int)(constInt->getZExtValue());
ptr = (unsigned char *)&int32;
aggBuffer->addBytes(ptr, 4, Bytes);
@ -1810,7 +1810,7 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
break;
} else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
if (const ConstantInt *constInt = dyn_cast<ConstantInt>(
ConstantFoldConstantExpression(Cexpr, TD))) {
ConstantFoldConstantExpression(Cexpr, *TD))) {
long long int64 = (long long)(constInt->getZExtValue());
ptr = (unsigned char *)&int64;
aggBuffer->addBytes(ptr, 8, Bytes);


@ -532,7 +532,7 @@ bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
// selected branch.
MadeChange = true;
SCEVExpander SCEVE(*SE, "loopcnt");
SCEVExpander SCEVE(*SE, Preheader->getModule()->getDataLayout(), "loopcnt");
LLVMContext &C = SE->getContext();
Type *CountType = TT.isArch64Bit() ? Type::getInt64Ty(C) :
Type::getInt32Ty(C);


@ -211,7 +211,7 @@ bool PPCLoopDataPrefetch::runOnLoop(Loop *L) {
PrefLoads.push_back(std::make_pair(MemI, LSCEVAddRec));
Type *I8Ptr = Type::getInt8PtrTy((*I)->getContext(), PtrAddrSpace);
SCEVExpander SCEVE(*SE, "prefaddr");
SCEVExpander SCEVE(*SE, J->getModule()->getDataLayout(), "prefaddr");
Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, MemI);
IRBuilder<> Builder(MemI);


@ -274,7 +274,7 @@ bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
MemI->hasName() ? MemI->getName() + ".phi" : "",
Header->getFirstNonPHI());
SCEVExpander SCEVE(*SE, "pistart");
SCEVExpander SCEVE(*SE, Header->getModule()->getDataLayout(), "pistart");
Value *BasePtrStart = SCEVE.expandCodeFor(BasePtrStartSCEV, I8PtrTy,
LoopPredecessor->getTerminator());
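
The SCEVExpander constructor now takes the layout explicitly instead of reading a nullable SE.DL field, as the three PPC call sites above show. A hedged sketch of the resulting caller pattern; the function and variable names below are invented for illustration:

// Illustrative sketch (not part of the patch): construct SCEVExpander with the
// module's DataLayout and expand an address SCEV before an insertion point.
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Value *emitAddress(ScalarEvolution &SE, const SCEV *AddrSCEV,
                          Type *I8PtrTy, Instruction *InsertPt) {
  const DataLayout &DL = InsertPt->getModule()->getDataLayout();
  SCEVExpander Expander(SE, DL, "addr");
  return Expander.expandCodeFor(AddrSCEV, I8PtrTy, InsertPt);
}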


@ -20,6 +20,7 @@
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
@ -36,13 +37,15 @@ void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
// TODO: Do we want runtime unrolling?
for (const BasicBlock *BB : L->getBlocks()) {
const DataLayout &DL = BB->getModule()->getDataLayout();
for (const Instruction &I : *BB) {
const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
if (!GEP || GEP->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
continue;
const Value *Ptr = GEP->getPointerOperand();
const AllocaInst *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr));
const AllocaInst *Alloca =
dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
if (Alloca) {
// We want to do whatever we can to limit the number of alloca
// instructions that make it through to the code generator. allocas
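
GetUnderlyingObject now requires the DataLayout argument, threaded through from the block's module as in the loop above. A minimal illustrative helper under that assumption (the name is made up):

// Illustrative sketch (not part of the patch): walk a GEP back to its
// underlying alloca, if any, using the module's DataLayout.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static const AllocaInst *underlyingAlloca(const GetElementPtrInst *GEP) {
  const DataLayout &DL = GEP->getModule()->getDataLayout();
  return dyn_cast<AllocaInst>(
      GetUnderlyingObject(GEP->getPointerOperand(), DL));
}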


@ -1479,8 +1479,8 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
// Lower loads constant address space global variable loads
if (LoadNode->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
isa<GlobalVariable>(
GetUnderlyingObject(LoadNode->getMemOperand()->getValue()))) {
isa<GlobalVariable>(GetUnderlyingObject(
LoadNode->getMemOperand()->getValue(), *getDataLayout()))) {
SDValue Ptr = DAG.getZExtOrTrunc(LoadNode->getBasePtr(), DL,
getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));


@ -69,16 +69,15 @@ namespace {
bool runOnSCC(CallGraphSCC &SCC) override;
static char ID; // Pass identification, replacement for typeid
explicit ArgPromotion(unsigned maxElements = 3)
: CallGraphSCCPass(ID), DL(nullptr), maxElements(maxElements) {
: CallGraphSCCPass(ID), maxElements(maxElements) {
initializeArgPromotionPass(*PassRegistry::getPassRegistry());
}
/// A vector used to hold the indices of a single GEP instruction
typedef std::vector<uint64_t> IndicesVector;
const DataLayout *DL;
private:
bool isDenselyPacked(Type *type);
bool isDenselyPacked(Type *type, const DataLayout &DL);
bool canPaddingBeAccessed(Argument *Arg);
CallGraphNode *PromoteArguments(CallGraphNode *CGN);
bool isSafeToPromoteArgument(Argument *Arg, bool isByVal) const;
@ -125,7 +124,7 @@ bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
}
/// \brief Checks if a type could have padding bytes.
bool ArgPromotion::isDenselyPacked(Type *type) {
bool ArgPromotion::isDenselyPacked(Type *type, const DataLayout &DL) {
// There is no size information, so be conservative.
if (!type->isSized())
@ -133,7 +132,7 @@ bool ArgPromotion::isDenselyPacked(Type *type) {
// If the alloc size is not equal to the storage size, then there are padding
// bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
if (!DL || DL->getTypeSizeInBits(type) != DL->getTypeAllocSizeInBits(type))
if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type))
return false;
if (!isa<CompositeType>(type))
@ -141,19 +140,20 @@ bool ArgPromotion::isDenselyPacked(Type *type) {
// For homogenous sequential types, check for padding within members.
if (SequentialType *seqTy = dyn_cast<SequentialType>(type))
return isa<PointerType>(seqTy) || isDenselyPacked(seqTy->getElementType());
return isa<PointerType>(seqTy) ||
isDenselyPacked(seqTy->getElementType(), DL);
// Check for padding within and between elements of a struct.
StructType *StructTy = cast<StructType>(type);
const StructLayout *Layout = DL->getStructLayout(StructTy);
const StructLayout *Layout = DL.getStructLayout(StructTy);
uint64_t StartPos = 0;
for (unsigned i = 0, E = StructTy->getNumElements(); i < E; ++i) {
Type *ElTy = StructTy->getElementType(i);
if (!isDenselyPacked(ElTy))
if (!isDenselyPacked(ElTy, DL))
return false;
if (StartPos != Layout->getElementOffsetInBits(i))
return false;
StartPos += DL->getTypeAllocSizeInBits(ElTy);
StartPos += DL.getTypeAllocSizeInBits(ElTy);
}
return true;
@ -207,8 +207,6 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
// Make sure that it is local to this module.
if (!F || !F->hasLocalLinkage()) return nullptr;
DL = &F->getParent()->getDataLayout();
// First check: see if there are any pointer arguments! If not, quick exit.
SmallVector<Argument*, 16> PointerArgs;
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
@ -235,6 +233,7 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
// IR, while in the callee the classification is determined dynamically based
// on the number of registers consumed so far.
if (F->isVarArg()) return nullptr;
const DataLayout &DL = F->getParent()->getDataLayout();
// Check to see which arguments are promotable. If an argument is promotable,
// add it to ArgsToPromote.
@ -249,8 +248,8 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
// packed or if we can prove the padding bytes are never accessed. This does
// not apply to inalloca.
bool isSafeToPromote =
PtrArg->hasByValAttr() &&
(isDenselyPacked(AgTy) || !canPaddingBeAccessed(PtrArg));
PtrArg->hasByValAttr() &&
(isDenselyPacked(AgTy, DL) || !canPaddingBeAccessed(PtrArg));
if (isSafeToPromote) {
if (StructType *STy = dyn_cast<StructType>(AgTy)) {
if (maxElements > 0 && STy->getNumElements() > maxElements) {
@ -309,9 +308,9 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
/// AllCallersPassInValidPointerForArgument - Return true if we can prove that
/// all callees pass in a valid pointer for the specified function argument.
static bool AllCallersPassInValidPointerForArgument(Argument *Arg,
const DataLayout *DL) {
static bool AllCallersPassInValidPointerForArgument(Argument *Arg) {
Function *Callee = Arg->getParent();
const DataLayout &DL = Callee->getParent()->getDataLayout();
unsigned ArgNo = Arg->getArgNo();
@ -429,7 +428,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
GEPIndicesSet ToPromote;
// If the pointer is always valid, any load with first index 0 is valid.
if (isByValOrInAlloca || AllCallersPassInValidPointerForArgument(Arg, DL))
if (isByValOrInAlloca || AllCallersPassInValidPointerForArgument(Arg))
SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
// First, iterate the entry block and mark loads of (geps of) arguments as


@ -52,7 +52,6 @@ namespace {
// alignment to a concrete value.
unsigned getAlignment(GlobalVariable *GV) const;
const DataLayout *DL;
};
}
@ -89,31 +88,22 @@ static bool IsBetterCanonical(const GlobalVariable &A,
return A.hasUnnamedAddr();
}
bool ConstantMerge::hasKnownAlignment(GlobalVariable *GV) const {
return DL || GV->getAlignment() != 0;
}
unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {
unsigned Align = GV->getAlignment();
if (Align)
return Align;
if (DL)
return DL->getPreferredAlignment(GV);
return 0;
return GV->getParent()->getDataLayout().getPreferredAlignment(GV);
}
bool ConstantMerge::runOnModule(Module &M) {
DL = &M.getDataLayout();
// Find all the globals that are marked "used". These cannot be merged.
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
FindUsedValues(M.getGlobalVariable("llvm.used"), UsedGlobals);
FindUsedValues(M.getGlobalVariable("llvm.compiler.used"), UsedGlobals);
// Map unique <constants, has-unknown-alignment> pairs to globals. We don't
// want to merge globals of unknown alignment with those of explicit
// alignment. If we have DataLayout, we always know the alignment.
DenseMap<PointerIntPair<Constant*, 1, bool>, GlobalVariable*> CMap;
// Map unique constants to globals.
DenseMap<Constant *, GlobalVariable *> CMap;
// Replacements - This vector contains a list of replacements to perform.
SmallVector<std::pair<GlobalVariable*, GlobalVariable*>, 32> Replacements;
@ -155,8 +145,7 @@ bool ConstantMerge::runOnModule(Module &M) {
Constant *Init = GV->getInitializer();
// Check to see if the initializer is already known.
PointerIntPair<Constant*, 1, bool> Pair(Init, hasKnownAlignment(GV));
GlobalVariable *&Slot = CMap[Pair];
GlobalVariable *&Slot = CMap[Init];
// If this is the first constant we find or if the old one is local,
// replace with the current one. If the current is externally visible
@ -187,8 +176,7 @@ bool ConstantMerge::runOnModule(Module &M) {
Constant *Init = GV->getInitializer();
// Check to see if the initializer is already known.
PointerIntPair<Constant*, 1, bool> Pair(Init, hasKnownAlignment(GV));
GlobalVariable *Slot = CMap[Pair];
GlobalVariable *Slot = CMap[Init];
if (!Slot || Slot == GV)
continue;


@ -86,7 +86,6 @@ namespace {
const GlobalStatus &GS);
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
// const DataLayout *DL;
TargetLibraryInfo *TLI;
SmallSet<const Comdat *, 8> NotDiscardableComdats;
};
@ -319,7 +318,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
Constant *SubInit = nullptr;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
ConstantFoldInstruction(GEP, &DL, TLI));
ConstantFoldInstruction(GEP, DL, TLI));
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
@ -806,7 +805,7 @@ static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
TargetLibraryInfo *TLI) {
for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
if (Constant *NewC = ConstantFoldInstruction(I, &DL, TLI)) {
if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
I->replaceAllUsesWith(NewC);
// Advance UI to the next non-I use to avoid invalidating it!
@ -1490,7 +1489,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
// We cannot optimize the malloc if we cannot determine malloc array size.
Value *NElems = getMallocArraySize(CI, &DL, TLI, true);
Value *NElems = getMallocArraySize(CI, DL, TLI, true);
if (!NElems)
return false;
@ -1544,7 +1543,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
CI = cast<CallInst>(Malloc);
}
GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, &DL, TLI, true),
GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true),
DL, TLI);
return true;
}
@ -1948,7 +1947,7 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
if (GV->hasInitializer())
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
auto &DL = M.getDataLayout();
Constant *New = ConstantFoldConstantExpression(CE, &DL, TLI);
Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
if (New && New != CE)
GV->setInitializer(New);
}
@ -2296,7 +2295,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(SI->getOperand(1));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
Ptr = ConstantFoldConstantExpression(CE, &DL, TLI);
Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
DEBUG(dbgs() << "; To: " << *Ptr << "\n");
}
if (!isSimpleEnoughPointerToCommit(Ptr)) {
@ -2341,7 +2340,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
Ptr = ConstantFoldConstantExpression(CE, &DL, TLI);
Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
// If we can't improve the situation by introspecting NewTy,
// we have to give up.
@ -2416,7 +2415,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(LI->getOperand(0));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
Ptr = ConstantFoldConstantExpression(CE, &DL, TLI);
Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
DEBUG(dbgs() << "Found a constant pointer expression, constant "
"folding: " << *Ptr << "\n");
}
@ -2600,7 +2599,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
if (!CurInst->use_empty()) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
InstResult = ConstantFoldConstantExpression(CE, &DL, TLI);
InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
setVal(CurInst, InstResult);
}
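
Outside the evaluator, the same folding entry points follow the reference-based pattern. An illustrative, self-contained sketch; the helper name is not from the patch:

// Illustrative sketch (not part of the patch): refold a global's initializer
// with the DataLayout fetched from its parent module.
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static void refoldInitializer(GlobalVariable &GV) {
  if (!GV.hasInitializer())
    return;
  if (auto *CE = dyn_cast<ConstantExpr>(GV.getInitializer())) {
    const DataLayout &DL = GV.getParent()->getDataLayout();
    if (Constant *New = ConstantFoldConstantExpression(CE, DL))
      if (New != CE)
        GV.setInitializer(New);
  }
}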


@ -52,7 +52,7 @@ bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
}
bool BitSetInfo::containsValue(
const DataLayout *DL,
const DataLayout &DL,
const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout, Value *V,
uint64_t COffset) const {
if (auto GV = dyn_cast<GlobalVariable>(V)) {
@ -63,8 +63,8 @@ bool BitSetInfo::containsValue(
}
if (auto GEP = dyn_cast<GEPOperator>(V)) {
APInt APOffset(DL->getPointerSizeInBits(0), 0);
bool Result = GEP->accumulateConstantOffset(*DL, APOffset);
APInt APOffset(DL.getPointerSizeInBits(0), 0);
bool Result = GEP->accumulateConstantOffset(DL, APOffset);
if (!Result)
return false;
COffset += APOffset.getZExtValue();
@ -186,7 +186,6 @@ struct LowerBitSets : public ModulePass {
Module *M;
const DataLayout *DL;
IntegerType *Int1Ty;
IntegerType *Int8Ty;
IntegerType *Int32Ty;
@ -234,14 +233,14 @@ ModulePass *llvm::createLowerBitSetsPass() { return new LowerBitSets; }
bool LowerBitSets::doInitialization(Module &Mod) {
M = &Mod;
DL = &Mod.getDataLayout();
const DataLayout &DL = Mod.getDataLayout();
Int1Ty = Type::getInt1Ty(M->getContext());
Int8Ty = Type::getInt8Ty(M->getContext());
Int32Ty = Type::getInt32Ty(M->getContext());
Int32PtrTy = PointerType::getUnqual(Int32Ty);
Int64Ty = Type::getInt64Ty(M->getContext());
IntPtrTy = DL->getIntPtrType(M->getContext(), 0);
IntPtrTy = DL.getIntPtrType(M->getContext(), 0);
BitSetNM = M->getNamedMetadata("llvm.bitsets");
@ -396,6 +395,7 @@ Value *LowerBitSets::lowerBitSetCall(
GlobalVariable *CombinedGlobal,
const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout) {
Value *Ptr = CI->getArgOperand(0);
const DataLayout &DL = M->getDataLayout();
if (BSI.containsValue(DL, GlobalLayout, Ptr))
return ConstantInt::getTrue(CombinedGlobal->getParent()->getContext());
@ -430,8 +430,8 @@ Value *LowerBitSets::lowerBitSetCall(
Value *OffsetSHR =
B.CreateLShr(PtrOffset, ConstantInt::get(IntPtrTy, BSI.AlignLog2));
Value *OffsetSHL = B.CreateShl(
PtrOffset, ConstantInt::get(IntPtrTy, DL->getPointerSizeInBits(0) -
BSI.AlignLog2));
PtrOffset,
ConstantInt::get(IntPtrTy, DL.getPointerSizeInBits(0) - BSI.AlignLog2));
BitOffset = B.CreateOr(OffsetSHR, OffsetSHL);
}
@ -466,9 +466,10 @@ void LowerBitSets::buildBitSetsFromGlobals(
const std::vector<GlobalVariable *> &Globals) {
// Build a new global with the combined contents of the referenced globals.
std::vector<Constant *> GlobalInits;
const DataLayout &DL = M->getDataLayout();
for (GlobalVariable *G : Globals) {
GlobalInits.push_back(G->getInitializer());
uint64_t InitSize = DL->getTypeAllocSize(G->getInitializer()->getType());
uint64_t InitSize = DL.getTypeAllocSize(G->getInitializer()->getType());
// Compute the amount of padding required to align the next element to the
// next power of 2.
@ -490,7 +491,7 @@ void LowerBitSets::buildBitSetsFromGlobals(
GlobalValue::PrivateLinkage, NewInit);
const StructLayout *CombinedGlobalLayout =
DL->getStructLayout(cast<StructType>(NewInit->getType()));
DL.getStructLayout(cast<StructType>(NewInit->getType()));
// Compute the offsets of the original globals within the new global.
DenseMap<GlobalVariable *, uint64_t> GlobalLayout;


@ -127,9 +127,8 @@ namespace {
/// side of claiming that two functions are different).
class FunctionComparator {
public:
FunctionComparator(const DataLayout *DL, const Function *F1,
const Function *F2)
: FnL(F1), FnR(F2), DL(DL) {}
FunctionComparator(const Function *F1, const Function *F2)
: FnL(F1), FnR(F2) {}
/// Test whether the two functions have equivalent behaviour.
int compare();
@ -292,8 +291,7 @@ private:
/// Parts to be compared for each comparison stage,
/// most significant stage first:
/// 1. Address space. As numbers.
/// 2. Constant offset, (if "DataLayout *DL" field is not NULL,
/// using GEPOperator::accumulateConstantOffset method).
/// 2. Constant offset, (using GEPOperator::accumulateConstantOffset method).
/// 3. Pointer operand type (using cmpType method).
/// 4. Number of operands.
/// 5. Compare operands, using cmpValues method.
@ -354,8 +352,6 @@ private:
// The two functions undergoing comparison.
const Function *FnL, *FnR;
const DataLayout *DL;
/// Assign serial numbers to values from left function, and values from
/// right function.
/// Explanation:
@ -394,14 +390,13 @@ private:
class FunctionNode {
AssertingVH<Function> F;
const DataLayout *DL;
public:
FunctionNode(Function *F, const DataLayout *DL) : F(F), DL(DL) {}
FunctionNode(Function *F) : F(F) {}
Function *getFunc() const { return F; }
void release() { F = 0; }
bool operator<(const FunctionNode &RHS) const {
return (FunctionComparator(DL, F, RHS.getFunc()).compare()) == -1;
return (FunctionComparator(F, RHS.getFunc()).compare()) == -1;
}
};
}
@ -620,10 +615,11 @@ int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
PointerType *PTyL = dyn_cast<PointerType>(TyL);
PointerType *PTyR = dyn_cast<PointerType>(TyR);
if (DL) {
if (PTyL && PTyL->getAddressSpace() == 0) TyL = DL->getIntPtrType(TyL);
if (PTyR && PTyR->getAddressSpace() == 0) TyR = DL->getIntPtrType(TyR);
}
const DataLayout &DL = FnL->getParent()->getDataLayout();
if (PTyL && PTyL->getAddressSpace() == 0)
TyL = DL.getIntPtrType(TyL);
if (PTyR && PTyR->getAddressSpace() == 0)
TyR = DL.getIntPtrType(TyR);
if (TyL == TyR)
return 0;
@ -855,13 +851,12 @@ int FunctionComparator::cmpGEPs(const GEPOperator *GEPL,
// When we have target data, we can reduce the GEP down to the value in bytes
// added to the address.
if (DL) {
unsigned BitWidth = DL->getPointerSizeInBits(ASL);
APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
if (GEPL->accumulateConstantOffset(*DL, OffsetL) &&
GEPR->accumulateConstantOffset(*DL, OffsetR))
return cmpAPInts(OffsetL, OffsetR);
}
const DataLayout &DL = FnL->getParent()->getDataLayout();
unsigned BitWidth = DL.getPointerSizeInBits(ASL);
APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
if (GEPL->accumulateConstantOffset(DL, OffsetL) &&
GEPR->accumulateConstantOffset(DL, OffsetR))
return cmpAPInts(OffsetL, OffsetR);
if (int Res = cmpNumbers((uint64_t)GEPL->getPointerOperand()->getType(),
(uint64_t)GEPR->getPointerOperand()->getType()))
@ -1122,9 +1117,6 @@ private:
/// to modify it.
FnTreeType FnTree;
/// DataLayout for more accurate GEP comparisons. May be NULL.
const DataLayout *DL;
/// Whether or not the target supports global aliases.
bool HasGlobalAliases;
};
@ -1152,8 +1144,8 @@ bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
for (std::vector<WeakVH>::iterator J = I; J != E && j < Max; ++J, ++j) {
Function *F1 = cast<Function>(*I);
Function *F2 = cast<Function>(*J);
int Res1 = FunctionComparator(DL, F1, F2).compare();
int Res2 = FunctionComparator(DL, F2, F1).compare();
int Res1 = FunctionComparator(F1, F2).compare();
int Res2 = FunctionComparator(F2, F1).compare();
// If F1 <= F2, then F2 >= F1, otherwise report failure.
if (Res1 != -Res2) {
@ -1174,8 +1166,8 @@ bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
continue;
Function *F3 = cast<Function>(*K);
int Res3 = FunctionComparator(DL, F1, F3).compare();
int Res4 = FunctionComparator(DL, F2, F3).compare();
int Res3 = FunctionComparator(F1, F3).compare();
int Res4 = FunctionComparator(F2, F3).compare();
bool Transitive = true;
@ -1212,7 +1204,6 @@ bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;
DL = &M.getDataLayout();
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
@ -1419,7 +1410,7 @@ void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) {
// that was already inserted.
bool MergeFunctions::insert(Function *NewFunction) {
std::pair<FnTreeType::iterator, bool> Result =
FnTree.insert(FunctionNode(NewFunction, DL));
FnTree.insert(FunctionNode(NewFunction));
if (Result.second) {
DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() << '\n');
@ -1456,7 +1447,7 @@ bool MergeFunctions::insert(Function *NewFunction) {
void MergeFunctions::remove(Function *F) {
// We need to make sure we remove F, not a function "equal" to F per the
// function equality comparator.
FnTreeType::iterator found = FnTree.find(FunctionNode(F, DL));
FnTreeType::iterator found = FnTree.find(FunctionNode(F));
size_t Erased = 0;
if (found != FnTree.end() && found->getFunc() == F) {
Erased = 1;


@ -891,7 +891,7 @@ static bool checkRippleForAdd(const APInt &Op0KnownZero,
/// This basically requires proving that the add in the original type would not
/// overflow to change the sign bit or have a carry out.
bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
Instruction *CxtI) {
Instruction &CxtI) {
// There are different heuristics we can use for this. Here are some simple
// ones.
@ -909,18 +909,18 @@ bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
//
// Since the carry into the most significant position is always equal to
// the carry out of the addition, there is no signed overflow.
if (ComputeNumSignBits(LHS, 0, CxtI) > 1 &&
ComputeNumSignBits(RHS, 0, CxtI) > 1)
if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
ComputeNumSignBits(RHS, 0, &CxtI) > 1)
return true;
unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, CxtI);
computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, &CxtI);
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, CxtI);
computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, &CxtI);
// Addition of two 2's complement numbers having opposite signs will never
// overflow.
@ -943,21 +943,21 @@ bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
/// overflow to change the sign bit or have a carry out.
/// TODO: Handle this for Vectors.
bool InstCombiner::WillNotOverflowSignedSub(Value *LHS, Value *RHS,
Instruction *CxtI) {
Instruction &CxtI) {
// If LHS and RHS each have at least two sign bits, the subtraction
// cannot overflow.
if (ComputeNumSignBits(LHS, 0, CxtI) > 1 &&
ComputeNumSignBits(RHS, 0, CxtI) > 1)
if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
ComputeNumSignBits(RHS, 0, &CxtI) > 1)
return true;
unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, CxtI);
computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, &CxtI);
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, CxtI);
computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, &CxtI);
// Subtraction of two 2's complement numbers having identical signs will
// never overflow.
@ -972,12 +972,14 @@ bool InstCombiner::WillNotOverflowSignedSub(Value *LHS, Value *RHS,
/// \brief Return true if we can prove that:
/// (sub LHS, RHS) === (sub nuw LHS, RHS)
bool InstCombiner::WillNotOverflowUnsignedSub(Value *LHS, Value *RHS,
Instruction *CxtI) {
Instruction &CxtI) {
// If the LHS is negative and the RHS is non-negative, no unsigned wrap.
bool LHSKnownNonNegative, LHSKnownNegative;
bool RHSKnownNonNegative, RHSKnownNegative;
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, /*Depth=*/0, CxtI);
ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, /*Depth=*/0, CxtI);
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, /*Depth=*/0,
&CxtI);
ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, /*Depth=*/0,
&CxtI);
if (LHSKnownNegative && RHSKnownNonNegative)
return true;
@ -1046,15 +1048,15 @@ static Value *checkForNegativeOperand(BinaryOperator &I,
}
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
// (A*B)+(A*C) -> A*(B+C) etc
if (Value *V = SimplifyUsingDistributiveLaws(I))
@ -1243,7 +1245,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
if (LHSConv->hasOneUse() &&
ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI, &I)) {
WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) {
// Insert the new, smaller add.
Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
CI, "addconv");
@ -1256,10 +1258,11 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// Only do this if x/y have the same type, if at least one of them has a
// single use (so we don't increase the number of sexts), and if the
// integer add will not overflow.
if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
if (LHSConv->getOperand(0)->getType() ==
RHSConv->getOperand(0)->getType() &&
(LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
WillNotOverflowSignedAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0), &I)) {
RHSConv->getOperand(0), I)) {
// Insert the new integer add.
Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0), "addconv");
@ -1307,7 +1310,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// TODO(jingyue): Consider WillNotOverflowSignedAdd and
// WillNotOverflowUnsignedAdd to reduce the number of invocations of
// computeKnownBits.
if (!I.hasNoSignedWrap() && WillNotOverflowSignedAdd(LHS, RHS, &I)) {
if (!I.hasNoSignedWrap() && WillNotOverflowSignedAdd(LHS, RHS, I)) {
Changed = true;
I.setHasNoSignedWrap(true);
}
@ -1371,7 +1374,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
if (LHSConv->hasOneUse() &&
ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI, &I)) {
WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) {
// Insert the new integer add.
Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
CI, "addconv");
@ -1384,10 +1387,11 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
// Only do this if x/y have the same type, if at least one of them has a
// single use (so we don't increase the number of int->fp conversions),
// and if the integer add will not overflow.
if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
if (LHSConv->getOperand(0)->getType() ==
RHSConv->getOperand(0)->getType() &&
(LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
WillNotOverflowSignedAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0), &I)) {
RHSConv->getOperand(0), I)) {
// Insert the new integer add.
Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0),"addconv");
@ -1436,8 +1440,6 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
///
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
Type *Ty) {
assert(DL && "Must have target data info for this");
// If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
// this.
bool Swapped = false;
@ -1662,26 +1664,24 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// Optimize pointer differences into the same array into a size. Consider:
// &A[10] - &A[0]: we should compile this to "10".
if (DL) {
Value *LHSOp, *RHSOp;
if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
match(Op1, m_PtrToInt(m_Value(RHSOp))))
if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
return ReplaceInstUsesWith(I, Res);
Value *LHSOp, *RHSOp;
if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
match(Op1, m_PtrToInt(m_Value(RHSOp))))
if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
return ReplaceInstUsesWith(I, Res);
// trunc(p)-trunc(q) -> trunc(p-q)
if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
return ReplaceInstUsesWith(I, Res);
}
// trunc(p)-trunc(q) -> trunc(p-q)
if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
return ReplaceInstUsesWith(I, Res);
bool Changed = false;
if (!I.hasNoSignedWrap() && WillNotOverflowSignedSub(Op0, Op1, &I)) {
if (!I.hasNoSignedWrap() && WillNotOverflowSignedSub(Op0, Op1, I)) {
Changed = true;
I.setHasNoSignedWrap(true);
}
if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedSub(Op0, Op1, &I)) {
if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedSub(Op0, Op1, I)) {
Changed = true;
I.setHasNoUnsignedWrap(true);
}
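
The hunk above shows the commit's core pattern in miniature: the `if (DL)` guard around the pointer-difference fold disappears because the DataLayout can no longer be null, and the overflow helpers take the context instruction by reference, so call sites pass `I` rather than `&I`. A minimal stand-alone sketch of that before/after shape, using hypothetical names rather than the real LLVM API:

```cpp
// Hypothetical stand-ins; only the pointer-vs-reference shape matters here.
struct Layout { unsigned pointerBits = 64; };
struct Instr  { int id = 0; };

// Before: the layout was optional, so every user had to guard against null.
//
//   unsigned pointerWidth(const Layout *DL, const Instr *CxtI) {
//     if (!DL) return 0;                 // transform silently skipped
//     return DL->pointerBits;
//   }
//
// After: both dependencies are mandatory references, the guard is gone, and
// callers write pointerWidth(DL, I) instead of pointerWidth(DL, &I).
unsigned pointerWidth(const Layout &DL, const Instr &CxtI) {
  (void)CxtI;                             // the context would feed the analysis
  return DL.pointerBits;                  // no null check needed
}
```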


@ -1709,15 +1709,17 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
Value *Mask = nullptr;
Value *Masked = nullptr;
if (LAnd->getOperand(0) == RAnd->getOperand(0) &&
isKnownToBeAPowerOfTwo(LAnd->getOperand(1), false, 0, AC, CxtI, DT) &&
isKnownToBeAPowerOfTwo(RAnd->getOperand(1), false, 0, AC, CxtI, DT)) {
isKnownToBeAPowerOfTwo(LAnd->getOperand(1), DL, false, 0, AC, CxtI,
DT) &&
isKnownToBeAPowerOfTwo(RAnd->getOperand(1), DL, false, 0, AC, CxtI,
DT)) {
Mask = Builder->CreateOr(LAnd->getOperand(1), RAnd->getOperand(1));
Masked = Builder->CreateAnd(LAnd->getOperand(0), Mask);
} else if (LAnd->getOperand(1) == RAnd->getOperand(1) &&
isKnownToBeAPowerOfTwo(LAnd->getOperand(0), false, 0, AC, CxtI,
DT) &&
isKnownToBeAPowerOfTwo(RAnd->getOperand(0), false, 0, AC, CxtI,
DT)) {
isKnownToBeAPowerOfTwo(LAnd->getOperand(0), DL, false, 0, AC,
CxtI, DT) &&
isKnownToBeAPowerOfTwo(RAnd->getOperand(0), DL, false, 0, AC,
CxtI, DT)) {
Mask = Builder->CreateOr(LAnd->getOperand(0), RAnd->getOperand(0));
Masked = Builder->CreateAnd(LAnd->getOperand(1), Mask);
}


@ -15,7 +15,6 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
@ -61,8 +60,8 @@ static Type *reduceToSingleValueType(Type *T) {
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, AC, MI, DT);
unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, AC, MI, DT);
unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
unsigned MinAlign = std::min(DstAlign, SrcAlign);
unsigned CopyAlign = MI->getAlignment();
@ -108,7 +107,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
if (StrippedDest != MI->getArgOperand(0)) {
Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
if (DL && SrcETy->isSized() && DL->getTypeStoreSize(SrcETy) == Size) {
if (SrcETy->isSized() && DL.getTypeStoreSize(SrcETy) == Size) {
// The SrcETy might be something like {{{double}}} or [1 x double]. Rip
// down through these levels if so.
SrcETy = reduceToSingleValueType(SrcETy);
@ -156,7 +155,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
unsigned Alignment = getKnownAlignment(MI->getDest(), DL, AC, MI, DT);
unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
if (MI->getAlignment() < Alignment) {
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
Alignment, false));
@ -386,7 +385,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// can prove that it will never overflow.
if (II->getIntrinsicID() == Intrinsic::sadd_with_overflow) {
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
if (WillNotOverflowSignedAdd(LHS, RHS, II)) {
if (WillNotOverflowSignedAdd(LHS, RHS, *II)) {
return CreateOverflowTuple(II, Builder->CreateNSWAdd(LHS, RHS), false);
}
}
@ -407,11 +406,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
}
if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow) {
if (WillNotOverflowSignedSub(LHS, RHS, II)) {
if (WillNotOverflowSignedSub(LHS, RHS, *II)) {
return CreateOverflowTuple(II, Builder->CreateNSWSub(LHS, RHS), false);
}
} else {
if (WillNotOverflowUnsignedSub(LHS, RHS, II)) {
if (WillNotOverflowUnsignedSub(LHS, RHS, *II)) {
return CreateOverflowTuple(II, Builder->CreateNUWSub(LHS, RHS), false);
}
}
@ -452,7 +451,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
if (II->getIntrinsicID() == Intrinsic::smul_with_overflow) {
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
if (WillNotOverflowSignedMul(LHS, RHS, II)) {
if (WillNotOverflowSignedMul(LHS, RHS, *II)) {
return CreateOverflowTuple(II, Builder->CreateNSWMul(LHS, RHS), false);
}
}
@ -544,7 +543,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
// Turn PPC lvx -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, AC, II, DT) >=
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
16) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
@ -561,7 +560,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, AC, II, DT) >=
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
@ -578,7 +577,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
case Intrinsic::ppc_qpx_qvlfs:
// Turn PPC QPX qvlfs -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, AC, II, DT) >=
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
16) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
@ -587,7 +586,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
case Intrinsic::ppc_qpx_qvlfd:
// Turn PPC QPX qvlfd -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, AC, II, DT) >=
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, AC, DT) >=
32) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
@ -596,7 +595,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
case Intrinsic::ppc_qpx_qvstfs:
// Turn PPC QPX qvstfs -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, AC, II, DT) >=
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
@ -606,7 +605,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
case Intrinsic::ppc_qpx_qvstfd:
// Turn PPC QPX qvstfd -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, AC, II, DT) >=
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, AC, DT) >=
32) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
@ -618,7 +617,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, AC, II, DT) >=
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(1)->getType());
@ -735,9 +734,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
unsigned LowHalfElts = VWidth / 2;
APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
APInt UndefElts(VWidth, 0);
if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
InputDemandedElts,
UndefElts)) {
if (Value *TmpV = SimplifyDemandedVectorElts(
II->getArgOperand(0), InputDemandedElts, UndefElts)) {
II->setArgOperand(0, TmpV);
return II;
}
@ -945,12 +943,12 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
unsigned Idx =
cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
Idx &= 31; // Match the hardware behavior.
if (DL && DL->isLittleEndian())
if (DL.isLittleEndian())
Idx = 31 - Idx;
if (!ExtractedElts[Idx]) {
Value *Op0ToUse = (DL && DL->isLittleEndian()) ? Op1 : Op0;
Value *Op1ToUse = (DL && DL->isLittleEndian()) ? Op0 : Op1;
Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
ExtractedElts[Idx] =
Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
Builder->getInt32(Idx&15));
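
The snippet above masks the vperm index to five bits, mirrors it on little-endian targets, and swaps which source operand the low half refers to. A toy model of just that index arithmetic, kept separate from the IR-building calls (illustrative only, not the LLVM API):

```cpp
#include <cassert>

// Which byte a single vperm mask entry selects, modelled after the code
// above: mask out 5 bits, mirror the index on little-endian, and pick the
// operand accordingly (little-endian also swaps the two sources).
struct PermPick {
  bool useSecondOperand;   // false -> first source vector, true -> second
  unsigned byteIndex;      // 0..15 within the chosen source
};

PermPick pickByte(unsigned maskEntry, bool littleEndian) {
  unsigned idx = maskEntry & 31;   // match the hardware behavior
  if (littleEndian)
    idx = 31 - idx;                // mirror the numbering
  PermPick p;
  bool firstHalf = idx < 16;
  p.useSecondOperand = littleEndian ? firstHalf : !firstHalf;
  p.byteIndex = idx & 15;
  return p;
}

int main() {
  PermPick be = pickByte(3, /*littleEndian=*/false);
  assert(!be.useSecondOperand && be.byteIndex == 3);
  PermPick le = pickByte(3, /*littleEndian=*/true);   // 31 - 3 = 28
  assert(!le.useSecondOperand && le.byteIndex == 12);
  return 0;
}
```
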
@ -979,7 +977,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::arm_neon_vst2lane:
case Intrinsic::arm_neon_vst3lane:
case Intrinsic::arm_neon_vst4lane: {
unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, AC, II, DT);
unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, AC, DT);
unsigned AlignArg = II->getNumArgOperands() - 1;
ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
@ -1118,7 +1116,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
RHS->getType()->isPointerTy() &&
cast<Constant>(RHS)->isNullValue()) {
LoadInst* LI = cast<LoadInst>(LHS);
if (isValidAssumeForContext(II, LI, DL, DT)) {
if (isValidAssumeForContext(II, LI, DT)) {
MDNode *MD = MDNode::get(II->getContext(), None);
LI->setMetadata(LLVMContext::MD_nonnull, MD);
return EraseInstFromFunction(*II);
@ -1192,8 +1190,8 @@ Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
const CastInst * const CI,
const DataLayout * const DL,
const DataLayout &DL,
const CastInst *const CI,
const int ix) {
if (!CI->isLosslessCast())
return false;
@ -1217,7 +1215,7 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
if (!SrcTy->isSized() || !DstTy->isSized())
return false;
if (!DL || DL->getTypeAllocSize(SrcTy) != DL->getTypeAllocSize(DstTy))
if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
return false;
return true;
}
@ -1226,7 +1224,7 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *DL) {
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
if (!CI->getCalledFunction()) return nullptr;
auto InstCombineRAUW = [this](Instruction *From, Value *With) {
@ -1391,7 +1389,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
E = CS.arg_end(); I != E; ++I, ++ix) {
CastInst *CI = dyn_cast<CastInst>(*I);
if (CI && isSafeToEliminateVarargsCast(CS, CI, DL, ix)) {
if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
*I = CI->getOperand(0);
Changed = true;
}
@ -1408,7 +1406,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// this. None of these calls are seen as possibly dead so go ahead and
// delete the instruction now.
if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
Instruction *I = tryOptimizeCall(CI, DL);
Instruction *I = tryOptimizeCall(CI);
// If we changed something return the result, etc. Otherwise let
// the fallthrough check.
if (I) return EraseInstFromFunction(*I);
@ -1512,12 +1510,12 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
Attribute::ByVal)) {
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
if (!ParamPTy || !ParamPTy->getElementType()->isSized() || !DL)
if (!ParamPTy || !ParamPTy->getElementType()->isSized())
return false;
Type *CurElTy = ActTy->getPointerElementType();
if (DL->getTypeAllocSize(CurElTy) !=
DL->getTypeAllocSize(ParamPTy->getElementType()))
if (DL.getTypeAllocSize(CurElTy) !=
DL.getTypeAllocSize(ParamPTy->getElementType()))
return false;
}
}


@ -80,9 +80,6 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
/// try to eliminate the cast by moving the type information into the alloc.
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
AllocaInst &AI) {
// This requires DataLayout to get the alloca alignment and size information.
if (!DL) return nullptr;
PointerType *PTy = cast<PointerType>(CI.getType());
BuilderTy AllocaBuilder(*Builder);
@ -93,8 +90,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
Type *CastElTy = PTy->getElementType();
if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;
unsigned AllocElTyAlign = DL->getABITypeAlignment(AllocElTy);
unsigned CastElTyAlign = DL->getABITypeAlignment(CastElTy);
unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
if (CastElTyAlign < AllocElTyAlign) return nullptr;
// If the allocation has multiple uses, only promote it if we are strictly
@ -102,14 +99,14 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// same, we open the door to infinite loops of various kinds.
if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;
uint64_t AllocElTySize = DL->getTypeAllocSize(AllocElTy);
uint64_t CastElTySize = DL->getTypeAllocSize(CastElTy);
uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy);
uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy);
if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;
// If the allocation has multiple uses, only promote it if we're not
// shrinking the amount of memory being allocated.
uint64_t AllocElTyStoreSize = DL->getTypeStoreSize(AllocElTy);
uint64_t CastElTyStoreSize = DL->getTypeStoreSize(CastElTy);
uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy);
uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy);
if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;
// See if we can satisfy the modulus by pulling a scale out of the array
@ -215,7 +212,8 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
PHINode *OPN = cast<PHINode>(I);
PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
Value *V =EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
Value *V =
EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
NPN->addIncoming(V, OPN->getIncomingBlock(i));
}
Res = NPN;
@ -234,25 +232,22 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
/// This function is a wrapper around CastInst::isEliminableCastPair. It
/// simply extracts arguments and returns what that function returns.
static Instruction::CastOps
isEliminableCastPair(
const CastInst *CI, ///< The first cast instruction
unsigned opcode, ///< The opcode of the second cast instruction
Type *DstTy, ///< The target type for the second cast instruction
const DataLayout *DL ///< The target data for pointer size
) {
isEliminableCastPair(const CastInst *CI, ///< First cast instruction
unsigned opcode, ///< Opcode for the second cast
Type *DstTy, ///< Target type for the second cast
const DataLayout &DL) {
Type *SrcTy = CI->getOperand(0)->getType(); // A from above
Type *MidTy = CI->getType(); // B from above
// Get the opcodes of the two Cast instructions
Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
Instruction::CastOps secondOp = Instruction::CastOps(opcode);
Type *SrcIntPtrTy = DL && SrcTy->isPtrOrPtrVectorTy() ?
DL->getIntPtrType(SrcTy) : nullptr;
Type *MidIntPtrTy = DL && MidTy->isPtrOrPtrVectorTy() ?
DL->getIntPtrType(MidTy) : nullptr;
Type *DstIntPtrTy = DL && DstTy->isPtrOrPtrVectorTy() ?
DL->getIntPtrType(DstTy) : nullptr;
Type *SrcIntPtrTy =
SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
Type *MidIntPtrTy =
MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
Type *DstIntPtrTy =
DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
DstTy, SrcIntPtrTy, MidIntPtrTy,
DstIntPtrTy);
@ -298,7 +293,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
// eliminate it now.
if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
if (Instruction::CastOps opc =
isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), DL)) {
isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), DL)) {
// The first cast (CSrc) is eliminable so we need to fix up or replace
// the second cast (CI). CSrc will then have a good chance of being dead.
return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
@ -314,8 +309,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
if (isa<PHINode>(Src)) {
// We don't do this if this would create a PHI node with an illegal type if
// it is currently legal.
if (!Src->getType()->isIntegerTy() ||
!CI.getType()->isIntegerTy() ||
if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
ShouldChangeType(CI.getType(), Src->getType()))
if (Instruction *NV = FoldOpIntoPhi(CI))
return NV;
@ -1419,18 +1413,15 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
// If the source integer type is not the intptr_t type for this target, do a
// trunc or zext to the intptr_t type, then inttoptr of it. This allows the
// cast to be exposed to other transforms.
unsigned AS = CI.getAddressSpace();
if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
DL.getPointerSizeInBits(AS)) {
Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
if (DL) {
unsigned AS = CI.getAddressSpace();
if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
DL->getPointerSizeInBits(AS)) {
Type *Ty = DL->getIntPtrType(CI.getContext(), AS);
if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
Value *P = Builder->CreateZExtOrTrunc(CI.getOperand(0), Ty);
return new IntToPtrInst(P, CI.getType());
}
Value *P = Builder->CreateZExtOrTrunc(CI.getOperand(0), Ty);
return new IntToPtrInst(P, CI.getType());
}
if (Instruction *I = commonCastTransforms(CI))
@ -1460,25 +1451,19 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
return &CI;
}
if (!DL)
return commonCastTransforms(CI);
// If the GEP has a single use, and the base pointer is a bitcast, and the
// GEP computes a constant offset, see if we can convert these three
// instructions into fewer. This typically happens with unions and other
// non-type-safe code.
unsigned AS = GEP->getPointerAddressSpace();
unsigned OffsetBits = DL->getPointerSizeInBits(AS);
unsigned OffsetBits = DL.getPointerSizeInBits(AS);
APInt Offset(OffsetBits, 0);
BitCastInst *BCI = dyn_cast<BitCastInst>(GEP->getOperand(0));
if (GEP->hasOneUse() &&
BCI &&
GEP->accumulateConstantOffset(*DL, Offset)) {
if (GEP->hasOneUse() && BCI && GEP->accumulateConstantOffset(DL, Offset)) {
// Get the base pointer input of the bitcast, and the type it points to.
Value *OrigBase = BCI->getOperand(0);
SmallVector<Value*, 8> NewIndices;
if (FindElementAtOffset(OrigBase->getType(),
Offset.getSExtValue(),
if (FindElementAtOffset(OrigBase->getType(), Offset.getSExtValue(),
NewIndices)) {
// If we were able to index down into an element, create the GEP
// and bitcast the result. This eliminates one bitcast, potentially
@ -1504,16 +1489,13 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
// do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
// to be exposed to other transforms.
if (!DL)
return commonPointerCastTransforms(CI);
Type *Ty = CI.getType();
unsigned AS = CI.getPointerAddressSpace();
if (Ty->getScalarSizeInBits() == DL->getPointerSizeInBits(AS))
if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
return commonPointerCastTransforms(CI);
Type *PtrTy = DL->getIntPtrType(CI.getContext(), AS);
Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
if (Ty->isVectorTy()) // Handle vectors of pointers.
PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
@ -1597,8 +1579,8 @@ static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
static bool CollectInsertionElements(Value *V, unsigned Shift,
SmallVectorImpl<Value*> &Elements,
Type *VecEltTy, InstCombiner &IC) {
SmallVectorImpl<Value *> &Elements,
Type *VecEltTy, bool isBigEndian) {
assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
"Shift should be a multiple of the element type size");
@ -1614,7 +1596,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
return true;
unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
if (IC.getDataLayout()->isBigEndian())
if (isBigEndian)
ElementIndex = Elements.size() - ElementIndex - 1;
// Fail if multiple elements are inserted into this slot.
@ -1634,7 +1616,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
// it to the right type so it gets properly inserted.
if (NumElts == 1)
return CollectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
Shift, Elements, VecEltTy, IC);
Shift, Elements, VecEltTy, isBigEndian);
// Okay, this is a constant that covers multiple elements. Slice it up into
// pieces and insert each element-sized piece into the vector.
@ -1649,7 +1631,8 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
ShiftI));
Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
if (!CollectInsertionElements(Piece, ShiftI, Elements, VecEltTy, IC))
if (!CollectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
isBigEndian))
return false;
}
return true;
@ -1662,28 +1645,28 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
switch (I->getOpcode()) {
default: return false; // Unhandled case.
case Instruction::BitCast:
return CollectInsertionElements(I->getOperand(0), Shift,
Elements, VecEltTy, IC);
return CollectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
isBigEndian);
case Instruction::ZExt:
if (!isMultipleOfTypeSize(
I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
VecEltTy))
return false;
return CollectInsertionElements(I->getOperand(0), Shift,
Elements, VecEltTy, IC);
return CollectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
isBigEndian);
case Instruction::Or:
return CollectInsertionElements(I->getOperand(0), Shift,
Elements, VecEltTy, IC) &&
CollectInsertionElements(I->getOperand(1), Shift,
Elements, VecEltTy, IC);
return CollectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
isBigEndian) &&
CollectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
isBigEndian);
case Instruction::Shl: {
// Must be shifting by a constant that is a multiple of the element size.
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
if (!CI) return false;
Shift += CI->getZExtValue();
if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
return CollectInsertionElements(I->getOperand(0), Shift,
Elements, VecEltTy, IC);
return CollectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
isBigEndian);
}
}
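
Instead of dragging the whole `InstCombiner` into this static recursion, the helper now threads the one fact it needs, the target's endianness, as a plain bool; the caller further down computes it once from the DataLayout. A reduced sketch of that pattern with illustrative names (not the LLVM API):

```cpp
#include <vector>

// Toy expression node: either a leaf value or an OR of two sub-expressions.
struct Expr {
  int leafValue = 0;
  const Expr *lhs = nullptr;
  const Expr *rhs = nullptr;
};

// The recursion carries only `isBigEndian`, not a heavyweight context object,
// so it stays a small free function that is trivial to call and to test.
static bool collectLeaves(const Expr &E, std::vector<int> &Out,
                          bool isBigEndian) {
  if (!E.lhs && !E.rhs) {
    // The toy version records the flag's effect by prepending on big-endian
    // targets, mirroring the element order, and appending otherwise.
    if (isBigEndian)
      Out.insert(Out.begin(), E.leafValue);
    else
      Out.push_back(E.leafValue);
    return true;
  }
  return E.lhs && E.rhs && collectLeaves(*E.lhs, Out, isBigEndian) &&
         collectLeaves(*E.rhs, Out, isBigEndian);
}

// The caller computes the flag once, e.g. from the DataLayout's isBigEndian(),
// as the call site below does.
std::vector<int> collectAll(const Expr &Root, bool targetIsBigEndian) {
  std::vector<int> Leaves;
  if (!collectLeaves(Root, Leaves, targetIsBigEndian))
    Leaves.clear();
  return Leaves;
}
```
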
@ -1706,15 +1689,13 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
InstCombiner &IC) {
// We need to know the target byte order to perform this optimization.
if (!IC.getDataLayout()) return nullptr;
VectorType *DestVecTy = cast<VectorType>(CI.getType());
Value *IntInput = CI.getOperand(0);
SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
if (!CollectInsertionElements(IntInput, 0, Elements,
DestVecTy->getElementType(), IC))
DestVecTy->getElementType(),
IC.getDataLayout().isBigEndian()))
return nullptr;
// If we succeeded, we know that all of the elements are specified by Elements
@ -1734,10 +1715,8 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
/// OptimizeIntToFloatBitCast - See if we can optimize an integer->float/double
/// bitcast. The various long double bitcasts can't get in here.
static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
// We need to know the target byte order to perform this optimization.
if (!IC.getDataLayout()) return nullptr;
static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI, InstCombiner &IC,
const DataLayout &DL) {
Value *Src = CI.getOperand(0);
Type *DestTy = CI.getType();
@ -1760,7 +1739,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
}
unsigned Elt = 0;
if (IC.getDataLayout()->isBigEndian())
if (DL.isBigEndian())
Elt = VecTy->getPrimitiveSizeInBits() / DestWidth - 1;
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
}
@ -1784,7 +1763,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
}
unsigned Elt = ShAmt->getZExtValue() / DestWidth;
if (IC.getDataLayout()->isBigEndian())
if (DL.isBigEndian())
Elt = VecTy->getPrimitiveSizeInBits() / DestWidth - 1 - Elt;
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
}
@ -1839,7 +1818,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// Try to optimize int -> float bitcasts.
if ((DestTy->isFloatTy() || DestTy->isDoubleTy()) && isa<IntegerType>(SrcTy))
if (Instruction *I = OptimizeIntToFloatBitCast(CI, *this))
if (Instruction *I = OptimizeIntToFloatBitCast(CI, *this, DL))
return I;
if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {


@ -229,10 +229,6 @@ static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
Instruction *InstCombiner::
FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
CmpInst &ICI, ConstantInt *AndCst) {
// We need TD information to know the pointer size unless this is inbounds.
if (!GEP->isInBounds() && !DL)
return nullptr;
Constant *Init = GV->getInitializer();
if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
return nullptr;
@ -303,7 +299,6 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// the array, this will fully represent all the comparison results.
uint64_t MagicBitvector = 0;
// Scan the array and see if one of our patterns matches.
Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
@ -398,7 +393,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// index down like the GEP would do implicitly. We don't have to do this for
// an inbounds GEP because the index can't be out of range.
if (!GEP->isInBounds()) {
Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
Idx = Builder->CreateTrunc(Idx, IntPtrTy);
@ -487,10 +482,8 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// - Default to i32
if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
Ty = Idx->getType();
else if (DL)
Ty = DL->getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
else if (ArrayElementCount <= 32)
Ty = Type::getInt32Ty(Init->getContext());
else
Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
if (Ty) {
Value *V = Builder->CreateIntCast(Idx, Ty, false);
@ -514,8 +507,8 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
const DataLayout &DL = *IC.getDataLayout();
static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
const DataLayout &DL) {
gep_type_iterator GTI = gep_type_begin(GEP);
// Check to see if this gep only has a single variable index. If so, and if
@ -628,12 +621,12 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
RHS = RHS->stripPointerCasts();
Value *PtrBase = GEPLHS->getOperand(0);
if (DL && PtrBase == RHS && GEPLHS->isInBounds()) {
if (PtrBase == RHS && GEPLHS->isInBounds()) {
// ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
// This transformation (ignoring the base and scales) is valid because we
// know pointers can't overflow since the gep is inbounds. See if we can
// output an optimized form.
Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, *this);
Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, *this, DL);
// If not, synthesize the offset the hard way.
if (!Offset)
@ -661,11 +654,11 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// If we're comparing GEPs with two base pointers that only differ in type
// and both GEPs have only constant indices or just one use, then fold
// the compare with the adjusted indices.
if (DL && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
(GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
(GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
PtrBase->stripPointerCasts() ==
GEPRHS->getOperand(0)->stripPointerCasts()) {
GEPRHS->getOperand(0)->stripPointerCasts()) {
Value *LOffset = EmitGEPOffset(GEPLHS);
Value *ROffset = EmitGEPOffset(GEPRHS);
@ -733,9 +726,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// Only lower this if the icmp is the only user of the GEP or if we expect
// the result to fold to a constant!
if (DL &&
GEPsInBounds &&
(isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
(isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
// ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
Value *L = EmitGEPOffset(GEPLHS);
@ -1928,8 +1919,8 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
// integer type is the same size as the pointer type.
if (DL && LHSCI->getOpcode() == Instruction::PtrToInt &&
DL->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
if (LHSCI->getOpcode() == Instruction::PtrToInt &&
DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
Value *RHSOp = nullptr;
if (PtrToIntOperator *RHSC = dyn_cast<PtrToIntOperator>(ICI.getOperand(1))) {
Value *RHSCIOp = RHSC->getOperand(0);
@ -2660,8 +2651,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
unsigned BitWidth = 0;
if (Ty->isIntOrIntVectorTy())
BitWidth = Ty->getScalarSizeInBits();
else if (DL) // Pointers require DL info to get their size.
BitWidth = DL->getTypeSizeInBits(Ty->getScalarType());
else // Get pointer size.
BitWidth = DL.getTypeSizeInBits(Ty->getScalarType());
bool isSignBit = false;
@ -2774,8 +2765,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Op0KnownZero, Op0KnownOne, 0))
return &I;
if (SimplifyDemandedBits(I.getOperandUse(1),
APInt::getAllOnesValue(BitWidth),
Op1KnownZero, Op1KnownOne, 0))
APInt::getAllOnesValue(BitWidth), Op1KnownZero,
Op1KnownOne, 0))
return &I;
// Given the known and unknown bits, compute a range that the LHS could be
@ -3094,9 +3085,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
}
case Instruction::IntToPtr:
// icmp pred inttoptr(X), null -> icmp pred X, 0
if (RHSC->isNullValue() && DL &&
DL->getIntPtrType(RHSC->getType()) ==
LHSI->getOperand(0)->getType())
if (RHSC->isNullValue() &&
DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
Constant::getNullValue(LHSI->getOperand(0)->getType()));
break;
@ -3428,7 +3418,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// if A is a power of 2.
if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
match(Op1, m_Zero()) &&
isKnownToBeAPowerOfTwo(A, false, 0, AC, &I, DT) && I.isEquality())
isKnownToBeAPowerOfTwo(A, DL, false, 0, AC, &I, DT) && I.isEquality())
return new ICmpInst(I.getInversePredicate(),
Builder->CreateAnd(A, B),
Op1);


@ -158,10 +158,10 @@ private:
AssumptionCache *AC;
TargetLibraryInfo *TLI;
DominatorTree *DT;
const DataLayout &DL;
// Optional analyses. When non-null, these can both be used to do better
// combining and will be updated to reflect any changes.
const DataLayout *DL;
LoopInfo *LI;
bool MadeIRChange;
@ -169,7 +169,7 @@ private:
public:
InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder,
bool MinimizeSize, AssumptionCache *AC, TargetLibraryInfo *TLI,
DominatorTree *DT, const DataLayout *DL, LoopInfo *LI)
DominatorTree *DT, const DataLayout &DL, LoopInfo *LI)
: Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
AC(AC), TLI(TLI), DT(DT), DL(DL), LI(LI), MadeIRChange(false) {}
@ -180,7 +180,7 @@ public:
AssumptionCache *getAssumptionCache() const { return AC; }
const DataLayout *getDataLayout() const { return DL; }
const DataLayout &getDataLayout() const { return DL; }
DominatorTree *getDominatorTree() const { return DT; }
@ -330,17 +330,17 @@ private:
Type *Ty);
Instruction *visitCallSite(CallSite CS);
Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *DL);
Instruction *tryOptimizeCall(CallInst *CI);
bool transformConstExprCastCall(CallSite CS);
Instruction *transformCallThroughTrampoline(CallSite CS,
IntrinsicInst *Tramp);
Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
bool DoXform = true);
Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS, Instruction *CxtI);
bool WillNotOverflowSignedSub(Value *LHS, Value *RHS, Instruction *CxtI);
bool WillNotOverflowUnsignedSub(Value *LHS, Value *RHS, Instruction *CxtI);
bool WillNotOverflowSignedMul(Value *LHS, Value *RHS, Instruction *CxtI);
bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS, Instruction &CxtI);
bool WillNotOverflowSignedSub(Value *LHS, Value *RHS, Instruction &CxtI);
bool WillNotOverflowUnsignedSub(Value *LHS, Value *RHS, Instruction &CxtI);
bool WillNotOverflowSignedMul(Value *LHS, Value *RHS, Instruction &CxtI);
Value *EmitGEPOffset(User *GEP);
Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
@ -423,7 +423,7 @@ public:
}
void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
unsigned Depth = 0, Instruction *CxtI = nullptr) const {
unsigned Depth, Instruction *CxtI) const {
return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
DT);
}
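
This header change is what drives all of the call-site churn: `DL` becomes a reference member bound once in the constructor, the getter returns a reference, and the thin analysis wrappers simply forward it. A reduced model of that shape (simplified hypothetical types, not the real InstCombiner):

```cpp
// Stand-in for llvm::DataLayout: only what the sketch needs.
struct LayoutModel {
  bool littleEndian = true;
  bool isLittleEndian() const { return littleEndian; }
};

class CombinerModel {
  // A reference member makes the dependency mandatory at construction time;
  // there is no null state to check for anywhere in the class.
  const LayoutModel &DL;

public:
  explicit CombinerModel(const LayoutModel &DL) : DL(DL) {}

  // The getter now hands out a reference as well, so callers can chain
  // calls like getDataLayout().isLittleEndian() without a null test.
  const LayoutModel &getDataLayout() const { return DL; }

  // Thin wrappers just forward the reference to the underlying analysis.
  bool targetIsLittleEndian() const { return DL.isLittleEndian(); }
};
```
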
@ -468,7 +468,7 @@ private:
/// bits.
Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, APInt &KnownZero,
APInt &KnownOne, unsigned Depth,
Instruction *CxtI = nullptr);
Instruction *CxtI);
bool SimplifyDemandedBits(Use &U, APInt DemandedMask, APInt &KnownZero,
APInt &KnownOne, unsigned Depth = 0);
/// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded


@ -167,14 +167,11 @@ isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
if (DL) {
Type *IntPtrTy = DL->getIntPtrType(AI.getType());
if (AI.getArraySize()->getType() != IntPtrTy) {
Value *V = Builder->CreateIntCast(AI.getArraySize(),
IntPtrTy, false);
AI.setOperand(0, V);
return &AI;
}
Type *IntPtrTy = DL.getIntPtrType(AI.getType());
if (AI.getArraySize()->getType() != IntPtrTy) {
Value *V = Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
AI.setOperand(0, V);
return &AI;
}
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
@ -194,9 +191,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Now that I is pointing to the first non-allocation-inst in the block,
// insert our getelementptr instruction...
//
Type *IdxTy = DL
? DL->getIntPtrType(AI.getType())
: Type::getInt64Ty(AI.getContext());
Type *IdxTy = DL.getIntPtrType(AI.getType());
Value *NullIdx = Constant::getNullValue(IdxTy);
Value *Idx[2] = { NullIdx, NullIdx };
Instruction *GEP =
@ -211,15 +206,15 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
}
}
if (DL && AI.getAllocatedType()->isSized()) {
if (AI.getAllocatedType()->isSized()) {
// If the alignment is 0 (unspecified), assign it the preferred alignment.
if (AI.getAlignment() == 0)
AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));
AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));
// Move all alloca's of zero byte objects to the entry block and merge them
// together. Note that we only do this for alloca's, because malloc should
// allocate and return a unique pointer, even for a zero byte allocation.
if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
// For a zero sized alloca there is no point in doing an array allocation.
// This is helpful if the array size is a complicated expression not used
// elsewhere.
@ -237,7 +232,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// dominance as the array size was forced to a constant earlier already.
AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
AI.moveBefore(FirstInst);
return &AI;
}
@ -246,7 +241,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// assign it the preferred alignment.
if (EntryAI->getAlignment() == 0)
EntryAI->setAlignment(
DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
// Replace this zero-sized alloca with the one at the start of the entry
// block after ensuring that the address will be aligned enough for both
// types.
@ -270,7 +265,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
SmallVector<Instruction *, 4> ToDelete;
if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
unsigned SourceAlign = getOrEnforceKnownAlignment(
Copy->getSource(), AI.getAlignment(), DL, AC, &AI, DT);
Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
if (AI.getAlignment() <= SourceAlign) {
DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
@ -439,22 +434,22 @@ static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
return nullptr;
Type *Ty = LI.getType();
const DataLayout &DL = IC.getDataLayout();
// Try to canonicalize loads which are only ever stored to operate over
// integers instead of any other type. We only do this when the loaded type
// is sized and has a size exactly the same as its store size and the store
// size is a legal integer type.
const DataLayout *DL = IC.getDataLayout();
if (!Ty->isIntegerTy() && Ty->isSized() && DL &&
DL->isLegalInteger(DL->getTypeStoreSizeInBits(Ty)) &&
DL->getTypeStoreSizeInBits(Ty) == DL->getTypeSizeInBits(Ty)) {
if (!Ty->isIntegerTy() && Ty->isSized() &&
DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
auto *SI = dyn_cast<StoreInst>(U);
return SI && SI->getPointerOperand() != &LI;
})) {
LoadInst *NewLoad = combineLoadToNewType(
IC, LI,
Type::getIntNTy(LI.getContext(), DL->getTypeStoreSizeInBits(Ty)));
Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
// Replace all the stores with stores of the newly loaded value.
for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
auto *SI = cast<StoreInst>(*UI++);
@ -489,7 +484,7 @@ static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
const DataLayout *DL) {
const DataLayout &DL) {
SmallPtrSet<Value *, 4> Visited;
SmallVector<Value *, 4> Worklist(1, V);
@ -529,7 +524,7 @@ static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
if (!CS)
return false;
uint64_t TypeSize = DL->getTypeAllocSize(AI->getAllocatedType());
uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
// Make sure that, even if the multiplication below would wrap as a
// uint64_t, we still do the right thing.
if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
@ -541,7 +536,7 @@ static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
return false;
uint64_t InitSize = DL->getTypeAllocSize(GV->getType()->getElementType());
uint64_t InitSize = DL.getTypeAllocSize(GV->getType()->getElementType());
if (InitSize > MaxSize)
return false;
continue;
@ -570,8 +565,7 @@ static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
// offsets those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
Instruction *MemI, unsigned &Idx) {
const DataLayout *DL = IC.getDataLayout();
if (GEPI->getNumOperands() < 2 || !DL)
if (GEPI->getNumOperands() < 2)
return false;
// Find the first non-zero index of a GEP. If all indices are zero, return
@ -603,7 +597,8 @@ static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
GetElementPtrInst::getIndexedType(GEPI->getOperand(0)->getType(), Ops);
if (!AllocTy || !AllocTy->isSized())
return false;
uint64_t TyAllocSize = DL->getTypeAllocSize(AllocTy);
const DataLayout &DL = IC.getDataLayout();
uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
// If there are more indices after the one we might replace with a zero, make
// sure they're all non-negative. If any of them are negative, the overall
@ -665,18 +660,16 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
return Res;
// Attempt to improve the alignment.
if (DL) {
unsigned KnownAlign = getOrEnforceKnownAlignment(
Op, DL->getPrefTypeAlignment(LI.getType()), DL, AC, &LI, DT);
unsigned LoadAlign = LI.getAlignment();
unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
DL->getABITypeAlignment(LI.getType());
unsigned KnownAlign = getOrEnforceKnownAlignment(
Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
unsigned LoadAlign = LI.getAlignment();
unsigned EffectiveLoadAlign =
LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
if (KnownAlign > EffectiveLoadAlign)
LI.setAlignment(KnownAlign);
else if (LoadAlign == 0)
LI.setAlignment(EffectiveLoadAlign);
}
if (KnownAlign > EffectiveLoadAlign)
LI.setAlignment(KnownAlign);
else if (LoadAlign == 0)
LI.setAlignment(EffectiveLoadAlign);
// Replace GEP indices if possible.
if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
@ -738,8 +731,8 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
// load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
unsigned Align = LI.getAlignment();
if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align) &&
isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align)) {
LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
SI->getOperand(1)->getName()+".val");
LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
@ -845,18 +838,16 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
return EraseInstFromFunction(SI);
// Attempt to improve the alignment.
if (DL) {
unsigned KnownAlign = getOrEnforceKnownAlignment(
Ptr, DL->getPrefTypeAlignment(Val->getType()), DL, AC, &SI, DT);
unsigned StoreAlign = SI.getAlignment();
unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
DL->getABITypeAlignment(Val->getType());
unsigned KnownAlign = getOrEnforceKnownAlignment(
Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
unsigned StoreAlign = SI.getAlignment();
unsigned EffectiveStoreAlign =
StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
if (KnownAlign > EffectiveStoreAlign)
SI.setAlignment(KnownAlign);
else if (StoreAlign == 0)
SI.setAlignment(EffectiveStoreAlign);
}
if (KnownAlign > EffectiveStoreAlign)
SI.setAlignment(KnownAlign);
else if (StoreAlign == 0)
SI.setAlignment(EffectiveStoreAlign);
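
With the DataLayout always available, the alignment improvement for loads and stores now runs unconditionally; the policy itself is unchanged. Stated in isolation as a hedged sketch (hypothetical helper name, not the LLVM API):

```cpp
// Given the alignment we can prove (knownAlign), the alignment written on the
// instruction (0 means "unspecified"), and the ABI alignment of the type,
// return the alignment the instruction should end up with.
unsigned chooseAlignment(unsigned knownAlign, unsigned explicitAlign,
                         unsigned abiAlign) {
  unsigned effective = explicitAlign != 0 ? explicitAlign : abiAlign;
  if (knownAlign > effective)
    return knownAlign;     // we proved something stronger: record it
  if (explicitAlign == 0)
    return effective;      // pin the implicit ABI alignment down explicitly
  return explicitAlign;    // otherwise leave the instruction untouched
}
```
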
// Replace GEP indices if possible.
if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {


@ -26,7 +26,7 @@ using namespace PatternMatch;
/// where it is known to be non-zero. If this allows us to simplify the
/// computation, do so and return the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
Instruction *CxtI) {
Instruction &CxtI) {
// If V has multiple uses, then we would have to do more analysis to determine
// if this is safe. For example, the use could be in dynamically unreached
// code.
@ -47,8 +47,8 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
// inexact. Similarly for <<.
if (BinaryOperator *I = dyn_cast<BinaryOperator>(V))
if (I->isLogicalShift() &&
isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0,
IC.getAssumptionCache(), CxtI,
isKnownToBeAPowerOfTwo(I->getOperand(0), IC.getDataLayout(), false, 0,
IC.getAssumptionCache(), &CxtI,
IC.getDominatorTree())) {
// We know that this is an exact/nuw shift and that the input is a
// non-zero context as well.
@ -126,7 +126,7 @@ static Constant *getLogBase2Vector(ConstantDataVector *CV) {
/// \brief Return true if we can prove that:
/// (mul LHS, RHS) === (mul nsw LHS, RHS)
bool InstCombiner::WillNotOverflowSignedMul(Value *LHS, Value *RHS,
Instruction *CxtI) {
Instruction &CxtI) {
// Multiplying n * m significant bits yields a result of n + m significant
// bits. If the total number of significant bits does not exceed the
// result bit width (minus 1), there is no overflow.
@ -137,8 +137,8 @@ bool InstCombiner::WillNotOverflowSignedMul(Value *LHS, Value *RHS,
// Note that underestimating the number of sign bits gives a more
// conservative answer.
unsigned SignBits = ComputeNumSignBits(LHS, 0, CxtI) +
ComputeNumSignBits(RHS, 0, CxtI);
unsigned SignBits =
ComputeNumSignBits(LHS, 0, &CxtI) + ComputeNumSignBits(RHS, 0, &CxtI);
// First handle the easy case: if we have enough sign bits there's
// definitely no overflow.
@ -157,8 +157,8 @@ bool InstCombiner::WillNotOverflowSignedMul(Value *LHS, Value *RHS,
// For simplicity we just check if at least one side is not negative.
bool LHSNonNegative, LHSNegative;
bool RHSNonNegative, RHSNegative;
ComputeSignBit(LHS, LHSNonNegative, LHSNegative, /*Depth=*/0, CxtI);
ComputeSignBit(RHS, RHSNonNegative, RHSNegative, /*Depth=*/0, CxtI);
ComputeSignBit(LHS, LHSNonNegative, LHSNegative, /*Depth=*/0, &CxtI);
ComputeSignBit(RHS, RHSNonNegative, RHSNegative, /*Depth=*/0, &CxtI);
if (LHSNonNegative || RHSNonNegative)
return true;
}
@ -375,7 +375,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
}
if (!I.hasNoSignedWrap() && WillNotOverflowSignedMul(Op0, Op1, &I)) {
if (!I.hasNoSignedWrap() && WillNotOverflowSignedMul(Op0, Op1, I)) {
Changed = true;
I.setHasNoSignedWrap(true);
}
@ -780,7 +780,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
// The RHS is known non-zero.
if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, &I)) {
if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
I.setOperand(1, V);
return &I;
}
@ -1155,7 +1155,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
return BO;
}
if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, AC, &I, DT)) {
if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, AC, &I, DT)) {
// X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
// Safe because the only negative value (1 << Y) can take on is
// INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
@ -1338,7 +1338,7 @@ Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
// The RHS is known non-zero.
if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, &I)) {
if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
I.setOperand(1, V);
return &I;
}
@ -1385,7 +1385,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
I.getType());
// X urem Y -> X and Y-1, where Y is a power of 2,
if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, AC, &I, DT)) {
if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, AC, &I, DT)) {
Constant *N1 = Constant::getAllOnesValue(I.getType());
Value *Add = Builder->CreateAdd(Op1, N1);
return BinaryOperator::CreateAnd(Op0, Add);
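
The comment above states the identity this transform relies on: for unsigned x and a power-of-two y, `x urem y` equals `x & (y - 1)`. A quick self-checking example (plain C++, independent of the IR builder calls):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t y = 8;                   // any non-zero power of two works
  for (uint32_t x = 0; x < 1000; ++x)
    assert(x % y == (x & (y - 1)));       // x urem y == x & (y - 1)
  return 0;
}
```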


@ -15,7 +15,6 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;
#define DEBUG_TYPE "instcombine"
@ -891,8 +890,8 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
// it is only used by trunc or trunc(lshr) operations. If so, we split the
// PHI into the various pieces being extracted. This sort of thing is
// introduced when SROA promotes an aggregate to a single large integer type.
if (PN.getType()->isIntegerTy() && DL &&
!DL->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
if (PN.getType()->isIntegerTy() &&
!DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
return Res;

View File

@ -312,9 +312,9 @@ Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
/// SimplifyWithOpReplaced - See if V simplifies when its operand Op is
/// replaced with RepOp.
static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
const DataLayout *TD,
const TargetLibraryInfo *TLI,
DominatorTree *DT, AssumptionCache *AC) {
const DataLayout &DL, DominatorTree *DT,
AssumptionCache *AC) {
// Trivial replacement.
if (V == Op)
return RepOp;
@ -326,18 +326,18 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
// If this is a binary operator, try to simplify it with the replaced op.
if (BinaryOperator *B = dyn_cast<BinaryOperator>(I)) {
if (B->getOperand(0) == Op)
return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), TD, TLI);
return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), DL, TLI);
if (B->getOperand(1) == Op)
return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, TD, TLI);
return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, DL, TLI);
}
// Same for CmpInsts.
if (CmpInst *C = dyn_cast<CmpInst>(I)) {
if (C->getOperand(0) == Op)
return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), TD,
return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), DL,
TLI, DT, AC);
if (C->getOperand(1) == Op)
return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, TD,
return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, DL,
TLI, DT, AC);
}
@ -361,14 +361,14 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
if (ConstOps.size() == I->getNumOperands()) {
if (CmpInst *C = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
ConstOps[1], TD, TLI);
ConstOps[1], DL, TLI);
if (LoadInst *LI = dyn_cast<LoadInst>(I))
if (!LI->isVolatile())
return ConstantFoldLoadFromConstPtr(ConstOps[0], TD);
return ConstantFoldLoadFromConstPtr(ConstOps[0], DL);
return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
ConstOps, TD, TLI);
return ConstantFoldInstOperands(I->getOpcode(), I->getType(), ConstOps,
DL, TLI);
}
}
@ -635,25 +635,25 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
// arms of the select. See if substituting this value into the arm and
// simplifying the result yields the same value as the other arm.
if (Pred == ICmpInst::ICMP_EQ) {
if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI, DT, AC) ==
if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TLI, DL, DT, AC) ==
TrueVal ||
SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI, DT, AC) ==
SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TLI, DL, DT, AC) ==
TrueVal)
return ReplaceInstUsesWith(SI, FalseVal);
if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI, DT, AC) ==
if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TLI, DL, DT, AC) ==
FalseVal ||
SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI, DT, AC) ==
SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TLI, DL, DT, AC) ==
FalseVal)
return ReplaceInstUsesWith(SI, FalseVal);
} else if (Pred == ICmpInst::ICMP_NE) {
if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI, DT, AC) ==
if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TLI, DL, DT, AC) ==
FalseVal ||
SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI, DT, AC) ==
SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TLI, DL, DT, AC) ==
FalseVal)
return ReplaceInstUsesWith(SI, TrueVal);
if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI, DT, AC) ==
if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TLI, DL, DT, AC) ==
TrueVal ||
SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI, DT, AC) ==
SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TLI, DL, DT, AC) ==
TrueVal)
return ReplaceInstUsesWith(SI, TrueVal);
}
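
(Illustrative sketch, not from the patch itself: the same convention applied to a small wrapper, so the new parameter shape is easy to see at a glance. SimplifyBinOp and its argument order are taken from the hunks above; the wrapper name is invented.)

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical wrapper showing the post-patch shape of a simplification
// helper: the DataLayout is a mandatory reference, while TLI stays a
// pointer because it can still legitimately be absent.
static Value *trySimplifyBinOp(BinaryOperator *B, const DataLayout &DL,
                               const TargetLibraryInfo *TLI) {
  return SimplifyBinOp(B->getOpcode(), B->getOperand(0), B->getOperand(1),
                       DL, TLI);
}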

View File

@ -187,7 +187,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
/// GetShiftedValue - When CanEvaluateShifted returned true for an expression,
/// this value inserts the new computation that produces the shifted value.
static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
InstCombiner &IC) {
InstCombiner &IC, const DataLayout &DL) {
// We can always evaluate constants shifted.
if (Constant *C = dyn_cast<Constant>(V)) {
if (isLeftShift)
@ -196,8 +196,7 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
V = IC.Builder->CreateLShr(C, NumBits);
// If we got a constantexpr back, try to simplify it with TD info.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
V = ConstantFoldConstantExpression(CE, IC.getDataLayout(),
IC.getTargetLibraryInfo());
V = ConstantFoldConstantExpression(CE, DL, IC.getTargetLibraryInfo());
return V;
}
@ -210,8 +209,10 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
case Instruction::Or:
case Instruction::Xor:
// Bitwise operators can all be arbitrarily evaluated shifted.
I->setOperand(0, GetShiftedValue(I->getOperand(0), NumBits,isLeftShift,IC));
I->setOperand(1, GetShiftedValue(I->getOperand(1), NumBits,isLeftShift,IC));
I->setOperand(
0, GetShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
I->setOperand(
1, GetShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
return I;
case Instruction::Shl: {
@ -297,8 +298,10 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
}
case Instruction::Select:
I->setOperand(1, GetShiftedValue(I->getOperand(1), NumBits,isLeftShift,IC));
I->setOperand(2, GetShiftedValue(I->getOperand(2), NumBits,isLeftShift,IC));
I->setOperand(
1, GetShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
I->setOperand(
2, GetShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
return I;
case Instruction::PHI: {
// We can change a phi if we can change all operands. Note that we never
@ -306,8 +309,8 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
// instructions with a single use.
PHINode *PN = cast<PHINode>(I);
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
PN->setIncomingValue(i, GetShiftedValue(PN->getIncomingValue(i),
NumBits, isLeftShift, IC));
PN->setIncomingValue(i, GetShiftedValue(PN->getIncomingValue(i), NumBits,
isLeftShift, IC, DL));
return PN;
}
}
@ -337,8 +340,8 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
DEBUG(dbgs() << "ICE: GetShiftedValue propagating shift through expression"
" to eliminate shift:\n IN: " << *Op0 << "\n SH: " << I <<"\n");
return ReplaceInstUsesWith(I,
GetShiftedValue(Op0, COp1->getZExtValue(), isLeftShift, *this));
return ReplaceInstUsesWith(
I, GetShiftedValue(Op0, COp1->getZExtValue(), isLeftShift, *this, DL));
}
// See if we can simplify any instructions used by the instruction whose sole
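
(Illustrative sketch, not from the patch itself: the constant-folding step in GetShiftedValue, written as a standalone guarded helper. ConstantFoldConstantExpression is the declaration shown earlier in this patch; the helper name is ours.)

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

// Hypothetical helper: fold V if it is a constant expression, keeping V
// unchanged when folding does not produce a constant.
static Value *foldIfConstantExpr(Value *V, const DataLayout &DL,
                                 const TargetLibraryInfo *TLI) {
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
      return Folded;
  return V;
}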

View File

@ -13,7 +13,6 @@
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
@ -70,8 +69,8 @@ bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
KnownZero, KnownOne, 0, &Inst);
Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, KnownZero, KnownOne,
0, &Inst);
if (!V) return false;
if (V == &Inst) return true;
ReplaceInstUsesWith(Inst, V);
@ -84,9 +83,9 @@ bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
APInt &KnownZero, APInt &KnownOne,
unsigned Depth) {
Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
KnownZero, KnownOne, Depth,
dyn_cast<Instruction>(U.getUser()));
Value *NewVal =
SimplifyDemandedUseBits(U.get(), DemandedMask, KnownZero, KnownOne, Depth,
dyn_cast<Instruction>(U.getUser()));
if (!NewVal) return false;
U = NewVal;
return true;
@ -122,15 +121,12 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
assert(Depth <= 6 && "Limit Search Depth");
uint32_t BitWidth = DemandedMask.getBitWidth();
Type *VTy = V->getType();
assert((DL || !VTy->isPointerTy()) &&
"SimplifyDemandedBits needs to know bit widths!");
assert((!DL || DL->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
(!VTy->isIntOrIntVectorTy() ||
VTy->getScalarSizeInBits() == BitWidth) &&
KnownZero.getBitWidth() == BitWidth &&
KnownOne.getBitWidth() == BitWidth &&
"Value *V, DemandedMask, KnownZero and KnownOne "
"must have same BitWidth");
assert(
(!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
KnownZero.getBitWidth() == BitWidth &&
KnownOne.getBitWidth() == BitWidth &&
"Value *V, DemandedMask, KnownZero and KnownOne "
"must have same BitWidth");
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
// We know all of the bits for a constant!
KnownOne = CI->getValue() & DemandedMask;
@ -174,9 +170,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// this instruction has a simpler value in that context.
if (I->getOpcode() == Instruction::And) {
// If either the LHS or the RHS are Zero, the result is zero.
computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1,
computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
CxtI);
computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1,
computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
CxtI);
// If all of the demanded bits are known 1 on one side, return the other.
@ -198,9 +194,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// only bits from X or Y are demanded.
// If either the LHS or the RHS are One, the result is One.
computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1,
computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
CxtI);
computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1,
computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
CxtI);
// If all of the demanded bits are known zero on one side, return the
@ -225,9 +221,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// We can simplify (X^Y) -> X or Y in the user's context if we know that
// only bits from X or Y are demanded.
computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth+1,
computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
CxtI);
computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1,
computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
CxtI);
// If all of the demanded bits are known zero on one side, return the
@ -256,10 +252,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
break;
case Instruction::And:
// If either the LHS or the RHS are Zero, the result is zero.
if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
RHSKnownZero, RHSKnownOne, Depth+1) ||
if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask, RHSKnownZero,
RHSKnownOne, Depth + 1) ||
SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownZero,
LHSKnownZero, LHSKnownOne, Depth+1))
LHSKnownZero, LHSKnownOne, Depth + 1))
return I;
assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
@ -294,10 +290,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
break;
case Instruction::Or:
// If either the LHS or the RHS are One, the result is One.
if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
RHSKnownZero, RHSKnownOne, Depth+1) ||
if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask, RHSKnownZero,
RHSKnownOne, Depth + 1) ||
SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownOne,
LHSKnownZero, LHSKnownOne, Depth+1))
LHSKnownZero, LHSKnownOne, Depth + 1))
return I;
assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
@ -336,10 +332,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
KnownOne = RHSKnownOne | LHSKnownOne;
break;
case Instruction::Xor: {
if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
RHSKnownZero, RHSKnownOne, Depth+1) ||
SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
LHSKnownZero, LHSKnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask, RHSKnownZero,
RHSKnownOne, Depth + 1) ||
SimplifyDemandedBits(I->getOperandUse(0), DemandedMask, LHSKnownZero,
LHSKnownOne, Depth + 1))
return I;
assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
@ -423,10 +419,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
break;
}
case Instruction::Select:
if (SimplifyDemandedBits(I->getOperandUse(2), DemandedMask,
RHSKnownZero, RHSKnownOne, Depth+1) ||
SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
LHSKnownZero, LHSKnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(2), DemandedMask, RHSKnownZero,
RHSKnownOne, Depth + 1) ||
SimplifyDemandedBits(I->getOperandUse(1), DemandedMask, LHSKnownZero,
LHSKnownOne, Depth + 1))
return I;
assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
@ -445,8 +441,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
DemandedMask = DemandedMask.zext(truncBf);
KnownZero = KnownZero.zext(truncBf);
KnownOne = KnownOne.zext(truncBf);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
KnownZero, KnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask, KnownZero,
KnownOne, Depth + 1))
return I;
DemandedMask = DemandedMask.trunc(BitWidth);
KnownZero = KnownZero.trunc(BitWidth);
@ -471,8 +467,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// Don't touch a vector-to-scalar bitcast.
return nullptr;
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
KnownZero, KnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask, KnownZero,
KnownOne, Depth + 1))
return I;
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
break;
@ -483,8 +479,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
DemandedMask = DemandedMask.trunc(SrcBitWidth);
KnownZero = KnownZero.trunc(SrcBitWidth);
KnownOne = KnownOne.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
KnownZero, KnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask, KnownZero,
KnownOne, Depth + 1))
return I;
DemandedMask = DemandedMask.zext(BitWidth);
KnownZero = KnownZero.zext(BitWidth);
@ -510,8 +506,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
InputDemandedBits = InputDemandedBits.trunc(SrcBitWidth);
KnownZero = KnownZero.trunc(SrcBitWidth);
KnownOne = KnownOne.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits,
KnownZero, KnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits, KnownZero,
KnownOne, Depth + 1))
return I;
InputDemandedBits = InputDemandedBits.zext(BitWidth);
KnownZero = KnownZero.zext(BitWidth);
@ -552,7 +548,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// Find information about known zero/one bits in the input.
if (SimplifyDemandedBits(I->getOperandUse(0), InDemandedBits,
LHSKnownZero, LHSKnownOne, Depth+1))
LHSKnownZero, LHSKnownOne, Depth + 1))
return I;
// If the RHS of the add has bits set that can't affect the input, reduce
@ -602,9 +598,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// significant bit and all those below it.
APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
LHSKnownZero, LHSKnownOne, Depth+1) ||
LHSKnownZero, LHSKnownOne, Depth + 1) ||
SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
LHSKnownZero, LHSKnownOne, Depth+1))
LHSKnownZero, LHSKnownOne, Depth + 1))
return I;
}
}
@ -619,9 +615,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
uint32_t NLZ = DemandedMask.countLeadingZeros();
APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
LHSKnownZero, LHSKnownOne, Depth+1) ||
LHSKnownZero, LHSKnownOne, Depth + 1) ||
SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
LHSKnownZero, LHSKnownOne, Depth+1))
LHSKnownZero, LHSKnownOne, Depth + 1))
return I;
}
@ -662,8 +658,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
else if (IOp->hasNoUnsignedWrap())
DemandedMaskIn |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
KnownZero, KnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn, KnownZero,
KnownOne, Depth + 1))
return I;
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
KnownZero <<= ShiftAmt;
@ -686,8 +682,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (cast<LShrOperator>(I)->isExact())
DemandedMaskIn |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
KnownZero, KnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn, KnownZero,
KnownOne, Depth + 1))
return I;
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
@ -731,8 +727,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (cast<AShrOperator>(I)->isExact())
DemandedMaskIn |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
KnownZero, KnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn, KnownZero,
KnownOne, Depth + 1))
return I;
assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
// Compute the new bits that are at the top now.
@ -772,8 +768,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
APInt LowBits = RA - 1;
APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
if (SimplifyDemandedBits(I->getOperandUse(0), Mask2,
LHSKnownZero, LHSKnownOne, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(0), Mask2, LHSKnownZero,
LHSKnownOne, Depth + 1))
return I;
// The low bits of LHS are unchanged by the srem.
@ -798,7 +794,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// remainder is zero.
if (DemandedMask.isNegative() && KnownZero.isNonNegative()) {
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth+1,
computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
CxtI);
// If it's known zero, our sign bit is also zero.
if (LHSKnownZero.isNegative())
@ -808,10 +804,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
case Instruction::URem: {
APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
APInt AllOnes = APInt::getAllOnesValue(BitWidth);
if (SimplifyDemandedBits(I->getOperandUse(0), AllOnes,
KnownZero2, KnownOne2, Depth+1) ||
SimplifyDemandedBits(I->getOperandUse(1), AllOnes,
KnownZero2, KnownOne2, Depth+1))
if (SimplifyDemandedBits(I->getOperandUse(0), AllOnes, KnownZero2,
KnownOne2, Depth + 1) ||
SimplifyDemandedBits(I->getOperandUse(1), AllOnes, KnownZero2,
KnownOne2, Depth + 1))
return I;
unsigned Leaders = KnownZero2.countLeadingOnes();
@ -1051,7 +1047,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// Note that we can't propagate undef elt info, because we don't know
// which elt is getting updated.
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
UndefElts2, Depth+1);
UndefElts2, Depth + 1);
if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
break;
}
@ -1069,7 +1065,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
APInt DemandedElts2 = DemandedElts;
DemandedElts2.clearBit(IdxNo);
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
UndefElts, Depth+1);
UndefElts, Depth + 1);
if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
// The inserted element is defined.
@ -1097,12 +1093,12 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
APInt UndefElts4(LHSVWidth, 0);
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
UndefElts4, Depth+1);
UndefElts4, Depth + 1);
if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
APInt UndefElts3(LHSVWidth, 0);
TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
UndefElts3, Depth+1);
UndefElts3, Depth + 1);
if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
bool NewUndefElts = false;
@ -1152,12 +1148,12 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
}
}
TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded,
UndefElts, Depth+1);
TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded, UndefElts,
Depth + 1);
if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
TmpV = SimplifyDemandedVectorElts(I->getOperand(2), RightDemanded,
UndefElts2, Depth+1);
UndefElts2, Depth + 1);
if (TmpV) { I->setOperand(2, TmpV); MadeChange = true; }
// Output elements are undefined if both are undefined.
@ -1204,7 +1200,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// div/rem demand all inputs, because they don't want divide by zero.
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
UndefElts2, Depth+1);
UndefElts2, Depth + 1);
if (TmpV) {
I->setOperand(0, TmpV);
MadeChange = true;
@ -1238,11 +1234,11 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
case Instruction::Sub:
case Instruction::Mul:
// div/rem demand all inputs, because they don't want divide by zero.
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
UndefElts, Depth+1);
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
Depth + 1);
if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
UndefElts2, Depth+1);
UndefElts2, Depth + 1);
if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
// Output elements are undefined if both are undefined. Consider things
@ -1251,8 +1247,8 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
break;
case Instruction::FPTrunc:
case Instruction::FPExt:
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
UndefElts, Depth+1);
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
Depth + 1);
if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
break;
@ -1273,10 +1269,10 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
case Intrinsic::x86_sse2_min_sd:
case Intrinsic::x86_sse2_max_sd:
TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
UndefElts, Depth+1);
UndefElts, Depth + 1);
if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
UndefElts2, Depth+1);
UndefElts2, Depth + 1);
if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
// If only the low elt is demanded and this is a scalarizable intrinsic,

View File

@ -202,8 +202,8 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
APInt UndefElts(VectorWidth, 0);
APInt DemandedMask(VectorWidth, 0);
DemandedMask.setBit(IndexVal);
if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
DemandedMask, UndefElts)) {
if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0), DemandedMask,
UndefElts)) {
EI.setOperand(0, V);
return &EI;
}

View File

@ -75,7 +75,7 @@ STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc , "Number of reassociations");
Value *InstCombiner::EmitGEPOffset(User *GEP) {
return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
return llvm::EmitGEPOffset(Builder, DL, GEP);
}
/// ShouldChangeType - Return true if it is desirable to convert a computation
@ -84,13 +84,10 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
assert(From->isIntegerTy() && To->isIntegerTy());
// If we don't have DL, we don't know if the source/dest are legal.
if (!DL) return false;
unsigned FromWidth = From->getPrimitiveSizeInBits();
unsigned ToWidth = To->getPrimitiveSizeInBits();
bool FromLegal = DL->isLegalInteger(FromWidth);
bool ToLegal = DL->isLegalInteger(ToWidth);
bool FromLegal = DL.isLegalInteger(FromWidth);
bool ToLegal = DL.isLegalInteger(ToWidth);
// If this is a legal integer from type, and the result would be an illegal
// type, don't do the transformation.
@ -445,7 +442,7 @@ getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode,
/// This tries to simplify binary operations by factorizing out common terms
/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(InstCombiner::BuilderTy *Builder,
const DataLayout *DL, BinaryOperator &I,
const DataLayout &DL, BinaryOperator &I,
Instruction::BinaryOps InnerOpcode, Value *A,
Value *B, Value *C, Value *D) {
@ -872,12 +869,9 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
/// will land us at the specified offset. If so, fill them into NewIndices and
/// return the resultant element type, otherwise return null.
Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
SmallVectorImpl<Value*> &NewIndices) {
SmallVectorImpl<Value *> &NewIndices) {
assert(PtrTy->isPtrOrPtrVectorTy());
if (!DL)
return nullptr;
Type *Ty = PtrTy->getPointerElementType();
if (!Ty->isSized())
return nullptr;
@ -885,9 +879,9 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
Type *IntPtrTy = DL->getIntPtrType(PtrTy);
Type *IntPtrTy = DL.getIntPtrType(PtrTy);
int64_t FirstIdx = 0;
if (int64_t TySize = DL->getTypeAllocSize(Ty)) {
if (int64_t TySize = DL.getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
Offset -= FirstIdx*TySize;
@ -905,11 +899,11 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
// Index into the types. If we fail, set OrigBase to null.
while (Offset) {
// Indexing into tail padding between struct/array elements.
if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))
if (uint64_t(Offset * 8) >= DL.getTypeSizeInBits(Ty))
return nullptr;
if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = DL->getStructLayout(STy);
const StructLayout *SL = DL.getStructLayout(STy);
assert(Offset < (int64_t)SL->getSizeInBytes() &&
"Offset must stay within the indexed type");
@ -920,7 +914,7 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
Offset -= SL->getElementOffset(Elt);
Ty = STy->getElementType(Elt);
} else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType());
uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType());
assert(EltSize && "Cannot index into a zero-sized array");
NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
Offset %= EltSize;
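
(Illustrative sketch, not from the patch itself: one step of the offset walk above in isolation. getStructLayout, getElementContainingOffset, getElementOffset and getElementType are the real calls; the helper name and the i32 index type are our choices.)

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// Hypothetical helper: descend one level into a struct by byte offset,
// recording the GEP index and returning the element type that was entered.
static Type *stepIntoStruct(StructType *STy, const DataLayout &DL,
                            uint64_t &Offset,
                            SmallVectorImpl<Value *> &NewIndices) {
  const StructLayout *SL = DL.getStructLayout(STy);
  unsigned Elt = SL->getElementContainingOffset(Offset);
  NewIndices.push_back(
      ConstantInt::get(Type::getInt32Ty(STy->getContext()), Elt));
  Offset -= SL->getElementOffset(Elt);
  return STy->getElementType(Elt);
}
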
@ -1214,7 +1208,8 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
// It may not be safe to reorder shuffles and things like div, urem, etc.
// because we may trap when executing those ops on unknown vector elements.
// See PR20059.
if (!isSafeToSpeculativelyExecute(&Inst, DL)) return nullptr;
if (!isSafeToSpeculativelyExecute(&Inst))
return nullptr;
unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
@ -1300,37 +1295,37 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Eliminate unneeded casts for indices, and replace indices which displace
// by multiples of a zero size type with zero.
if (DL) {
bool MadeChange = false;
Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType());
bool MadeChange = false;
Type *IntPtrTy = DL.getIntPtrType(GEP.getPointerOperandType());
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
I != E; ++I, ++GTI) {
// Skip indices into struct types.
SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
if (!SeqTy) continue;
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
++I, ++GTI) {
// Skip indices into struct types.
SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
if (!SeqTy)
continue;
// If the element type has zero size then any index over it is equivalent
// to an index of zero, so replace it with zero if it is not zero already.
if (SeqTy->getElementType()->isSized() &&
DL->getTypeAllocSize(SeqTy->getElementType()) == 0)
if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
*I = Constant::getNullValue(IntPtrTy);
MadeChange = true;
}
Type *IndexTy = (*I)->getType();
if (IndexTy != IntPtrTy) {
// If we are using a wider index than needed for this platform, shrink
// it to what we need. If narrower, sign-extend it to what we need.
// This explicit cast can make subsequent optimizations more obvious.
*I = Builder->CreateIntCast(*I, IntPtrTy, true);
// If the element type has zero size then any index over it is equivalent
// to an index of zero, so replace it with zero if it is not zero already.
if (SeqTy->getElementType()->isSized() &&
DL.getTypeAllocSize(SeqTy->getElementType()) == 0)
if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
*I = Constant::getNullValue(IntPtrTy);
MadeChange = true;
}
Type *IndexTy = (*I)->getType();
if (IndexTy != IntPtrTy) {
// If we are using a wider index than needed for this platform, shrink
// it to what we need. If narrower, sign-extend it to what we need.
// This explicit cast can make subsequent optimizations more obvious.
*I = Builder->CreateIntCast(*I, IntPtrTy, true);
MadeChange = true;
}
if (MadeChange) return &GEP;
}
if (MadeChange)
return &GEP;
// Check to see if the inputs to the PHI node are getelementptr instructions.
if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
@ -1487,13 +1482,13 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
}
if (DL && GEP.getNumIndices() == 1) {
if (GEP.getNumIndices() == 1) {
unsigned AS = GEP.getPointerAddressSpace();
if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
DL->getPointerSizeInBits(AS)) {
DL.getPointerSizeInBits(AS)) {
Type *PtrTy = GEP.getPointerOperandType();
Type *Ty = PtrTy->getPointerElementType();
uint64_t TyAllocSize = DL->getTypeAllocSize(Ty);
uint64_t TyAllocSize = DL.getTypeAllocSize(Ty);
bool Matched = false;
uint64_t C;
@ -1612,10 +1607,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
Type *SrcElTy = StrippedPtrTy->getElementType();
Type *ResElTy = PtrOp->getType()->getPointerElementType();
if (DL && SrcElTy->isArrayTy() &&
DL->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
DL->getTypeAllocSize(ResElTy)) {
Type *IdxType = DL->getIntPtrType(GEP.getType());
if (SrcElTy->isArrayTy() &&
DL.getTypeAllocSize(SrcElTy->getArrayElementType()) ==
DL.getTypeAllocSize(ResElTy)) {
Type *IdxType = DL.getIntPtrType(GEP.getType());
Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
Value *NewGEP = GEP.isInBounds() ?
Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
@ -1630,11 +1625,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// %V = mul i64 %N, 4
// %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
// into: %t1 = getelementptr i32* %arr, i32 %N; bitcast
if (DL && ResElTy->isSized() && SrcElTy->isSized()) {
if (ResElTy->isSized() && SrcElTy->isSized()) {
// Check that changing the type amounts to dividing the index by a scale
// factor.
uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy);
uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
uint64_t SrcSize = DL.getTypeAllocSize(SrcElTy);
if (ResSize && SrcSize % ResSize == 0) {
Value *Idx = GEP.getOperand(1);
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
@ -1642,7 +1637,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
@ -1665,13 +1660,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
// (where tmp = 8*tmp2) into:
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
if (DL && ResElTy->isSized() && SrcElTy->isSized() &&
SrcElTy->isArrayTy()) {
if (ResElTy->isSized() && SrcElTy->isSized() && SrcElTy->isArrayTy()) {
// Check that changing to the array element type amounts to dividing the
// index by a scale factor.
uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
uint64_t ArrayEltSize
= DL->getTypeAllocSize(SrcElTy->getArrayElementType());
uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
uint64_t ArrayEltSize =
DL.getTypeAllocSize(SrcElTy->getArrayElementType());
if (ResSize && ArrayEltSize % ResSize == 0) {
Value *Idx = GEP.getOperand(1);
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
@ -1679,7 +1673,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
@ -1688,9 +1682,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the multiplication NewIdx * Scale may overflow then the new
// GEP may not be "inbounds".
Value *Off[2] = {
Constant::getNullValue(DL->getIntPtrType(GEP.getType())),
NewIdx
};
Constant::getNullValue(DL.getIntPtrType(GEP.getType())),
NewIdx};
Value *NewGEP = GEP.isInBounds() && NSW ?
Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) :
@ -1704,9 +1697,6 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
}
if (!DL)
return nullptr;
// addrspacecast between types is canonicalized as a bitcast, then an
// addrspacecast. To take advantage of the below bitcast + struct GEP, look
// through the addrspacecast.
@ -1727,10 +1717,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
Value *Operand = BCI->getOperand(0);
PointerType *OpType = cast<PointerType>(Operand->getType());
unsigned OffsetBits = DL->getPointerTypeSizeInBits(GEP.getType());
unsigned OffsetBits = DL.getPointerTypeSizeInBits(GEP.getType());
APInt Offset(OffsetBits, 0);
if (!isa<BitCastInst>(Operand) &&
GEP.accumulateConstantOffset(*DL, Offset)) {
GEP.accumulateConstantOffset(DL, Offset)) {
// If this GEP instruction doesn't move the pointer, just replace the GEP
// with a bitcast of the real input to the dest type.
@ -2051,7 +2041,7 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
Value *Cond = SI.getCondition();
unsigned BitWidth = cast<IntegerType>(Cond->getType())->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
computeKnownBits(Cond, KnownZero, KnownOne);
computeKnownBits(Cond, KnownZero, KnownOne, 0, &SI);
unsigned LeadingKnownZeros = KnownZero.countLeadingOnes();
unsigned LeadingKnownOnes = KnownOne.countLeadingOnes();
@ -2070,8 +2060,7 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
// x86 generates redundant zero-extension instructions if the operand is
// truncated to i8 or i16.
bool TruncCond = false;
if (DL && BitWidth > NewWidth &&
NewWidth >= DL->getLargestLegalIntTypeSize()) {
if (BitWidth > NewWidth && NewWidth >= DL.getLargestLegalIntTypeSize()) {
TruncCond = true;
IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
Builder->SetInsertPoint(&SI);
@ -2632,7 +2621,7 @@ bool InstCombiner::run() {
}
// Instruction isn't dead, see if we can constant propagate it.
if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
if (!I->use_empty() && isa<Constant>(I->getOperand(0))) {
if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
@ -2643,6 +2632,7 @@ bool InstCombiner::run() {
MadeIRChange = true;
continue;
}
}
// See if we can trivially sink this instruction to a successor basic block.
if (I->hasOneUse()) {
@ -2756,10 +2746,9 @@ bool InstCombiner::run() {
/// many instructions are dead or constant). Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
static bool AddReachableCodeToWorklist(BasicBlock *BB,
SmallPtrSetImpl<BasicBlock*> &Visited,
static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
SmallPtrSetImpl<BasicBlock *> &Visited,
InstCombineWorklist &ICWorklist,
const DataLayout *DL,
const TargetLibraryInfo *TLI) {
bool MadeIRChange = false;
SmallVector<BasicBlock*, 256> Worklist;
@ -2797,23 +2786,22 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
continue;
}
if (DL) {
// See if we can constant fold its operands.
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
i != e; ++i) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
if (CE == nullptr) continue;
// See if we can constant fold its operands.
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); i != e;
++i) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
if (CE == nullptr)
continue;
Constant*& FoldRes = FoldedConstants[CE];
if (!FoldRes)
FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
if (!FoldRes)
FoldRes = CE;
Constant *&FoldRes = FoldedConstants[CE];
if (!FoldRes)
FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
if (!FoldRes)
FoldRes = CE;
if (FoldRes != CE) {
*i = FoldRes;
MadeIRChange = true;
}
if (FoldRes != CE) {
*i = FoldRes;
MadeIRChange = true;
}
}
@ -2867,7 +2855,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
///
/// This also does basic constant propagation and other forward fixing to make
/// the combiner itself run much faster.
static bool prepareICWorklistFromFunction(Function &F, const DataLayout *DL,
static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
TargetLibraryInfo *TLI,
InstCombineWorklist &ICWorklist) {
bool MadeIRChange = false;
@ -2877,7 +2865,7 @@ static bool prepareICWorklistFromFunction(Function &F, const DataLayout *DL,
// track of which blocks we visit.
SmallPtrSet<BasicBlock *, 64> Visited;
MadeIRChange |=
AddReachableCodeToWorklist(F.begin(), Visited, ICWorklist, DL, TLI);
AddReachableCodeToWorklist(F.begin(), DL, Visited, ICWorklist, TLI);
// Do a quick scan over the function. If we find any blocks that are
// unreachable, remove any instructions inside of them. This prevents
@ -2916,12 +2904,12 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
DominatorTree &DT, LoopInfo *LI = nullptr) {
// Minimizing size?
bool MinimizeSize = F.hasFnAttribute(Attribute::MinSize);
const DataLayout &DL = F.getParent()->getDataLayout();
auto &DL = F.getParent()->getDataLayout();
/// Builder - This is an IRBuilder that automatically inserts new
/// instructions into the worklist when they are created.
IRBuilder<true, TargetFolder, InstCombineIRInserter> Builder(
F.getContext(), TargetFolder(&DL), InstCombineIRInserter(Worklist, &AC));
F.getContext(), TargetFolder(DL), InstCombineIRInserter(Worklist, &AC));
// Lower dbg.declare intrinsics otherwise their value may be clobbered
// by instcombiner.
@ -2935,10 +2923,10 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
<< F.getName() << "\n");
bool Changed = false;
if (prepareICWorklistFromFunction(F, &DL, &TLI, Worklist))
if (prepareICWorklistFromFunction(F, DL, &TLI, Worklist))
Changed = true;
InstCombiner IC(Worklist, &Builder, MinimizeSize, &AC, &TLI, &DT, &DL, LI);
InstCombiner IC(Worklist, &Builder, MinimizeSize, &AC, &TLI, &DT, DL, LI);
if (IC.run())
Changed = true;

View File

@ -396,7 +396,8 @@ struct AddressSanitizer : public FunctionPass {
}
uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
Type *Ty = AI->getAllocatedType();
uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
uint64_t SizeInBytes =
AI->getModule()->getDataLayout().getTypeAllocSize(Ty);
return SizeInBytes;
}
/// Check if we want (and can) handle this alloca.
@ -407,7 +408,7 @@ struct AddressSanitizer : public FunctionPass {
uint64_t *TypeSize,
unsigned *Alignment) const;
void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
bool UseCalls);
bool UseCalls, const DataLayout &DL);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
Value *Addr, uint32_t TypeSize, bool IsWrite,
@ -435,7 +436,6 @@ struct AddressSanitizer : public FunctionPass {
uint64_t TypeSize) const;
LLVMContext *C;
const DataLayout *DL;
Triple TargetTriple;
int LongSize;
Type *IntptrTy;
@ -478,7 +478,6 @@ class AddressSanitizerModule : public ModulePass {
GlobalsMetadata GlobalsMD;
Type *IntptrTy;
LLVMContext *C;
const DataLayout *DL;
Triple TargetTriple;
ShadowMapping Mapping;
Function *AsanPoisonGlobals;
@ -605,8 +604,9 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
// Right shift for BigEndian and left shift for LittleEndian.
Value *shiftAllocaMagic(Value *Val, IRBuilder<> &IRB, Value *Shift) {
return ASan.DL->isLittleEndian() ? IRB.CreateShl(Val, Shift)
: IRB.CreateLShr(Val, Shift);
auto &DL = F.getParent()->getDataLayout();
return DL.isLittleEndian() ? IRB.CreateShl(Val, Shift)
: IRB.CreateLShr(Val, Shift);
}
// Compute PartialRzMagic for dynamic alloca call. Since we don't know the
@ -818,29 +818,29 @@ Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
if (I->getMetadata("nosanitize")) return nullptr;
Value *PtrOperand = nullptr;
const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!ClInstrumentReads) return nullptr;
*IsWrite = false;
*TypeSize = DL->getTypeStoreSizeInBits(LI->getType());
*TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
*Alignment = LI->getAlignment();
PtrOperand = LI->getPointerOperand();
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (!ClInstrumentWrites) return nullptr;
*IsWrite = true;
*TypeSize = DL->getTypeStoreSizeInBits(SI->getValueOperand()->getType());
*TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
*Alignment = SI->getAlignment();
PtrOperand = SI->getPointerOperand();
} else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
*TypeSize = DL->getTypeStoreSizeInBits(RMW->getValOperand()->getType());
*TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
*Alignment = 0;
PtrOperand = RMW->getPointerOperand();
} else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
*TypeSize =
DL->getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
*TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
*Alignment = 0;
PtrOperand = XCHG->getPointerOperand();
}
@ -896,7 +896,8 @@ void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
}
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
Instruction *I, bool UseCalls) {
Instruction *I, bool UseCalls,
const DataLayout &DL) {
bool IsWrite = false;
unsigned Alignment = 0;
uint64_t TypeSize = 0;
@ -906,8 +907,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if (ClOpt && ClOptGlobals) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
GlobalVariable *G =
dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, nullptr));
GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
if (G != NULL && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
NumOptimizedAccessesToGlobalVar++;
@ -917,7 +917,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if (ClOpt && ClOptStack) {
// A direct inbounds access to a stack variable is always valid.
if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) &&
if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
NumOptimizedAccessesToStackVar++;
return;
@ -1221,6 +1221,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
GlobalVariable *ModuleName = createPrivateGlobalForString(
M, M.getModuleIdentifier(), /*AllowMerging*/ false);
auto &DL = M.getDataLayout();
for (size_t i = 0; i < n; i++) {
static const uint64_t kMaxGlobalRedzone = 1 << 18;
GlobalVariable *G = GlobalsToChange[i];
@ -1234,7 +1235,7 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
PointerType *PtrTy = cast<PointerType>(G->getType());
Type *Ty = PtrTy->getElementType();
uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
uint64_t MinRZ = MinRedzoneSizeForGlobal();
// MinRZ <= RZ <= kMaxGlobalRedzone
// and trying to make RZ to be ~ 1/4 of SizeInBytes.
@ -1320,9 +1321,8 @@ bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
}
bool AddressSanitizerModule::runOnModule(Module &M) {
DL = &M.getDataLayout();
C = &(M.getContext());
int LongSize = DL->getPointerSizeInBits();
int LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
Mapping = getShadowMapping(TargetTriple, LongSize);
@ -1396,12 +1396,11 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
// virtual
bool AddressSanitizer::doInitialization(Module &M) {
// Initialize the private fields. No one has accessed them before.
DL = &M.getDataLayout();
GlobalsMD.init(M);
C = &(M.getContext());
LongSize = DL->getPointerSizeInBits();
LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
@ -1507,6 +1506,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
const DataLayout &DL = F.getParent()->getDataLayout();
ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(),
/*RoundToAlign=*/true);
@ -1516,7 +1516,8 @@ bool AddressSanitizer::runOnFunction(Function &F) {
if (ClDebugMin < 0 || ClDebugMax < 0 ||
(NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
instrumentMop(ObjSizeVis, Inst, UseCalls);
instrumentMop(ObjSizeVis, Inst, UseCalls,
F.getParent()->getDataLayout());
else
instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
}
@ -1588,7 +1589,7 @@ void FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
uint64_t Val = 0;
for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
if (ASan.DL->isLittleEndian())
if (F.getParent()->getDataLayout().isLittleEndian())
Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
else
Val = (Val << 8) | ShadowBytes[i + j];
@ -1932,14 +1933,14 @@ Value *FunctionStackPoisoner::computePartialRzMagic(Value *PartialSize,
Value *Shift = IRB.CreateAnd(PartialSize, IRB.getInt32(~7));
unsigned Val1Int = kAsanAllocaPartialVal1;
unsigned Val2Int = kAsanAllocaPartialVal2;
if (!ASan.DL->isLittleEndian()) {
if (!F.getParent()->getDataLayout().isLittleEndian()) {
Val1Int = sys::getSwappedBytes(Val1Int);
Val2Int = sys::getSwappedBytes(Val2Int);
}
Value *Val1 = shiftAllocaMagic(IRB.getInt32(Val1Int), IRB, Shift);
Value *PartialBits = IRB.CreateAnd(PartialSize, IRB.getInt32(7));
// For BigEndian get 0x000000YZ -> 0xYZ000000.
if (ASan.DL->isBigEndian())
if (F.getParent()->getDataLayout().isBigEndian())
PartialBits = IRB.CreateShl(PartialBits, IRB.getInt32(24));
Value *Val2 = IRB.getInt32(Val2Int);
Value *Cond =
@ -1973,7 +1974,8 @@ void FunctionStackPoisoner::handleDynamicAllocaCall(
// redzones, and OldSize is number of allocated blocks with
// ElementSize size, get allocated memory size in bytes by
// OldSize * ElementSize.
unsigned ElementSize = ASan.DL->getTypeAllocSize(AI->getAllocatedType());
unsigned ElementSize =
F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
Value *OldSize = IRB.CreateMul(AI->getArraySize(),
ConstantInt::get(IntptrTy, ElementSize));
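
(Illustrative sketch, not from the patch itself: the overall idiom the sanitizer changes converge on. Instead of caching a nullable "const DataLayout *DL" member, a pass asks the module for the layout where it needs one. The pass name and its body are hypothetical.)

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
// Hypothetical function pass: no DataLayout member, just a local reference.
struct LayoutUserPass : public FunctionPass {
  static char ID;
  LayoutUserPass() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    const DataLayout &DL = F.getParent()->getDataLayout();
    // Example query: pointer width in address space 0.
    (void)DL.getPointerSizeInBits(0);
    return false;
  }
};
}
char LayoutUserPass::ID = 0;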

View File

@ -53,7 +53,6 @@ namespace {
}
private:
const DataLayout *DL;
const TargetLibraryInfo *TLI;
ObjectSizeOffsetEvaluator *ObjSizeEval;
BuilderTy *Builder;
@ -62,7 +61,7 @@ namespace {
BasicBlock *getTrapBB();
void emitBranchToTrap(Value *Cmp = nullptr);
bool instrument(Value *Ptr, Value *Val);
bool instrument(Value *Ptr, Value *Val, const DataLayout &DL);
};
}
@ -124,8 +123,9 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) {
/// result from the load or the value being stored. It is used to determine the
/// size of memory block that is touched.
/// Returns true if any change was made to the IR, false otherwise.
bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
uint64_t NeededSize = DL->getTypeStoreSize(InstVal->getType());
bool BoundsChecking::instrument(Value *Ptr, Value *InstVal,
const DataLayout &DL) {
uint64_t NeededSize = DL.getTypeStoreSize(InstVal->getType());
DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
<< " bytes\n");
@ -140,7 +140,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
Value *Offset = SizeOffset.second;
ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
Type *IntTy = DL->getIntPtrType(Ptr->getType());
Type *IntTy = DL.getIntPtrType(Ptr->getType());
Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);
// three checks are required to ensure safety:
@ -164,7 +164,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
}
bool BoundsChecking::runOnFunction(Function &F) {
DL = &F.getParent()->getDataLayout();
const DataLayout &DL = F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TrapBB = nullptr;
@ -191,13 +191,16 @@ bool BoundsChecking::runOnFunction(Function &F) {
Builder->SetInsertPoint(Inst);
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
MadeChange |= instrument(LI->getPointerOperand(), LI);
MadeChange |= instrument(LI->getPointerOperand(), LI, DL);
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
MadeChange |= instrument(SI->getPointerOperand(), SI->getValueOperand());
MadeChange |=
instrument(SI->getPointerOperand(), SI->getValueOperand(), DL);
} else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
MadeChange |= instrument(AI->getPointerOperand(),AI->getCompareOperand());
MadeChange |=
instrument(AI->getPointerOperand(), AI->getCompareOperand(), DL);
} else if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst)) {
MadeChange |= instrument(AI->getPointerOperand(), AI->getValOperand());
MadeChange |=
instrument(AI->getPointerOperand(), AI->getValOperand(), DL);
} else {
llvm_unreachable("unknown Instruction type");
}

View File

@ -217,7 +217,6 @@ class DataFlowSanitizer : public ModulePass {
WK_Custom
};
const DataLayout *DL;
Module *Mod;
LLVMContext *Ctx;
IntegerType *ShadowTy;
@ -422,13 +421,13 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
TargetTriple.getArch() == llvm::Triple::mips64el;
DL = &M.getDataLayout();
const DataLayout &DL = M.getDataLayout();
Mod = &M;
Ctx = &M.getContext();
ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
ShadowPtrTy = PointerType::getUnqual(ShadowTy);
IntptrTy = DL->getIntPtrType(*Ctx);
IntptrTy = DL.getIntPtrType(*Ctx);
ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
if (IsX86_64)
@ -1050,7 +1049,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
SmallVector<Value *, 2> Objs;
GetUnderlyingObjects(Addr, Objs, DFS.DL);
GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
bool AllConstants = true;
for (SmallVector<Value *, 2>::iterator i = Objs.begin(), e = Objs.end();
i != e; ++i) {
@ -1151,7 +1150,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
uint64_t Size = DFSF.DFS.DL->getTypeStoreSize(LI.getType());
auto &DL = LI.getModule()->getDataLayout();
uint64_t Size = DL.getTypeStoreSize(LI.getType());
if (Size == 0) {
DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
return;
@ -1161,7 +1161,7 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
if (ClPreserveAlignment) {
Align = LI.getAlignment();
if (Align == 0)
Align = DFSF.DFS.DL->getABITypeAlignment(LI.getType());
Align = DL.getABITypeAlignment(LI.getType());
} else {
Align = 1;
}
@ -1229,8 +1229,8 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
void DFSanVisitor::visitStoreInst(StoreInst &SI) {
uint64_t Size =
DFSF.DFS.DL->getTypeStoreSize(SI.getValueOperand()->getType());
auto &DL = SI.getModule()->getDataLayout();
uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
if (Size == 0)
return;
@ -1238,7 +1238,7 @@ void DFSanVisitor::visitStoreInst(StoreInst &SI) {
if (ClPreserveAlignment) {
Align = SI.getAlignment();
if (Align == 0)
Align = DFSF.DFS.DL->getABITypeAlignment(SI.getValueOperand()->getType());
Align = DL.getABITypeAlignment(SI.getValueOperand()->getType());
} else {
Align = 1;
}
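
(Illustrative sketch, not from the patch itself: the alignment-fallback logic above as a standalone helper. getAlignment, getModule, getDataLayout and getABITypeAlignment are the real calls; the helper name and the decision to ignore ClPreserveAlignment are ours.)

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Hypothetical helper: the alignment to assume for a load, falling back to
// the ABI alignment of the loaded type when the instruction specifies none.
static unsigned effectiveLoadAlignment(LoadInst &LI) {
  if (unsigned Align = LI.getAlignment())
    return Align;
  const DataLayout &DL = LI.getModule()->getDataLayout();
  return DL.getABITypeAlignment(LI.getType());
}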

View File

@ -274,7 +274,6 @@ class MemorySanitizer : public FunctionPass {
MemorySanitizer(int TrackOrigins = 0)
: FunctionPass(ID),
TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
DL(nullptr),
WarningFn(nullptr) {}
const char *getPassName() const override { return "MemorySanitizer"; }
bool runOnFunction(Function &F) override;
@ -287,7 +286,6 @@ class MemorySanitizer : public FunctionPass {
/// \brief Track origins (allocation points) of uninitialized values.
int TrackOrigins;
const DataLayout *DL;
LLVMContext *C;
Type *IntptrTy;
Type *OriginTy;
@ -449,7 +447,7 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
///
/// inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
DL = &M.getDataLayout();
auto &DL = M.getDataLayout();
Triple TargetTriple(M.getTargetTriple());
switch (TargetTriple.getOS()) {
@ -601,7 +599,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy);
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
if (IntptrSize == kOriginSize) return Origin;
assert(IntptrSize == kOriginSize * 2);
Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
@ -611,8 +610,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Fill memory range with the given origin value.
void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
unsigned Size, unsigned Alignment) {
unsigned IntptrAlignment = MS.DL->getABITypeAlignment(MS.IntptrTy);
unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy);
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
assert(IntptrAlignment >= kMinOriginAlignment);
assert(IntptrSize >= kOriginSize);
@ -640,8 +640,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
unsigned Alignment, bool AsCall) {
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
unsigned StoreSize = MS.DL->getTypeStoreSize(Shadow->getType());
unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
if (isa<StructType>(Shadow->getType())) {
paintOrigin(IRB, updateOrigin(Origin, IRB),
getOriginPtr(Addr, IRB, Alignment), StoreSize,
@ -658,7 +659,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
unsigned TypeSizeInBits =
MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
if (AsCall && SizeIndex < kNumberOfAccessSizes) {
Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
@ -728,8 +729,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return;
}
unsigned TypeSizeInBits =
MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
const DataLayout &DL = OrigIns->getModule()->getDataLayout();
unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
if (AsCall && SizeIndex < kNumberOfAccessSizes) {
Value *Fn = MS.MaybeWarningFn[SizeIndex];
@ -769,7 +771,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Add MemorySanitizer instrumentation to a function.
bool runOnFunction() {
MS.initializeCallbacks(*F.getParent());
if (!MS.DL) return false;
// In the presence of unreachable blocks, we may see Phi nodes with
// incoming nodes from such blocks. Since InstVisitor skips unreachable
@ -825,8 +826,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// This may return weird-sized types like i1.
if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
return IT;
const DataLayout &DL = F.getParent()->getDataLayout();
if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
return VectorType::get(IntegerType::get(*MS.C, EltSize),
VT->getNumElements());
}
@ -842,7 +844,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
return Res;
}
uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
return IntegerType::get(*MS.C, TypeSize);
}
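
(Illustrative sketch, not from the patch itself: the scalar case of getShadowTy above, written out with the layout taken by reference. getTypeSizeInBits and IntegerType::get are the real calls; the helper name is ours.)

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Hypothetical helper: an integer type with exactly as many bits as OrigTy,
// suitable as a shadow type for a scalar value.
static IntegerType *shadowIntTypeFor(Type *OrigTy, const DataLayout &DL,
                                     LLVMContext &C) {
  unsigned Bits = DL.getTypeSizeInBits(OrigTy);
  return IntegerType::get(C, Bits);
}
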
@ -1035,14 +1037,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Function *F = A->getParent();
IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
unsigned ArgOffset = 0;
const DataLayout &DL = F->getParent()->getDataLayout();
for (auto &FArg : F->args()) {
if (!FArg.getType()->isSized()) {
DEBUG(dbgs() << "Arg is not sized\n");
continue;
}
unsigned Size = FArg.hasByValAttr()
? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
: MS.DL->getTypeAllocSize(FArg.getType());
unsigned Size =
FArg.hasByValAttr()
? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
: DL.getTypeAllocSize(FArg.getType());
if (A == &FArg) {
bool Overflow = ArgOffset + Size > kParamTLSSize;
Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
@ -1053,7 +1057,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned ArgAlign = FArg.getParamAlignment();
if (ArgAlign == 0) {
Type *EltType = A->getType()->getPointerElementType();
ArgAlign = MS.DL->getABITypeAlignment(EltType);
ArgAlign = DL.getABITypeAlignment(EltType);
}
if (Overflow) {
// ParamTLS overflow.
@ -2424,10 +2428,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
" Shadow: " << *ArgShadow << "\n");
bool ArgIsInitialized = false;
const DataLayout &DL = F.getParent()->getDataLayout();
if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
assert(A->getType()->isPointerTy() &&
"ByVal argument is not a pointer!");
Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
if (ArgOffset + Size > kParamTLSSize) break;
unsigned ParamAlignment = CS.getParamAlignment(i + 1);
unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
@ -2435,7 +2440,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
Size, Alignment);
} else {
Size = MS.DL->getTypeAllocSize(A->getType());
Size = DL.getTypeAllocSize(A->getType());
if (ArgOffset + Size > kParamTLSSize) break;
Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
kShadowTLSAlignment);
@ -2528,7 +2533,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
IRBuilder<> IRB(I.getNextNode());
uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
const DataLayout &DL = F.getParent()->getDataLayout();
uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType());
if (PoisonStack && ClPoisonStackWithCall) {
IRB.CreateCall2(MS.MsanPoisonStackFn,
IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
@ -2720,6 +2726,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
unsigned GpOffset = 0;
unsigned FpOffset = AMD64GpEndOffset;
unsigned OverflowOffset = AMD64FpEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
@ -2729,7 +2736,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
// ByVal arguments always go to the overflow area.
assert(A->getType()->isPointerTy());
Type *RealTy = A->getType()->getPointerElementType();
uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
OverflowOffset += RoundUpToAlignment(ArgSize, 8);
IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
@ -2751,7 +2758,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
FpOffset += 16;
break;
case AK_Memory:
uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
OverflowOffset += RoundUpToAlignment(ArgSize, 8);
}
@ -2859,11 +2866,12 @@ struct VarArgMIPS64Helper : public VarArgHelper {
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
const DataLayout &DL = F.getParent()->getDataLayout();
for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
Value *Base;
uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
#if defined(__MIPSEB__) || defined(MIPSEB)
// Adjusting the shadow for argument with size < 8 to match the placement
// of bits in big endian system
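
The MemorySanitizer hunks above all follow the same pattern that the rest of the patch repeats: drop the cached, possibly-null MS.DL pointer and take a DataLayout reference from the enclosing module at the point of use. An illustrative sketch, not part of the commit (the helper name is invented):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include <cstdint>
#include <utility>
using namespace llvm;

// Hypothetical helper mirroring how paintOrigin/storeOrigin now size and
// align a shadow value: the DataLayout is fetched on demand and is never null.
static std::pair<uint64_t, unsigned> shadowSizeAndAlignSketch(Function &F,
                                                              Value *Shadow) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  uint64_t StoreSize = DL.getTypeStoreSize(Shadow->getType());
  unsigned Align = DL.getABITypeAlignment(Shadow->getType());
  return std::make_pair(StoreSize, Align);
}

Because the reference can always be obtained, the early-return guard that runOnFunction used to have for a missing DataLayout simply disappears.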


@ -76,7 +76,7 @@ namespace {
/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
ThreadSanitizer() : FunctionPass(ID), DL(nullptr) {}
ThreadSanitizer() : FunctionPass(ID) {}
const char *getPassName() const override;
bool runOnFunction(Function &F) override;
bool doInitialization(Module &M) override;
@ -84,15 +84,15 @@ struct ThreadSanitizer : public FunctionPass {
private:
void initializeCallbacks(Module &M);
bool instrumentLoadOrStore(Instruction *I);
bool instrumentAtomic(Instruction *I);
bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
bool instrumentAtomic(Instruction *I, const DataLayout &DL);
bool instrumentMemIntrinsic(Instruction *I);
void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
SmallVectorImpl<Instruction*> &All);
void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
SmallVectorImpl<Instruction *> &All,
const DataLayout &DL);
bool addrPointsToConstantData(Value *Addr);
int getMemoryAccessFuncIndex(Value *Addr);
int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
const DataLayout *DL;
Type *IntptrTy;
IntegerType *OrdTy;
// Callbacks to run-time library are computed in doInitialization.
@ -230,7 +230,7 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
}
bool ThreadSanitizer::doInitialization(Module &M) {
DL = &M.getDataLayout();
const DataLayout &DL = M.getDataLayout();
// Always insert a call to __tsan_init into the module's CTORs.
IRBuilder<> IRB(M.getContext());
@ -282,8 +282,8 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
SmallVectorImpl<Instruction*> &Local,
SmallVectorImpl<Instruction*> &All) {
SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
const DataLayout &DL) {
SmallSet<Value*, 8> WriteTargets;
// Iterate from the end.
for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
@ -307,7 +307,7 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
Value *Addr = isa<StoreInst>(*I)
? cast<StoreInst>(I)->getPointerOperand()
: cast<LoadInst>(I)->getPointerOperand();
if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) &&
if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
!PointerMayBeCaptured(Addr, true, true)) {
// The variable is addressable but not captured, so it cannot be
// referenced from a different thread and participate in a data race
@ -335,7 +335,6 @@ static bool isAtomic(Instruction *I) {
}
bool ThreadSanitizer::runOnFunction(Function &F) {
if (!DL) return false;
initializeCallbacks(*F.getParent());
SmallVector<Instruction*, 8> RetVec;
SmallVector<Instruction*, 8> AllLoadsAndStores;
@ -345,6 +344,7 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
bool Res = false;
bool HasCalls = false;
bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
const DataLayout &DL = F.getParent()->getDataLayout();
// Traverse all instructions, collect loads/stores/returns, check for calls.
for (auto &BB : F) {
@ -359,10 +359,11 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
if (isa<MemIntrinsic>(Inst))
MemIntrinCalls.push_back(&Inst);
HasCalls = true;
chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
DL);
}
}
chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
}
// We have collected all loads and stores.
@ -372,14 +373,14 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
// Instrument memory accesses only if we want to report bugs in the function.
if (ClInstrumentMemoryAccesses && SanitizeFunction)
for (auto Inst : AllLoadsAndStores) {
Res |= instrumentLoadOrStore(Inst);
Res |= instrumentLoadOrStore(Inst, DL);
}
// Instrument atomic memory accesses in any case (they can be used to
// implement synchronization).
if (ClInstrumentAtomics)
for (auto Inst : AtomicAccesses) {
Res |= instrumentAtomic(Inst);
Res |= instrumentAtomic(Inst, DL);
}
if (ClInstrumentMemIntrinsics && SanitizeFunction)
@ -403,13 +404,14 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
return Res;
}
bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
const DataLayout &DL) {
IRBuilder<> IRB(I);
bool IsWrite = isa<StoreInst>(*I);
Value *Addr = IsWrite
? cast<StoreInst>(I)->getPointerOperand()
: cast<LoadInst>(I)->getPointerOperand();
int Idx = getMemoryAccessFuncIndex(Addr);
int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
if (IsWrite && isVtableAccess(I)) {
@ -440,7 +442,7 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
? cast<StoreInst>(I)->getAlignment()
: cast<LoadInst>(I)->getAlignment();
Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
const uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
Value *OnAccessFunc = nullptr;
if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
@ -501,11 +503,11 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
IRBuilder<> IRB(I);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Value *Addr = LI->getPointerOperand();
int Idx = getMemoryAccessFuncIndex(Addr);
int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
const size_t ByteSize = 1 << Idx;
@ -519,7 +521,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Addr = SI->getPointerOperand();
int Idx = getMemoryAccessFuncIndex(Addr);
int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
const size_t ByteSize = 1 << Idx;
@ -533,7 +535,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
ReplaceInstWithInst(I, C);
} else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
Value *Addr = RMWI->getPointerOperand();
int Idx = getMemoryAccessFuncIndex(Addr);
int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
@ -550,7 +552,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
ReplaceInstWithInst(I, C);
} else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
Value *Addr = CASI->getPointerOperand();
int Idx = getMemoryAccessFuncIndex(Addr);
int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;
const size_t ByteSize = 1 << Idx;
@ -580,11 +582,12 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
return true;
}
int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
const DataLayout &DL) {
Type *OrigPtrTy = Addr->getType();
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
assert(OrigTy->isSized());
uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
if (TypeSize != 8 && TypeSize != 16 &&
TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
NumAccessesWithBadSize++;
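
For ThreadSanitizer the same idea shows up as an extra parameter: helpers that used the DL member now take const DataLayout &, and the "if (!DL) return false;" guard is gone. An illustrative sketch of the new shape (a standalone helper name invented here, not the pass's own code):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include <cstdint>
using namespace llvm;

// Hypothetical standalone version of getMemoryAccessFuncIndex's size check:
// the DataLayout arrives as a reference, so there is no null case to handle.
static int accessSizeIndexSketch(Type *OrigTy, const DataLayout &DL) {
  uint64_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  switch (TypeSize) {
  case 8:   return 0;
  case 16:  return 1;
  case 32:  return 2;
  case 64:  return 3;
  case 128: return 4;
  default:  return -1; // unsupported access size, skip instrumentation
  }
}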


@ -53,10 +53,12 @@ bool llvm::objcarc::CanAlterRefCount(const Instruction *Inst, const Value *Ptr,
if (AliasAnalysis::onlyReadsMemory(MRB))
return false;
if (AliasAnalysis::onlyAccessesArgPointees(MRB)) {
const DataLayout &DL = Inst->getModule()->getDataLayout();
for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
I != E; ++I) {
const Value *Op = *I;
if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op))
if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&
PA.related(Ptr, Op, DL))
return true;
}
return false;
@ -87,6 +89,8 @@ bool llvm::objcarc::CanUse(const Instruction *Inst, const Value *Ptr,
if (Class == ARCInstKind::Call)
return false;
const DataLayout &DL = Inst->getModule()->getDataLayout();
// Consider various instructions which may have pointer arguments which are
// not "uses".
if (const ICmpInst *ICI = dyn_cast<ICmpInst>(Inst)) {
@ -100,24 +104,26 @@ bool llvm::objcarc::CanUse(const Instruction *Inst, const Value *Ptr,
for (ImmutableCallSite::arg_iterator OI = CS.arg_begin(),
OE = CS.arg_end(); OI != OE; ++OI) {
const Value *Op = *OI;
if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op))
if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&
PA.related(Ptr, Op, DL))
return true;
}
return false;
} else if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// Special-case stores, because we don't care about the stored value, just
// the store address.
const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand());
const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand(), DL);
// If we can't tell what the underlying object was, assume there is a
// dependence.
return IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Op, Ptr);
return IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&
PA.related(Op, Ptr, DL);
}
// Check each operand for a match.
for (User::const_op_iterator OI = Inst->op_begin(), OE = Inst->op_end();
OI != OE; ++OI) {
const Value *Op = *OI;
if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op))
if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op, DL))
return true;
}
return false;


@ -72,9 +72,10 @@ static inline bool ModuleHasARC(const Module &M) {
/// \brief This is a wrapper around getUnderlyingObject which also knows how to
/// look through objc_retain and objc_autorelease calls, which we know to return
/// their argument verbatim.
static inline const Value *GetUnderlyingObjCPtr(const Value *V) {
static inline const Value *GetUnderlyingObjCPtr(const Value *V,
const DataLayout &DL) {
for (;;) {
V = GetUnderlyingObject(V);
V = GetUnderlyingObject(V, DL);
if (!IsForwarding(GetBasicARCInstKind(V)))
break;
V = cast<CallInst>(V)->getArgOperand(0);
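
Context for the GetUnderlyingObjCPtr change (illustrative, not from the diff): GetUnderlyingObject itself now requires a DataLayout reference, which ObjCARC callers obtain from the instruction's module. A sketch with an invented wrapper name:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Hypothetical wrapper: strip casts/GEPs from a pointer used by Inst.
static const Value *underlyingObjectSketch(const Instruction *Inst,
                                           const Value *Ptr) {
  const DataLayout &DL = Inst->getModule()->getDataLayout();
  return GetUnderlyingObject(Ptr, DL); // no nullable DataLayout any more
}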


@ -74,8 +74,8 @@ ObjCARCAliasAnalysis::alias(const Location &LocA, const Location &LocB) {
// If that failed, climb to the underlying object, including climbing through
// ObjC-specific no-ops, and try making an imprecise alias query.
const Value *UA = GetUnderlyingObjCPtr(SA);
const Value *UB = GetUnderlyingObjCPtr(SB);
const Value *UA = GetUnderlyingObjCPtr(SA, *DL);
const Value *UB = GetUnderlyingObjCPtr(SB, *DL);
if (UA != SA || UB != SB) {
Result = AliasAnalysis::alias(Location(UA), Location(UB));
// We can't use MustAlias or PartialAlias results here because
@ -104,7 +104,7 @@ ObjCARCAliasAnalysis::pointsToConstantMemory(const Location &Loc,
// If that failed, climb to the underlying object, including climbing through
// ObjC-specific no-ops, and try making an imprecise alias query.
const Value *U = GetUnderlyingObjCPtr(S);
const Value *U = GetUnderlyingObjCPtr(S, *DL);
if (U != S)
return AliasAnalysis::pointsToConstantMemory(Location(U), OrLocal);


@ -83,13 +83,14 @@ static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V,
const DataLayout &DL) {
SmallPtrSet<const Value *, 4> Visited;
SmallVector<const Value *, 4> Worklist;
Worklist.push_back(V);
do {
const Value *P = Worklist.pop_back_val();
P = GetUnderlyingObjCPtr(P);
P = GetUnderlyingObjCPtr(P, DL);
if (isa<AllocaInst>(P))
return true;
@ -1092,7 +1093,8 @@ bool ObjCARCOpt::VisitInstructionBottomUp(
// in the presence of allocas we only unconditionally remove pointers if
// both our retain and our release are KnownSafe.
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
const DataLayout &DL = BB->getModule()->getDataLayout();
if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand(), DL)) {
auto I = MyStates.findPtrBottomUpState(
GetRCIdentityRoot(SI->getValueOperand()));
if (I != MyStates.bottom_up_ptr_end())


@ -32,20 +32,22 @@ using namespace llvm::objcarc;
bool ProvenanceAnalysis::relatedSelect(const SelectInst *A,
const Value *B) {
const DataLayout &DL = A->getModule()->getDataLayout();
// If the values are Selects with the same condition, we can do a more precise
// check: just check for relations between the values on corresponding arms.
if (const SelectInst *SB = dyn_cast<SelectInst>(B))
if (A->getCondition() == SB->getCondition())
return related(A->getTrueValue(), SB->getTrueValue()) ||
related(A->getFalseValue(), SB->getFalseValue());
return related(A->getTrueValue(), SB->getTrueValue(), DL) ||
related(A->getFalseValue(), SB->getFalseValue(), DL);
// Check both arms of the Select node individually.
return related(A->getTrueValue(), B) ||
related(A->getFalseValue(), B);
return related(A->getTrueValue(), B, DL) ||
related(A->getFalseValue(), B, DL);
}
bool ProvenanceAnalysis::relatedPHI(const PHINode *A,
const Value *B) {
const DataLayout &DL = A->getModule()->getDataLayout();
// If the values are PHIs in the same block, we can do a more precise as well
// as efficient check: just check for relations between the values on
// corresponding edges.
@ -53,7 +55,7 @@ bool ProvenanceAnalysis::relatedPHI(const PHINode *A,
if (PNB->getParent() == A->getParent()) {
for (unsigned i = 0, e = A->getNumIncomingValues(); i != e; ++i)
if (related(A->getIncomingValue(i),
PNB->getIncomingValueForBlock(A->getIncomingBlock(i))))
PNB->getIncomingValueForBlock(A->getIncomingBlock(i)), DL))
return true;
return false;
}
@ -62,7 +64,7 @@ bool ProvenanceAnalysis::relatedPHI(const PHINode *A,
SmallPtrSet<const Value *, 4> UniqueSrc;
for (unsigned i = 0, e = A->getNumIncomingValues(); i != e; ++i) {
const Value *PV1 = A->getIncomingValue(i);
if (UniqueSrc.insert(PV1).second && related(PV1, B))
if (UniqueSrc.insert(PV1).second && related(PV1, B, DL))
return true;
}
@ -103,11 +105,11 @@ static bool IsStoredObjCPointer(const Value *P) {
return false;
}
bool ProvenanceAnalysis::relatedCheck(const Value *A,
const Value *B) {
bool ProvenanceAnalysis::relatedCheck(const Value *A, const Value *B,
const DataLayout &DL) {
// Skip past provenance pass-throughs.
A = GetUnderlyingObjCPtr(A);
B = GetUnderlyingObjCPtr(B);
A = GetUnderlyingObjCPtr(A, DL);
B = GetUnderlyingObjCPtr(B, DL);
// Quick check.
if (A == B)
@ -159,8 +161,8 @@ bool ProvenanceAnalysis::relatedCheck(const Value *A,
return true;
}
bool ProvenanceAnalysis::related(const Value *A,
const Value *B) {
bool ProvenanceAnalysis::related(const Value *A, const Value *B,
const DataLayout &DL) {
// Begin by inserting a conservative value into the map. If the insertion
// fails, we have the answer already. If it succeeds, leave it there until we
// compute the real answer to guard against recursive queries.
@ -170,7 +172,7 @@ bool ProvenanceAnalysis::related(const Value *A,
if (!Pair.second)
return Pair.first->second;
bool Result = relatedCheck(A, B);
bool Result = relatedCheck(A, B, DL);
CachedResults[ValuePairTy(A, B)] = Result;
return Result;
}


@ -30,6 +30,7 @@
namespace llvm {
class Value;
class AliasAnalysis;
class DataLayout;
class PHINode;
class SelectInst;
}
@ -53,7 +54,7 @@ class ProvenanceAnalysis {
typedef DenseMap<ValuePairTy, bool> CachedResultsTy;
CachedResultsTy CachedResults;
bool relatedCheck(const Value *A, const Value *B);
bool relatedCheck(const Value *A, const Value *B, const DataLayout &DL);
bool relatedSelect(const SelectInst *A, const Value *B);
bool relatedPHI(const PHINode *A, const Value *B);
@ -67,7 +68,7 @@ public:
AliasAnalysis *getAA() const { return AA; }
bool related(const Value *A, const Value *B);
bool related(const Value *A, const Value *B, const DataLayout &DL);
void clear() {
CachedResults.clear();


@ -14,6 +14,7 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@ -65,6 +66,7 @@ bool PAEval::runOnFunction(Function &F) {
ProvenanceAnalysis PA;
PA.setAA(&getAnalysis<AliasAnalysis>());
const DataLayout &DL = F.getParent()->getDataLayout();
for (Value *V1 : Values) {
StringRef NameV1 = getName(V1);
@ -73,7 +75,7 @@ bool PAEval::runOnFunction(Function &F) {
if (NameV1 >= NameV2)
continue;
errs() << NameV1 << " and " << NameV2;
if (PA.related(V1, V2))
if (PA.related(V1, V2, DL))
errs() << " are related.\n";
else
errs() << " are not related.\n";


@ -31,7 +31,6 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@ -72,7 +71,6 @@ struct AlignmentFromAssumptions : public FunctionPass {
ScalarEvolution *SE;
DominatorTree *DT;
const DataLayout *DL;
bool extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV,
const SCEV *&OffSCEV);
@ -317,7 +315,7 @@ bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) {
continue;
if (Instruction *K = dyn_cast<Instruction>(J))
if (isValidAssumeForContext(ACall, K, DL, DT))
if (isValidAssumeForContext(ACall, K, DT))
WorkList.push_back(K);
}
@ -401,7 +399,7 @@ bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) {
Visited.insert(J);
for (User *UJ : J->users()) {
Instruction *K = cast<Instruction>(UJ);
if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DL, DT))
if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))
WorkList.push_back(K);
}
}
@ -414,7 +412,6 @@ bool AlignmentFromAssumptions::runOnFunction(Function &F) {
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DL = &F.getParent()->getDataLayout();
NewDestAlignments.clear();
NewSrcAlignments.clear();
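
A side effect worth noting for AlignmentFromAssumptions (illustrative, not taken from the patch): isValidAssumeForContext no longer takes a DataLayout at all, which is why the pass can drop its DL member outright. A sketch of the new call, with a hypothetical wrapper name:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical: is the llvm.assume call ACall valid as a context for K?
static bool assumeValidAtSketch(CallInst *ACall, Instruction *K,
                                DominatorTree *DT) {
  // Previously isValidAssumeForContext(ACall, K, DL, DT) with a possibly
  // null DL; the DataLayout argument is gone from the new signature.
  return isValidAssumeForContext(ACall, K, DT);
}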


@ -64,7 +64,6 @@ struct BDCE : public FunctionPass {
APInt &KnownZero2, APInt &KnownOne2);
AssumptionCache *AC;
const DataLayout *DL;
DominatorTree *DT;
};
}
@ -95,20 +94,21 @@ void BDCE::determineLiveOperandBits(const Instruction *UserI,
// however, want to do this twice, so we cache the result in APInts that live
// in the caller. For the two-relevant-operands case, both operand values are
// provided here.
auto ComputeKnownBits = [&](unsigned BitWidth, const Value *V1,
const Value *V2) {
KnownZero = APInt(BitWidth, 0);
KnownOne = APInt(BitWidth, 0);
computeKnownBits(const_cast<Value*>(V1), KnownZero, KnownOne, DL, 0, AC,
UserI, DT);
auto ComputeKnownBits =
[&](unsigned BitWidth, const Value *V1, const Value *V2) {
const DataLayout &DL = I->getModule()->getDataLayout();
KnownZero = APInt(BitWidth, 0);
KnownOne = APInt(BitWidth, 0);
computeKnownBits(const_cast<Value *>(V1), KnownZero, KnownOne, DL, 0,
AC, UserI, DT);
if (V2) {
KnownZero2 = APInt(BitWidth, 0);
KnownOne2 = APInt(BitWidth, 0);
computeKnownBits(const_cast<Value*>(V2), KnownZero2, KnownOne2, DL, 0, AC,
UserI, DT);
}
};
if (V2) {
KnownZero2 = APInt(BitWidth, 0);
KnownOne2 = APInt(BitWidth, 0);
computeKnownBits(const_cast<Value *>(V2), KnownZero2, KnownOne2, DL,
0, AC, UserI, DT);
}
};
switch (UserI->getOpcode()) {
default: break;
@ -263,7 +263,6 @@ bool BDCE::runOnFunction(Function& F) {
return false;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DL = &F.getParent()->getDataLayout();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DenseMap<Instruction *, APInt> AliveBits;
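
For reference (illustrative, not BDCE's own code): computeKnownBits now takes the DataLayout by reference, fetched from the context instruction's module, instead of the removed DL member. A sketch:

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Hypothetical wrapper mirroring the ComputeKnownBits lambda above.
static void knownBitsSketch(const Instruction *UserI, Value *V,
                            unsigned BitWidth, APInt &KnownZero,
                            APInt &KnownOne) {
  const DataLayout &DL = UserI->getModule()->getDataLayout();
  KnownZero = APInt(BitWidth, 0);
  KnownOne = APInt(BitWidth, 0);
  // Depth 0; AssumptionCache and DominatorTree are omitted in this sketch.
  computeKnownBits(V, KnownZero, KnownOne, DL, 0, /*AC=*/nullptr, UserI,
                   /*DT=*/nullptr);
}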


@ -22,7 +22,6 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Pass.h"
@ -77,7 +76,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.erase(WorkList.begin()); // Get an element from the worklist...
if (!I->use_empty()) // Don't muck with dead instructions...
if (Constant *C = ConstantFoldInstruction(I, &DL, TLI)) {
if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
// Add all of the users of this instruction to the worklist, they might
// be constant propagatable now...
for (User *U : I->users())
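
A sketch (not from the patch) of the constant-propagation call after the signature change: ConstantFoldInstruction takes the DataLayout by reference, so the &DL at the call site becomes DL and there is no null or default case left. The helper name is invented:

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Hypothetical helper: try to replace I with a constant.
static bool foldOneSketch(Instruction *I, const TargetLibraryInfo *TLI) {
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
    I->replaceAllUsesWith(C);
    I->eraseFromParent();
    return true;
  }
  return false;
}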


@ -19,6 +19,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@ -126,8 +127,9 @@ bool CorrelatedValuePropagation::processPHI(PHINode *P) {
Changed = true;
}
// FIXME: Provide DL, TLI, DT, AT to SimplifyInstruction.
if (Value *V = SimplifyInstruction(P)) {
// FIXME: Provide TLI, DT, AT to SimplifyInstruction.
const DataLayout &DL = BB->getModule()->getDataLayout();
if (Value *V = SimplifyInstruction(P, DL)) {
P->replaceAllUsesWith(V);
P->eraseFromParent();
Changed = true;
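
Illustrative only: SimplifyInstruction likewise requires the DataLayout now, and the PHI-simplification call obtains it from the block's module. A minimal sketch with a hypothetical helper name:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Hypothetical: can this PHI be folded to an existing value?
static Value *simplifyPHISketch(PHINode *P) {
  const DataLayout &DL = P->getParent()->getModule()->getDataLayout();
  return SimplifyInstruction(P, DL); // TLI/DT/AC remain optional, DL does not
}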


@ -78,7 +78,8 @@ namespace {
bool HandleFree(CallInst *F);
bool handleEndBlock(BasicBlock &BB);
void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
SmallSetVector<Value*, 16> &DeadStackObjects);
SmallSetVector<Value *, 16> &DeadStackObjects,
const DataLayout &DL);
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
@ -194,18 +195,12 @@ static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
/// describe the memory operations for this instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
const DataLayout *DL = AA.getDataLayout();
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
return AA.getLocation(SI);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
// memcpy/memmove/memset.
AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// memset/memcpy, which writes more than an i8.
if (Loc.Size == AliasAnalysis::UnknownSize && DL == nullptr)
return AliasAnalysis::Location();
return Loc;
}
@ -215,11 +210,6 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
switch (II->getIntrinsicID()) {
default: return AliasAnalysis::Location(); // Unhandled intrinsic.
case Intrinsic::init_trampoline:
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// init.trampoline, which writes more than an i8.
if (!DL) return AliasAnalysis::Location();
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
return AliasAnalysis::Location(II->getArgOperand(0));
@ -321,9 +311,10 @@ static Value *getStoredPointerOperand(Instruction *I) {
return CS.getArgument(0);
}
static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
uint64_t Size;
if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
if (getObjectSize(V, Size, DL, TLI))
return Size;
return AliasAnalysis::UnknownSize;
}
@ -343,10 +334,9 @@ namespace {
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
const AliasAnalysis::Location &Earlier,
AliasAnalysis &AA,
int64_t &EarlierOff,
int64_t &LaterOff) {
const DataLayout *DL = AA.getDataLayout();
const DataLayout &DL,
const TargetLibraryInfo *TLI,
int64_t &EarlierOff, int64_t &LaterOff) {
const Value *P1 = Earlier.Ptr->stripPointerCasts();
const Value *P2 = Later.Ptr->stripPointerCasts();
@ -367,7 +357,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// Otherwise, we have to have size information, and the later store has to be
// larger than the earlier one.
if (Later.Size == AliasAnalysis::UnknownSize ||
Earlier.Size == AliasAnalysis::UnknownSize || DL == nullptr)
Earlier.Size == AliasAnalysis::UnknownSize)
return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
@ -382,7 +372,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
return OverwriteUnknown;
// If the "Later" store is to a recognizable object, get its size.
uint64_t ObjectSize = getPointerSize(UO2, AA);
uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
if (ObjectSize != AliasAnalysis::UnknownSize)
if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
return OverwriteComplete;
@ -560,8 +550,10 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
if (isRemovable(DepWrite) &&
!isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
int64_t InstWriteOffset, DepWriteOffset;
OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
DepWriteOffset, InstWriteOffset);
const DataLayout &DL = BB.getModule()->getDataLayout();
OverwriteResult OR =
isOverwrite(Loc, DepLoc, DL, AA->getTargetLibraryInfo(),
DepWriteOffset, InstWriteOffset);
if (OR == OverwriteComplete) {
DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
<< *DepWrite << "\n KILLER: " << *Inst << '\n');
@ -655,6 +647,7 @@ bool DSE::HandleFree(CallInst *F) {
AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
SmallVector<BasicBlock *, 16> Blocks;
Blocks.push_back(F->getParent());
const DataLayout &DL = F->getModule()->getDataLayout();
while (!Blocks.empty()) {
BasicBlock *BB = Blocks.pop_back_val();
@ -668,7 +661,7 @@ bool DSE::HandleFree(CallInst *F) {
break;
Value *DepPointer =
GetUnderlyingObject(getStoredPointerOperand(Dependency));
GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);
// Check for aliasing.
if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
@ -728,6 +721,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (AI->hasByValOrInAllocaAttr())
DeadStackObjects.insert(AI);
const DataLayout &DL = BB.getModule()->getDataLayout();
// Scan the basic block backwards
for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
--BBI;
@ -736,7 +731,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
// See through pointer-to-pointer bitcasts
SmallVector<Value *, 4> Pointers;
GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);
GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers, DL);
// Stores to stack values are valid candidates for removal.
bool AllDead = true;
@ -799,8 +794,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// the call is live.
DeadStackObjects.remove_if([&](Value *I) {
// See if the call site touches the value.
AliasAnalysis::ModRefResult A =
AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
AliasAnalysis::ModRefResult A = AA->getModRefInfo(
CS, I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));
return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
});
@ -835,7 +830,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Remove any allocas from the DeadPointer set that are loaded, as this
// makes any stores above the access live.
RemoveAccessedObjects(LoadedLoc, DeadStackObjects);
RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL);
// If all of the allocas were clobbered by the access then we're not going
// to find anything else to process.
@ -850,8 +845,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
SmallSetVector<Value*, 16> &DeadStackObjects) {
const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);
SmallSetVector<Value *, 16> &DeadStackObjects,
const DataLayout &DL) {
const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
// A constant can't be in the dead pointer set.
if (isa<Constant>(UnderlyingPointer))
@ -867,7 +863,8 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
// Remove objects that could alias LoadedLoc.
DeadStackObjects.remove_if([&](Value *I) {
// See if the loaded location could alias the stack location.
AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
AliasAnalysis::Location StackLoc(
I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));
return !AA->isNoAlias(StackLoc, LoadedLoc);
});
}
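
Illustrative only: with AliasAnalysis::getDataLayout() gone, DSE's size queries pass the DataLayout and TargetLibraryInfo explicitly. A sketch of the getPointerSize shape, reusing AliasAnalysis::UnknownSize for the failure case:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include <cstdint>
using namespace llvm;

// Hypothetical equivalent of DSE's getPointerSize after the change.
static uint64_t pointerSizeSketch(const Value *V, const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, TLI))
    return Size;
  return AliasAnalysis::UnknownSize;
}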


@ -263,7 +263,6 @@ namespace {
class EarlyCSE {
public:
Function &F;
const DataLayout *DL;
const TargetLibraryInfo &TLI;
const TargetTransformInfo &TTI;
DominatorTree &DT;
@ -308,11 +307,10 @@ public:
unsigned CurrentGeneration;
/// \brief Set up the EarlyCSE runner for a particular function.
EarlyCSE(Function &F, const DataLayout *DL, const TargetLibraryInfo &TLI,
EarlyCSE(Function &F, const TargetLibraryInfo &TLI,
const TargetTransformInfo &TTI, DominatorTree &DT,
AssumptionCache &AC)
: F(F), DL(DL), TLI(TLI), TTI(TTI), DT(DT), AC(AC), CurrentGeneration(0) {
}
: F(F), TLI(TLI), TTI(TTI), DT(DT), AC(AC), CurrentGeneration(0) {}
bool run();
@ -469,6 +467,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
Instruction *LastStore = nullptr;
bool Changed = false;
const DataLayout &DL = BB->getModule()->getDataLayout();
// See if any instructions in the block can be eliminated. If so, do it. If
// not, add them to AvailableValues.
@ -685,14 +684,12 @@ bool EarlyCSE::run() {
PreservedAnalyses EarlyCSEPass::run(Function &F,
AnalysisManager<Function> *AM) {
const DataLayout &DL = F.getParent()->getDataLayout();
auto &TLI = AM->getResult<TargetLibraryAnalysis>(F);
auto &TTI = AM->getResult<TargetIRAnalysis>(F);
auto &DT = AM->getResult<DominatorTreeAnalysis>(F);
auto &AC = AM->getResult<AssumptionAnalysis>(F);
EarlyCSE CSE(F, &DL, TLI, TTI, DT, AC);
EarlyCSE CSE(F, TLI, TTI, DT, AC);
if (!CSE.run())
return PreservedAnalyses::all();
@ -724,13 +721,12 @@ public:
if (skipOptnoneFunction(F))
return false;
auto &DL = F.getParent()->getDataLayout();
auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
EarlyCSE CSE(F, &DL, TLI, TTI, DT, AC);
EarlyCSE CSE(F, TLI, TTI, DT, AC);
return CSE.run();
}


@ -584,14 +584,13 @@ namespace {
/// Emit code into this block to adjust the value defined here to the
/// specified type. This handles various coercion cases.
Value *MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const;
Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const;
};
class GVN : public FunctionPass {
bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
const DataLayout *DL;
const TargetLibraryInfo *TLI;
AssumptionCache *AC;
SetVector<BasicBlock *> DeadBlocks;
@ -630,7 +629,6 @@ namespace {
InstrsToErase.push_back(I);
}
const DataLayout *getDataLayout() const { return DL; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceAnalysis &getMemDep() const { return *MD; }
@ -956,8 +954,9 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
return -1;
int64_t StoreOffset = 0, LoadOffset = 0;
Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&DL);
Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &DL);
Value *StoreBase =
GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL);
Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL);
if (StoreBase != LoadBase)
return -1;
@ -1021,13 +1020,13 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
StoreInst *DepSI,
const DataLayout &DL) {
StoreInst *DepSI) {
// Cannot handle reading from store of first-class aggregate yet.
if (DepSI->getValueOperand()->getType()->isStructTy() ||
DepSI->getValueOperand()->getType()->isArrayTy())
return -1;
const DataLayout &DL = DepSI->getModule()->getDataLayout();
Value *StorePtr = DepSI->getPointerOperand();
uint64_t StoreSize =DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
@ -1052,11 +1051,11 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
// then we should widen it!
int64_t LoadOffs = 0;
const Value *LoadBase =
GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &DL);
GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
unsigned Size = MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, DL);
unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
LoadBase, LoadOffs, LoadSize, DepLI);
if (Size == 0) return -1;
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);
@ -1086,7 +1085,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
Constant *Src = dyn_cast<Constant>(MTI->getSource());
if (!Src) return -1;
GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &DL));
GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
if (!GV || !GV->isConstant()) return -1;
// See if the access is within the bounds of the transfer.
@ -1104,7 +1103,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
if (ConstantFoldLoadFromConstPtr(Src, &DL))
if (ConstantFoldLoadFromConstPtr(Src, DL))
return Offset;
return -1;
}
@ -1157,7 +1156,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
GVN &gvn) {
const DataLayout &DL = *gvn.getDataLayout();
const DataLayout &DL = SrcVal->getModule()->getDataLayout();
// If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
// widen SrcVal out to a larger load.
unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType());
@ -1265,7 +1264,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
return ConstantFoldLoadFromConstPtr(Src, &DL);
return ConstantFoldLoadFromConstPtr(Src, DL);
}
@ -1281,7 +1280,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
LI->getParent())) {
assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominate this block");
return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);
return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
}
// Otherwise, we have to construct SSA form.
@ -1289,8 +1288,6 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
SSAUpdater SSAUpdate(&NewPHIs);
SSAUpdate.Initialize(LI->getType(), LI->getName());
Type *LoadTy = LI->getType();
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
const AvailableValueInBlock &AV = ValuesPerBlock[i];
BasicBlock *BB = AV.BB;
@ -1298,7 +1295,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
if (SSAUpdate.HasValueForBlock(BB))
continue;
SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, gvn));
SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
}
// Perform PHI construction.
@ -1326,16 +1323,16 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
return V;
}
Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const {
Value *AvailableValueInBlock::MaterializeAdjustedValue(LoadInst *LI,
GVN &gvn) const {
Value *Res;
Type *LoadTy = LI->getType();
const DataLayout &DL = LI->getModule()->getDataLayout();
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
const DataLayout *DL = gvn.getDataLayout();
assert(DL && "Need target data to handle type mismatch case");
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
*DL);
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), DL);
DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
<< *getSimpleValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@ -1353,10 +1350,8 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
<< *Res << '\n' << "\n\n\n");
}
} else if (isMemIntrinValue()) {
const DataLayout *DL = gvn.getDataLayout();
assert(DL && "Need target data to handle type mismatch case");
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
LoadTy, BB->getTerminator(), *DL);
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
BB->getTerminator(), DL);
DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
<< " " << *getMemIntrinValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@ -1383,6 +1378,7 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// dependencies that produce an unknown value for the load (such as a call
// that could potentially clobber the load).
unsigned NumDeps = Deps.size();
const DataLayout &DL = LI->getModule()->getDataLayout();
for (unsigned i = 0, e = NumDeps; i != e; ++i) {
BasicBlock *DepBB = Deps[i].getBB();
MemDepResult DepInfo = Deps[i].getResult();
@ -1409,9 +1405,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// read by the load, we can extract the bits we need for the load from the
// stored value.
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
if (DL && Address) {
int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
DepSI, *DL);
if (Address) {
int Offset =
AnalyzeLoadFromClobberingStore(LI->getType(), Address, DepSI);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
DepSI->getValueOperand(),
@ -1428,9 +1424,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
// If this is a clobber and L is the first instruction in its block, then
// we have the first instruction in the entry block.
if (DepLI != LI && Address && DL) {
int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), Address,
DepLI, *DL);
if (DepLI != LI && Address) {
int Offset =
AnalyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
@ -1443,9 +1439,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
if (DL && Address) {
if (Address) {
int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
DepMI, *DL);
DepMI, DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
Offset));
@ -1484,8 +1480,8 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (S->getValueOperand()->getType() != LI->getType()) {
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
if (!DL || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
LI->getType(), *DL)) {
if (!CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
LI->getType(), DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
@ -1501,7 +1497,7 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (LD->getType() != LI->getType()) {
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
if (!DL || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*DL)) {
if (!CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
@ -1613,6 +1609,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// Check if the load can safely be moved to all the unavailable predecessors.
bool CanDoPRE = true;
const DataLayout &DL = LI->getModule()->getDataLayout();
SmallVector<Instruction*, 8> NewInsts;
for (auto &PredLoad : PredLoads) {
BasicBlock *UnavailablePred = PredLoad.first;
@ -1833,10 +1830,11 @@ bool GVN::processLoad(LoadInst *L) {
// ... to a pointer that has been loaded from before...
MemDepResult Dep = MD->getDependency(L);
const DataLayout &DL = L->getModule()->getDataLayout();
// If we have a clobber and target data is around, see if this is a clobber
// that we can fix up through code synthesis.
if (Dep.isClobber() && DL) {
if (Dep.isClobber()) {
// Check to see if we have something like this:
// store i32 123, i32* %P
// %A = bitcast i32* %P to i8*
@ -1849,12 +1847,11 @@ bool GVN::processLoad(LoadInst *L) {
// access code.
Value *AvailVal = nullptr;
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
L->getPointerOperand(),
DepSI, *DL);
int Offset = AnalyzeLoadFromClobberingStore(
L->getType(), L->getPointerOperand(), DepSI);
if (Offset != -1)
AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
L->getType(), L, *DL);
L->getType(), L, DL);
}
// Check to see if we have something like this:
@ -1867,9 +1864,8 @@ bool GVN::processLoad(LoadInst *L) {
if (DepLI == L)
return false;
int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
L->getPointerOperand(),
DepLI, *DL);
int Offset = AnalyzeLoadFromClobberingLoad(
L->getType(), L->getPointerOperand(), DepLI, DL);
if (Offset != -1)
AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
}
@ -1877,11 +1873,10 @@ bool GVN::processLoad(LoadInst *L) {
// If the clobbering value is a memset/memcpy/memmove, see if we can forward
// a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
L->getPointerOperand(),
DepMI, *DL);
int Offset = AnalyzeLoadFromClobberingMemInst(
L->getType(), L->getPointerOperand(), DepMI, DL);
if (Offset != -1)
AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *DL);
AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, DL);
}
if (AvailVal) {
@ -1932,17 +1927,13 @@ bool GVN::processLoad(LoadInst *L) {
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
if (StoredVal->getType() != L->getType()) {
if (DL) {
StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
L, *DL);
if (!StoredVal)
return false;
DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
<< '\n' << *L << "\n\n\n");
}
else
StoredVal =
CoerceAvailableValueToLoadType(StoredVal, L->getType(), L, DL);
if (!StoredVal)
return false;
DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
<< '\n' << *L << "\n\n\n");
}
// Remove it!
@ -1961,17 +1952,12 @@ bool GVN::processLoad(LoadInst *L) {
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
if (DepLI->getType() != L->getType()) {
if (DL) {
AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
L, *DL);
if (!AvailableVal)
return false;
DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
<< "\n" << *L << "\n\n\n");
}
else
AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L, DL);
if (!AvailableVal)
return false;
DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
<< "\n" << *L << "\n\n\n");
}
// Remove it!
@ -2239,6 +2225,7 @@ bool GVN::processInstruction(Instruction *I) {
// to value numbering it. Value numbering often exposes redundancies, for
// example if it determines that %y is equal to %x then the instruction
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
const DataLayout &DL = I->getModule()->getDataLayout();
if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) {
I->replaceAllUsesWith(V);
if (MD && V->getType()->getScalarType()->isPointerTy())
@ -2357,7 +2344,6 @@ bool GVN::runOnFunction(Function& F) {
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DL = &F.getParent()->getDataLayout();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());

Some files were not shown because too many files have changed in this diff.