mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2025-02-01 05:01:59 +01:00
[CallSite removal] Migrate all Alias Analysis APIs to use the newly
minted `CallBase` class instead of the `CallSite` wrapper. This moves the largest interwoven collection of APIs that traffic in `CallSite`s. While a handful of these could have been migrated with a minorly more shallow migration by converting from a `CallSite` to a `CallBase`, it hardly seemed worth it. Most of the APIs needed to migrate together because of the complex interplay of AA APIs and the fact that converting from a `CallBase` to a `CallSite` isn't free in its current implementation. Out of tree users of these APIs can fairly reliably migrate with some combination of `.getInstruction()` on the `CallSite` instance and casting the resulting pointer. The most generic form will look like `CS` -> `cast_or_null<CallBase>(CS.getInstruction())` but in most cases there is a more elegant migration. Hopefully, this migrates enough APIs for users to fully move from `CallSite` to the base class. All of the in-tree users were easily migrated in that fashion. Thanks for the review from Saleem! Differential Revision: https://reviews.llvm.org/D55641 llvm-svn: 350503
This commit is contained in:
parent
b638387207
commit
cb1f5addb7
@ -43,7 +43,6 @@
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/Analysis/MemoryLocation.h"
|
||||
#include "llvm/Analysis/TargetLibraryInfo.h"
|
||||
#include "llvm/IR/CallSite.h"
|
||||
#include "llvm/IR/Function.h"
|
||||
#include "llvm/IR/Instruction.h"
|
||||
#include "llvm/IR/Instructions.h"
|
||||
@ -382,15 +381,15 @@ public:
|
||||
/// \name Simple mod/ref information
|
||||
/// @{
|
||||
|
||||
/// Get the ModRef info associated with a pointer argument of a callsite. The
|
||||
/// Get the ModRef info associated with a pointer argument of a call. The
|
||||
/// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
|
||||
/// that these bits do not necessarily account for the overall behavior of
|
||||
/// the function, but rather only provide additional per-argument
|
||||
/// information. This never sets ModRefInfo::Must.
|
||||
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
|
||||
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
|
||||
|
||||
/// Return the behavior of the given call site.
|
||||
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
|
||||
FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
|
||||
|
||||
/// Return the behavior when calling the given function.
|
||||
FunctionModRefBehavior getModRefBehavior(const Function *F);
|
||||
@ -406,8 +405,8 @@ public:
|
||||
/// property (e.g. calls to 'sin' and 'cos').
|
||||
///
|
||||
/// This property corresponds to the GCC 'const' attribute.
|
||||
bool doesNotAccessMemory(ImmutableCallSite CS) {
|
||||
return getModRefBehavior(CS) == FMRB_DoesNotAccessMemory;
|
||||
bool doesNotAccessMemory(const CallBase *Call) {
|
||||
return getModRefBehavior(Call) == FMRB_DoesNotAccessMemory;
|
||||
}
|
||||
|
||||
/// Checks if the specified function is known to never read or write memory.
|
||||
@ -434,8 +433,8 @@ public:
|
||||
/// absence of interfering store instructions, such as CSE of strlen calls.
|
||||
///
|
||||
/// This property corresponds to the GCC 'pure' attribute.
|
||||
bool onlyReadsMemory(ImmutableCallSite CS) {
|
||||
return onlyReadsMemory(getModRefBehavior(CS));
|
||||
bool onlyReadsMemory(const CallBase *Call) {
|
||||
return onlyReadsMemory(getModRefBehavior(Call));
|
||||
}
|
||||
|
||||
/// Checks if the specified function is known to only read from non-volatile
|
||||
@ -500,36 +499,12 @@ public:
|
||||
|
||||
/// getModRefInfo (for call sites) - Return information about whether
|
||||
/// a particular call site modifies or reads the specified memory location.
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
|
||||
|
||||
/// getModRefInfo (for call sites) - A convenience wrapper.
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P,
|
||||
ModRefInfo getModRefInfo(const CallBase *Call, const Value *P,
|
||||
LocationSize Size) {
|
||||
return getModRefInfo(CS, MemoryLocation(P, Size));
|
||||
}
|
||||
|
||||
/// getModRefInfo (for calls) - Return information about whether
|
||||
/// a particular call modifies or reads the specified memory location.
|
||||
ModRefInfo getModRefInfo(const CallInst *C, const MemoryLocation &Loc) {
|
||||
return getModRefInfo(ImmutableCallSite(C), Loc);
|
||||
}
|
||||
|
||||
/// getModRefInfo (for calls) - A convenience wrapper.
|
||||
ModRefInfo getModRefInfo(const CallInst *C, const Value *P,
|
||||
LocationSize Size) {
|
||||
return getModRefInfo(C, MemoryLocation(P, Size));
|
||||
}
|
||||
|
||||
/// getModRefInfo (for invokes) - Return information about whether
|
||||
/// a particular invoke modifies or reads the specified memory location.
|
||||
ModRefInfo getModRefInfo(const InvokeInst *I, const MemoryLocation &Loc) {
|
||||
return getModRefInfo(ImmutableCallSite(I), Loc);
|
||||
}
|
||||
|
||||
/// getModRefInfo (for invokes) - A convenience wrapper.
|
||||
ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P,
|
||||
LocationSize Size) {
|
||||
return getModRefInfo(I, MemoryLocation(P, Size));
|
||||
return getModRefInfo(Call, MemoryLocation(P, Size));
|
||||
}
|
||||
|
||||
/// getModRefInfo (for loads) - Return information about whether
|
||||
@ -626,8 +601,8 @@ public:
|
||||
ModRefInfo getModRefInfo(const Instruction *I,
|
||||
const Optional<MemoryLocation> &OptLoc) {
|
||||
if (OptLoc == None) {
|
||||
if (auto CS = ImmutableCallSite(I)) {
|
||||
return createModRefInfo(getModRefBehavior(CS));
|
||||
if (const auto *Call = dyn_cast<CallBase>(I)) {
|
||||
return createModRefInfo(getModRefBehavior(Call));
|
||||
}
|
||||
}
|
||||
|
||||
@ -661,12 +636,12 @@ public:
|
||||
|
||||
/// Return information about whether a call and an instruction may refer to
|
||||
/// the same memory locations.
|
||||
ModRefInfo getModRefInfo(Instruction *I, ImmutableCallSite Call);
|
||||
ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call);
|
||||
|
||||
/// Return information about whether two call sites may refer to the same set
|
||||
/// of memory locations. See the AA documentation for details:
|
||||
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
|
||||
|
||||
/// Return information about whether a particular call site modifies
|
||||
/// or reads the specified memory location \p MemLoc before instruction \p I
|
||||
@ -777,25 +752,25 @@ public:
|
||||
/// that these bits do not necessarily account for the overall behavior of
|
||||
/// the function, but rather only provide additional per-argument
|
||||
/// information.
|
||||
virtual ModRefInfo getArgModRefInfo(ImmutableCallSite CS,
|
||||
virtual ModRefInfo getArgModRefInfo(const CallBase *Call,
|
||||
unsigned ArgIdx) = 0;
|
||||
|
||||
/// Return the behavior of the given call site.
|
||||
virtual FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) = 0;
|
||||
virtual FunctionModRefBehavior getModRefBehavior(const CallBase *Call) = 0;
|
||||
|
||||
/// Return the behavior when calling the given function.
|
||||
virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0;
|
||||
|
||||
/// getModRefInfo (for call sites) - Return information about whether
|
||||
/// a particular call site modifies or reads the specified memory location.
|
||||
virtual ModRefInfo getModRefInfo(ImmutableCallSite CS,
|
||||
virtual ModRefInfo getModRefInfo(const CallBase *Call,
|
||||
const MemoryLocation &Loc) = 0;
|
||||
|
||||
/// Return information about whether two call sites may refer to the same set
|
||||
/// of memory locations. See the AA documentation for details:
|
||||
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
|
||||
virtual ModRefInfo getModRefInfo(ImmutableCallSite CS1,
|
||||
ImmutableCallSite CS2) = 0;
|
||||
virtual ModRefInfo getModRefInfo(const CallBase *Call1,
|
||||
const CallBase *Call2) = 0;
|
||||
|
||||
/// @}
|
||||
};
|
||||
@ -827,26 +802,26 @@ public:
|
||||
return Result.pointsToConstantMemory(Loc, OrLocal);
|
||||
}
|
||||
|
||||
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override {
|
||||
return Result.getArgModRefInfo(CS, ArgIdx);
|
||||
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override {
|
||||
return Result.getArgModRefInfo(Call, ArgIdx);
|
||||
}
|
||||
|
||||
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) override {
|
||||
return Result.getModRefBehavior(CS);
|
||||
FunctionModRefBehavior getModRefBehavior(const CallBase *Call) override {
|
||||
return Result.getModRefBehavior(Call);
|
||||
}
|
||||
|
||||
FunctionModRefBehavior getModRefBehavior(const Function *F) override {
|
||||
return Result.getModRefBehavior(F);
|
||||
}
|
||||
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS,
|
||||
ModRefInfo getModRefInfo(const CallBase *Call,
|
||||
const MemoryLocation &Loc) override {
|
||||
return Result.getModRefInfo(CS, Loc);
|
||||
return Result.getModRefInfo(Call, Loc);
|
||||
}
|
||||
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS1,
|
||||
ImmutableCallSite CS2) override {
|
||||
return Result.getModRefInfo(CS1, CS2);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call1,
|
||||
const CallBase *Call2) override {
|
||||
return Result.getModRefInfo(Call1, Call2);
|
||||
}
|
||||
};
|
||||
|
||||
@ -901,25 +876,28 @@ protected:
|
||||
: CurrentResult.pointsToConstantMemory(Loc, OrLocal);
|
||||
}
|
||||
|
||||
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
|
||||
return AAR ? AAR->getArgModRefInfo(CS, ArgIdx) : CurrentResult.getArgModRefInfo(CS, ArgIdx);
|
||||
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
|
||||
return AAR ? AAR->getArgModRefInfo(Call, ArgIdx)
|
||||
: CurrentResult.getArgModRefInfo(Call, ArgIdx);
|
||||
}
|
||||
|
||||
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
|
||||
return AAR ? AAR->getModRefBehavior(CS) : CurrentResult.getModRefBehavior(CS);
|
||||
FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
|
||||
return AAR ? AAR->getModRefBehavior(Call)
|
||||
: CurrentResult.getModRefBehavior(Call);
|
||||
}
|
||||
|
||||
FunctionModRefBehavior getModRefBehavior(const Function *F) {
|
||||
return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F);
|
||||
}
|
||||
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
|
||||
return AAR ? AAR->getModRefInfo(CS, Loc)
|
||||
: CurrentResult.getModRefInfo(CS, Loc);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
|
||||
return AAR ? AAR->getModRefInfo(Call, Loc)
|
||||
: CurrentResult.getModRefInfo(Call, Loc);
|
||||
}
|
||||
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
|
||||
return AAR ? AAR->getModRefInfo(CS1, CS2) : CurrentResult.getModRefInfo(CS1, CS2);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
|
||||
return AAR ? AAR->getModRefInfo(Call1, Call2)
|
||||
: CurrentResult.getModRefInfo(Call1, Call2);
|
||||
}
|
||||
};
|
||||
|
||||
@ -951,11 +929,11 @@ public:
|
||||
return false;
|
||||
}
|
||||
|
||||
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
|
||||
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
|
||||
return ModRefInfo::ModRef;
|
||||
}
|
||||
|
||||
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
|
||||
FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
|
||||
return FMRB_UnknownModRefBehavior;
|
||||
}
|
||||
|
||||
@ -963,11 +941,11 @@ public:
|
||||
return FMRB_UnknownModRefBehavior;
|
||||
}
|
||||
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
|
||||
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
|
||||
return ModRefInfo::ModRef;
|
||||
}
|
||||
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
|
||||
ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
|
||||
return ModRefInfo::ModRef;
|
||||
}
|
||||
};
|
||||
|
@ -21,7 +21,7 @@
|
||||
#include "llvm/Analysis/AliasAnalysis.h"
|
||||
#include "llvm/Analysis/AssumptionCache.h"
|
||||
#include "llvm/Analysis/MemoryLocation.h"
|
||||
#include "llvm/IR/CallSite.h"
|
||||
#include "llvm/IR/InstrTypes.h"
|
||||
#include "llvm/IR/PassManager.h"
|
||||
#include "llvm/Pass.h"
|
||||
#include <algorithm>
|
||||
@ -84,18 +84,18 @@ public:
|
||||
|
||||
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
|
||||
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
|
||||
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
|
||||
|
||||
/// Chases pointers until we find a (constant global) or not.
|
||||
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
|
||||
|
||||
/// Get the location associated with a pointer argument of a callsite.
|
||||
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
|
||||
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
|
||||
|
||||
/// Returns the behavior when calling the given call site.
|
||||
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
|
||||
FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
|
||||
|
||||
/// Returns the behavior when calling the given function. For use when the
|
||||
/// call site is not known.
|
||||
|
@ -88,7 +88,7 @@ public:
|
||||
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
|
||||
|
||||
using AAResultBase::getModRefInfo;
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
|
||||
|
||||
/// getModRefBehavior - Return the behavior of the specified function if
|
||||
/// called from the specified call site. The call site may be null in which
|
||||
@ -98,7 +98,7 @@ public:
|
||||
/// getModRefBehavior - Return the behavior of the specified function if
|
||||
/// called from the specified call site. The call site may be null in which
|
||||
/// case the most generic behavior of this function should be returned.
|
||||
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
|
||||
FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
|
||||
|
||||
private:
|
||||
FunctionInfo *getFunctionInfo(const Function *F);
|
||||
@ -113,7 +113,7 @@ private:
|
||||
void CollectSCCMembership(CallGraph &CG);
|
||||
|
||||
bool isNonEscapingGlobalNoAlias(const GlobalValue *GV, const Value *V);
|
||||
ModRefInfo getModRefInfoForArgument(ImmutableCallSite CS,
|
||||
ModRefInfo getModRefInfoForArgument(const CallBase *Call,
|
||||
const GlobalValue *GV);
|
||||
};
|
||||
|
||||
|
@ -37,7 +37,6 @@
|
||||
namespace llvm {
|
||||
|
||||
class AssumptionCache;
|
||||
class CallSite;
|
||||
class DominatorTree;
|
||||
class Function;
|
||||
class Instruction;
|
||||
@ -398,7 +397,7 @@ public:
|
||||
/// invalidated on the next non-local query or when an instruction is
|
||||
/// removed. Clients must copy this data if they want it around longer than
|
||||
/// that.
|
||||
const NonLocalDepInfo &getNonLocalCallDependency(CallSite QueryCS);
|
||||
const NonLocalDepInfo &getNonLocalCallDependency(CallBase *QueryCall);
|
||||
|
||||
/// Perform a full dependency query for an access to the QueryInst's
|
||||
/// specified memory location, returning the set of instructions that either
|
||||
@ -482,9 +481,9 @@ public:
|
||||
void releaseMemory();
|
||||
|
||||
private:
|
||||
MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
|
||||
BasicBlock::iterator ScanIt,
|
||||
BasicBlock *BB);
|
||||
MemDepResult getCallDependencyFrom(CallBase *Call, bool isReadOnlyCall,
|
||||
BasicBlock::iterator ScanIt,
|
||||
BasicBlock *BB);
|
||||
bool getNonLocalPointerDepFromBB(Instruction *QueryInst,
|
||||
const PHITransAddr &Pointer,
|
||||
const MemoryLocation &Loc, bool isLoad,
|
||||
|
@ -16,9 +16,9 @@
|
||||
#ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
|
||||
#define LLVM_ANALYSIS_MEMORYLOCATION_H
|
||||
|
||||
#include "llvm/ADT/Optional.h"
|
||||
#include "llvm/ADT/DenseMapInfo.h"
|
||||
#include "llvm/IR/CallSite.h"
|
||||
#include "llvm/ADT/Optional.h"
|
||||
#include "llvm/IR/Instructions.h"
|
||||
#include "llvm/IR/Metadata.h"
|
||||
|
||||
namespace llvm {
|
||||
@ -234,11 +234,11 @@ public:
|
||||
static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
|
||||
|
||||
/// Return a location representing a particular argument of a call.
|
||||
static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
|
||||
static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
|
||||
const TargetLibraryInfo *TLI);
|
||||
static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
|
||||
static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
|
||||
const TargetLibraryInfo &TLI) {
|
||||
return getForArgument(CS, ArgIdx, &TLI);
|
||||
return getForArgument(Call, ArgIdx, &TLI);
|
||||
}
|
||||
|
||||
explicit MemoryLocation(const Value *Ptr = nullptr,
|
||||
|
@ -60,7 +60,7 @@ public:
|
||||
FunctionModRefBehavior getModRefBehavior(const Function *F);
|
||||
|
||||
using AAResultBase::getModRefInfo;
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
|
||||
};
|
||||
|
||||
/// Analysis pass providing a never-invalidated alias analysis result.
|
||||
|
@ -16,7 +16,7 @@
|
||||
#define LLVM_ANALYSIS_SCOPEDNOALIASAA_H
|
||||
|
||||
#include "llvm/Analysis/AliasAnalysis.h"
|
||||
#include "llvm/IR/CallSite.h"
|
||||
#include "llvm/IR/InstrTypes.h"
|
||||
#include "llvm/IR/PassManager.h"
|
||||
#include "llvm/Pass.h"
|
||||
#include <memory>
|
||||
@ -41,8 +41,8 @@ public:
|
||||
}
|
||||
|
||||
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
|
||||
|
||||
private:
|
||||
bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
|
||||
|
@ -17,7 +17,7 @@
|
||||
#define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
|
||||
|
||||
#include "llvm/Analysis/AliasAnalysis.h"
|
||||
#include "llvm/IR/CallSite.h"
|
||||
#include "llvm/IR/InstrTypes.h"
|
||||
#include "llvm/IR/PassManager.h"
|
||||
#include "llvm/Pass.h"
|
||||
#include <memory>
|
||||
@ -43,10 +43,10 @@ public:
|
||||
|
||||
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
|
||||
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
|
||||
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
|
||||
FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
|
||||
FunctionModRefBehavior getModRefBehavior(const Function *F);
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
|
||||
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
|
||||
ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
|
||||
|
||||
private:
|
||||
bool Aliases(const MDNode *A, const MDNode *B) const;
|
||||
|
@ -297,10 +297,10 @@ class Value;
|
||||
|
||||
/// This function returns call pointer argument that is considered the same by
|
||||
/// aliasing rules. You CAN'T use it to replace one value with another.
|
||||
const Value *getArgumentAliasingToReturnedPointer(ImmutableCallSite CS);
|
||||
inline Value *getArgumentAliasingToReturnedPointer(CallSite CS) {
|
||||
return const_cast<Value *>(
|
||||
getArgumentAliasingToReturnedPointer(ImmutableCallSite(CS)));
|
||||
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call);
|
||||
inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call) {
|
||||
return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
|
||||
const_cast<const CallBase *>(Call)));
|
||||
}
|
||||
|
||||
// {launder,strip}.invariant.group returns pointer that aliases its argument,
|
||||
@ -309,7 +309,7 @@ class Value;
|
||||
// considered as capture. The arguments are not marked as returned neither,
|
||||
// because it would make it useless.
|
||||
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
|
||||
ImmutableCallSite CS);
|
||||
const CallBase *Call);
|
||||
|
||||
/// This method strips off any GEP address adjustments and pointer casts from
|
||||
/// the specified value, returning the original object being addressed. Note
|
||||
|
@ -40,7 +40,6 @@
|
||||
#include "llvm/IR/Argument.h"
|
||||
#include "llvm/IR/Attributes.h"
|
||||
#include "llvm/IR/BasicBlock.h"
|
||||
#include "llvm/IR/CallSite.h"
|
||||
#include "llvm/IR/Instruction.h"
|
||||
#include "llvm/IR/Instructions.h"
|
||||
#include "llvm/IR/Module.h"
|
||||
@ -118,11 +117,11 @@ bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
|
||||
return false;
|
||||
}
|
||||
|
||||
ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
|
||||
ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
|
||||
ModRefInfo Result = ModRefInfo::ModRef;
|
||||
|
||||
for (const auto &AA : AAs) {
|
||||
Result = intersectModRef(Result, AA->getArgModRefInfo(CS, ArgIdx));
|
||||
Result = intersectModRef(Result, AA->getArgModRefInfo(Call, ArgIdx));
|
||||
|
||||
// Early-exit the moment we reach the bottom of the lattice.
|
||||
if (isNoModRef(Result))
|
||||
@ -132,11 +131,11 @@ ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
|
||||
return Result;
|
||||
}
|
||||
|
||||
ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
|
||||
ModRefInfo AAResults::getModRefInfo(Instruction *I, const CallBase *Call2) {
|
||||
// We may have two calls.
|
||||
if (auto CS = ImmutableCallSite(I)) {
|
||||
if (const auto *Call1 = dyn_cast<CallBase>(I)) {
|
||||
// Check if the two calls modify the same memory.
|
||||
return getModRefInfo(CS, Call);
|
||||
return getModRefInfo(Call1, Call2);
|
||||
} else if (I->isFenceLike()) {
|
||||
// If this is a fence, just return ModRef.
|
||||
return ModRefInfo::ModRef;
|
||||
@ -146,19 +145,19 @@ ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
|
||||
// is that if the call references what this instruction
|
||||
// defines, it must be clobbered by this location.
|
||||
const MemoryLocation DefLoc = MemoryLocation::get(I);
|
||||
ModRefInfo MR = getModRefInfo(Call, DefLoc);
|
||||
ModRefInfo MR = getModRefInfo(Call2, DefLoc);
|
||||
if (isModOrRefSet(MR))
|
||||
return setModAndRef(MR);
|
||||
}
|
||||
return ModRefInfo::NoModRef;
|
||||
}
|
||||
|
||||
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
|
||||
ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
|
||||
const MemoryLocation &Loc) {
|
||||
ModRefInfo Result = ModRefInfo::ModRef;
|
||||
|
||||
for (const auto &AA : AAs) {
|
||||
Result = intersectModRef(Result, AA->getModRefInfo(CS, Loc));
|
||||
Result = intersectModRef(Result, AA->getModRefInfo(Call, Loc));
|
||||
|
||||
// Early-exit the moment we reach the bottom of the lattice.
|
||||
if (isNoModRef(Result))
|
||||
@ -167,7 +166,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
|
||||
|
||||
// Try to refine the mod-ref info further using other API entry points to the
|
||||
// aggregate set of AA results.
|
||||
auto MRB = getModRefBehavior(CS);
|
||||
auto MRB = getModRefBehavior(Call);
|
||||
if (MRB == FMRB_DoesNotAccessMemory ||
|
||||
MRB == FMRB_OnlyAccessesInaccessibleMem)
|
||||
return ModRefInfo::NoModRef;
|
||||
@ -181,15 +180,16 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
|
||||
bool IsMustAlias = true;
|
||||
ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
|
||||
if (doesAccessArgPointees(MRB)) {
|
||||
for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
|
||||
for (auto AI = Call->arg_begin(), AE = Call->arg_end(); AI != AE; ++AI) {
|
||||
const Value *Arg = *AI;
|
||||
if (!Arg->getType()->isPointerTy())
|
||||
continue;
|
||||
unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
|
||||
MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
|
||||
unsigned ArgIdx = std::distance(Call->arg_begin(), AI);
|
||||
MemoryLocation ArgLoc =
|
||||
MemoryLocation::getForArgument(Call, ArgIdx, TLI);
|
||||
AliasResult ArgAlias = alias(ArgLoc, Loc);
|
||||
if (ArgAlias != NoAlias) {
|
||||
ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
|
||||
ModRefInfo ArgMask = getArgModRefInfo(Call, ArgIdx);
|
||||
AllArgsMask = unionModRef(AllArgsMask, ArgMask);
|
||||
}
|
||||
// Conservatively clear IsMustAlias unless only MustAlias is found.
|
||||
@ -213,12 +213,12 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
|
||||
return Result;
|
||||
}
|
||||
|
||||
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
|
||||
ImmutableCallSite CS2) {
|
||||
ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
|
||||
const CallBase *Call2) {
|
||||
ModRefInfo Result = ModRefInfo::ModRef;
|
||||
|
||||
for (const auto &AA : AAs) {
|
||||
Result = intersectModRef(Result, AA->getModRefInfo(CS1, CS2));
|
||||
Result = intersectModRef(Result, AA->getModRefInfo(Call1, Call2));
|
||||
|
||||
// Early-exit the moment we reach the bottom of the lattice.
|
||||
if (isNoModRef(Result))
|
||||
@ -228,59 +228,61 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
|
||||
// Try to refine the mod-ref info further using other API entry points to the
|
||||
// aggregate set of AA results.
|
||||
|
||||
// If CS1 or CS2 are readnone, they don't interact.
|
||||
auto CS1B = getModRefBehavior(CS1);
|
||||
if (CS1B == FMRB_DoesNotAccessMemory)
|
||||
// If Call1 or Call2 are readnone, they don't interact.
|
||||
auto Call1B = getModRefBehavior(Call1);
|
||||
if (Call1B == FMRB_DoesNotAccessMemory)
|
||||
return ModRefInfo::NoModRef;
|
||||
|
||||
auto CS2B = getModRefBehavior(CS2);
|
||||
if (CS2B == FMRB_DoesNotAccessMemory)
|
||||
auto Call2B = getModRefBehavior(Call2);
|
||||
if (Call2B == FMRB_DoesNotAccessMemory)
|
||||
return ModRefInfo::NoModRef;
|
||||
|
||||
// If they both only read from memory, there is no dependence.
|
||||
if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
|
||||
if (onlyReadsMemory(Call1B) && onlyReadsMemory(Call2B))
|
||||
return ModRefInfo::NoModRef;
|
||||
|
||||
// If CS1 only reads memory, the only dependence on CS2 can be
|
||||
// from CS1 reading memory written by CS2.
|
||||
if (onlyReadsMemory(CS1B))
|
||||
// If Call1 only reads memory, the only dependence on Call2 can be
|
||||
// from Call1 reading memory written by Call2.
|
||||
if (onlyReadsMemory(Call1B))
|
||||
Result = clearMod(Result);
|
||||
else if (doesNotReadMemory(CS1B))
|
||||
else if (doesNotReadMemory(Call1B))
|
||||
Result = clearRef(Result);
|
||||
|
||||
// If CS2 only access memory through arguments, accumulate the mod/ref
|
||||
// information from CS1's references to the memory referenced by
|
||||
// CS2's arguments.
|
||||
if (onlyAccessesArgPointees(CS2B)) {
|
||||
if (!doesAccessArgPointees(CS2B))
|
||||
// If Call2 only access memory through arguments, accumulate the mod/ref
|
||||
// information from Call1's references to the memory referenced by
|
||||
// Call2's arguments.
|
||||
if (onlyAccessesArgPointees(Call2B)) {
|
||||
if (!doesAccessArgPointees(Call2B))
|
||||
return ModRefInfo::NoModRef;
|
||||
ModRefInfo R = ModRefInfo::NoModRef;
|
||||
bool IsMustAlias = true;
|
||||
for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
|
||||
for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
|
||||
const Value *Arg = *I;
|
||||
if (!Arg->getType()->isPointerTy())
|
||||
continue;
|
||||
unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
|
||||
auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);
|
||||
unsigned Call2ArgIdx = std::distance(Call2->arg_begin(), I);
|
||||
auto Call2ArgLoc =
|
||||
MemoryLocation::getForArgument(Call2, Call2ArgIdx, TLI);
|
||||
|
||||
// ArgModRefCS2 indicates what CS2 might do to CS2ArgLoc, and the
|
||||
// dependence of CS1 on that location is the inverse:
|
||||
// - If CS2 modifies location, dependence exists if CS1 reads or writes.
|
||||
// - If CS2 only reads location, dependence exists if CS1 writes.
|
||||
ModRefInfo ArgModRefCS2 = getArgModRefInfo(CS2, CS2ArgIdx);
|
||||
// ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the
|
||||
// dependence of Call1 on that location is the inverse:
|
||||
// - If Call2 modifies location, dependence exists if Call1 reads or
|
||||
// writes.
|
||||
// - If Call2 only reads location, dependence exists if Call1 writes.
|
||||
ModRefInfo ArgModRefC2 = getArgModRefInfo(Call2, Call2ArgIdx);
|
||||
ModRefInfo ArgMask = ModRefInfo::NoModRef;
|
||||
if (isModSet(ArgModRefCS2))
|
||||
if (isModSet(ArgModRefC2))
|
||||
ArgMask = ModRefInfo::ModRef;
|
||||
else if (isRefSet(ArgModRefCS2))
|
||||
else if (isRefSet(ArgModRefC2))
|
||||
ArgMask = ModRefInfo::Mod;
|
||||
|
||||
// ModRefCS1 indicates what CS1 might do to CS2ArgLoc, and we use
|
||||
// ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use
|
||||
// above ArgMask to update dependence info.
|
||||
ModRefInfo ModRefCS1 = getModRefInfo(CS1, CS2ArgLoc);
|
||||
ArgMask = intersectModRef(ArgMask, ModRefCS1);
|
||||
ModRefInfo ModRefC1 = getModRefInfo(Call1, Call2ArgLoc);
|
||||
ArgMask = intersectModRef(ArgMask, ModRefC1);
|
||||
|
||||
// Conservatively clear IsMustAlias unless only MustAlias is found.
|
||||
IsMustAlias &= isMustSet(ModRefCS1);
|
||||
IsMustAlias &= isMustSet(ModRefC1);
|
||||
|
||||
R = intersectModRef(unionModRef(R, ArgMask), Result);
|
||||
if (R == Result) {
|
||||
@ -298,31 +300,32 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
|
||||
return IsMustAlias ? setMust(R) : clearMust(R);
|
||||
}
|
||||
|
||||
// If CS1 only accesses memory through arguments, check if CS2 references
|
||||
// any of the memory referenced by CS1's arguments. If not, return NoModRef.
|
||||
if (onlyAccessesArgPointees(CS1B)) {
|
||||
if (!doesAccessArgPointees(CS1B))
|
||||
// If Call1 only accesses memory through arguments, check if Call2 references
|
||||
// any of the memory referenced by Call1's arguments. If not, return NoModRef.
|
||||
if (onlyAccessesArgPointees(Call1B)) {
|
||||
if (!doesAccessArgPointees(Call1B))
|
||||
return ModRefInfo::NoModRef;
|
||||
ModRefInfo R = ModRefInfo::NoModRef;
|
||||
bool IsMustAlias = true;
|
||||
for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
|
||||
for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
|
||||
const Value *Arg = *I;
|
||||
if (!Arg->getType()->isPointerTy())
|
||||
continue;
|
||||
unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
|
||||
auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);
|
||||
unsigned Call1ArgIdx = std::distance(Call1->arg_begin(), I);
|
||||
auto Call1ArgLoc =
|
||||
MemoryLocation::getForArgument(Call1, Call1ArgIdx, TLI);
|
||||
|
||||
// ArgModRefCS1 indicates what CS1 might do to CS1ArgLoc; if CS1 might
|
||||
// Mod CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If
|
||||
// CS1 might Ref, then we care only about a Mod by CS2.
|
||||
ModRefInfo ArgModRefCS1 = getArgModRefInfo(CS1, CS1ArgIdx);
|
||||
ModRefInfo ModRefCS2 = getModRefInfo(CS2, CS1ArgLoc);
|
||||
if ((isModSet(ArgModRefCS1) && isModOrRefSet(ModRefCS2)) ||
|
||||
(isRefSet(ArgModRefCS1) && isModSet(ModRefCS2)))
|
||||
R = intersectModRef(unionModRef(R, ArgModRefCS1), Result);
|
||||
// ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1
|
||||
// might Mod Call1ArgLoc, then we care about either a Mod or a Ref by
|
||||
// Call2. If Call1 might Ref, then we care only about a Mod by Call2.
|
||||
ModRefInfo ArgModRefC1 = getArgModRefInfo(Call1, Call1ArgIdx);
|
||||
ModRefInfo ModRefC2 = getModRefInfo(Call2, Call1ArgLoc);
|
||||
if ((isModSet(ArgModRefC1) && isModOrRefSet(ModRefC2)) ||
|
||||
(isRefSet(ArgModRefC1) && isModSet(ModRefC2)))
|
||||
R = intersectModRef(unionModRef(R, ArgModRefC1), Result);
|
||||
|
||||
// Conservatively clear IsMustAlias unless only MustAlias is found.
|
||||
IsMustAlias &= isMustSet(ModRefCS2);
|
||||
IsMustAlias &= isMustSet(ModRefC2);
|
||||
|
||||
if (R == Result) {
|
||||
// On early exit, not all args were checked, cannot set Must.
|
||||
@ -342,11 +345,11 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
|
||||
return Result;
|
||||
}
|
||||
|
||||
FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) {
|
||||
FunctionModRefBehavior AAResults::getModRefBehavior(const CallBase *Call) {
|
||||
FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
|
||||
|
||||
for (const auto &AA : AAs) {
|
||||
Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS));
|
||||
Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(Call));
|
||||
|
||||
// Early-exit the moment we reach the bottom of the lattice.
|
||||
if (Result == FMRB_DoesNotAccessMemory)
|
||||
@ -558,8 +561,8 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
|
||||
isa<Constant>(Object))
|
||||
return ModRefInfo::ModRef;
|
||||
|
||||
ImmutableCallSite CS(I);
|
||||
if (!CS.getInstruction() || CS.getInstruction() == Object)
|
||||
const auto *Call = dyn_cast<CallBase>(I);
|
||||
if (!Call || Call == Object)
|
||||
return ModRefInfo::ModRef;
|
||||
|
||||
if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
|
||||
@ -572,14 +575,14 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
|
||||
ModRefInfo R = ModRefInfo::NoModRef;
|
||||
bool IsMustAlias = true;
|
||||
// Set flag only if no May found and all operands processed.
|
||||
for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
|
||||
for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
|
||||
CI != CE; ++CI, ++ArgNo) {
|
||||
// Only look at the no-capture or byval pointer arguments. If this
|
||||
// pointer were passed to arguments that were neither of these, then it
|
||||
// couldn't be no-capture.
|
||||
if (!(*CI)->getType()->isPointerTy() ||
|
||||
(!CS.doesNotCapture(ArgNo) &&
|
||||
ArgNo < CS.getNumArgOperands() && !CS.isByValArgument(ArgNo)))
|
||||
(!Call->doesNotCapture(ArgNo) && ArgNo < Call->getNumArgOperands() &&
|
||||
!Call->isByValArgument(ArgNo)))
|
||||
continue;
|
||||
|
||||
AliasResult AR = alias(MemoryLocation(*CI), MemoryLocation(Object));
|
||||
@ -591,9 +594,9 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
|
||||
IsMustAlias = false;
|
||||
if (AR == NoAlias)
|
||||
continue;
|
||||
if (CS.doesNotAccessMemory(ArgNo))
|
||||
if (Call->doesNotAccessMemory(ArgNo))
|
||||
continue;
|
||||
if (CS.onlyReadsMemory(ArgNo)) {
|
||||
if (Call->onlyReadsMemory(ArgNo)) {
|
||||
R = ModRefInfo::Ref;
|
||||
continue;
|
||||
}
|
||||
@ -775,8 +778,8 @@ AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
|
||||
}
|
||||
|
||||
bool llvm::isNoAliasCall(const Value *V) {
|
||||
if (auto CS = ImmutableCallSite(V))
|
||||
return CS.hasRetAttr(Attribute::NoAlias);
|
||||
if (const auto *Call = dyn_cast<CallBase>(V))
|
||||
return Call->hasRetAttr(Attribute::NoAlias);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -66,11 +66,10 @@ static inline void PrintModRefResults(const char *Msg, bool P, Instruction *I,
|
||||
}
|
||||
}
|
||||
|
||||
static inline void PrintModRefResults(const char *Msg, bool P, CallSite CSA,
|
||||
CallSite CSB, Module *M) {
|
||||
static inline void PrintModRefResults(const char *Msg, bool P, CallBase *CallA,
|
||||
CallBase *CallB, Module *M) {
|
||||
if (PrintAll || P) {
|
||||
errs() << " " << Msg << ": " << *CSA.getInstruction() << " <-> "
|
||||
<< *CSB.getInstruction() << '\n';
|
||||
errs() << " " << Msg << ": " << *CallA << " <-> " << *CallB << '\n';
|
||||
}
|
||||
}
|
||||
|
||||
@ -98,7 +97,7 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
|
||||
++FunctionCount;
|
||||
|
||||
SetVector<Value *> Pointers;
|
||||
SmallSetVector<CallSite, 16> CallSites;
|
||||
SmallSetVector<CallBase *, 16> Calls;
|
||||
SetVector<Value *> Loads;
|
||||
SetVector<Value *> Stores;
|
||||
|
||||
@ -114,16 +113,16 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
|
||||
if (EvalAAMD && isa<StoreInst>(&*I))
|
||||
Stores.insert(&*I);
|
||||
Instruction &Inst = *I;
|
||||
if (auto CS = CallSite(&Inst)) {
|
||||
Value *Callee = CS.getCalledValue();
|
||||
if (auto *Call = dyn_cast<CallBase>(&Inst)) {
|
||||
Value *Callee = Call->getCalledValue();
|
||||
// Skip actual functions for direct function calls.
|
||||
if (!isa<Function>(Callee) && isInterestingPointer(Callee))
|
||||
Pointers.insert(Callee);
|
||||
// Consider formals.
|
||||
for (Use &DataOp : CS.data_ops())
|
||||
for (Use &DataOp : Call->data_ops())
|
||||
if (isInterestingPointer(DataOp))
|
||||
Pointers.insert(DataOp);
|
||||
CallSites.insert(CS);
|
||||
Calls.insert(Call);
|
||||
} else {
|
||||
// Consider all operands.
|
||||
for (Instruction::op_iterator OI = Inst.op_begin(), OE = Inst.op_end();
|
||||
@ -136,7 +135,7 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
|
||||
if (PrintAll || PrintNoAlias || PrintMayAlias || PrintPartialAlias ||
|
||||
PrintMustAlias || PrintNoModRef || PrintMod || PrintRef || PrintModRef)
|
||||
errs() << "Function: " << F.getName() << ": " << Pointers.size()
|
||||
<< " pointers, " << CallSites.size() << " call sites\n";
|
||||
<< " pointers, " << Calls.size() << " call sites\n";
|
||||
|
||||
// iterate over the worklist, and run the full (n^2)/2 disambiguations
|
||||
for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
|
||||
@ -230,50 +229,48 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
|
||||
}
|
||||
|
||||
// Mod/ref alias analysis: compare all pairs of calls and values
|
||||
for (CallSite C : CallSites) {
|
||||
Instruction *I = C.getInstruction();
|
||||
|
||||
for (CallBase *Call : Calls) {
|
||||
for (auto Pointer : Pointers) {
|
||||
auto Size = LocationSize::unknown();
|
||||
Type *ElTy = cast<PointerType>(Pointer->getType())->getElementType();
|
||||
if (ElTy->isSized())
|
||||
Size = LocationSize::precise(DL.getTypeStoreSize(ElTy));
|
||||
|
||||
switch (AA.getModRefInfo(C, Pointer, Size)) {
|
||||
switch (AA.getModRefInfo(Call, Pointer, Size)) {
|
||||
case ModRefInfo::NoModRef:
|
||||
PrintModRefResults("NoModRef", PrintNoModRef, I, Pointer,
|
||||
PrintModRefResults("NoModRef", PrintNoModRef, Call, Pointer,
|
||||
F.getParent());
|
||||
++NoModRefCount;
|
||||
break;
|
||||
case ModRefInfo::Mod:
|
||||
PrintModRefResults("Just Mod", PrintMod, I, Pointer, F.getParent());
|
||||
PrintModRefResults("Just Mod", PrintMod, Call, Pointer, F.getParent());
|
||||
++ModCount;
|
||||
break;
|
||||
case ModRefInfo::Ref:
|
||||
PrintModRefResults("Just Ref", PrintRef, I, Pointer, F.getParent());
|
||||
PrintModRefResults("Just Ref", PrintRef, Call, Pointer, F.getParent());
|
||||
++RefCount;
|
||||
break;
|
||||
case ModRefInfo::ModRef:
|
||||
PrintModRefResults("Both ModRef", PrintModRef, I, Pointer,
|
||||
PrintModRefResults("Both ModRef", PrintModRef, Call, Pointer,
|
||||
F.getParent());
|
||||
++ModRefCount;
|
||||
break;
|
||||
case ModRefInfo::Must:
|
||||
PrintModRefResults("Must", PrintMust, I, Pointer, F.getParent());
|
||||
PrintModRefResults("Must", PrintMust, Call, Pointer, F.getParent());
|
||||
++MustCount;
|
||||
break;
|
||||
case ModRefInfo::MustMod:
|
||||
PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, I, Pointer,
|
||||
PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, Call, Pointer,
|
||||
F.getParent());
|
||||
++MustModCount;
|
||||
break;
|
||||
case ModRefInfo::MustRef:
|
||||
PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, I, Pointer,
|
||||
PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, Call, Pointer,
|
||||
F.getParent());
|
||||
++MustRefCount;
|
||||
break;
|
||||
case ModRefInfo::MustModRef:
|
||||
PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, I,
|
||||
PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, Call,
|
||||
Pointer, F.getParent());
|
||||
++MustModRefCount;
|
||||
break;
|
||||
@ -282,44 +279,46 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
|
||||
}
|
||||
|
||||
// Mod/ref alias analysis: compare all pairs of calls
|
||||
for (auto C = CallSites.begin(), Ce = CallSites.end(); C != Ce; ++C) {
|
||||
for (auto D = CallSites.begin(); D != Ce; ++D) {
|
||||
if (D == C)
|
||||
for (CallBase *CallA : Calls) {
|
||||
for (CallBase *CallB : Calls) {
|
||||
if (CallA == CallB)
|
||||
continue;
|
||||
switch (AA.getModRefInfo(*C, *D)) {
|
||||
switch (AA.getModRefInfo(CallA, CallB)) {
|
||||
case ModRefInfo::NoModRef:
|
||||
PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
|
||||
PrintModRefResults("NoModRef", PrintNoModRef, CallA, CallB,
|
||||
F.getParent());
|
||||
++NoModRefCount;
|
||||
break;
|
||||
case ModRefInfo::Mod:
|
||||
PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
|
||||
PrintModRefResults("Just Mod", PrintMod, CallA, CallB, F.getParent());
|
||||
++ModCount;
|
||||
break;
|
||||
case ModRefInfo::Ref:
|
||||
PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
|
||||
PrintModRefResults("Just Ref", PrintRef, CallA, CallB, F.getParent());
|
||||
++RefCount;
|
||||
break;
|
||||
case ModRefInfo::ModRef:
|
||||
PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
|
||||
PrintModRefResults("Both ModRef", PrintModRef, CallA, CallB,
|
||||
F.getParent());
|
||||
++ModRefCount;
|
||||
break;
|
||||
case ModRefInfo::Must:
|
||||
PrintModRefResults("Must", PrintMust, *C, *D, F.getParent());
|
||||
PrintModRefResults("Must", PrintMust, CallA, CallB, F.getParent());
|
||||
++MustCount;
|
||||
break;
|
||||
case ModRefInfo::MustMod:
|
||||
PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, *C, *D,
|
||||
PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, CallA, CallB,
|
||||
F.getParent());
|
||||
++MustModCount;
|
||||
break;
|
||||
case ModRefInfo::MustRef:
|
||||
PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, *C, *D,
|
||||
PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, CallA, CallB,
|
||||
F.getParent());
|
||||
++MustRefCount;
|
||||
break;
|
||||
case ModRefInfo::MustModRef:
|
||||
PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, *C, *D,
|
||||
F.getParent());
|
||||
PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, CallA,
|
||||
CallB, F.getParent());
|
||||
++MustModRefCount;
|
||||
break;
|
||||
}
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include "llvm/Analysis/GuardUtils.h"
|
||||
#include "llvm/Analysis/MemoryLocation.h"
|
||||
#include "llvm/Config/llvm-config.h"
|
||||
#include "llvm/IR/CallSite.h"
|
||||
#include "llvm/IR/Constants.h"
|
||||
#include "llvm/IR/DataLayout.h"
|
||||
#include "llvm/IR/Function.h"
|
||||
@ -236,7 +235,8 @@ bool AliasSet::aliasesUnknownInst(const Instruction *Inst,
|
||||
|
||||
for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i) {
|
||||
if (auto *UnknownInst = getUnknownInst(i)) {
|
||||
ImmutableCallSite C1(UnknownInst), C2(Inst);
|
||||
const auto *C1 = dyn_cast<CallBase>(UnknownInst);
|
||||
const auto *C2 = dyn_cast<CallBase>(Inst);
|
||||
if (!C1 || !C2 || isModOrRefSet(AA.getModRefInfo(C1, C2)) ||
|
||||
isModOrRefSet(AA.getModRefInfo(C2, C1)))
|
||||
return true;
|
||||
@ -446,44 +446,44 @@ void AliasSetTracker::add(Instruction *I) {
|
||||
return add(MTI);
|
||||
|
||||
// Handle all calls with known mod/ref sets genericall
|
||||
CallSite CS(I);
|
||||
if (CS && CS.onlyAccessesArgMemory()) {
|
||||
auto getAccessFromModRef = [](ModRefInfo MRI) {
|
||||
if (isRefSet(MRI) && isModSet(MRI))
|
||||
return AliasSet::ModRefAccess;
|
||||
else if (isModSet(MRI))
|
||||
return AliasSet::ModAccess;
|
||||
else if (isRefSet(MRI))
|
||||
return AliasSet::RefAccess;
|
||||
else
|
||||
return AliasSet::NoAccess;
|
||||
|
||||
};
|
||||
|
||||
ModRefInfo CallMask = createModRefInfo(AA.getModRefBehavior(CS));
|
||||
if (auto *Call = dyn_cast<CallBase>(I))
|
||||
if (Call->onlyAccessesArgMemory()) {
|
||||
auto getAccessFromModRef = [](ModRefInfo MRI) {
|
||||
if (isRefSet(MRI) && isModSet(MRI))
|
||||
return AliasSet::ModRefAccess;
|
||||
else if (isModSet(MRI))
|
||||
return AliasSet::ModAccess;
|
||||
else if (isRefSet(MRI))
|
||||
return AliasSet::RefAccess;
|
||||
else
|
||||
return AliasSet::NoAccess;
|
||||
};
|
||||
|
||||
// Some intrinsics are marked as modifying memory for control flow
|
||||
// modelling purposes, but don't actually modify any specific memory
|
||||
// location.
|
||||
using namespace PatternMatch;
|
||||
if (I->use_empty() && match(I, m_Intrinsic<Intrinsic::invariant_start>()))
|
||||
CallMask = clearMod(CallMask);
|
||||
ModRefInfo CallMask = createModRefInfo(AA.getModRefBehavior(Call));
|
||||
|
||||
for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
|
||||
const Value *Arg = *AI;
|
||||
if (!Arg->getType()->isPointerTy())
|
||||
continue;
|
||||
unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
|
||||
MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx,
|
||||
nullptr);
|
||||
ModRefInfo ArgMask = AA.getArgModRefInfo(CS, ArgIdx);
|
||||
ArgMask = intersectModRef(CallMask, ArgMask);
|
||||
if (!isNoModRef(ArgMask))
|
||||
addPointer(ArgLoc, getAccessFromModRef(ArgMask));
|
||||
// Some intrinsics are marked as modifying memory for control flow
|
||||
// modelling purposes, but don't actually modify any specific memory
|
||||
// location.
|
||||
using namespace PatternMatch;
|
||||
if (Call->use_empty() &&
|
||||
match(Call, m_Intrinsic<Intrinsic::invariant_start>()))
|
||||
CallMask = clearMod(CallMask);
|
||||
|
||||
for (auto IdxArgPair : enumerate(Call->args())) {
|
||||
int ArgIdx = IdxArgPair.index();
|
||||
const Value *Arg = IdxArgPair.value();
|
||||
if (!Arg->getType()->isPointerTy())
|
||||
continue;
|
||||
MemoryLocation ArgLoc =
|
||||
MemoryLocation::getForArgument(Call, ArgIdx, nullptr);
|
||||
ModRefInfo ArgMask = AA.getArgModRefInfo(Call, ArgIdx);
|
||||
ArgMask = intersectModRef(CallMask, ArgMask);
|
||||
if (!isNoModRef(ArgMask))
|
||||
addPointer(ArgLoc, getAccessFromModRef(ArgMask));
|
||||
}
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
return addUnknown(I);
|
||||
}
|
||||
|
||||
|
@ -31,7 +31,6 @@
|
||||
#include "llvm/Analysis/PhiValues.h"
|
||||
#include "llvm/IR/Argument.h"
|
||||
#include "llvm/IR/Attributes.h"
|
||||
#include "llvm/IR/CallSite.h"
|
||||
#include "llvm/IR/Constant.h"
|
||||
#include "llvm/IR/Constants.h"
|
||||
#include "llvm/IR/DataLayout.h"
|
||||
@ -144,7 +143,7 @@ static bool isNonEscapingLocalObject(const Value *V) {
|
||||
/// Returns true if the pointer is one which would have been considered an
|
||||
/// escape by isNonEscapingLocalObject.
|
||||
static bool isEscapeSource(const Value *V) {
|
||||
if (ImmutableCallSite(V))
|
||||
if (isa<CallBase>(V))
|
||||
return true;
|
||||
|
||||
if (isa<Argument>(V))
|
||||
@ -454,7 +453,7 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
|
||||
|
||||
const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
|
||||
if (!GEPOp) {
|
||||
if (auto CS = ImmutableCallSite(V)) {
|
||||
if (const auto *Call = dyn_cast<CallBase>(V)) {
|
||||
// CaptureTracking can know about special capturing properties of some
|
||||
// intrinsics like launder.invariant.group, that can't be expressed with
|
||||
// the attributes, but have properties like returning aliasing pointer.
|
||||
@ -464,7 +463,7 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
|
||||
// because it should be in sync with CaptureTracking. Not using it may
|
||||
// cause weird miscompilations where 2 aliasing pointers are assumed to
|
||||
// noalias.
|
||||
if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
|
||||
if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
|
||||
V = RP;
|
||||
continue;
|
||||
}
|
||||
@ -673,8 +672,8 @@ bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
|
||||
}
|
||||
|
||||
/// Returns the behavior when calling the given call site.
|
||||
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
|
||||
if (CS.doesNotAccessMemory())
|
||||
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
|
||||
if (Call->doesNotAccessMemory())
|
||||
// Can't do better than this.
|
||||
return FMRB_DoesNotAccessMemory;
|
||||
|
||||
@ -682,23 +681,23 @@ FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
|
||||
|
||||
// If the callsite knows it only reads memory, don't return worse
|
||||
// than that.
|
||||
if (CS.onlyReadsMemory())
|
||||
if (Call->onlyReadsMemory())
|
||||
Min = FMRB_OnlyReadsMemory;
|
||||
else if (CS.doesNotReadMemory())
|
||||
else if (Call->doesNotReadMemory())
|
||||
Min = FMRB_DoesNotReadMemory;
|
||||
|
||||
if (CS.onlyAccessesArgMemory())
|
||||
if (Call->onlyAccessesArgMemory())
|
||||
Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
|
||||
else if (CS.onlyAccessesInaccessibleMemory())
|
||||
else if (Call->onlyAccessesInaccessibleMemory())
|
||||
Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
|
||||
else if (CS.onlyAccessesInaccessibleMemOrArgMem())
|
||||
else if (Call->onlyAccessesInaccessibleMemOrArgMem())
|
||||
Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
|
||||
|
||||
// If CS has operand bundles then aliasing attributes from the function it
|
||||
// calls do not directly apply to the CallSite. This can be made more
|
||||
// precise in the future.
|
||||
if (!CS.hasOperandBundles())
|
||||
if (const Function *F = CS.getCalledFunction())
|
||||
// If the call has operand bundles then aliasing attributes from the function
|
||||
// it calls do not directly apply to the call. This can be made more precise
|
||||
// in the future.
|
||||
if (!Call->hasOperandBundles())
|
||||
if (const Function *F = Call->getCalledFunction())
|
||||
Min =
|
||||
FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
|
||||
|
||||
@ -731,9 +730,9 @@ FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
|
||||
}
|
||||
|
||||
/// Returns true if this is a writeonly (i.e Mod only) parameter.
|
||||
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
|
||||
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
|
||||
const TargetLibraryInfo &TLI) {
|
||||
if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
|
||||
if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
|
||||
return true;
|
||||
|
||||
// We can bound the aliasing properties of memset_pattern16 just as we can
|
||||
@ -743,7 +742,8 @@ static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
|
||||
// FIXME Consider handling this in InferFunctionAttr.cpp together with other
|
||||
// attributes.
|
||||
LibFunc F;
|
||||
if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
|
||||
if (Call->getCalledFunction() &&
|
||||
TLI.getLibFunc(*Call->getCalledFunction(), F) &&
|
||||
F == LibFunc_memset_pattern16 && TLI.has(F))
|
||||
if (ArgIdx == 0)
|
||||
return true;
|
||||
@ -755,23 +755,23 @@ static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
|
||||
return false;
|
||||
}
|
||||
|
||||
ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
|
||||
ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
|
||||
unsigned ArgIdx) {
|
||||
// Checking for known builtin intrinsics and target library functions.
|
||||
if (isWriteOnlyParam(CS, ArgIdx, TLI))
|
||||
if (isWriteOnlyParam(Call, ArgIdx, TLI))
|
||||
return ModRefInfo::Mod;
|
||||
|
||||
if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
|
||||
if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
|
||||
return ModRefInfo::Ref;
|
||||
|
||||
if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
|
||||
if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
|
||||
return ModRefInfo::NoModRef;
|
||||
|
||||
return AAResultBase::getArgModRefInfo(CS, ArgIdx);
|
||||
return AAResultBase::getArgModRefInfo(Call, ArgIdx);
|
||||
}
|
||||
|
||||
static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
|
||||
const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
|
||||
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
|
||||
const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
|
||||
return II && II->getIntrinsicID() == IID;
|
||||
}
|
||||
|
||||
@ -827,9 +827,9 @@ AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
@ -840,7 +840,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;
@ -848,13 +848,13 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(CS, Intrinsic::stackrestore))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that call doesn't touch Object and check this
@ -863,19 +863,20 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
@ -889,12 +890,12 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case if there are more aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
@ -918,17 +919,16 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
    }
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // If the call is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
    if (getBestAAResults().alias(MemoryLocation(Call), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

@ -936,7 +936,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
@ -960,7 +960,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
@ -970,7 +970,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
@ -996,20 +996,20 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, lets use them.
  return AAResultBase::getModRefInfo(CS, Loc);
  return AAResultBase::getModRefInfo(Call, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
@ -1023,18 +1023,18 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
  // NB! This function is *not* commutative, so we specical case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, lets use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
  return AAResultBase::getModRefInfo(Call1, Call2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,

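For passes that consume these results, the query side changes in the same way: hand the `CallBase *` straight to AAResults. A hedged sketch of the two common query shapes confirmed by the hunks above (call vs. location, and call vs. call):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Sketch: can the call write Loc?
static bool callMayWrite(AAResults &AA, const CallBase *Call,
                         const MemoryLocation &Loc) {
  return isModSet(AA.getModRefInfo(Call, Loc));
}

// Sketch: can two calls interfere with each other at all?
static bool callsMayInterfere(AAResults &AA, const CallBase *Call1,
                              const CallBase *Call2) {
  return isModOrRefSet(AA.getModRefInfo(Call1, Call2));
}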
@ -23,7 +23,6 @@
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
@ -240,11 +239,12 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
    switch (I->getOpcode()) {
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(I);
      auto *Call = cast<CallBase>(I);
      // Not captured if the callee is readonly, doesn't return a copy through
      // its return value and doesn't unwind (a readonly function can leak bits
      // by throwing an exception or not depending on the input value).
      if (CS.onlyReadsMemory() && CS.doesNotThrow() && I->getType()->isVoidTy())
      if (Call->onlyReadsMemory() && Call->doesNotThrow() &&
          Call->getType()->isVoidTy())
        break;

      // The pointer is not captured if returned pointer is not captured.
@ -252,14 +252,14 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
      // marked with nocapture do not capture. This means that places like
      // GetUnderlyingObject in ValueTracking or DecomposeGEPExpression
      // in BasicAA also need to know about this property.
      if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS)) {
        AddUses(I);
      if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call)) {
        AddUses(Call);
        break;
      }

      // Volatile operations effectively capture the memory location that they
      // load and store to.
      if (auto *MI = dyn_cast<MemIntrinsic>(I))
      if (auto *MI = dyn_cast<MemIntrinsic>(Call))
        if (MI->isVolatile())
          if (Tracker->captured(U))
            return;
@ -271,13 +271,14 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
      // that loading a value from a pointer does not cause the pointer to be
      // captured, even though the loaded value might be the pointer itself
      // (think of self-referential objects).
      CallSite::data_operand_iterator B =
          CS.data_operands_begin(), E = CS.data_operands_end();
      for (CallSite::data_operand_iterator A = B; A != E; ++A)
        if (A->get() == V && !CS.doesNotCapture(A - B))
      for (auto IdxOpPair : enumerate(Call->data_ops())) {
        int Idx = IdxOpPair.index();
        Value *A = IdxOpPair.value();
        if (A == V && !Call->doesNotCapture(Idx))
          // The parameter is not marked 'nocapture' - captured.
          if (Tracker->captured(U))
            return;
      }
      break;
    }
    case Instruction::Load:

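The `enumerate(Call->data_ops())` loop in that hunk is the general replacement for the old iterator arithmetic. A hedged, self-contained sketch of the same check written against `CallBase` (the helper name is illustrative):

#include "llvm/ADT/STLExtras.h"   // llvm::enumerate
#include "llvm/IR/InstrTypes.h"   // llvm::CallBase

using namespace llvm;

// Sketch: does Call pass V through a data operand that is allowed to capture it?
static bool mayCaptureThroughOperand(const CallBase *Call, const Value *V) {
  for (auto IdxOpPair : enumerate(Call->data_ops())) {
    unsigned Idx = IdxOpPair.index();
    const Value *Op = IdxOpPair.value();
    // A use that is not marked 'nocapture' is treated as a potential capture.
    if (Op == V && !Call->doesNotCapture(Idx))
      return true;
  }
  return false;
}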
@ -255,11 +255,11 @@ FunctionModRefBehavior GlobalsAAResult::getModRefBehavior(const Function *F) {
}

FunctionModRefBehavior
GlobalsAAResult::getModRefBehavior(ImmutableCallSite CS) {
GlobalsAAResult::getModRefBehavior(const CallBase *Call) {
  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      if (FunctionInfo *FI = getFunctionInfo(F)) {
        if (!isModOrRefSet(FI->getModRefInfo()))
          Min = FMRB_DoesNotAccessMemory;
@ -267,7 +267,7 @@ GlobalsAAResult::getModRefBehavior(ImmutableCallSite CS) {
          Min = FMRB_OnlyReadsMemory;
      }

  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(CS) & Min);
  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(Call) & Min);
}

/// Returns the function info for the function, or null if we don't have
@ -366,14 +366,14 @@ bool GlobalsAAResult::AnalyzeUsesOfPointer(Value *V,
    } else if (Operator::getOpcode(I) == Instruction::BitCast) {
      if (AnalyzeUsesOfPointer(I, Readers, Writers, OkayStoreDest))
        return true;
    } else if (auto CS = CallSite(I)) {
    } else if (auto *Call = dyn_cast<CallBase>(I)) {
      // Make sure that this is just the function being called, not that it is
      // passing into the function.
      if (CS.isDataOperand(&U)) {
      if (Call->isDataOperand(&U)) {
        // Detect calls to free.
        if (CS.isArgOperand(&U) && isFreeCall(I, &TLI)) {
        if (Call->isArgOperand(&U) && isFreeCall(I, &TLI)) {
          if (Writers)
            Writers->insert(CS->getParent()->getParent());
            Writers->insert(Call->getParent()->getParent());
        } else {
          return true; // Argument of an unknown call.
        }
@ -576,15 +576,15 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {

        // We handle calls specially because the graph-relevant aspects are
        // handled above.
        if (auto CS = CallSite(&I)) {
          if (isAllocationFn(&I, &TLI) || isFreeCall(&I, &TLI)) {
        if (auto *Call = dyn_cast<CallBase>(&I)) {
          if (isAllocationFn(Call, &TLI) || isFreeCall(Call, &TLI)) {
            // FIXME: It is completely unclear why this is necessary and not
            // handled by the above graph code.
            FI.addModRefInfo(ModRefInfo::ModRef);
          } else if (Function *Callee = CS.getCalledFunction()) {
          } else if (Function *Callee = Call->getCalledFunction()) {
            // The callgraph doesn't include intrinsic calls.
            if (Callee->isIntrinsic()) {
              if (isa<DbgInfoIntrinsic>(I))
              if (isa<DbgInfoIntrinsic>(Call))
                // Don't let dbg intrinsics affect alias info.
                continue;

@ -885,16 +885,16 @@ AliasResult GlobalsAAResult::alias(const MemoryLocation &LocA,
  return AAResultBase::alias(LocA, LocB);
}

ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
ModRefInfo GlobalsAAResult::getModRefInfoForArgument(const CallBase *Call,
                                                     const GlobalValue *GV) {
  if (CS.doesNotAccessMemory())
  if (Call->doesNotAccessMemory())
    return ModRefInfo::NoModRef;
  ModRefInfo ConservativeResult =
      CS.onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef;
      Call->onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef;

  // Iterate through all the arguments to the called function. If any argument
  // is based on GV, return the conservative result.
  for (auto &A : CS.args()) {
  for (auto &A : Call->args()) {
    SmallVector<Value*, 4> Objects;
    GetUnderlyingObjects(A, Objects, DL);

@ -914,7 +914,7 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
  return ModRefInfo::NoModRef;
}

ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
ModRefInfo GlobalsAAResult::getModRefInfo(const CallBase *Call,
                                          const MemoryLocation &Loc) {
  ModRefInfo Known = ModRefInfo::ModRef;

@ -923,15 +923,15 @@ ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
  if (const GlobalValue *GV =
          dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr, DL)))
    if (GV->hasLocalLinkage())
      if (const Function *F = CS.getCalledFunction())
      if (const Function *F = Call->getCalledFunction())
        if (NonAddressTakenGlobals.count(GV))
          if (const FunctionInfo *FI = getFunctionInfo(F))
            Known = unionModRef(FI->getModRefInfoForGlobal(*GV),
                                getModRefInfoForArgument(CS, GV));
                                getModRefInfoForArgument(Call, GV));

  if (!isModOrRefSet(Known))
    return ModRefInfo::NoModRef; // No need to query other mod/ref analyses
  return intersectModRef(Known, AAResultBase::getModRefInfo(CS, Loc));
  return intersectModRef(Known, AAResultBase::getModRefInfo(Call, Loc));
}

GlobalsAAResult::GlobalsAAResult(const DataLayout &DL,

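The `& Min` line works because `FunctionModRefBehavior` is a bitmask: intersecting the generic answer with a more restrictive bound can only clear mod/ref bits, never add them. A hedged sketch of that refinement step in isolation, assuming `Generic` came from a base-class query and `Min` from analysis-specific knowledge:

#include "llvm/Analysis/AliasAnalysis.h"

using namespace llvm;

// Sketch: keep only the behaviors both sources allow. For example,
// FMRB_UnknownModRefBehavior & FMRB_OnlyReadsMemory yields FMRB_OnlyReadsMemory.
static FunctionModRefBehavior refine(FunctionModRefBehavior Generic,
                                     FunctionModRefBehavior Min) {
  return FunctionModRefBehavior(Generic & Min);
}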
@ -107,8 +107,8 @@ static bool isDereferenceableAndAlignedPointer(
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                               DL, CtxI, DT, Visited);

  if (auto CS = ImmutableCallSite(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(CS))
  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                 Visited);

@ -13,7 +13,6 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/ErrorHandling.h"
@ -106,9 +105,9 @@ bool MemDepPrinter::runOnFunction(Function &F) {
      if (!Res.isNonLocal()) {
        Deps[Inst].insert(std::make_pair(getInstTypePair(Res),
                                         static_cast<BasicBlock *>(nullptr)));
      } else if (auto CS = CallSite(Inst)) {
      } else if (auto *Call = dyn_cast<CallBase>(Inst)) {
        const MemoryDependenceResults::NonLocalDepInfo &NLDI =
            MDA.getNonLocalCallDependency(CS);
            MDA.getNonLocalCallDependency(Call);

        DepSet &InstDeps = Deps[Inst];
        for (const NonLocalDepEntry &I : NLDI) {

@ -31,7 +31,6 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
@ -182,8 +181,8 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
}

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom(
    CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
MemDepResult MemoryDependenceResults::getCallDependencyFrom(
    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

@ -205,21 +204,21 @@ MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom(
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (isModOrRefSet(AA.getModRefInfo(CS, Loc)))
      if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto InstCS = CallSite(Inst)) {
    if (auto *CallB = dyn_cast<CallBase>(Inst)) {
      // If these two calls do not interfere, look past it.
      if (isNoModRef(AA.getModRefInfo(CS, InstCS))) {
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
      if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
        // If the two calls are the same, return Inst as a Def, so that
        // Call can be found redundant and eliminated.
        if (isReadOnlyCall && !isModSet(MR) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
            Call->isIdenticalToWhenDefined(CallB))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // Otherwise if the two calls don't interact (e.g. CallB is readnone)
        // keep scanning.
        continue;
      } else
@ -750,11 +749,10 @@ MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {

    LocalCache = getPointerDependencyFrom(
        MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
  } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
    CallSite QueryCS(QueryInst);
    bool isReadOnly = AA.onlyReadsMemory(QueryCS);
    LocalCache = getCallSiteDependencyFrom(
        QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
  } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
    bool isReadOnly = AA.onlyReadsMemory(QueryCall);
    LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
                                       ScanPos->getIterator(), QueryParent);
  } else
    // Non-memory instruction.
    LocalCache = MemDepResult::getUnknown();
@ -780,11 +778,11 @@ static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
  assert(getDependency(QueryCall).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCall];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached case,
@ -814,14 +812,14 @@ MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
    // << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    BasicBlock *QueryBB = QueryCall->getParent();
    for (BasicBlock *Pred : PredCache.get(QueryBB))
      DirtyBlocks.push_back(Pred);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCS);
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);

  SmallPtrSet<BasicBlock *, 32> Visited;

@ -865,8 +863,8 @@ MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
        RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
                                            QueryCall);
      }
    }

@ -874,8 +872,7 @@ MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep =
          getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
      Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found. If this is the entry block of the function, it is
      // a clobber, otherwise it is unknown.
@ -897,7 +894,7 @@ MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
        ReverseNonLocalDeps[Inst].insert(QueryCall);
    } else {

      // If the block *is* completely transparent to the load, we need to check

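Callers of MemoryDependenceResults now hand the call over as a `CallBase *` directly. A hedged sketch of a client-side query, with the surrounding pass boilerplate omitted and `MDA` assumed to be a `MemoryDependenceResults &` obtained from the pass manager:

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/InstrTypes.h"

// Sketch: find what a call depends on, locally and then across blocks.
static void inspectCallDeps(llvm::Instruction &I,
                            llvm::MemoryDependenceResults &MDA) {
  auto *Call = llvm::dyn_cast<llvm::CallBase>(&I);
  if (!Call)
    return;

  llvm::MemDepResult Local = MDA.getDependency(Call);
  if (!Local.isNonLocal())
    return; // the interesting dependency is in the same block

  // The non-local query takes the CallBase directly now.
  for (const llvm::NonLocalDepEntry &Entry : MDA.getNonLocalCallDependency(Call))
    if (llvm::Instruction *DepInst = Entry.getResult().getInst())
      (void)DepInst; // inspect or record the dependency here
}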
@ -125,15 +125,15 @@ MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
  return MemoryLocation(MI->getRawDest(), Size, AATags);
}

MemoryLocation MemoryLocation::getForArgument(ImmutableCallSite CS,
MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
                                              unsigned ArgIdx,
                                              const TargetLibraryInfo *TLI) {
  AAMDNodes AATags;
  CS->getAAMetadata(AATags);
  const Value *Arg = CS.getArgument(ArgIdx);
  Call->getAAMetadata(AATags);
  const Value *Arg = Call->getArgOperand(ArgIdx);

  // We may be able to produce an exact size for known intrinsics.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call)) {
    const DataLayout &DL = II->getModule()->getDataLayout();

    switch (II->getIntrinsicID()) {
@ -193,19 +193,20 @@ MemoryLocation MemoryLocation::getForArgument(ImmutableCallSite CS,
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  LibFunc F;
  if (TLI && CS.getCalledFunction() &&
      TLI->getLibFunc(*CS.getCalledFunction(), F) &&
  if (TLI && Call->getCalledFunction() &&
      TLI->getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI->has(F)) {
    assert((ArgIdx == 0 || ArgIdx == 1) &&
           "Invalid argument index for memset_pattern16");
    if (ArgIdx == 1)
      return MemoryLocation(Arg, LocationSize::precise(16), AATags);
    if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
    if (const ConstantInt *LenCI =
            dyn_cast<ConstantInt>(Call->getArgOperand(2)))
      return MemoryLocation(Arg, LocationSize::precise(LenCI->getZExtValue()),
                            AATags);
  }
  // FIXME: Handle memset_pattern4 and memset_pattern8 also.

  return MemoryLocation(CS.getArgument(ArgIdx), LocationSize::unknown(),
  return MemoryLocation(Call->getArgOperand(ArgIdx), LocationSize::unknown(),
                        AATags);
}

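Client code that wants the location touched through one argument of a call now passes the `CallBase` directly. A hedged sketch that builds such a location and then asks alias analysis whether another instruction can write it (the `AA` reference, instruction, and `TLI` pointer come from the surrounding pass):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Sketch: does OtherInst write the memory reached through Call's ArgIdx-th
// argument?
static bool mayWriteCallArg(AAResults &AA, const Instruction *OtherInst,
                            const CallBase *Call, unsigned ArgIdx,
                            const TargetLibraryInfo *TLI) {
  MemoryLocation ArgLoc = MemoryLocation::getForArgument(Call, ArgIdx, TLI);
  return isModSet(AA.getModRefInfo(OtherInst, ArgLoc));
}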
@ -30,7 +30,6 @@
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
@ -131,9 +130,9 @@ public:
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (ImmutableCallSite(Inst)) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      CS = ImmutableCallSite(Inst);
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memorylocation for a fence inst, and it is
@ -145,9 +144,9 @@ public:

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  ImmutableCallSite getCS() const {
  const CallBase *getCall() const {
    assert(IsCall);
    return CS;
    return Call;
  }

  MemoryLocation getLoc() const {
@ -162,16 +161,17 @@ public:
    if (!IsCall)
      return Loc == Other.Loc;

    if (CS.getCalledValue() != Other.CS.getCalledValue())
    if (Call->getCalledValue() != Other.Call->getCalledValue())
      return false;

    return CS.arg_size() == Other.CS.arg_size() &&
           std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin());
    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    ImmutableCallSite CS;
    const CallBase *Call;
    MemoryLocation Loc;
  };
};
@ -197,9 +197,9 @@ template <> struct DenseMapInfo<MemoryLocOrCall> {

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCS().getCalledValue()));
                                      MLOC.getCall()->getCalledValue()));

    for (const Value *Arg : MLOC.getCS().args())
    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }
@ -258,7 +258,7 @@ static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
                                             AliasAnalysis &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  ImmutableCallSite UseCS(UseInst);
  const auto *UseCall = dyn_cast<CallBase>(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
@ -271,7 +271,7 @@ static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCS)
      if (UseCall)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
@ -285,8 +285,8 @@ static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
    }
  }

  if (UseCS) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
  if (UseCall) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }
@ -336,7 +336,7 @@ struct UpwardsMemoryQuery {
  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
@ -2162,7 +2162,7 @@ MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(

  // Conservatively, fences are always clobbers, so don't perform the walk if we
  // hit a fence.
  if (!ImmutableCallSite(I) && I->isFenceLike())
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
@ -2202,7 +2202,7 @@ MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  // We can't sanely do anything with a fence, since they conservatively clobber
  // all memory, and have no locations to get pointers from to try to
  // disambiguate.
  if (!ImmutableCallSite(I) && I->isFenceLike())
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

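The DenseMapInfo hunk above hashes a call by its callee plus its argument values; roughly the same keying can be reproduced for any `const CallBase *`. A hedged sketch, with hash quality and collision handling left to DenseMap itself:

#include "llvm/ADT/Hashing.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Sketch: combine the callee and every argument value into one hash value.
static hash_code hashCallKey(const CallBase *Call) {
  hash_code H = hash_combine(Call->getCalledValue());
  for (const Value *Arg : Call->args())
    H = hash_combine(H, Arg);
  return H;
}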
@ -106,12 +106,12 @@ FunctionModRefBehavior ObjCARCAAResult::getModRefBehavior(const Function *F) {
  return AAResultBase::getModRefBehavior(F);
}

ModRefInfo ObjCARCAAResult::getModRefInfo(ImmutableCallSite CS,
ModRefInfo ObjCARCAAResult::getModRefInfo(const CallBase *Call,
                                          const MemoryLocation &Loc) {
  if (!EnableARCOpts)
    return AAResultBase::getModRefInfo(CS, Loc);
    return AAResultBase::getModRefInfo(Call, Loc);

  switch (GetBasicARCInstKind(CS.getInstruction())) {
  switch (GetBasicARCInstKind(Call)) {
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV:
  case ARCInstKind::Autorelease:
@ -128,7 +128,7 @@ ModRefInfo ObjCARCAAResult::getModRefInfo(ImmutableCallSite CS,
    break;
  }

  return AAResultBase::getModRefInfo(CS, Loc);
  return AAResultBase::getModRefInfo(Call, Loc);
}

ObjCARCAAResult ObjCARCAA::run(Function &F, FunctionAnalysisManager &AM) {

@ -95,39 +95,36 @@ AliasResult ScopedNoAliasAAResult::alias(const MemoryLocation &LocA,
  return AAResultBase::alias(LocA, LocB);
}

ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS,
ModRefInfo ScopedNoAliasAAResult::getModRefInfo(const CallBase *Call,
                                                const MemoryLocation &Loc) {
  if (!EnableScopedNoAlias)
    return AAResultBase::getModRefInfo(CS, Loc);
    return AAResultBase::getModRefInfo(Call, Loc);

  if (!mayAliasInScopes(Loc.AATags.Scope, CS.getInstruction()->getMetadata(
                                              LLVMContext::MD_noalias)))
  if (!mayAliasInScopes(Loc.AATags.Scope,
                        Call->getMetadata(LLVMContext::MD_noalias)))
    return ModRefInfo::NoModRef;

  if (!mayAliasInScopes(
          CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
          Loc.AATags.NoAlias))
  if (!mayAliasInScopes(Call->getMetadata(LLVMContext::MD_alias_scope),
                        Loc.AATags.NoAlias))
    return ModRefInfo::NoModRef;

  return AAResultBase::getModRefInfo(CS, Loc);
  return AAResultBase::getModRefInfo(Call, Loc);
}

ModRefInfo ScopedNoAliasAAResult::getModRefInfo(ImmutableCallSite CS1,
                                                ImmutableCallSite CS2) {
ModRefInfo ScopedNoAliasAAResult::getModRefInfo(const CallBase *Call1,
                                                const CallBase *Call2) {
  if (!EnableScopedNoAlias)
    return AAResultBase::getModRefInfo(CS1, CS2);
    return AAResultBase::getModRefInfo(Call1, Call2);

  if (!mayAliasInScopes(
          CS1.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
          CS2.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
  if (!mayAliasInScopes(Call1->getMetadata(LLVMContext::MD_alias_scope),
                        Call2->getMetadata(LLVMContext::MD_noalias)))
    return ModRefInfo::NoModRef;

  if (!mayAliasInScopes(
          CS2.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
          CS1.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
  if (!mayAliasInScopes(Call2->getMetadata(LLVMContext::MD_alias_scope),
                        Call1->getMetadata(LLVMContext::MD_noalias)))
    return ModRefInfo::NoModRef;

  return AAResultBase::getModRefInfo(CS1, CS2);
  return AAResultBase::getModRefInfo(Call1, Call2);
}

static void collectMDInDomain(const MDNode *List, const MDNode *Domain,

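Because `CallBase` is an `Instruction`, metadata lookups that used to go through `getInstruction()` now read straight off the call. A small hedged sketch of fetching both scope lists for a pair of calls, as the scoped-noalias hunks above do before comparing them:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Sketch: pull the !alias.scope and !noalias lists off two calls. A real
// query would compare Scopes1 against NoAlias2 (and the symmetric pair)
// before falling back to the generic answer.
static void collectScopeMetadata(const CallBase *Call1, const CallBase *Call2) {
  const MDNode *Scopes1 = Call1->getMetadata(LLVMContext::MD_alias_scope);
  const MDNode *NoAlias2 = Call2->getMetadata(LLVMContext::MD_noalias);
  (void)Scopes1;
  (void)NoAlias2;
}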
@ -399,20 +399,20 @@ bool TypeBasedAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
}

FunctionModRefBehavior
TypeBasedAAResult::getModRefBehavior(ImmutableCallSite CS) {
TypeBasedAAResult::getModRefBehavior(const CallBase *Call) {
  if (!EnableTBAA)
    return AAResultBase::getModRefBehavior(CS);
    return AAResultBase::getModRefBehavior(Call);

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If this is an "immutable" type, we can assume the call doesn't write
  // to memory.
  if (const MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
  if (const MDNode *M = Call->getMetadata(LLVMContext::MD_tbaa))
    if ((!isStructPathTBAA(M) && TBAANode(M).isTypeImmutable()) ||
        (isStructPathTBAA(M) && TBAAStructTagNode(M).isTypeImmutable()))
      Min = FMRB_OnlyReadsMemory;

  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(CS) & Min);
  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(Call) & Min);
}

FunctionModRefBehavior TypeBasedAAResult::getModRefBehavior(const Function *F) {
@ -420,33 +420,30 @@ FunctionModRefBehavior TypeBasedAAResult::getModRefBehavior(const Function *F) {
  return AAResultBase::getModRefBehavior(F);
}

ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS,
ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call,
                                            const MemoryLocation &Loc) {
  if (!EnableTBAA)
    return AAResultBase::getModRefInfo(CS, Loc);
    return AAResultBase::getModRefInfo(Call, Loc);

  if (const MDNode *L = Loc.AATags.TBAA)
    if (const MDNode *M =
            CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
    if (const MDNode *M = Call->getMetadata(LLVMContext::MD_tbaa))
      if (!Aliases(L, M))
        return ModRefInfo::NoModRef;

  return AAResultBase::getModRefInfo(CS, Loc);
  return AAResultBase::getModRefInfo(Call, Loc);
}

ModRefInfo TypeBasedAAResult::getModRefInfo(ImmutableCallSite CS1,
                                            ImmutableCallSite CS2) {
ModRefInfo TypeBasedAAResult::getModRefInfo(const CallBase *Call1,
                                            const CallBase *Call2) {
  if (!EnableTBAA)
    return AAResultBase::getModRefInfo(CS1, CS2);
    return AAResultBase::getModRefInfo(Call1, Call2);

  if (const MDNode *M1 =
          CS1.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
    if (const MDNode *M2 =
            CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa))
  if (const MDNode *M1 = Call1->getMetadata(LLVMContext::MD_tbaa))
    if (const MDNode *M2 = Call2->getMetadata(LLVMContext::MD_tbaa))
      if (!Aliases(M1, M2))
        return ModRefInfo::NoModRef;

  return AAResultBase::getModRefInfo(CS1, CS2);
  return AAResultBase::getModRefInfo(Call1, Call2);
}

bool MDNode::isTBAAVtableAccess() const {

@ -2023,10 +2023,10 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
      if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
        return true;

    if (auto CS = ImmutableCallSite(V)) {
      if (CS.isReturnNonNull())
    if (const auto *Call = dyn_cast<CallBase>(V)) {
      if (Call->isReturnNonNull())
        return true;
      if (const auto *RP = getArgumentAliasingToReturnedPointer(CS))
      if (const auto *RP = getArgumentAliasingToReturnedPointer(Call))
        return isKnownNonZero(RP, Depth, Q);
    }
  }
@ -3624,21 +3624,21 @@ uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
  return Len == ~0ULL ? 1 : Len;
}

const Value *llvm::getArgumentAliasingToReturnedPointer(ImmutableCallSite CS) {
  assert(CS &&
         "getArgumentAliasingToReturnedPointer only works on nonnull CallSite");
  if (const Value *RV = CS.getReturnedArgOperand())
const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
  assert(Call &&
         "getArgumentAliasingToReturnedPointer only works on nonnull calls");
  if (const Value *RV = Call->getReturnedArgOperand())
    return RV;
  // This can be used only as a aliasing property.
  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS))
    return CS.getArgOperand(0);
  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
    return Call->getArgOperand(0);
  return nullptr;
}

bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    ImmutableCallSite CS) {
  return CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
         CS.getIntrinsicID() == Intrinsic::strip_invariant_group;
    const CallBase *Call) {
  return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
         Call->getIntrinsicID() == Intrinsic::strip_invariant_group;
}

/// \p PN defines a loop-variant pointer to an object. Check if the
@ -3686,7 +3686,7 @@ Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
      // An alloca can't be further simplified.
      return V;
    } else {
      if (auto CS = CallSite(V)) {
      if (auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed with
        // the attributes, but have properties like returning aliasing pointer.
@ -3696,7 +3696,7 @@ Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
        // because it should be in sync with CaptureTracking. Not using it may
        // cause weird miscompilations where 2 aliasing pointers are assumed to
        // noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
          V = RP;
          continue;
        }

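Out-of-tree pointer-stripping loops follow the same shape as the GetUnderlyingObject hunk above. A hedged sketch that walks through calls which merely forward one of their pointer arguments (bounded by a small step limit so it always terminates; the limit and helper name are illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Sketch: step through calls with a 'returned' argument or
// llvm.launder/strip.invariant.group and return the forwarded pointer.
static const Value *stripForwardingCalls(const Value *V, unsigned MaxSteps = 6) {
  for (unsigned I = 0; I < MaxSteps; ++I) {
    const auto *Call = dyn_cast<CallBase>(V);
    if (!Call)
      break;
    const Value *Forwarded = getArgumentAliasingToReturnedPointer(Call);
    if (!Forwarded)
      break;
    V = Forwarded;
  }
  return V;
}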
@ -130,16 +130,15 @@ static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,

    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
    CallSite CS(cast<Value>(I));
    if (CS) {
    if (auto *Call = dyn_cast<CallBase>(I)) {
      // Ignore calls to functions in the same SCC, as long as the call sites
      // don't have operand bundles. Calls with operand bundles are allowed to
      // have memory effects not described by the memory effects of the call
      // target.
      if (!CS.hasOperandBundles() && CS.getCalledFunction() &&
          SCCNodes.count(CS.getCalledFunction()))
      if (!Call->hasOperandBundles() && Call->getCalledFunction() &&
          SCCNodes.count(Call->getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(Call);
      ModRefInfo MRI = createModRefInfo(MRB);

      // If the call doesn't access memory, we're done.
@ -158,7 +157,7 @@ static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
      for (CallSite::arg_iterator CI = Call->arg_begin(), CE = Call->arg_end();
           CI != CE; ++CI) {
        Value *Arg = *CI;
        if (!Arg->getType()->isPtrOrPtrVectorTy())

@ -45,18 +45,15 @@ bool llvm::objcarc::CanAlterRefCount(const Instruction *Inst, const Value *Ptr,
    default: break;
  }

  ImmutableCallSite CS(Inst);
  assert(CS && "Only calls can alter reference counts!");
  const auto *Call = cast<CallBase>(Inst);

  // See if AliasAnalysis can help us with the call.
  FunctionModRefBehavior MRB = PA.getAA()->getModRefBehavior(CS);
  FunctionModRefBehavior MRB = PA.getAA()->getModRefBehavior(Call);
  if (AliasAnalysis::onlyReadsMemory(MRB))
    return false;
  if (AliasAnalysis::onlyAccessesArgPointees(MRB)) {
    const DataLayout &DL = Inst->getModule()->getDataLayout();
    for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
         I != E; ++I) {
      const Value *Op = *I;
    for (const Value *Op : Call->args()) {
      if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&
          PA.related(Ptr, Op, DL))
        return true;

@ -834,7 +834,7 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
      continue;
    }

    if (auto CS = CallSite(&*BBI)) {
    if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
@ -842,15 +842,15 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
      if (AA->doesNotAccessMemory(Call))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI,
                                          BB.getParent())));
        return isRefSet(AA->getModRefInfo(
            Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
      });

      // If all of the allocas were clobbered by the call then we're not going

@ -437,7 +437,7 @@ uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(CallSite(C));
        MD->getNonLocalCallDependency(C);
    // FIXME: Move the checking logic to MemDep!
    CallInst* cdep = nullptr;

@ -360,10 +360,11 @@ bool LoopVersioningLICM::legalLoopMemoryAccesses() {
bool LoopVersioningLICM::instructionSafeForVersioning(Instruction *I) {
  assert(I != nullptr && "Null instruction found!");
  // Check function call safety
  if (isa<CallInst>(I) && !AA->doesNotAccessMemory(CallSite(I))) {
    LLVM_DEBUG(dbgs() << " Unsafe call site found.\n");
    return false;
  }
  if (auto *Call = dyn_cast<CallBase>(I))
    if (!AA->doesNotAccessMemory(Call)) {
      LLVM_DEBUG(dbgs() << " Unsafe call site found.\n");
      return false;
    }
  // Avoid loops with possiblity of throw
  if (I->mayThrow()) {
    LLVM_DEBUG(dbgs() << " May throw instruction found in loop body\n");

@ -546,8 +546,8 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted callsites.
  SmallVector<ImmutableCallSite, 8> CallSites;
  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

@ -565,10 +565,9 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
    });

    if (!NeedLift)
      NeedLift =
          llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
            return isModOrRefSet(AA.getModRefInfo(C, CS));
          });
      NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
        return isModOrRefSet(AA.getModRefInfo(C, Call));
      });
  }

  if (!NeedLift)
@ -579,12 +578,12 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
    // none of them may modify its source.
    if (isModSet(AA.getModRefInfo(C, LoadLoc)))
      return false;
    else if (auto CS = ImmutableCallSite(C)) {
    else if (const auto *Call = dyn_cast<CallBase>(C)) {
      // If we can't lift this before P, it's game over.
      if (isModOrRefSet(AA.getModRefInfo(P, CS)))
      if (isModOrRefSet(AA.getModRefInfo(P, Call)))
        return false;

      CallSites.push_back(CS);
      Calls.push_back(Call);
    } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
      // If we can't lift this before P, it's game over.
      auto ML = MemoryLocation::get(C);

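The lambda in the hunk above is the common pattern for checking a candidate instruction against a set of already-collected calls. A hedged, self-contained sketch of the same check (names are illustrative; the AAResults overload taking an Instruction and a CallBase is the one used in the hunk):

#include "llvm/ADT/STLExtras.h"     // llvm::any_of
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Sketch: does instruction C mod/ref any call we have already lifted?
static bool interferesWithLiftedCalls(
    AliasAnalysis &AA, Instruction *C,
    const SmallVectorImpl<const CallBase *> &Calls) {
  return llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
    return isModOrRefSet(AA.getModRefInfo(C, Call));
  });
}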
@ -76,14 +76,14 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis &AA,
      Inst->mayThrow())
    return false;

  if (auto CS = CallSite(Inst)) {
  if (auto *Call = dyn_cast<CallBase>(Inst)) {
    // Convergent operations cannot be made control-dependent on additional
    // values.
    if (CS.hasFnAttr(Attribute::Convergent))
    if (Call->hasFnAttr(Attribute::Convergent))
      return false;

    for (Instruction *S : Stores)
      if (isModSet(AA.getModRefInfo(S, CS)))
      if (isModSet(AA.getModRefInfo(S, Call)))
        return false;
  }

@ -999,22 +999,22 @@ static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : ICS.args()) {
        for (Value *Arg : Call->args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.

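The behavior check in that hunk is a common client-side pattern: classify a call by what memory it can touch before deciding whether per-argument metadata is worthwhile. A hedged sketch of the same classification in isolation:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Sketch: classify a call as "argument memory only" the way the inliner hunk
// above does before it bothers adding alias.scope metadata.
static bool onlyTouchesArgumentMemory(AAResults &AA, const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    return true;
  FunctionModRefBehavior MRB = AA.getModRefBehavior(Call);
  return MRB == FMRB_OnlyAccessesArgumentPointees ||
         MRB == FMRB_OnlyReadsArgumentPointees;
}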