
[NFC] Remove unused GetUnderlyingObject parameter

Depends on D84617.

Differential Revision: https://reviews.llvm.org/D84621
Vitaly Buka 2020-07-31 02:09:54 -07:00
parent 42bc0ed1ab
commit 1bae08d2a5
44 changed files with 114 additions and 143 deletions
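
For readers skimming the diff below, a minimal sketch of how a typical call site changes (illustration only; isBasedOnStaticAlloca is a hypothetical helper, not part of this patch). getUnderlyingObject keeps its optional MaxLookup parameter but no longer takes a DataLayout, and the same applies to getUnderlyingObjects, getUnderlyingObjectsForCodeGen, and the ObjC ARC wrappers:

// Before this commit, callers had to thread a DataLayout through:
//   const Value *Base = getUnderlyingObject(Ptr, DL, /*MaxLookup=*/6);
// After it, only the optional MaxLookup (default 6) remains.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper, for illustration only: walk through GEPs, bitcasts and
// similar wrappers to the object the pointer is ultimately based on, and
// report whether that object is a static alloca.
static bool isBasedOnStaticAlloca(const Value *Ptr) {
  const Value *Base = getUnderlyingObject(Ptr);
  if (const auto *AI = dyn_cast<AllocaInst>(Base))
    return AI->isStaticAlloca();
  return false;
}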


@ -64,10 +64,9 @@ inline bool ModuleHasARC(const Module &M) {
/// This is a wrapper around getUnderlyingObject which also knows how to
/// look through objc_retain and objc_autorelease calls, which we know to return
/// their argument verbatim.
inline const Value *GetUnderlyingObjCPtr(const Value *V,
const DataLayout &DL) {
inline const Value *GetUnderlyingObjCPtr(const Value *V) {
for (;;) {
V = getUnderlyingObject(V, DL);
V = getUnderlyingObject(V);
if (!IsForwarding(GetBasicARCInstKind(V)))
break;
V = cast<CallInst>(V)->getArgOperand(0);
@ -78,12 +77,12 @@ inline const Value *GetUnderlyingObjCPtr(const Value *V,
/// A wrapper for GetUnderlyingObjCPtr used for results memoization.
inline const Value *
GetUnderlyingObjCPtrCached(const Value *V, const DataLayout &DL,
GetUnderlyingObjCPtrCached(const Value *V,
DenseMap<const Value *, WeakTrackingVH> &Cache) {
if (auto InCache = Cache.lookup(V))
return InCache;
const Value *Computed = GetUnderlyingObjCPtr(V, DL);
const Value *Computed = GetUnderlyingObjCPtr(V);
Cache[V] = const_cast<Value *>(Computed);
return Computed;
}


@ -368,11 +368,10 @@ class Value;
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
Value *getUnderlyingObject(Value *V, const DataLayout &DL,
unsigned MaxLookup = 6);
inline const Value *getUnderlyingObject(const Value *V, const DataLayout &DL,
Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6);
inline const Value *getUnderlyingObject(const Value *V,
unsigned MaxLookup = 6) {
return getUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
return getUnderlyingObject(const_cast<Value *>(V), MaxLookup);
}
/// This method is similar to getUnderlyingObject except that it can
@ -405,14 +404,12 @@ class Value;
/// it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
SmallVectorImpl<const Value *> &Objects,
const DataLayout &DL, LoopInfo *LI = nullptr,
unsigned MaxLookup = 6);
LoopInfo *LI = nullptr, unsigned MaxLookup = 6);
/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
SmallVectorImpl<Value *> &Objects,
const DataLayout &DL);
SmallVectorImpl<Value *> &Objects);
/// Finds alloca where the value comes from.
AllocaInst *findAllocaForValue(Value *V);


@ -641,8 +641,7 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
if (!DT)
return ModRefInfo::ModRef;
const Value *Object =
getUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
const Value *Object = getUnderlyingObject(MemLoc.Ptr);
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return ModRefInfo::ModRef;


@ -661,7 +661,7 @@ bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
SmallVector<const Value *, 16> Worklist;
Worklist.push_back(Loc.Ptr);
do {
const Value *V = getUnderlyingObject(Worklist.pop_back_val(), DL);
const Value *V = getUnderlyingObject(Worklist.pop_back_val());
if (!Visited.insert(V).second) {
Visited.clear();
return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
@ -875,7 +875,7 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
assert(notDifferentParent(Call, Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
const Value *Object = getUnderlyingObject(Loc.Ptr, DL);
const Value *Object = getUnderlyingObject(Loc.Ptr);
// Calls marked 'tail' cannot read or write allocas from the current frame
// because the current frame might be destroyed by the time they run. However,
@ -1309,7 +1309,7 @@ bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
@ -1782,10 +1782,10 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
// Figure out what objects these things are pointing to if we can.
if (O1 == nullptr)
O1 = getUnderlyingObject(V1, DL, MaxLookupSearchDepth);
O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
if (O2 == nullptr)
O2 = getUnderlyingObject(V2, DL, MaxLookupSearchDepth);
O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.


@ -718,7 +718,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE, DL))) {
if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
if (GV->getInitializer()->isNullValue())
return Constant::getNullValue(Ty);


@ -659,8 +659,8 @@ static AliasResult underlyingObjectsAlias(AAResults *AA,
return NoAlias;
// Check the underlying objects are the same
const Value *AObj = getUnderlyingObject(LocA.Ptr, DL);
const Value *BObj = getUnderlyingObject(LocB.Ptr, DL);
const Value *AObj = getUnderlyingObject(LocA.Ptr);
const Value *BObj = getUnderlyingObject(LocB.Ptr);
// If the underlying objects are the same, they must alias
if (AObj == BObj)


@ -435,8 +435,7 @@ bool GlobalsAAResult::AnalyzeIndirectGlobalMemory(GlobalVariable *GV) {
continue;
// Check the value being stored.
Value *Ptr = getUnderlyingObject(SI->getOperand(0),
GV->getParent()->getDataLayout());
Value *Ptr = getUnderlyingObject(SI->getOperand(0));
if (!isAllocLikeFn(Ptr, &GetTLI(*SI->getFunction())))
return false; // Too hard to analyze.
@ -661,12 +660,12 @@ static bool isNonEscapingGlobalNoAliasWithLoad(const GlobalValue *GV,
return false;
if (auto *LI = dyn_cast<LoadInst>(Input)) {
Inputs.push_back(getUnderlyingObject(LI->getPointerOperand(), DL));
Inputs.push_back(getUnderlyingObject(LI->getPointerOperand()));
continue;
}
if (auto *SI = dyn_cast<SelectInst>(Input)) {
const Value *LHS = getUnderlyingObject(SI->getTrueValue(), DL);
const Value *RHS = getUnderlyingObject(SI->getFalseValue(), DL);
const Value *LHS = getUnderlyingObject(SI->getTrueValue());
const Value *RHS = getUnderlyingObject(SI->getFalseValue());
if (Visited.insert(LHS).second)
Inputs.push_back(LHS);
if (Visited.insert(RHS).second)
@ -675,7 +674,7 @@ static bool isNonEscapingGlobalNoAliasWithLoad(const GlobalValue *GV,
}
if (auto *PN = dyn_cast<PHINode>(Input)) {
for (const Value *Op : PN->incoming_values()) {
Op = getUnderlyingObject(Op, DL);
Op = getUnderlyingObject(Op);
if (Visited.insert(Op).second)
Inputs.push_back(Op);
}
@ -774,7 +773,7 @@ bool GlobalsAAResult::isNonEscapingGlobalNoAlias(const GlobalValue *GV,
if (auto *LI = dyn_cast<LoadInst>(Input)) {
// A pointer loaded from a global would have been captured, and we know
// that the global is non-escaping, so no alias.
const Value *Ptr = getUnderlyingObject(LI->getPointerOperand(), DL);
const Value *Ptr = getUnderlyingObject(LI->getPointerOperand());
if (isNonEscapingGlobalNoAliasWithLoad(GV, Ptr, Depth, DL))
// The load does not alias with GV.
continue;
@ -782,8 +781,8 @@ bool GlobalsAAResult::isNonEscapingGlobalNoAlias(const GlobalValue *GV,
return false;
}
if (auto *SI = dyn_cast<SelectInst>(Input)) {
const Value *LHS = getUnderlyingObject(SI->getTrueValue(), DL);
const Value *RHS = getUnderlyingObject(SI->getFalseValue(), DL);
const Value *LHS = getUnderlyingObject(SI->getTrueValue());
const Value *RHS = getUnderlyingObject(SI->getFalseValue());
if (Visited.insert(LHS).second)
Inputs.push_back(LHS);
if (Visited.insert(RHS).second)
@ -792,7 +791,7 @@ bool GlobalsAAResult::isNonEscapingGlobalNoAlias(const GlobalValue *GV,
}
if (auto *PN = dyn_cast<PHINode>(Input)) {
for (const Value *Op : PN->incoming_values()) {
Op = getUnderlyingObject(Op, DL);
Op = getUnderlyingObject(Op);
if (Visited.insert(Op).second)
Inputs.push_back(Op);
}
@ -827,8 +826,8 @@ AliasResult GlobalsAAResult::alias(const MemoryLocation &LocA,
const MemoryLocation &LocB,
AAQueryInfo &AAQI) {
// Get the base object these pointers point to.
const Value *UV1 = getUnderlyingObject(LocA.Ptr, DL);
const Value *UV2 = getUnderlyingObject(LocB.Ptr, DL);
const Value *UV1 = getUnderlyingObject(LocA.Ptr);
const Value *UV2 = getUnderlyingObject(LocB.Ptr);
// If either of the underlying values is a global, they may be non-addr-taken
// globals, which we can answer queries about.
@ -915,7 +914,7 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(const CallBase *Call,
// is based on GV, return the conservative result.
for (auto &A : Call->args()) {
SmallVector<const Value*, 4> Objects;
getUnderlyingObjects(A, Objects, DL);
getUnderlyingObjects(A, Objects);
// All objects must be identified.
if (!all_of(Objects, isIdentifiedObject) &&
@ -942,7 +941,7 @@ ModRefInfo GlobalsAAResult::getModRefInfo(const CallBase *Call,
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
if (const GlobalValue *GV =
dyn_cast<GlobalValue>(getUnderlyingObject(Loc.Ptr, DL)))
dyn_cast<GlobalValue>(getUnderlyingObject(Loc.Ptr)))
// If GV is internal to this IR and there is no function with local linkage
// that has had their address taken, keep looking for a tighter ModRefInfo.
if (GV->hasLocalLinkage() && !UnknownFunctionsWithLocalLinkage)


@ -2524,8 +2524,8 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
// memory within the lifetime of the current function (allocas, byval
// arguments, globals), then determine the comparison result here.
SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
getUnderlyingObjects(LHS, LHSUObjs, DL);
getUnderlyingObjects(RHS, RHSUObjs, DL);
getUnderlyingObjects(LHS, LHSUObjs);
getUnderlyingObjects(RHS, RHSUObjs);
// Is the set of underlying objects all noalias calls?
auto IsNAC = [](ArrayRef<const Value *> Objects) {


@ -606,13 +606,11 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueImpl(
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
return L->getPointerAddressSpace() == 0 &&
getUnderlyingObject(L->getPointerOperand(),
L->getModule()->getDataLayout()) == Ptr;
getUnderlyingObject(L->getPointerOperand()) == Ptr;
}
if (StoreInst *S = dyn_cast<StoreInst>(I)) {
return S->getPointerAddressSpace() == 0 &&
getUnderlyingObject(S->getPointerOperand(),
S->getModule()->getDataLayout()) == Ptr;
getUnderlyingObject(S->getPointerOperand()) == Ptr;
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (MI->isVolatile()) return false;
@ -622,13 +620,11 @@ static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (!Len || Len->isZero()) return false;
if (MI->getDestAddressSpace() == 0)
if (getUnderlyingObject(MI->getRawDest(),
MI->getModule()->getDataLayout()) == Ptr)
if (getUnderlyingObject(MI->getRawDest()) == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
if (MTI->getSourceAddressSpace() == 0)
if (getUnderlyingObject(MTI->getRawSource(),
MTI->getModule()->getDataLayout()) == Ptr)
if (getUnderlyingObject(MTI->getRawSource()) == Ptr)
return true;
}
return false;
@ -641,11 +637,10 @@ static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
static bool isObjectDereferencedInBlock(Value *Val, BasicBlock *BB) {
assert(Val->getType()->isPointerTy());
const DataLayout &DL = BB->getModule()->getDataLayout();
Value *UnderlyingVal = getUnderlyingObject(Val, DL);
Value *UnderlyingVal = getUnderlyingObject(Val);
// If 'getUnderlyingObject' didn't converge, skip it. It won't converge
// inside InstructionDereferencesPointer either.
if (UnderlyingVal == getUnderlyingObject(UnderlyingVal, DL, 1))
if (UnderlyingVal == getUnderlyingObject(UnderlyingVal, 1))
for (Instruction &I : *BB)
if (InstructionDereferencesPointer(&I, UnderlyingVal))
return true;


@ -673,7 +673,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// TODO: Look through eliminable cast pairs.
// TODO: Look through calls with unique return values.
// TODO: Look through vector insert/extract/shuffle.
V = OffsetOk ? getUnderlyingObject(V, *DL) : V->stripPointerCasts();
V = OffsetOk ? getUnderlyingObject(V) : V->stripPointerCasts();
if (LoadInst *L = dyn_cast<LoadInst>(V)) {
BasicBlock::iterator BBI = L->getIterator();
BasicBlock *BB = L->getParent();


@ -508,10 +508,10 @@ public:
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
AccessAnalysis(const DataLayout &Dl, Loop *TheLoop, AAResults *AA,
LoopInfo *LI, MemoryDepChecker::DepCandidates &DA,
AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
MemoryDepChecker::DepCandidates &DA,
PredicatedScalarEvolution &PSE)
: DL(Dl), TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
: TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
IsRTCheckAnalysisNeeded(false), PSE(PSE) {}
/// Register a load and whether it is only read from.
@ -585,8 +585,6 @@ private:
/// Set of all accesses.
PtrAccessSet Accesses;
const DataLayout &DL;
/// The loop being checked.
const Loop *TheLoop;
@ -938,7 +936,7 @@ void AccessAnalysis::processMemAccesses() {
typedef SmallVector<const Value *, 16> ValueVector;
ValueVector TempObjects;
getUnderlyingObjects(Ptr, TempObjects, DL, LI);
getUnderlyingObjects(Ptr, TempObjects, LI);
LLVM_DEBUG(dbgs()
<< "Underlying objects for pointer " << *Ptr << "\n");
for (const Value *UnderlyingObj : TempObjects) {
@ -1142,7 +1140,7 @@ bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
// first pointer in the array.
Value *Ptr0 = VL[0];
const SCEV *Scev0 = SE.getSCEV(Ptr0);
Value *Obj0 = getUnderlyingObject(Ptr0, DL);
Value *Obj0 = getUnderlyingObject(Ptr0);
llvm::SmallSet<int64_t, 4> Offsets;
for (auto *Ptr : VL) {
@ -1153,7 +1151,7 @@ bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
return false;
// If a pointer refers to a different underlying object, bail - the
// pointers are by definition incomparable.
Value *CurrObj = getUnderlyingObject(Ptr, DL);
Value *CurrObj = getUnderlyingObject(Ptr);
if (CurrObj != Obj0)
return false;
@ -1947,8 +1945,7 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
}
MemoryDepChecker::DepCandidates DependentAccesses;
AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
TheLoop, AA, LI, DependentAccesses, *PSE);
AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);
// Holds the analyzed pointers. We don't want to call getUnderlyingObjects
// multiple times on the same object. If the ptr is accessed twice, once


@ -406,8 +406,6 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
isInvariantLoad = true;
}
const DataLayout &DL = BB->getModule()->getDataLayout();
// Return "true" if and only if the instruction I is either a non-simple
// load or a non-simple store.
auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
@ -576,7 +574,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// looking for a clobber in many cases; that's an alias property and is
// handled by BasicAA.
if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr, DL);
const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr);
if (AccessPtr == Inst || AA.isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
}


@ -54,8 +54,8 @@ AliasResult ObjCARCAAResult::alias(const MemoryLocation &LocA,
// If that failed, climb to the underlying object, including climbing through
// ObjC-specific no-ops, and try making an imprecise alias query.
const Value *UA = GetUnderlyingObjCPtr(SA, DL);
const Value *UB = GetUnderlyingObjCPtr(SB, DL);
const Value *UA = GetUnderlyingObjCPtr(SA);
const Value *UB = GetUnderlyingObjCPtr(SB);
if (UA != SA || UB != SB) {
Result = AAResultBase::alias(MemoryLocation(UA), MemoryLocation(UB), AAQI);
// We can't use MustAlias or PartialAlias results here because
@ -83,7 +83,7 @@ bool ObjCARCAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
// If that failed, climb to the underlying object, including climbing through
// ObjC-specific no-ops, and try making an imprecise alias query.
const Value *U = GetUnderlyingObjCPtr(S, DL);
const Value *U = GetUnderlyingObjCPtr(S);
if (U != S)
return AAResultBase::pointsToConstantMemory(MemoryLocation(U), AAQI,
OrLocal);


@ -4160,8 +4160,7 @@ static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
return true;
}
Value *llvm::getUnderlyingObject(Value *V, const DataLayout &DL,
unsigned MaxLookup) {
Value *llvm::getUnderlyingObject(Value *V, unsigned MaxLookup) {
if (!V->getType()->isPointerTy())
return V;
for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
@ -4208,14 +4207,13 @@ Value *llvm::getUnderlyingObject(Value *V, const DataLayout &DL,
void llvm::getUnderlyingObjects(const Value *V,
SmallVectorImpl<const Value *> &Objects,
const DataLayout &DL, LoopInfo *LI,
unsigned MaxLookup) {
LoopInfo *LI, unsigned MaxLookup) {
SmallPtrSet<const Value *, 4> Visited;
SmallVector<const Value *, 4> Worklist;
Worklist.push_back(V);
do {
const Value *P = Worklist.pop_back_val();
P = getUnderlyingObject(P, DL, MaxLookup);
P = getUnderlyingObject(P, MaxLookup);
if (!Visited.insert(P).second)
continue;
@ -4280,15 +4278,14 @@ static const Value *getUnderlyingObjectFromInt(const Value *V) {
/// ptrtoint+arithmetic+inttoptr sequences.
/// It returns false if unidentified object is found in getUnderlyingObjects.
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
SmallVectorImpl<Value *> &Objects,
const DataLayout &DL) {
SmallVectorImpl<Value *> &Objects) {
SmallPtrSet<const Value *, 16> Visited;
SmallVector<const Value *, 4> Working(1, V);
do {
V = Working.pop_back_val();
SmallVector<const Value *, 4> Objs;
getUnderlyingObjects(V, Objs, DL);
getUnderlyingObjects(V, Objs);
for (const Value *V : Objs) {
if (!Visited.insert(V).second)


@ -1387,7 +1387,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
// Get the underlying objects for the location passed on the lifetime
// marker.
SmallVector<const Value *, 4> Allocas;
getUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
getUnderlyingObjects(CI.getArgOperand(1), Allocas);
// Iterate over each underlying object, creating lifetime markers for each
// static alloca. Quit if we find a non-static alloca.


@ -705,14 +705,13 @@ static bool isDependenceBarrier(MachineInstr &MI, AliasAnalysis *AA) {
/// This function calls the code in ValueTracking, but first checks that the
/// instruction has a memory operand.
static void getUnderlyingObjects(const MachineInstr *MI,
SmallVectorImpl<const Value *> &Objs,
const DataLayout &DL) {
SmallVectorImpl<const Value *> &Objs) {
if (!MI->hasOneMemOperand())
return;
MachineMemOperand *MM = *MI->memoperands_begin();
if (!MM->getValue())
return;
getUnderlyingObjects(MM->getValue(), Objs, DL);
getUnderlyingObjects(MM->getValue(), Objs);
for (const Value *V : Objs) {
if (!isIdentifiedObject(V)) {
Objs.clear();
@ -736,7 +735,7 @@ void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
PendingLoads.clear();
else if (MI.mayLoad()) {
SmallVector<const Value *, 4> Objs;
::getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
::getUnderlyingObjects(&MI, Objs);
if (Objs.empty())
Objs.push_back(UnknownValue);
for (auto V : Objs) {
@ -745,7 +744,7 @@ void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
}
} else if (MI.mayStore()) {
SmallVector<const Value *, 4> Objs;
::getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
::getUnderlyingObjects(&MI, Objs);
if (Objs.empty())
Objs.push_back(UnknownValue);
for (auto V : Objs) {


@ -154,7 +154,7 @@ static bool getUnderlyingObjectsForInstr(const MachineInstr *MI,
Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
} else if (const Value *V = MMO->getValue()) {
SmallVector<Value *, 4> Objs;
if (!getUnderlyingObjectsForCodeGen(V, Objs, DL))
if (!getUnderlyingObjectsForCodeGen(V, Objs))
return false;
for (Value *V : Objs) {


@ -6636,7 +6636,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
Value *const ObjectPtr = I.getArgOperand(1);
SmallVector<const Value *, 4> Allocas;
getUnderlyingObjects(ObjectPtr, Allocas, *DL);
getUnderlyingObjects(ObjectPtr, Allocas);
for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(),
E = Allocas.end(); Object != E; ++Object) {


@ -1048,7 +1048,7 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
if (MMO->getAAInfo()) {
if (const Value *MMOV = MMO->getValue()) {
SmallVector<Value *, 4> Objs;
getUnderlyingObjectsForCodeGen(MMOV, Objs, MF->getDataLayout());
getUnderlyingObjectsForCodeGen(MMOV, Objs);
if (Objs.empty())
MayHaveConflictingAAMD = true;


@ -96,7 +96,7 @@ bool AMDGPUAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
return true;
const Value *Base = getUnderlyingObject(Loc.Ptr, DL);
const Value *Base = getUnderlyingObject(Loc.Ptr);
AS = Base->getType()->getPointerAddressSpace();
if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT)


@ -134,7 +134,7 @@ unsigned AMDGPUInliner::getInlineThreshold(CallBase &CB) const {
Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
continue;
PtrArg = getUnderlyingObject(PtrArg, DL);
PtrArg = getUnderlyingObject(PtrArg);
if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
continue;


@ -605,7 +605,7 @@ bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
if (isa<ConstantPointerNull>(OtherOp))
return true;
Value *OtherObj = getUnderlyingObject(OtherOp, *DL);
Value *OtherObj = getUnderlyingObject(OtherOp);
if (!isa<AllocaInst>(OtherObj))
return false;


@ -169,7 +169,7 @@ void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
const Value *Ptr = GEP->getPointerOperand();
const AllocaInst *Alloca =
dyn_cast<AllocaInst>(getUnderlyingObject(Ptr, DL));
dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
if (!Alloca || !Alloca->isStaticAlloca())
continue;
Type *Ty = Alloca->getAllocatedType();


@ -354,7 +354,7 @@ class LoadVtxId1 <PatFrag load> : PatFrag <
return LD->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
(LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
!isa<GlobalValue>(getUnderlyingObject(
LD->getMemOperand()->getValue(), CurDAG->getDataLayout())));
LD->getMemOperand()->getValue())));
}]>;
def vtx_id1_az_extloadi8 : LoadVtxId1 <az_extloadi8>;
@ -366,7 +366,7 @@ class LoadVtxId2 <PatFrag load> : PatFrag <
const MemSDNode *LD = cast<MemSDNode>(N);
return LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
isa<GlobalValue>(getUnderlyingObject(
LD->getMemOperand()->getValue(), CurDAG->getDataLayout()));
LD->getMemOperand()->getValue()));
}]>;
def vtx_id2_az_extloadi8 : LoadVtxId2 <az_extloadi8>;


@ -459,10 +459,8 @@ static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
auto Base2 = MO2->getValue();
if (!Base1 || !Base2)
return false;
const MachineFunction &MF = *MI1.getParent()->getParent();
const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
Base1 = getUnderlyingObject(Base1, DL);
Base2 = getUnderlyingObject(Base2, DL);
Base1 = getUnderlyingObject(Base1);
Base2 = getUnderlyingObject(Base2);
if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
return false;


@ -182,7 +182,7 @@ namespace {
/// memory instruction can be moved to a delay slot.
class MemDefsUses : public InspectMemInstr {
public:
MemDefsUses(const DataLayout &DL, const MachineFrameInfo *MFI);
explicit MemDefsUses(const MachineFrameInfo *MFI);
private:
using ValueType = PointerUnion<const Value *, const PseudoSourceValue *>;
@ -200,7 +200,6 @@ namespace {
const MachineFrameInfo *MFI;
SmallPtrSet<ValueType, 4> Uses, Defs;
const DataLayout &DL;
/// Flags indicating whether loads or stores with no underlying objects have
/// been seen.
@ -492,8 +491,8 @@ bool LoadFromStackOrConst::hasHazard_(const MachineInstr &MI) {
return true;
}
MemDefsUses::MemDefsUses(const DataLayout &DL, const MachineFrameInfo *MFI_)
: InspectMemInstr(false), MFI(MFI_), DL(DL) {}
MemDefsUses::MemDefsUses(const MachineFrameInfo *MFI_)
: InspectMemInstr(false), MFI(MFI_) {}
bool MemDefsUses::hasHazard_(const MachineInstr &MI) {
bool HasHazard = false;
@ -542,7 +541,7 @@ getUnderlyingObjects(const MachineInstr &MI,
if (const Value *V = MMO.getValue()) {
SmallVector<const Value *, 4> Objs;
::getUnderlyingObjects(V, Objs, DL);
::getUnderlyingObjects(V, Objs);
for (const Value *UValue : Objs) {
if (!isIdentifiedObject(V))
@ -775,7 +774,7 @@ bool MipsDelaySlotFiller::searchBackward(MachineBasicBlock &MBB,
auto *Fn = MBB.getParent();
RegDefsUses RegDU(*Fn->getSubtarget().getRegisterInfo());
MemDefsUses MemDU(Fn->getDataLayout(), &Fn->getFrameInfo());
MemDefsUses MemDU(&Fn->getFrameInfo());
ReverseIter Filler;
RegDU.init(Slot);
@ -851,7 +850,7 @@ bool MipsDelaySlotFiller::searchSuccBBs(MachineBasicBlock &MBB,
IM.reset(new LoadFromStackOrConst());
} else {
const MachineFrameInfo &MFI = Fn->getFrameInfo();
IM.reset(new MemDefsUses(Fn->getDataLayout(), &MFI));
IM.reset(new MemDefsUses(&MFI));
}
if (!searchRange(MBB, SuccBB->begin(), SuccBB->end(), RegDU, *IM, Slot,


@ -704,8 +704,7 @@ static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget,
// because the former looks through phi nodes while the latter does not. We
// need to look through phi nodes to handle pointer induction variables.
SmallVector<const Value *, 8> Objs;
getUnderlyingObjects(N->getMemOperand()->getValue(), Objs,
F->getDataLayout());
getUnderlyingObjects(N->getMemOperand()->getValue(), Objs);
return all_of(Objs, [&](const Value *V) {
if (auto *A = dyn_cast<const Argument>(V))


@ -214,8 +214,7 @@ bool NVPTXLowerArgs::runOnKernelFunction(Function &F) {
for (auto &I : B) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (LI->getType()->isPointerTy()) {
Value *UO = getUnderlyingObject(LI->getPointerOperand(),
F.getParent()->getDataLayout());
Value *UO = getUnderlyingObject(LI->getPointerOperand());
if (Argument *Arg = dyn_cast<Argument>(UO)) {
if (Arg->hasByValAttr()) {
// LI is a load from a pointer within a byval kernel parameter.


@ -5421,8 +5421,7 @@ struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
/// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
Optional<Type *> identifyPrivatizableType(Attributor &A) override {
Value *Obj =
getUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
Value *Obj = getUnderlyingObject(&getAssociatedValue());
if (!Obj) {
LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
return nullptr;


@ -5643,10 +5643,10 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
// Try to optimize equality comparisons against alloca-based pointers.
if (Op0->getType()->isPointerTy() && I.isEquality()) {
assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0, DL)))
if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
return New;
if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1, DL)))
if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1)))
if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
return New;
}


@ -1556,7 +1556,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if (ClOpt && ClOptGlobals) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr, DL));
GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
NumOptimizedAccessesToGlobalVar++;
@ -1566,7 +1566,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if (ClOpt && ClOptStack) {
// A direct inbounds access to a stack variable is always valid.
if (isa<AllocaInst>(getUnderlyingObject(Addr, DL)) &&
if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
NumOptimizedAccessesToStackVar++;
return;


@ -1246,7 +1246,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
const llvm::Align ShadowAlign(Align * DFS.ShadowWidthBytes);
SmallVector<const Value *, 2> Objs;
getUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
getUnderlyingObjects(Addr, Objs);
bool AllConstants = true;
for (const Value *Obj : Objs) {
if (isa<Function>(Obj) || isa<BlockAddress>(Obj))


@ -478,7 +478,7 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
}
}
if (isa<AllocaInst>(getUnderlyingObject(Addr, DL)) &&
if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
!PointerMayBeCaptured(Addr, true, true)) {
// The variable is addressable but not captured, so it cannot be
// referenced from a different thread and participate in a data race


@ -107,7 +107,7 @@ bool llvm::objcarc::CanUse(const Instruction *Inst, const Value *Ptr,
} else if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// Special-case stores, because we don't care about the stored value, just
// the store address.
const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand(), DL);
const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand());
// If we can't tell what the underlying object was, assume there is a
// dependence.
return IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&


@ -162,8 +162,8 @@ bool ProvenanceAnalysis::relatedCheck(const Value *A, const Value *B,
bool ProvenanceAnalysis::related(const Value *A, const Value *B,
const DataLayout &DL) {
A = GetUnderlyingObjCPtrCached(A, DL, UnderlyingObjCPtrCache);
B = GetUnderlyingObjCPtrCached(B, DL, UnderlyingObjCPtrCache);
A = GetUnderlyingObjCPtrCached(A, UnderlyingObjCPtrCache);
B = GetUnderlyingObjCPtrCached(B, UnderlyingObjCPtrCache);
// Quick check.
if (A == B)


@ -415,8 +415,7 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
// Check to see if the later store is to the entire object (either a global,
// an alloca, or a byval/inalloca argument). If so, then it clearly
// overwrites any other store to the same object.
const Value *UO1 = getUnderlyingObject(P1, DL),
*UO2 = getUnderlyingObject(P2, DL);
const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2);
// If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
@ -739,7 +738,6 @@ static bool handleFree(CallInst *F, AliasAnalysis *AA,
MemoryLocation Loc = MemoryLocation(F->getOperand(0));
SmallVector<BasicBlock *, 16> Blocks;
Blocks.push_back(F->getParent());
const DataLayout &DL = F->getModule()->getDataLayout();
while (!Blocks.empty()) {
BasicBlock *BB = Blocks.pop_back_val();
@ -755,7 +753,7 @@ static bool handleFree(CallInst *F, AliasAnalysis *AA,
break;
Value *DepPointer =
getUnderlyingObject(getStoredPointerOperand(Dependency), DL);
getUnderlyingObject(getStoredPointerOperand(Dependency));
// Check for aliasing.
if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
@ -795,7 +793,7 @@ static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
const DataLayout &DL, AliasAnalysis *AA,
const TargetLibraryInfo *TLI,
const Function *F) {
const Value *UnderlyingPointer = getUnderlyingObject(LoadedLoc.Ptr, DL);
const Value *UnderlyingPointer = getUnderlyingObject(LoadedLoc.Ptr);
// A constant can't be in the dead pointer set.
if (isa<Constant>(UnderlyingPointer))
@ -861,7 +859,7 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
// See through pointer-to-pointer bitcasts
SmallVector<const Value *, 4> Pointers;
getUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
getUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers);
// Stores to stack values are valid candidates for removal.
bool AllDead = true;
@ -1134,7 +1132,7 @@ static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
Instruction *UnderlyingPointer =
dyn_cast<Instruction>(getUnderlyingObject(SI->getPointerOperand(), DL));
dyn_cast<Instruction>(getUnderlyingObject(SI->getPointerOperand()));
if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA, DL, DT)) {
@ -1289,7 +1287,7 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
// to it is dead along the unwind edge. Otherwise, we need to preserve
// the store.
if (LastThrowing && DepWrite->comesBefore(LastThrowing)) {
const Value *Underlying = getUnderlyingObject(DepLoc.Ptr, DL);
const Value *Underlying = getUnderlyingObject(DepLoc.Ptr);
bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
if (!IsStoreDeadOnUnwind) {
// We're looking for a call to an allocation function
@ -1715,7 +1713,7 @@ struct DSEState {
// object can be considered terminated.
if (MaybeTermLoc->second) {
DataLayout DL = MaybeTerm->getParent()->getModule()->getDataLayout();
DefLoc = MemoryLocation(getUnderlyingObject(DefLoc.Ptr, DL));
DefLoc = MemoryLocation(getUnderlyingObject(DefLoc.Ptr));
}
return AA.isMustAlias(MaybeTermLoc->first, DefLoc);
}
@ -2030,7 +2028,6 @@ struct DSEState {
/// Eliminate writes to objects that are not visible in the caller and are not
/// accessed before returning from the function.
bool eliminateDeadWritesAtEndOfFunction() {
const DataLayout &DL = F.getParent()->getDataLayout();
bool MadeChange = false;
LLVM_DEBUG(
dbgs()
@ -2047,7 +2044,7 @@ struct DSEState {
Instruction *DefI = Def->getMemoryInst();
// See through pointer-to-pointer bitcasts
SmallVector<const Value *, 4> Pointers;
getUnderlyingObjects(getLocForWriteEx(DefI)->Ptr, Pointers, DL);
getUnderlyingObjects(getLocForWriteEx(DefI)->Ptr, Pointers);
LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
"of the function\n");
@ -2130,7 +2127,7 @@ bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
}
MemoryLocation SILoc = *MaybeSILoc;
assert(SILoc.Ptr && "SILoc should not be null");
const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr, DL);
const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);
// Check if the store is a no-op.
if (isRemovable(SI) && State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
@ -2231,7 +2228,7 @@ bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
MemoryLocation NILoc = *State.getLocForWriteEx(NI);
if (State.isMemTerminatorInst(SI)) {
const Value *NIUnd = getUnderlyingObject(NILoc.Ptr, DL);
const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
if (!SILocUnd || SILocUnd != NIUnd)
continue;
LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI


@ -1909,7 +1909,7 @@ bool llvm::promoteLoopAccessesToScalars(
// we have to prove that the store is dead along the unwind edge. We do
// this by proving that the caller can't have a reference to the object
// after return and thus can't possibly load from the object.
Value *Object = getUnderlyingObject(SomePtr, MDL);
Value *Object = getUnderlyingObject(SomePtr);
if (!isKnownNonEscaping(Object, TLI))
return false;
// Subtlety: Alloca's aren't visible to callers, but *are* potentially
@ -2041,7 +2041,7 @@ bool llvm::promoteLoopAccessesToScalars(
if (IsKnownThreadLocalObject)
SafeToInsertStore = true;
else {
Value *Object = getUnderlyingObject(SomePtr, MDL);
Value *Object = getUnderlyingObject(SomePtr);
SafeToInsertStore =
(isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
!PointerMayBeCaptured(Object, true, true);


@ -564,12 +564,12 @@ void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
break;
case LegalStoreKind::Memset: {
// Find the base pointer.
Value *Ptr = getUnderlyingObject(SI->getPointerOperand(), *DL);
Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
StoreRefsForMemset[Ptr].push_back(SI);
} break;
case LegalStoreKind::MemsetPattern: {
// Find the base pointer.
Value *Ptr = getUnderlyingObject(SI->getPointerOperand(), *DL);
Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
StoreRefsForMemsetPattern[Ptr].push_back(SI);
} break;
case LegalStoreKind::Memcpy:


@ -1562,7 +1562,7 @@ public:
if (Value *Ptr = getPointerOperand(V))
return getUnderlyingObjectThroughLoads(Ptr);
else if (V->getType()->isPointerTy())
return getUnderlyingObject(V, DL);
return getUnderlyingObject(V);
return V;
}


@ -69,7 +69,7 @@ RetainedKnowledge canonicalizedKnowledge(RetainedKnowledge RK, Module *M) {
default:
return RK;
case Attribute::NonNull:
RK.WasOn = getUnderlyingObject(RK.WasOn, M->getDataLayout());
RK.WasOn = getUnderlyingObject(RK.WasOn);
return RK;
case Attribute::Alignment: {
Value *V = RK.WasOn->stripInBoundsOffsets([&](const Value *Strip) {
@ -145,7 +145,7 @@ struct AssumeBuilderState {
if (!RK.WasOn)
return true;
if (RK.WasOn->getType()->isPointerTy()) {
Value *UnderlyingPtr = getUnderlyingObject(RK.WasOn, M->getDataLayout());
Value *UnderlyingPtr = getUnderlyingObject(RK.WasOn);
if (isa<AllocaInst>(UnderlyingPtr) || isa<GlobalValue>(UnderlyingPtr))
return false;
}


@ -1037,7 +1037,7 @@ static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
SmallSetVector<const Argument *, 4> NAPtrArgs;
for (const Value *V : PtrArgs) {
SmallVector<const Value *, 4> Objects;
getUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
getUnderlyingObjects(V, Objects, /* LI = */ nullptr);
for (const Value *O : Objects)
ObjSet.insert(O);


@ -393,7 +393,7 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
if (!Src)
return -1;
GlobalVariable *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(Src, DL));
GlobalVariable *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(Src));
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
return -1;


@ -762,8 +762,8 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
return Chain.slice(0, ChainIdx);
}
static ChainID getChainID(const Value *Ptr, const DataLayout &DL) {
const Value *ObjPtr = getUnderlyingObject(Ptr, DL);
static ChainID getChainID(const Value *Ptr) {
const Value *ObjPtr = getUnderlyingObject(Ptr);
if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
// The select's themselves are distinct instructions even if they share the
// same condition and evaluate to consecutive pointers for true and false
@ -830,7 +830,7 @@ Vectorizer::collectInstructions(BasicBlock *BB) {
continue;
// Save the load locations.
const ChainID ID = getChainID(Ptr, DL);
const ChainID ID = getChainID(Ptr);
LoadRefs[ID].push_back(LI);
} else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
if (!SI->isSimple())
@ -876,7 +876,7 @@ Vectorizer::collectInstructions(BasicBlock *BB) {
continue;
// Save store location.
const ChainID ID = getChainID(Ptr, DL);
const ChainID ID = getChainID(Ptr);
StoreRefs[ID].push_back(SI);
}
}


@ -5912,7 +5912,7 @@ void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
continue;
if (!isValidElementType(SI->getValueOperand()->getType()))
continue;
Stores[getUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
}
// Ignore getelementptr instructions that have more than one index, a