
Expose must/may alias info in MemorySSA.

Summary:
Building MemorySSA gathers alias information for Defs/Uses.
Store and expose this information when optimizing uses (while building MemorySSA),
and when optimizing defs or updating uses (in getClobberingMemoryAccess).
The current patch does not propagate alias information through MemoryPhis.

Reviewers: gbiv, dberlin

Subscribers: Prazek, sanjoy, llvm-commits

Differential Revision: https://reviews.llvm.org/D38569

llvm-svn: 327035
Alina Sbirlea 2018-03-08 18:03:14 +00:00
parent 3dc217b502
commit 0d82331cd7
3 changed files with 320 additions and 38 deletions
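
Below is a minimal sketch of how a client pass might read the alias information this change exposes, assuming the MemorySSA API at this revision; the helper name and counters are illustrative and not part of the commit:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Illustrative helper (not from this patch): count how many loads were
// optimized to a must-alias clobber or to liveOnEntry. Uses are optimized
// while MemorySSA is built, so the cached Optional<AliasResult> is already
// populated by the time it is read here.
static void countLoadAliasInfo(Function &F, MemorySSA &MSSA,
                               unsigned &NumMust, unsigned &NumLiveOnEntry) {
  for (BasicBlock &BB : F)
    for (Instruction &I : BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      if (!LI)
        continue;
      auto *MU = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(LI));
      if (!MU)
        continue;
      Optional<AliasResult> AR = MU->getOptimizedAccessType();
      if (AR == None)
        ++NumLiveOnEntry; // Defining access is liveOnEntry: no known clobber.
      else if (AR == MustAlias)
        ++NumMust;        // Defining access writes exactly this location.
    }
}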

@@ -249,7 +249,7 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
/// \brief Get the instruction that this MemoryUse represents.
Instruction *getMemoryInst() const { return MemoryInst; }
Instruction *getMemoryInst() const { return MemoryAliasPair.getPointer(); }
/// \brief Get the access that produces the memory state used by this Use.
MemoryAccess *getDefiningAccess() const { return getOperand(0); }
@@ -264,6 +264,12 @@ public:
inline MemoryAccess *getOptimized() const;
inline void setOptimized(MemoryAccess *);
// Retrieve AliasResult type of the optimized access. Ideally this would be
// returned by the caching walker and may go away in the future.
Optional<AliasResult> getOptimizedAccessType() const {
return threeBitIntToOptionalAliasResult(MemoryAliasPair.getInt());
}
/// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
/// be rewalked by the walker if necessary.
/// This really should only be called by tests.
@@ -275,23 +281,45 @@ protected:
MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
: MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInst(MI) {
: MemoryAccess(C, Vty, DeleteValue, BB, 1),
MemoryAliasPair(MI, optionalAliasResultToThreeBitInt(MayAlias)) {
setDefiningAccess(DMA);
}
// Use deleteValue() to delete a generic MemoryUseOrDef.
~MemoryUseOrDef() = default;
void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
void setOptimizedAccessType(Optional<AliasResult> AR) {
MemoryAliasPair.setInt(optionalAliasResultToThreeBitInt(AR));
}
void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
Optional<AliasResult> AR = MayAlias) {
if (!Optimized) {
setOperand(0, DMA);
return;
}
setOptimized(DMA);
setOptimizedAccessType(AR);
}
private:
Instruction *MemoryInst;
// Pair of memory instruction and Optional<AliasResult> with optimized access.
PointerIntPair<Instruction *, 3, int> MemoryAliasPair;
static int optionalAliasResultToThreeBitInt(Optional<AliasResult> OAR) {
if (OAR == None)
return 4;
return (int)OAR.getValue();
}
static Optional<AliasResult> threeBitIntToOptionalAliasResult(int I) {
assert((I <= 4 && I >= 0) &&
"Invalid value for converting to an Optional<AliasResult>");
if (I == 4)
return None;
return (AliasResult)I;
}
};
template <>
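
The PointerIntPair above packs the cached Optional<AliasResult> into the three spare alignment bits of the Instruction pointer: the four AliasResult enumerators occupy values 0-3, and the fifth value 4 is the sentinel for "nothing cached" (None). A standalone sketch of that convention, mirroring the two helpers above (the encodeAR/decodeAR names are assumptions, not part of the patch):

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include <cassert>

using namespace llvm;

// Mirrors optionalAliasResultToThreeBitInt: 0-3 carry an AliasResult,
// 4 means "no cached result" (None).
static int encodeAR(Optional<AliasResult> OAR) {
  return OAR == None ? 4 : static_cast<int>(OAR.getValue());
}

// Mirrors threeBitIntToOptionalAliasResult.
static Optional<AliasResult> decodeAR(int I) {
  assert(I >= 0 && I <= 4 && "value does not fit the 3-bit encoding");
  if (I == 4)
    return None;
  return static_cast<AliasResult>(I);
}

// Every representable state survives an encode/decode round trip.
static void checkRoundTrip() {
  assert(decodeAR(encodeAR(None)) == None);
  assert(decodeAR(encodeAR(MayAlias)) == MayAlias);
  assert(decodeAR(encodeAR(MustAlias)) == MustAlias);
}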

@@ -224,13 +224,25 @@ static bool areLoadsReorderable(const LoadInst *Use,
return !(SeqCstUse || MayClobberIsAcquire);
}
static bool instructionClobbersQuery(MemoryDef *MD,
const MemoryLocation &UseLoc,
const Instruction *UseInst,
AliasAnalysis &AA) {
namespace {
struct ClobberAlias {
bool IsClobber;
Optional<AliasResult> AR;
};
} // end anonymous namespace
// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
const MemoryLocation &UseLoc,
const Instruction *UseInst,
AliasAnalysis &AA) {
Instruction *DefInst = MD->getMemoryInst();
assert(DefInst && "Defining instruction not actually an instruction");
ImmutableCallSite UseCS(UseInst);
Optional<AliasResult> AR;
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
// These intrinsics will show up as affecting memory, but they are just
@@ -238,13 +250,14 @@ static bool instructionClobbersQuery(MemoryDef *MD,
switch (II->getIntrinsicID()) {
case Intrinsic::lifetime_start:
if (UseCS)
return false;
return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), UseLoc);
return {false, NoAlias};
AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
return {AR == MustAlias, AR};
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::assume:
return false;
return {false, NoAlias};
default:
break;
}
@@ -252,19 +265,23 @@ static bool instructionClobbersQuery(MemoryDef *MD,
if (UseCS) {
ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
return isModOrRefSet(I);
AR = isMustSet(I) ? MustAlias : MayAlias;
return {isModOrRefSet(I), AR};
}
if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
return !areLoadsReorderable(UseLoad, DefLoad);
return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};
return isModSet(AA.getModRefInfo(DefInst, UseLoc));
ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
AR = isMustSet(I) ? MustAlias : MayAlias;
return {isModSet(I), AR};
}
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
const MemoryLocOrCall &UseMLOC,
AliasAnalysis &AA) {
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
const MemoryUseOrDef *MU,
const MemoryLocOrCall &UseMLOC,
AliasAnalysis &AA) {
// FIXME: This is a temporary hack to allow a single instructionClobbersQuery
// to exist while MemoryLocOrCall is pushed through places.
if (UseMLOC.IsCall)
@@ -277,7 +294,7 @@ static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
AliasAnalysis &AA) {
return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}
namespace {
@@ -292,6 +309,7 @@ struct UpwardsMemoryQuery {
const Instruction *Inst = nullptr;
// The MemoryAccess we actually got called with, used to test local domination
const MemoryAccess *OriginalAccess = nullptr;
Optional<AliasResult> AR = MayAlias;
UpwardsMemoryQuery() = default;
@@ -375,9 +393,15 @@ checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
//
// Also, note that this can't be hoisted out of the `Worklist` loop,
// since MD may only act as a clobber for 1 of N MemoryLocations.
FoundClobber =
FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
if (!FoundClobber) {
ClobberAlias CA =
instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
if (CA.IsClobber) {
FoundClobber = true;
// Not used: CA.AR;
}
}
}
break;
}
@@ -387,7 +411,8 @@ checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
if (auto *MD = dyn_cast<MemoryDef>(MA)) {
(void)MD;
assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
.IsClobber &&
"Found clobber before reaching ClobberAt!");
continue;
}
@@ -457,9 +482,10 @@ class ClobberWalker {
/// Result of calling walkToPhiOrClobber.
struct UpwardsWalkResult {
/// The "Result" of the walk. Either a clobber, the last thing we walked, or
/// both.
/// both. Include alias info when clobber found.
MemoryAccess *Result;
bool IsKnownClobber;
Optional<AliasResult> AR;
};
/// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
@@ -475,17 +501,21 @@ class ClobberWalker {
for (MemoryAccess *Current : def_chain(Desc.Last)) {
Desc.Last = Current;
if (Current == StopAt)
return {Current, false};
return {Current, false, MayAlias};
if (auto *MD = dyn_cast<MemoryDef>(Current))
if (MSSA.isLiveOnEntryDef(MD) ||
instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
return {MD, true};
if (auto *MD = dyn_cast<MemoryDef>(Current)) {
if (MSSA.isLiveOnEntryDef(MD))
return {MD, true, MustAlias};
ClobberAlias CA =
instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
if (CA.IsClobber)
return {MD, true, CA.AR};
}
}
assert(isa<MemoryPhi>(Desc.Last) &&
"Ended at a non-clobber that's not a phi?");
return {Desc.Last, false};
return {Desc.Last, false, MayAlias};
}
void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
@@ -828,6 +858,7 @@ public:
MemoryAccess *Result;
if (WalkResult.IsKnownClobber) {
Result = WalkResult.Result;
Q.AR = WalkResult.AR;
} else {
OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
Current, Q.StartingLoc);
@@ -1095,6 +1126,7 @@ private:
// This is where the last walk for this memory location ended.
unsigned long LastKill;
bool LastKillValid;
Optional<AliasResult> AR;
};
void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
@@ -1154,7 +1186,7 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
}
if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
continue;
}
@@ -1196,6 +1228,7 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
if (!LocInfo.LastKillValid) {
LocInfo.LastKill = VersionStack.size() - 1;
LocInfo.LastKillValid = true;
LocInfo.AR = MayAlias;
}
// At this point, we should have corrected last kill and LowerBound to be
@@ -1239,24 +1272,32 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
// Reset UpperBound to liveOnEntryDef's place in the stack
UpperBound = 0;
FoundClobberResult = true;
LocInfo.AR = MustAlias;
break;
}
if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
if (CA.IsClobber) {
FoundClobberResult = true;
LocInfo.AR = CA.AR;
break;
}
--UpperBound;
}
// Note: Phis always have AliasResult AR set to MayAlias ATM.
// At the end of this loop, UpperBound is either a clobber, or lower bound
// PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
MU->setDefiningAccess(VersionStack[UpperBound], true);
// We were last killed now by where we got to
if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
LocInfo.AR = None;
MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
LocInfo.LastKill = UpperBound;
} else {
// Otherwise, we checked all the new ones, and now we know we can get to
// LastKill.
MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
}
LocInfo.LowerBound = VersionStack.size() - 1;
LocInfo.LowerBoundBlock = BB;
@@ -2026,8 +2067,8 @@ MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
return MA;
// If this is an already optimized use or def, return the optimized result.
// Note: Currently, we do not store the optimized def result because we'd need
// a separate field, since we can't use it as the defining access.
// Note: Currently, we store the optimized def result in a separate field,
// since we can't use the defining access.
if (StartingAccess->isOptimized())
return StartingAccess->getOptimized();
@@ -2041,8 +2082,10 @@ MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
MUD->setOptimized(LiveOnEntry);
MUD->setOptimizedAccessType(None);
}
return LiveOnEntry;
}
@@ -2051,16 +2094,27 @@ MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
// At this point, DefiningAccess may be the live on entry def.
// If it is, we will not get a better result.
if (MSSA->isLiveOnEntryDef(DefiningAccess))
if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
MUD->setOptimized(DefiningAccess);
MUD->setOptimizedAccessType(None);
}
return DefiningAccess;
}
MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
DEBUG(dbgs() << *DefiningAccess << "\n");
DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
DEBUG(dbgs() << *Result << "\n");
if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess))
if (auto *MUD = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
MUD->setOptimized(Result);
if (MSSA->isLiveOnEntryDef(Result))
MUD->setOptimizedAccessType(None);
else if (Q.AR == MustAlias)
MUD->setOptimizedAccessType(MustAlias);
}
return Result;
}
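
In contrast with uses, a def's alias information is only filled in when the caching walker optimizes it, as in the getClobberingMemoryAccess changes above. A minimal sketch of a caller that triggers that optimization and then reads the cached result (the helper name is illustrative, not part of the commit):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Illustrative helper: returns true when MemorySSA proves the clobbering
// access writes exactly the location stored to by SI (a MustAlias clobber).
static bool clobberIsMustAlias(StoreInst *SI, MemorySSA &MSSA) {
  auto *MD = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(SI));
  if (!MD)
    return false;
  // Optimizing the def caches both the clobbering access and the AliasResult.
  MSSA.getWalker()->getClobberingMemoryAccess(MD);
  return MD->isOptimized() && MD->getOptimizedAccessType() == MustAlias;
}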

@@ -999,3 +999,203 @@ TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
MSSA.getLiveOnEntryDef())
<< "(DefX1 = " << DefX1 << ")";
}
// Test Must alias for optimized uses
TEST_F(MemorySSATest, TestLoadMustAlias) {
F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
GlobalValue::ExternalLinkage, "F", &M);
B.SetInsertPoint(BasicBlock::Create(C, "", F));
Type *Int8 = Type::getInt8Ty(C);
Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
// Check load from LOE
LoadInst *LA1 = B.CreateLoad(AllocaA, "");
// Check load alias cached for second load
LoadInst *LA2 = B.CreateLoad(AllocaA, "");
B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
// Check load from store/def
LoadInst *LA3 = B.CreateLoad(AllocaA, "");
// Check load alias cached for second load
LoadInst *LA4 = B.CreateLoad(AllocaA, "");
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
unsigned I = 0;
for (LoadInst *V : {LA1, LA2}) {
MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemUse->getOptimizedAccessType(), None)
<< "Load " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
for (LoadInst *V : {LA3, LA4}) {
MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
<< "Load " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
}
// Test Must alias for optimized defs.
TEST_F(MemorySSATest, TestStoreMustAlias) {
F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
GlobalValue::ExternalLinkage, "F", &M);
B.SetInsertPoint(BasicBlock::Create(C, "", F));
Type *Int8 = Type::getInt8Ty(C);
Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);
StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB);
StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA);
StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB);
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAWalker *Walker = Analyses->Walker;
unsigned I = 0;
for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemDef->isOptimized(), false)
<< "Store " << I << " is optimized from the start?";
EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
<< "Store " << I
<< " has correct alias information before being optimized?";
if (V == SA1)
Walker->getClobberingMemoryAccess(V);
else {
MemoryAccess *Def = MemDef->getDefiningAccess();
MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V);
EXPECT_NE(Def, Clob) << "Store " << I
<< " has Defining Access equal to Clobbering Access";
}
EXPECT_EQ(MemDef->isOptimized(), true)
<< "Store " << I << " was not optimized";
if (I == 0 || I == 1)
EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
<< "Store " << I << " doesn't have the correct alias information";
else
EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
<< "Store " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
}
// Test May alias for optimized uses.
TEST_F(MemorySSATest, TestLoadMayAlias) {
F = Function::Create(FunctionType::get(B.getVoidTy(),
{B.getInt8PtrTy(), B.getInt8PtrTy()},
false),
GlobalValue::ExternalLinkage, "F", &M);
B.SetInsertPoint(BasicBlock::Create(C, "", F));
Type *Int8 = Type::getInt8Ty(C);
auto *ArgIt = F->arg_begin();
Argument *PointerA = &*ArgIt;
Argument *PointerB = &*(++ArgIt);
B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
LoadInst *LA1 = B.CreateLoad(PointerA, "");
B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
LoadInst *LB1 = B.CreateLoad(PointerB, "");
B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
LoadInst *LA2 = B.CreateLoad(PointerA, "");
B.CreateStore(ConstantInt::get(Int8, 0), PointerB);
LoadInst *LB2 = B.CreateLoad(PointerB, "");
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
unsigned I = 0;
for (LoadInst *V : {LA1, LB1}) {
MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemUse->getOptimizedAccessType(), MayAlias)
<< "Load " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
for (LoadInst *V : {LA2, LB2}) {
MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
<< "Load " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
}
// Test May alias for optimized defs.
TEST_F(MemorySSATest, TestStoreMayAlias) {
F = Function::Create(FunctionType::get(B.getVoidTy(),
{B.getInt8PtrTy(), B.getInt8PtrTy()},
false),
GlobalValue::ExternalLinkage, "F", &M);
B.SetInsertPoint(BasicBlock::Create(C, "", F));
Type *Int8 = Type::getInt8Ty(C);
auto *ArgIt = F->arg_begin();
Argument *PointerA = &*ArgIt;
Argument *PointerB = &*(++ArgIt);
Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
// Store into arg1, must alias because it's LOE => must
StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
// Store into arg2, may alias store to arg1 => may
StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
// Store into alloca, no alias with args, so must alias LOE => must
StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC);
// Store into arg1, may alias store to arg2 => may
StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA);
// Store into arg2, may alias store to arg1 => may
StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB);
// Store into alloca, no alias with args, so must alias SC1 => must
StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC);
// Store into arg2, must alias store to arg2 => must
StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB);
std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAWalker *Walker = Analyses->Walker;
unsigned I = 0;
for (StoreInst *V : Sts) {
MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemDef->isOptimized(), false)
<< "Store " << I << " is optimized from the start?";
EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
<< "Store " << I
<< " has correct alias information before being optimized?";
++I;
}
for (StoreInst *V : Sts)
Walker->getClobberingMemoryAccess(V);
I = 0;
for (StoreInst *V : Sts) {
MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemDef->isOptimized(), true)
<< "Store " << I << " was not optimized";
if (I == 1 || I == 3 || I == 4)
EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
<< "Store " << I << " doesn't have the correct alias information";
else if (I == 0 || I == 2)
EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
<< "Store " << I << " doesn't have the correct alias information";
else
EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
<< "Store " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
}