1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 10:42:39 +01:00

Allow None as a MemoryLocation to getModRefInfo

Summary:
Adding part of the changes in D30369 (needed to make progress):
Current patch updates AliasAnalysis and MemoryLocation, but does _not_ clean up MemorySSA.

Original summary from D30369, by dberlin:
Currently, we have instructions which affect memory but have no memory
location. If you call, for example, MemoryLocation::get on a fence,
it asserts. This means things specifically have to avoid that. It
also means we end up with a copy of each API, one taking a memory
location, one not.

This starts to fix that.

We add MemoryLocation::getOrNone as a new call, and reimplement the
old asserting version in terms of it.

We make MemoryLocation optional in the (Instruction, MemoryLocation)
version of getModRefInfo, and kill the old one argument version in
favor of passing None (it had one caller). Now both can handle fences
because you can just use MemoryLocation::getOrNone on an instruction
and it will return a correct answer.

We use all this to clean up part of MemorySSA that had to handle this difference.

Note that literally every actual getModRefInfo interface we have could be made private and replaced with:

getModRefInfo(Instruction, Optional<MemoryLocation>)
and
getModRefInfo(Instruction, Optional<MemoryLocation>, Instruction, Optional<MemoryLocation>)

and delegating to the right ones, if we wanted to.

I have not attempted to do this yet.

Reviewers: dberlin, davide, dblaikie

Subscribers: sanjoy, hfinkel, chandlerc, llvm-commits

Differential Revision: https://reviews.llvm.org/D35441

llvm-svn: 309641
This commit is contained in:
Alina Sbirlea 2017-08-01 00:28:29 +00:00
parent 6e16535e63
commit 7b373d280b
5 changed files with 49 additions and 51 deletions

View File

@@ -38,6 +38,7 @@
#ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
#define LLVM_ANALYSIS_ALIASANALYSIS_H
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
@@ -500,43 +501,33 @@ public:
return getModRefInfo(I, MemoryLocation(P, Size));
}
/// Check whether or not an instruction may read or write memory (without
/// regard to a specific location).
/// Check whether or not an instruction may read or write the optionally
/// specified memory location.
///
/// For function calls, this delegates to the alias-analysis specific
/// call-site mod-ref behavior queries. Otherwise it delegates to the generic
/// mod ref information query without a location.
ModRefInfo getModRefInfo(const Instruction *I) {
if (auto CS = ImmutableCallSite(I)) {
auto MRB = getModRefBehavior(CS);
if ((MRB & MRI_ModRef) == MRI_ModRef)
return MRI_ModRef;
if (MRB & MRI_Ref)
return MRI_Ref;
if (MRB & MRI_Mod)
return MRI_Mod;
return MRI_NoModRef;
}
return getModRefInfo(I, MemoryLocation());
}
/// Check whether or not an instruction may read or write the specified
/// memory location.
///
/// Note explicitly that getModRefInfo considers the effects of reading and
/// writing the memory location, and not the effect of ordering relative to
/// other instructions. Thus, a volatile load is considered to be Ref,
/// because it does not actually write memory, it just can't be reordered
/// relative to other volatiles (or removed). Atomic ordered loads/stores are
/// considered ModRef ATM because conservatively, the visible effect appears
/// as if memory was written, not just an ordering constraint.
///
/// An instruction that doesn't read or write memory may be trivially LICM'd
/// for example.
///
/// This primarily delegates to specific helpers above.
ModRefInfo getModRefInfo(const Instruction *I, const MemoryLocation &Loc) {
/// For function calls, this delegates to the alias-analysis specific
/// call-site mod-ref behavior queries. Otherwise it delegates to the specific
/// helpers above.
ModRefInfo getModRefInfo(const Instruction *I,
const Optional<MemoryLocation> &OptLoc) {
if (OptLoc == None) {
if (auto CS = ImmutableCallSite(I)) {
auto MRB = getModRefBehavior(CS);
if ((MRB & MRI_ModRef) == MRI_ModRef)
return MRI_ModRef;
if (MRB & MRI_Ref)
return MRI_Ref;
if (MRB & MRI_Mod)
return MRI_Mod;
return MRI_NoModRef;
}
}
const MemoryLocation &Loc = OptLoc.getValueOr(MemoryLocation());
switch (I->getOpcode()) {
case Instruction::VAArg: return getModRefInfo((const VAArgInst*)I, Loc);
case Instruction::Load: return getModRefInfo((const LoadInst*)I, Loc);

View File

@@ -16,6 +16,7 @@
#ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
#define LLVM_ANALYSIS_MEMORYLOCATION_H
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Metadata.h"
@@ -68,17 +69,23 @@ public:
static MemoryLocation get(const AtomicCmpXchgInst *CXI);
static MemoryLocation get(const AtomicRMWInst *RMWI);
static MemoryLocation get(const Instruction *Inst) {
if (auto *I = dyn_cast<LoadInst>(Inst))
return get(I);
else if (auto *I = dyn_cast<StoreInst>(Inst))
return get(I);
else if (auto *I = dyn_cast<VAArgInst>(Inst))
return get(I);
else if (auto *I = dyn_cast<AtomicCmpXchgInst>(Inst))
return get(I);
else if (auto *I = dyn_cast<AtomicRMWInst>(Inst))
return get(I);
llvm_unreachable("unsupported memory instruction");
return *MemoryLocation::getOrNone(Inst);
}
static Optional<MemoryLocation> getOrNone(const Instruction *Inst) {
switch (Inst->getOpcode()) {
case Instruction::Load:
return get(cast<LoadInst>(Inst));
case Instruction::Store:
return get(cast<StoreInst>(Inst));
case Instruction::VAArg:
return get(cast<VAArgInst>(Inst));
case Instruction::AtomicCmpXchg:
return get(cast<AtomicCmpXchgInst>(Inst));
case Instruction::AtomicRMW:
return get(cast<AtomicRMWInst>(Inst));
default:
return None;
}
}
/// Return a location representing the source of a memory transfer.

View File

@@ -1473,7 +1473,7 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
return nullptr;
// Find out what affect this instruction has on memory.
ModRefInfo ModRef = AA->getModRefInfo(I);
ModRefInfo ModRef = AA->getModRefInfo(I, None);
// The isOrdered check is used to ensure that volatiles end up as defs
// (atomics end up as ModRef right now anyway). Until we separate the
// ordering chain from the memory chain, this enables people to see at least

View File

@@ -535,7 +535,7 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
auto *C = &*I;
bool MayAlias = AA.getModRefInfo(C) != MRI_NoModRef;
bool MayAlias = AA.getModRefInfo(C, MemoryLocation()) != MRI_NoModRef;
bool NeedLift = false;
if (Args.erase(C))

View File

@@ -192,17 +192,17 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
// Check basic results
EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), MRI_Mod);
EXPECT_EQ(AA.getModRefInfo(Store1), MRI_Mod);
EXPECT_EQ(AA.getModRefInfo(Store1, None), MRI_Mod);
EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), MRI_Ref);
EXPECT_EQ(AA.getModRefInfo(Load1), MRI_Ref);
EXPECT_EQ(AA.getModRefInfo(Load1, None), MRI_Ref);
EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), MRI_NoModRef);
EXPECT_EQ(AA.getModRefInfo(Add1), MRI_NoModRef);
EXPECT_EQ(AA.getModRefInfo(Add1, None), MRI_NoModRef);
EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(VAArg1), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(VAArg1, None), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(CmpXChg1), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(AtomicRMW), MRI_ModRef);
EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), MRI_ModRef);
}
class AAPassInfraTest : public testing::Test {