
[Atomic] Remove IsStore/IsLoad in the interface, and pass the instruction instead. NFC.

Now both emitLeadingFence and emitTrailingFence take the instruction
itself, instead of taking an IsLoad/IsStore pair of booleans.
Instruction::mayReadFromMemory and Instruction::mayWriteToMemory are used
to determine those two booleans.

The instruction argument is also useful later, in D32763, for
emitTrailingFence. For emitLeadingFence, the proposed change seems to
make for a cleaner interface.

Differential Revision: https://reviews.llvm.org/D32762

llvm-svn: 302539
Tim Shen 2017-05-09 15:27:17 +00:00
parent 6b3ff33b51
commit 5f6285f048
8 changed files with 70 additions and 51 deletions
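
As an illustration of the new shape of the hooks, here is a minimal sketch (a free function with an illustrative name, not a real TargetLowering override; its behaviour mirrors the default emitLeadingFence implementation further below):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/Support/AtomicOrdering.h"

    using namespace llvm;

    // The hook now receives the atomic instruction itself and queries it,
    // instead of being handed separate IsStore/IsLoad booleans by the caller.
    static Instruction *emitLeadingFenceSketch(IRBuilder<> &Builder,
                                               Instruction *Inst,
                                               AtomicOrdering Ord) {
      // Only release-or-stronger operations with a store side need a fence.
      if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
        return Builder.CreateFence(Ord);
      return nullptr;
    }

The target overrides further down (ARM and PowerPC) follow the same pattern, only with target-specific barriers.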


@@ -456,6 +456,12 @@ public:
   /// higher.
   bool isAtomic() const;
 
+  /// Return true if this atomic instruction loads from memory.
+  bool hasAtomicLoad() const;
+
+  /// Return true if this atomic instruction stores to memory.
+  bool hasAtomicStore() const;
+
   /// Return true if this instruction may throw an exception.
   bool mayThrow() const;


@@ -1396,7 +1396,10 @@ public:
   /// It is called by AtomicExpandPass before expanding an
   /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
   /// if shouldInsertFencesForAtomic returns true.
-  /// RMW and CmpXchg set both IsStore and IsLoad to true.
+  ///
+  /// Inst is the original atomic instruction, prior to other expansions that
+  /// may be performed.
+  ///
   /// This function should either return a nullptr, or a pointer to an IR-level
   /// Instruction*. Even complex fence sequences can be represented by a
   /// single Instruction* through an intrinsic to be lowered later.
@@ -1422,18 +1425,17 @@ public:
   /// seq_cst. But if they are lowered to monotonic accesses, no amount of
   /// IR-level fences can prevent it.
   /// @{
-  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
-                                        AtomicOrdering Ord, bool IsStore,
-                                        bool IsLoad) const {
-    if (isReleaseOrStronger(Ord) && IsStore)
+  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
+                                        AtomicOrdering Ord) const {
+    if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
       return Builder.CreateFence(Ord);
     else
       return nullptr;
   }
 
   virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
-                                         AtomicOrdering Ord, bool IsStore,
-                                         bool IsLoad) const {
+                                         Instruction *Inst,
+                                         AtomicOrdering Ord) const {
     if (isAcquireOrStronger(Ord))
       return Builder.CreateFence(Ord);
     else
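
To make the call side concrete, here is a rough sketch of how a pass drives these two hooks (the helper name is illustrative, not real LLVM code; AtomicExpand::bracketInstWithFences below is the real caller, and it erases and re-inserts the trailing fence rather than calling moveAfter):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/Support/AtomicOrdering.h"
    #include "llvm/Target/TargetLowering.h"

    using namespace llvm;

    // Bracket an atomic instruction I with the target's leading/trailing fences.
    static void bracketWithFencesSketch(const TargetLowering &TLI,
                                        Instruction *I, AtomicOrdering Ord) {
      IRBuilder<> Builder(I);                    // insertion point is before I
      TLI.emitLeadingFence(Builder, I, Ord);     // e.g. 'fence release'
      Instruction *Trailing = TLI.emitTrailingFence(Builder, I, Ord);
      // Both fences were created before I; move the trailing one after it.
      if (Trailing)
        Trailing->moveAfter(I);
    }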


@@ -47,8 +47,7 @@ namespace {
     bool runOnFunction(Function &F) override;
 
   private:
-    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
-                               bool IsStore, bool IsLoad);
+    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
     IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
     LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
     bool tryExpandAtomicLoad(LoadInst *LI);
@@ -224,22 +223,16 @@ bool AtomicExpand::runOnFunction(Function &F) {
     if (TLI->shouldInsertFencesForAtomic(I)) {
       auto FenceOrdering = AtomicOrdering::Monotonic;
-      bool IsStore, IsLoad;
       if (LI && isAcquireOrStronger(LI->getOrdering())) {
         FenceOrdering = LI->getOrdering();
         LI->setOrdering(AtomicOrdering::Monotonic);
-        IsStore = false;
-        IsLoad = true;
       } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
         FenceOrdering = SI->getOrdering();
         SI->setOrdering(AtomicOrdering::Monotonic);
-        IsStore = true;
-        IsLoad = false;
       } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                           isAcquireOrStronger(RMWI->getOrdering()))) {
         FenceOrdering = RMWI->getOrdering();
         RMWI->setOrdering(AtomicOrdering::Monotonic);
-        IsStore = IsLoad = true;
       } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
                  (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                   isAcquireOrStronger(CASI->getSuccessOrdering()))) {
@@ -250,11 +243,10 @@ bool AtomicExpand::runOnFunction(Function &F) {
         FenceOrdering = CASI->getSuccessOrdering();
         CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
         CASI->setFailureOrdering(AtomicOrdering::Monotonic);
-        IsStore = IsLoad = true;
       }
 
       if (FenceOrdering != AtomicOrdering::Monotonic) {
-        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
+        MadeChange |= bracketInstWithFences(I, FenceOrdering);
       }
     }
@@ -320,13 +312,12 @@ bool AtomicExpand::runOnFunction(Function &F) {
   return MadeChange;
 }
 
-bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
-                                         bool IsStore, bool IsLoad) {
+bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
   IRBuilder<> Builder(I);
 
-  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);
+  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);
 
-  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
+  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
   // The trailing fence is emitted before the instruction instead of after
   // because there is no easy way of setting Builder insertion point after
   // an instruction. So we must erase it from the BB, and insert it back
@@ -1048,8 +1039,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   std::prev(BB->end())->eraseFromParent();
   Builder.SetInsertPoint(BB);
   if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
-    TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
-                          /*IsLoad=*/true);
+    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
   Builder.CreateBr(StartBB);
 
   // Start the main loop block now that we've taken care of the preliminaries.
@@ -1064,8 +1054,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   Builder.SetInsertPoint(ReleasingStoreBB);
   if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
-    TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
-                          /*IsLoad=*/true);
+    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
   Builder.CreateBr(TryStoreBB);
 
   Builder.SetInsertPoint(TryStoreBB);
@@ -1094,8 +1083,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   // necessary.
   Builder.SetInsertPoint(SuccessBB);
   if (ShouldInsertFencesForAtomic)
-    TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
-                           /*IsLoad=*/true);
+    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
   Builder.CreateBr(ExitBB);
 
   Builder.SetInsertPoint(NoStoreBB);
@@ -1107,8 +1095,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   Builder.SetInsertPoint(FailureBB);
   if (ShouldInsertFencesForAtomic)
-    TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
-                           /*IsLoad=*/true);
+    TLI->emitTrailingFence(Builder, CI, FailureOrder);
   Builder.CreateBr(ExitBB);
 
   // Finally, we have control-flow based knowledge of whether the cmpxchg
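
As a condensed sketch of the decision AtomicExpand::runOnFunction makes above, here is the load case in isolation (the helper name is illustrative; the pass passes the returned ordering on to bracketInstWithFences):

    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/AtomicOrdering.h"
    #include "llvm/Target/TargetLowering.h"

    using namespace llvm;

    // If the target wants explicit fences, relax the acquire-or-stronger load
    // to monotonic and return the ordering the surrounding fences must carry.
    // Returns Monotonic when no fences are needed.
    static AtomicOrdering takeFenceOrderingSketch(const TargetLowering &TLI,
                                                  LoadInst *LI) {
      if (!TLI.shouldInsertFencesForAtomic(LI) ||
          !isAcquireOrStronger(LI->getOrdering()))
        return AtomicOrdering::Monotonic;
      AtomicOrdering FenceOrdering = LI->getOrdering(); // e.g. seq_cst
      LI->setOrdering(AtomicOrdering::Monotonic);       // the access itself relaxes
      return FenceOrdering;
    }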


@@ -534,6 +534,30 @@ bool Instruction::isAtomic() const {
   }
 }
 
+bool Instruction::hasAtomicLoad() const {
+  assert(isAtomic());
+  switch (getOpcode()) {
+  default:
+    return false;
+  case Instruction::AtomicCmpXchg:
+  case Instruction::AtomicRMW:
+  case Instruction::Load:
+    return true;
+  }
+}
+
+bool Instruction::hasAtomicStore() const {
+  assert(isAtomic());
+  switch (getOpcode()) {
+  default:
+    return false;
+  case Instruction::AtomicCmpXchg:
+  case Instruction::AtomicRMW:
+  case Instruction::Store:
+    return true;
+  }
+}
+
 bool Instruction::mayThrow() const {
   if (const CallInst *CI = dyn_cast<CallInst>(this))
     return !CI->doesNotThrow();
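
A small usage sketch for the new predicates (the helper name is illustrative; note that both predicates assert isAtomic(), so non-atomic instructions must be filtered out first):

    #include "llvm/IR/Instruction.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    // Print how an atomic instruction is classified by the new predicates.
    // AtomicCmpXchg and AtomicRMW report both sides; Load and Store one each.
    static void describeAtomicSketch(const Instruction &I) {
      if (!I.isAtomic())
        return; // hasAtomicLoad()/hasAtomicStore() assert isAtomic()
      errs() << I.getOpcodeName() << ": loads=" << I.hasAtomicLoad()
             << " stores=" << I.hasAtomicStore() << "\n";
    }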


@@ -13416,9 +13416,9 @@ Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
 }
 
 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
-Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
-                                         AtomicOrdering Ord, bool IsStore,
-                                         bool IsLoad) const {
+Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
+                                                 Instruction *Inst,
+                                                 AtomicOrdering Ord) const {
   switch (Ord) {
   case AtomicOrdering::NotAtomic:
   case AtomicOrdering::Unordered:
@@ -13427,7 +13427,7 @@ Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
   case AtomicOrdering::Acquire:
     return nullptr; // Nothing to do
   case AtomicOrdering::SequentiallyConsistent:
-    if (!IsStore)
+    if (!Inst->hasAtomicStore())
       return nullptr; // Nothing to do
     /*FALLTHROUGH*/
   case AtomicOrdering::Release:
@@ -13441,9 +13441,9 @@ Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
   llvm_unreachable("Unknown fence ordering in emitLeadingFence");
 }
 
-Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
-                                          AtomicOrdering Ord, bool IsStore,
-                                          bool IsLoad) const {
+Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
+                                                  Instruction *Inst,
+                                                  AtomicOrdering Ord) const {
   switch (Ord) {
   case AtomicOrdering::NotAtomic:
   case AtomicOrdering::Unordered:


@@ -483,10 +483,10 @@ class InstrItineraryData;
     void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;
 
-    Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                  bool IsStore, bool IsLoad) const override;
-    Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                   bool IsStore, bool IsLoad) const override;
+    Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
+                                  AtomicOrdering Ord) const override;
+    Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
+                                   AtomicOrdering Ord) const override;
 
     unsigned getMaxSupportedInterleaveFactor() const override { return 4; }


@@ -8737,9 +8737,9 @@ static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
 
 // The mappings for emitLeading/TrailingFence is taken from
 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
-Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
-                                         AtomicOrdering Ord, bool IsStore,
-                                         bool IsLoad) const {
+Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
+                                                 Instruction *Inst,
+                                                 AtomicOrdering Ord) const {
   if (Ord == AtomicOrdering::SequentiallyConsistent)
     return callIntrinsic(Builder, Intrinsic::ppc_sync);
   if (isReleaseOrStronger(Ord))
@@ -8747,10 +8747,10 @@ Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
   return nullptr;
 }
 
-Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
-                                          AtomicOrdering Ord, bool IsStore,
-                                          bool IsLoad) const {
-  if (IsLoad && isAcquireOrStronger(Ord))
+Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
+                                                  Instruction *Inst,
+                                                  AtomicOrdering Ord) const {
+  if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord))
     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
   // FIXME: this is too conservative, a dependent branch + isync is enough.
   // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
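
As a quick restatement of the acquire path shown above (the helper name is illustrative; Intrinsic::not_intrinsic stands in for "no fence needed"):

    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/Support/AtomicOrdering.h"

    using namespace llvm;

    // A trailing barrier is only needed when the operation has a load side and
    // the ordering is acquire or stronger; the code above uses lwsync for it.
    static Intrinsic::ID ppcTrailingFenceSketch(const Instruction *Inst,
                                                AtomicOrdering Ord) {
      if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord))
        return Intrinsic::ppc_lwsync;
      return Intrinsic::not_intrinsic;
    }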


@@ -617,10 +617,10 @@ namespace llvm {
       return true;
     }
 
-    Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                  bool IsStore, bool IsLoad) const override;
-    Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                   bool IsStore, bool IsLoad) const override;
+    Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
+                                  AtomicOrdering Ord) const override;
+    Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
+                                   AtomicOrdering Ord) const override;
 
     MachineBasicBlock *
     EmitInstrWithCustomInserter(MachineInstr &MI,