Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2025-01-31 12:41:49 +01:00)
NFC: make AtomicOrdering an enum class
Summary:
In the context of http://wg21.link/lwg2445 C++ uses the concept of 'stronger' ordering but doesn't define it properly. This should be fixed in C++17 barring a small question that's still open.

The code currently plays fast and loose with the AtomicOrdering enum. Using an enum class is one step towards tightening things. I later also want to tighten related enums, such as clang's AtomicOrderingKind (which should be shared with LLVM as a 'C++ ABI' enum).

This change touches a few lines of code which can be improved later; I'd like to keep it as NFC for now as it's already quite complex. I have related changes for clang.

As a follow-up I'll add:
bool operator<(AtomicOrdering, AtomicOrdering) = delete;
bool operator>(AtomicOrdering, AtomicOrdering) = delete;
bool operator<=(AtomicOrdering, AtomicOrdering) = delete;
bool operator>=(AtomicOrdering, AtomicOrdering) = delete;
This is separate so that clang and LLVM changes don't need to be in sync.

Reviewers: jyknight, reames

Subscribers: jyknight, llvm-commits

Differential Revision: http://reviews.llvm.org/D18775

llvm-svn: 265602
This commit is contained in:
parent
9f05a7ca34
commit
f4f5b32f44
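A minimal, self-contained sketch of the idea behind the patch (illustrative only, not the actual LLVM header): a scoped AtomicOrdering whose strength queries go through an explicit lattice table, with the relational operators deleted as the follow-up described above proposes.

#include <cstdio>

// Simplified stand-in for LLVM's AtomicOrdering (values mirror the patch).
enum class AtomicOrdering {
  NotAtomic = 0,
  Unordered = 1,
  Monotonic = 2, // C++'s relaxed.
  // Consume = 3, // Not specified yet.
  Acquire = 4,
  Release = 5,
  AcquireRelease = 6,
  SequentiallyConsistent = 7
};

// Planned follow-up from the commit message: force callers through the
// lattice predicates instead of comparing raw enumerator values.
bool operator<(AtomicOrdering, AtomicOrdering) = delete;
bool operator>(AtomicOrdering, AtomicOrdering) = delete;
bool operator<=(AtomicOrdering, AtomicOrdering) = delete;
bool operator>=(AtomicOrdering, AtomicOrdering) = delete;

// True if ao is *strictly* stronger than other in the ordering lattice.
inline bool isStrongerThan(AtomicOrdering ao, AtomicOrdering other) {
  static const bool lookup[8][8] = {
      //               NA UN RX CO AC RE AR SC
      /* NotAtomic */ {0, 0, 0, 0, 0, 0, 0, 0},
      /* Unordered */ {1, 0, 0, 0, 0, 0, 0, 0},
      /* relaxed   */ {1, 1, 0, 0, 0, 0, 0, 0},
      /* consume   */ {1, 1, 1, 0, 0, 0, 0, 0},
      /* acquire   */ {1, 1, 1, 1, 0, 0, 0, 0},
      /* release   */ {1, 1, 1, 0, 0, 0, 0, 0},
      /* acq_rel   */ {1, 1, 1, 1, 1, 1, 0, 0},
      /* seq_cst   */ {1, 1, 1, 1, 1, 1, 1, 0},
  };
  return lookup[(unsigned)ao][(unsigned)other];
}

int main() {
  // Release is stronger than monotonic, but acquire and release are
  // incomparable: neither is stronger than the other, which the old
  // "Ord1 < Ord2" comparisons on enumerator values could not express.
  std::printf("%d %d %d\n",
              isStrongerThan(AtomicOrdering::Release, AtomicOrdering::Monotonic),
              isStrongerThan(AtomicOrdering::Acquire, AtomicOrdering::Release),
              isStrongerThan(AtomicOrdering::Release, AtomicOrdering::Acquire));
  // Prints: 1 0 0
}

Migration in the rest of the diff is then mechanical: comparisons such as "Ord > Monotonic" become isStrongerThanMonotonic(Ord), and isAtLeastAcquire/isAtLeastRelease become isAcquireOrStronger/isReleaseOrStronger.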
@ -367,7 +367,7 @@ Predicates for optimizer writers to query:
that they return true for any operation which is volatile or at least
Monotonic.

* ``isAtLeastAcquire()``/``isAtLeastRelease()``: These are predicates on
* ``isStrongerThan`` / ``isAtLeastOrStrongerThan``: These are predicates on
orderings. They can be useful for passes that are aware of atomics, for
example to do DSE across a single atomic access, but not across a
release-acquire pair (see MemoryDependencyAnalysis for an example of this)
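A rough usage sketch for the renamed predicates, building on the simplified definitions in the sketch above; FakeMemOp and canLookThrough are invented names for illustration, not LLVM API.

// Wrapper as added by the patch, sketched:
inline bool isStrongerThanMonotonic(AtomicOrdering Ord) {
  return isStrongerThan(Ord, AtomicOrdering::Monotonic);
}

// A pass that is aware of atomics may look through a monotonic-or-weaker,
// non-volatile access, but must stop at anything with acquire/release
// semantics (e.g. DSE across a single relaxed atomic, but not across a
// release-acquire pair).
struct FakeMemOp {
  AtomicOrdering Ordering;
  bool IsVolatile;
};

bool canLookThrough(const FakeMemOp &Op) {
  return !Op.IsVolatile && !isStrongerThanMonotonic(Op.Ordering);
}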
@ -1127,13 +1127,15 @@ class AtomicSDNode : public MemSDNode {
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
// This must match encodeMemSDNodeFlags() in SelectionDAG.cpp.
assert((SuccessOrdering & 15) == SuccessOrdering &&
assert((AtomicOrdering)((unsigned)SuccessOrdering & 15) ==
SuccessOrdering &&
"Ordering may not require more than 4 bits!");
assert((FailureOrdering & 15) == FailureOrdering &&
assert((AtomicOrdering)((unsigned)FailureOrdering & 15) ==
FailureOrdering &&
"Ordering may not require more than 4 bits!");
assert((SynchScope & 1) == SynchScope &&
"SynchScope may not require more than 1 bit!");
SubclassData |= SuccessOrdering << 8;
SubclassData |= (unsigned)SuccessOrdering << 8;
SubclassData |= SynchScope << 12;
this->FailureOrdering = FailureOrdering;
assert(getSuccessOrdering() == SuccessOrdering &&
@ -36,10 +36,16 @@ class ConstantRange;
class DataLayout;
class LLVMContext;

enum AtomicOrdering {
/// C++ defines ordering as a lattice. LLVM supplements this with NotAtomic and
/// Unordered, which are both below the C++ orders. See docs/Atomics.rst for
/// details.
///
/// not_atomic-->unordered-->relaxed-->release--------------->acq_rel-->seq_cst
///                                   \-->consume-->acquire--/
enum class AtomicOrdering {
NotAtomic = 0,
Unordered = 1,
Monotonic = 2,
Monotonic = 2, // Equivalent to C++'s relaxed.
// Consume = 3, // Not specified yet.
Acquire = 4,
Release = 5,
@ -47,26 +53,68 @@ enum AtomicOrdering {
SequentiallyConsistent = 7
};

/// String used by LLVM IR to represent atomic ordering.
static inline const char *toIRString(AtomicOrdering ao) {
static const char *names[8] = {"not_atomic", "unordered", "monotonic",
"consume", "acquire", "release",
"acq_rel", "seq_cst"};
return names[(size_t)ao];
}

/// Returns true if ao is stronger than other as defined by the AtomicOrdering
/// lattice, which is based on C++'s definition.
static inline bool isStrongerThan(AtomicOrdering ao, AtomicOrdering other) {
static const bool lookup[8][8] = {
// NA UN RX CO AC RE AR SC
/* NotAtomic */ {0, 0, 0, 0, 0, 0, 0, 0},
/* Unordered */ {1, 0, 0, 0, 0, 0, 0, 0},
/* relaxed */ {1, 1, 0, 0, 0, 0, 0, 0},
/* consume */ {1, 1, 1, 0, 0, 0, 0, 0},
/* acquire */ {1, 1, 1, 1, 0, 0, 0, 0},
/* release */ {1, 1, 1, 0, 0, 0, 0, 0},
/* acq_rel */ {1, 1, 1, 1, 1, 1, 0, 0},
/* seq_cst */ {1, 1, 1, 1, 1, 1, 1, 0},
};
return lookup[(size_t)ao][(size_t)other];
}

static inline bool isAtLeastOrStrongerThan(AtomicOrdering ao,
AtomicOrdering other) {
static const bool lookup[8][8] = {
// NA UN RX CO AC RE AR SC
/* NotAtomic */ {1, 0, 0, 0, 0, 0, 0, 0},
/* Unordered */ {1, 1, 0, 0, 0, 0, 0, 0},
/* relaxed */ {1, 1, 1, 0, 0, 0, 0, 0},
/* consume */ {1, 1, 1, 1, 0, 0, 0, 0},
/* acquire */ {1, 1, 1, 1, 1, 0, 0, 0},
/* release */ {1, 1, 1, 0, 0, 1, 0, 0},
/* acq_rel */ {1, 1, 1, 1, 1, 1, 1, 0},
/* seq_cst */ {1, 1, 1, 1, 1, 1, 1, 1},
};
return lookup[(size_t)ao][(size_t)other];
}

static inline bool isStrongerThanUnordered(AtomicOrdering Ord) {
return isStrongerThan(Ord, AtomicOrdering::Unordered);
}

static inline bool isStrongerThanMonotonic(AtomicOrdering Ord) {
return isStrongerThan(Ord, AtomicOrdering::Monotonic);
}

static inline bool isAcquireOrStronger(AtomicOrdering Ord) {
return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Acquire);
}

static inline bool isReleaseOrStronger(AtomicOrdering Ord) {
return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Release);
}

enum SynchronizationScope {
SingleThread = 0,
CrossThread = 1
};

/// Returns true if the ordering is at least as strong as acquire
/// (i.e. acquire, acq_rel or seq_cst)
inline bool isAtLeastAcquire(AtomicOrdering Ord) {
return (Ord == Acquire ||
Ord == AcquireRelease ||
Ord == SequentiallyConsistent);
}

/// Returns true if the ordering is at least as strong as release
/// (i.e. release, acq_rel or seq_cst)
inline bool isAtLeastRelease(AtomicOrdering Ord) {
return (Ord == Release ||
Ord == AcquireRelease ||
Ord == SequentiallyConsistent);
}

//===----------------------------------------------------------------------===//
// AllocaInst Class
@ -269,7 +317,7 @@ public:
|
||||
/// AcquireRelease.
|
||||
void setOrdering(AtomicOrdering Ordering) {
|
||||
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
|
||||
(Ordering << 7));
|
||||
((unsigned)Ordering << 7));
|
||||
}
|
||||
|
||||
SynchronizationScope getSynchScope() const {
|
||||
@ -292,7 +340,9 @@ public:
|
||||
|
||||
bool isSimple() const { return !isAtomic() && !isVolatile(); }
|
||||
bool isUnordered() const {
|
||||
return getOrdering() <= Unordered && !isVolatile();
|
||||
return (getOrdering() == AtomicOrdering::NotAtomic ||
|
||||
getOrdering() == AtomicOrdering::Unordered) &&
|
||||
!isVolatile();
|
||||
}
|
||||
|
||||
Value *getPointerOperand() { return getOperand(0); }
|
||||
@ -390,7 +440,7 @@ public:
|
||||
/// AcquireRelease.
|
||||
void setOrdering(AtomicOrdering Ordering) {
|
||||
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
|
||||
(Ordering << 7));
|
||||
((unsigned)Ordering << 7));
|
||||
}
|
||||
|
||||
SynchronizationScope getSynchScope() const {
|
||||
@ -413,7 +463,9 @@ public:
|
||||
|
||||
bool isSimple() const { return !isAtomic() && !isVolatile(); }
|
||||
bool isUnordered() const {
|
||||
return getOrdering() <= Unordered && !isVolatile();
|
||||
return (getOrdering() == AtomicOrdering::NotAtomic ||
|
||||
getOrdering() == AtomicOrdering::Unordered) &&
|
||||
!isVolatile();
|
||||
}
|
||||
|
||||
Value *getValueOperand() { return getOperand(0); }
|
||||
@ -489,7 +541,7 @@ public:
|
||||
/// AcquireRelease, or SequentiallyConsistent.
|
||||
void setOrdering(AtomicOrdering Ordering) {
|
||||
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
|
||||
(Ordering << 1));
|
||||
((unsigned)Ordering << 1));
|
||||
}
|
||||
|
||||
SynchronizationScope getSynchScope() const {
|
||||
@ -584,17 +636,17 @@ public:
|
||||
|
||||
/// Set the ordering constraint on this cmpxchg.
|
||||
void setSuccessOrdering(AtomicOrdering Ordering) {
|
||||
assert(Ordering != NotAtomic &&
|
||||
assert(Ordering != AtomicOrdering::NotAtomic &&
|
||||
"CmpXchg instructions can only be atomic.");
|
||||
setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
|
||||
(Ordering << 2));
|
||||
((unsigned)Ordering << 2));
|
||||
}
|
||||
|
||||
void setFailureOrdering(AtomicOrdering Ordering) {
|
||||
assert(Ordering != NotAtomic &&
|
||||
assert(Ordering != AtomicOrdering::NotAtomic &&
|
||||
"CmpXchg instructions can only be atomic.");
|
||||
setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
|
||||
(Ordering << 5));
|
||||
((unsigned)Ordering << 5));
|
||||
}
|
||||
|
||||
/// Specify whether this cmpxchg is atomic and orders other operations with
|
||||
@ -646,15 +698,16 @@ public:
|
||||
static AtomicOrdering
|
||||
getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
|
||||
switch (SuccessOrdering) {
|
||||
default: llvm_unreachable("invalid cmpxchg success ordering");
|
||||
case Release:
|
||||
case Monotonic:
|
||||
return Monotonic;
|
||||
case AcquireRelease:
|
||||
case Acquire:
|
||||
return Acquire;
|
||||
case SequentiallyConsistent:
|
||||
return SequentiallyConsistent;
|
||||
default:
|
||||
llvm_unreachable("invalid cmpxchg success ordering");
|
||||
case AtomicOrdering::Release:
|
||||
case AtomicOrdering::Monotonic:
|
||||
return AtomicOrdering::Monotonic;
|
||||
case AtomicOrdering::AcquireRelease:
|
||||
case AtomicOrdering::Acquire:
|
||||
return AtomicOrdering::Acquire;
|
||||
case AtomicOrdering::SequentiallyConsistent:
|
||||
return AtomicOrdering::SequentiallyConsistent;
|
||||
}
|
||||
}
|
||||
|
||||
@ -770,10 +823,10 @@ public:
|
||||
|
||||
/// Set the ordering constraint on this RMW.
|
||||
void setOrdering(AtomicOrdering Ordering) {
|
||||
assert(Ordering != NotAtomic &&
|
||||
assert(Ordering != AtomicOrdering::NotAtomic &&
|
||||
"atomicrmw instructions can only be atomic.");
|
||||
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
|
||||
(Ordering << 2));
|
||||
((unsigned)Ordering << 2));
|
||||
}
|
||||
|
||||
/// Specify whether this RMW orders other operations with respect to all
|
||||
|
@ -1108,7 +1108,7 @@ public:
|
||||
virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
|
||||
AtomicOrdering Ord, bool IsStore,
|
||||
bool IsLoad) const {
|
||||
if (isAtLeastRelease(Ord) && IsStore)
|
||||
if (isReleaseOrStronger(Ord) && IsStore)
|
||||
return Builder.CreateFence(Ord);
|
||||
else
|
||||
return nullptr;
|
||||
@ -1117,7 +1117,7 @@ public:
|
||||
virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
|
||||
AtomicOrdering Ord, bool IsStore,
|
||||
bool IsLoad) const {
|
||||
if (isAtLeastAcquire(Ord))
|
||||
if (isAcquireOrStronger(Ord))
|
||||
return Builder.CreateFence(Ord);
|
||||
else
|
||||
return nullptr;
|
||||
|
@ -389,7 +389,7 @@ ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
|
||||
ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
|
||||
const MemoryLocation &Loc) {
|
||||
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
|
||||
if (CX->getSuccessOrdering() > Monotonic)
|
||||
if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
|
||||
return MRI_ModRef;
|
||||
|
||||
// If the cmpxchg address does not alias the location, it does not access it.
|
||||
@ -402,7 +402,7 @@ ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
|
||||
ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
|
||||
const MemoryLocation &Loc) {
|
||||
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
|
||||
if (RMW->getOrdering() > Monotonic)
|
||||
if (isStrongerThanMonotonic(RMW->getOrdering()))
|
||||
return MRI_ModRef;
|
||||
|
||||
// If the atomicrmw address does not alias the location, it does not access it.
|
||||
|
@ -300,7 +300,7 @@ bool AliasSetTracker::add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo) {
|
||||
|
||||
|
||||
bool AliasSetTracker::add(LoadInst *LI) {
|
||||
if (LI->getOrdering() > Monotonic) return addUnknown(LI);
|
||||
if (isStrongerThanMonotonic(LI->getOrdering())) return addUnknown(LI);
|
||||
|
||||
AAMDNodes AAInfo;
|
||||
LI->getAAMetadata(AAInfo);
|
||||
@ -316,7 +316,7 @@ bool AliasSetTracker::add(LoadInst *LI) {
|
||||
}
|
||||
|
||||
bool AliasSetTracker::add(StoreInst *SI) {
|
||||
if (SI->getOrdering() > Monotonic) return addUnknown(SI);
|
||||
if (isStrongerThanMonotonic(SI->getOrdering())) return addUnknown(SI);
|
||||
|
||||
AAMDNodes AAInfo;
|
||||
SI->getAAMetadata(AAInfo);
|
||||
|
@ -93,7 +93,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
|
||||
Loc = MemoryLocation::get(LI);
|
||||
return MRI_Ref;
|
||||
}
|
||||
if (LI->getOrdering() == Monotonic) {
|
||||
if (LI->getOrdering() == AtomicOrdering::Monotonic) {
|
||||
Loc = MemoryLocation::get(LI);
|
||||
return MRI_ModRef;
|
||||
}
|
||||
@ -106,7 +106,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
|
||||
Loc = MemoryLocation::get(SI);
|
||||
return MRI_Mod;
|
||||
}
|
||||
if (SI->getOrdering() == Monotonic) {
|
||||
if (SI->getOrdering() == AtomicOrdering::Monotonic) {
|
||||
Loc = MemoryLocation::get(SI);
|
||||
return MRI_ModRef;
|
||||
}
|
||||
@ -518,11 +518,11 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
|
||||
// A Monotonic (or higher) load is OK if the query inst is itself not
|
||||
// atomic.
|
||||
// FIXME: This is overly conservative.
|
||||
if (LI->isAtomic() && LI->getOrdering() > Unordered) {
|
||||
if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
|
||||
if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
|
||||
isOtherMemAccess(QueryInst))
|
||||
return MemDepResult::getClobber(LI);
|
||||
if (LI->getOrdering() != Monotonic)
|
||||
if (LI->getOrdering() != AtomicOrdering::Monotonic)
|
||||
return MemDepResult::getClobber(LI);
|
||||
}
|
||||
|
||||
@ -588,7 +588,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
|
||||
if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
|
||||
isOtherMemAccess(QueryInst))
|
||||
return MemDepResult::getClobber(SI);
|
||||
if (SI->getOrdering() != Monotonic)
|
||||
if (SI->getOrdering() != AtomicOrdering::Monotonic)
|
||||
return MemDepResult::getClobber(SI);
|
||||
}
|
||||
|
||||
@ -644,9 +644,9 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
|
||||
// loads. DSE uses this to find preceeding stores to delete and thus we
|
||||
// can't bypass the fence if the query instruction is a store.
|
||||
if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
|
||||
if (isLoad && FI->getOrdering() == Release)
|
||||
if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
|
||||
continue;
|
||||
|
||||
|
||||
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
|
||||
ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
|
||||
// If necessary, perform additional analysis.
|
||||
@ -1708,4 +1708,3 @@ bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
|
||||
MemDep.emplace(AA, AC, TLI, DT);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -1810,12 +1810,16 @@ bool LLParser::ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
|
||||
bool LLParser::ParseOrdering(AtomicOrdering &Ordering) {
|
||||
switch (Lex.getKind()) {
|
||||
default: return TokError("Expected ordering on atomic instruction");
|
||||
case lltok::kw_unordered: Ordering = Unordered; break;
|
||||
case lltok::kw_monotonic: Ordering = Monotonic; break;
|
||||
case lltok::kw_acquire: Ordering = Acquire; break;
|
||||
case lltok::kw_release: Ordering = Release; break;
|
||||
case lltok::kw_acq_rel: Ordering = AcquireRelease; break;
|
||||
case lltok::kw_seq_cst: Ordering = SequentiallyConsistent; break;
|
||||
case lltok::kw_unordered: Ordering = AtomicOrdering::Unordered; break;
|
||||
case lltok::kw_monotonic: Ordering = AtomicOrdering::Monotonic; break;
|
||||
// Not specified yet:
|
||||
// case lltok::kw_consume: Ordering = AtomicOrdering::Consume; break;
|
||||
case lltok::kw_acquire: Ordering = AtomicOrdering::Acquire; break;
|
||||
case lltok::kw_release: Ordering = AtomicOrdering::Release; break;
|
||||
case lltok::kw_acq_rel: Ordering = AtomicOrdering::AcquireRelease; break;
|
||||
case lltok::kw_seq_cst:
|
||||
Ordering = AtomicOrdering::SequentiallyConsistent;
|
||||
break;
|
||||
}
|
||||
Lex.Lex();
|
||||
return false;
|
||||
@ -5884,7 +5888,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
unsigned Alignment = 0;
|
||||
bool AteExtraComma = false;
|
||||
bool isAtomic = false;
|
||||
AtomicOrdering Ordering = NotAtomic;
|
||||
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
|
||||
SynchronizationScope Scope = CrossThread;
|
||||
|
||||
if (Lex.getKind() == lltok::kw_atomic) {
|
||||
@ -5911,7 +5915,8 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
return Error(Loc, "load operand must be a pointer to a first class type");
|
||||
if (isAtomic && !Alignment)
|
||||
return Error(Loc, "atomic load must have explicit non-zero alignment");
|
||||
if (Ordering == Release || Ordering == AcquireRelease)
|
||||
if (Ordering == AtomicOrdering::Release ||
|
||||
Ordering == AtomicOrdering::AcquireRelease)
|
||||
return Error(Loc, "atomic load cannot use Release ordering");
|
||||
|
||||
if (Ty != cast<PointerType>(Val->getType())->getElementType())
|
||||
@ -5932,7 +5937,7 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
unsigned Alignment = 0;
|
||||
bool AteExtraComma = false;
|
||||
bool isAtomic = false;
|
||||
AtomicOrdering Ordering = NotAtomic;
|
||||
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
|
||||
SynchronizationScope Scope = CrossThread;
|
||||
|
||||
if (Lex.getKind() == lltok::kw_atomic) {
|
||||
@ -5961,7 +5966,8 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
return Error(Loc, "stored value and pointer type do not match");
|
||||
if (isAtomic && !Alignment)
|
||||
return Error(Loc, "atomic store must have explicit non-zero alignment");
|
||||
if (Ordering == Acquire || Ordering == AcquireRelease)
|
||||
if (Ordering == AtomicOrdering::Acquire ||
|
||||
Ordering == AtomicOrdering::AcquireRelease)
|
||||
return Error(Loc, "atomic store cannot use Acquire ordering");
|
||||
|
||||
Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
|
||||
@ -5974,8 +5980,8 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
|
||||
bool AteExtraComma = false;
|
||||
AtomicOrdering SuccessOrdering = NotAtomic;
|
||||
AtomicOrdering FailureOrdering = NotAtomic;
|
||||
AtomicOrdering SuccessOrdering = AtomicOrdering::NotAtomic;
|
||||
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
|
||||
SynchronizationScope Scope = CrossThread;
|
||||
bool isVolatile = false;
|
||||
bool isWeak = false;
|
||||
@ -5995,12 +6001,16 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
ParseOrdering(FailureOrdering))
|
||||
return true;
|
||||
|
||||
if (SuccessOrdering == Unordered || FailureOrdering == Unordered)
|
||||
if (SuccessOrdering == AtomicOrdering::Unordered ||
|
||||
FailureOrdering == AtomicOrdering::Unordered)
|
||||
return TokError("cmpxchg cannot be unordered");
|
||||
if (SuccessOrdering < FailureOrdering)
|
||||
return TokError("cmpxchg must be at least as ordered on success as failure");
|
||||
if (FailureOrdering == Release || FailureOrdering == AcquireRelease)
|
||||
return TokError("cmpxchg failure ordering cannot include release semantics");
|
||||
if (isStrongerThan(FailureOrdering, SuccessOrdering))
|
||||
return TokError("cmpxchg failure argument shall be no stronger than the "
|
||||
"success argument");
|
||||
if (FailureOrdering == AtomicOrdering::Release ||
|
||||
FailureOrdering == AtomicOrdering::AcquireRelease)
|
||||
return TokError(
|
||||
"cmpxchg failure ordering cannot include release semantics");
|
||||
if (!Ptr->getType()->isPointerTy())
|
||||
return Error(PtrLoc, "cmpxchg operand must be a pointer");
|
||||
if (cast<PointerType>(Ptr->getType())->getElementType() != Cmp->getType())
|
||||
@ -6023,7 +6033,7 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
|
||||
bool AteExtraComma = false;
|
||||
AtomicOrdering Ordering = NotAtomic;
|
||||
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
|
||||
SynchronizationScope Scope = CrossThread;
|
||||
bool isVolatile = false;
|
||||
AtomicRMWInst::BinOp Operation;
|
||||
@ -6053,7 +6063,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
|
||||
return true;
|
||||
|
||||
if (Ordering == Unordered)
|
||||
if (Ordering == AtomicOrdering::Unordered)
|
||||
return TokError("atomicrmw cannot be unordered");
|
||||
if (!Ptr->getType()->isPointerTy())
|
||||
return Error(PtrLoc, "atomicrmw operand must be a pointer");
|
||||
@ -6076,14 +6086,14 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
/// ParseFence
|
||||
/// ::= 'fence' 'singlethread'? AtomicOrdering
|
||||
int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
|
||||
AtomicOrdering Ordering = NotAtomic;
|
||||
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
|
||||
SynchronizationScope Scope = CrossThread;
|
||||
if (ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
|
||||
return true;
|
||||
|
||||
if (Ordering == Unordered)
|
||||
if (Ordering == AtomicOrdering::Unordered)
|
||||
return TokError("fence cannot be unordered");
|
||||
if (Ordering == Monotonic)
|
||||
if (Ordering == AtomicOrdering::Monotonic)
|
||||
return TokError("fence cannot be monotonic");
|
||||
|
||||
Inst = new FenceInst(Context, Ordering, Scope);
|
||||
|
@ -808,14 +808,14 @@ static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
|
||||
|
||||
static AtomicOrdering getDecodedOrdering(unsigned Val) {
|
||||
switch (Val) {
|
||||
case bitc::ORDERING_NOTATOMIC: return NotAtomic;
|
||||
case bitc::ORDERING_UNORDERED: return Unordered;
|
||||
case bitc::ORDERING_MONOTONIC: return Monotonic;
|
||||
case bitc::ORDERING_ACQUIRE: return Acquire;
|
||||
case bitc::ORDERING_RELEASE: return Release;
|
||||
case bitc::ORDERING_ACQREL: return AcquireRelease;
|
||||
case bitc::ORDERING_NOTATOMIC: return AtomicOrdering::NotAtomic;
|
||||
case bitc::ORDERING_UNORDERED: return AtomicOrdering::Unordered;
|
||||
case bitc::ORDERING_MONOTONIC: return AtomicOrdering::Monotonic;
|
||||
case bitc::ORDERING_ACQUIRE: return AtomicOrdering::Acquire;
|
||||
case bitc::ORDERING_RELEASE: return AtomicOrdering::Release;
|
||||
case bitc::ORDERING_ACQREL: return AtomicOrdering::AcquireRelease;
|
||||
default: // Map unknown orderings to sequentially-consistent.
|
||||
case bitc::ORDERING_SEQCST: return SequentiallyConsistent;
|
||||
case bitc::ORDERING_SEQCST: return AtomicOrdering::SequentiallyConsistent;
|
||||
}
|
||||
}
|
||||
|
||||
@ -4936,10 +4936,11 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
|
||||
Ty = cast<PointerType>(Op->getType())->getElementType();
|
||||
|
||||
AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
|
||||
if (Ordering == NotAtomic || Ordering == Release ||
|
||||
Ordering == AcquireRelease)
|
||||
if (Ordering == AtomicOrdering::NotAtomic ||
|
||||
Ordering == AtomicOrdering::Release ||
|
||||
Ordering == AtomicOrdering::AcquireRelease)
|
||||
return error("Invalid record");
|
||||
if (Ordering != NotAtomic && Record[OpNum] == 0)
|
||||
if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
|
||||
return error("Invalid record");
|
||||
SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
|
||||
|
||||
@ -4992,11 +4993,12 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
|
||||
typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
|
||||
return EC;
|
||||
AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
|
||||
if (Ordering == NotAtomic || Ordering == Acquire ||
|
||||
Ordering == AcquireRelease)
|
||||
if (Ordering == AtomicOrdering::NotAtomic ||
|
||||
Ordering == AtomicOrdering::Acquire ||
|
||||
Ordering == AtomicOrdering::AcquireRelease)
|
||||
return error("Invalid record");
|
||||
SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
|
||||
if (Ordering != NotAtomic && Record[OpNum] == 0)
|
||||
if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
|
||||
return error("Invalid record");
|
||||
|
||||
unsigned Align;
|
||||
@ -5022,7 +5024,8 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
|
||||
Record.size() < OpNum + 3 || Record.size() > OpNum + 5)
|
||||
return error("Invalid record");
|
||||
AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]);
|
||||
if (SuccessOrdering == NotAtomic || SuccessOrdering == Unordered)
|
||||
if (SuccessOrdering == AtomicOrdering::NotAtomic ||
|
||||
SuccessOrdering == AtomicOrdering::Unordered)
|
||||
return error("Invalid record");
|
||||
SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]);
|
||||
|
||||
@ -5067,7 +5070,8 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
|
||||
Operation > AtomicRMWInst::LAST_BINOP)
|
||||
return error("Invalid record");
|
||||
AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
|
||||
if (Ordering == NotAtomic || Ordering == Unordered)
|
||||
if (Ordering == AtomicOrdering::NotAtomic ||
|
||||
Ordering == AtomicOrdering::Unordered)
|
||||
return error("Invalid record");
|
||||
SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
|
||||
I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
|
||||
@ -5079,8 +5083,9 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) {
|
||||
if (2 != Record.size())
|
||||
return error("Invalid record");
|
||||
AtomicOrdering Ordering = getDecodedOrdering(Record[0]);
|
||||
if (Ordering == NotAtomic || Ordering == Unordered ||
|
||||
Ordering == Monotonic)
|
||||
if (Ordering == AtomicOrdering::NotAtomic ||
|
||||
Ordering == AtomicOrdering::Unordered ||
|
||||
Ordering == AtomicOrdering::Monotonic)
|
||||
return error("Invalid record");
|
||||
SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]);
|
||||
I = new FenceInst(Context, Ordering, SynchScope);
|
||||
|
@ -133,13 +133,13 @@ static unsigned GetEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
|
||||
|
||||
static unsigned GetEncodedOrdering(AtomicOrdering Ordering) {
|
||||
switch (Ordering) {
|
||||
case NotAtomic: return bitc::ORDERING_NOTATOMIC;
|
||||
case Unordered: return bitc::ORDERING_UNORDERED;
|
||||
case Monotonic: return bitc::ORDERING_MONOTONIC;
|
||||
case Acquire: return bitc::ORDERING_ACQUIRE;
|
||||
case Release: return bitc::ORDERING_RELEASE;
|
||||
case AcquireRelease: return bitc::ORDERING_ACQREL;
|
||||
case SequentiallyConsistent: return bitc::ORDERING_SEQCST;
|
||||
case AtomicOrdering::NotAtomic: return bitc::ORDERING_NOTATOMIC;
|
||||
case AtomicOrdering::Unordered: return bitc::ORDERING_UNORDERED;
|
||||
case AtomicOrdering::Monotonic: return bitc::ORDERING_MONOTONIC;
|
||||
case AtomicOrdering::Acquire: return bitc::ORDERING_ACQUIRE;
|
||||
case AtomicOrdering::Release: return bitc::ORDERING_RELEASE;
|
||||
case AtomicOrdering::AcquireRelease: return bitc::ORDERING_ACQREL;
|
||||
case AtomicOrdering::SequentiallyConsistent: return bitc::ORDERING_SEQCST;
|
||||
}
|
||||
llvm_unreachable("Invalid ordering");
|
||||
}
|
||||
|
@ -101,37 +101,37 @@ bool AtomicExpand::runOnFunction(Function &F) {
|
||||
assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");
|
||||
|
||||
if (TLI->shouldInsertFencesForAtomic(I)) {
|
||||
auto FenceOrdering = Monotonic;
|
||||
auto FenceOrdering = AtomicOrdering::Monotonic;
|
||||
bool IsStore, IsLoad;
|
||||
if (LI && isAtLeastAcquire(LI->getOrdering())) {
|
||||
if (LI && isAcquireOrStronger(LI->getOrdering())) {
|
||||
FenceOrdering = LI->getOrdering();
|
||||
LI->setOrdering(Monotonic);
|
||||
LI->setOrdering(AtomicOrdering::Monotonic);
|
||||
IsStore = false;
|
||||
IsLoad = true;
|
||||
} else if (SI && isAtLeastRelease(SI->getOrdering())) {
|
||||
} else if (SI && isReleaseOrStronger(SI->getOrdering())) {
|
||||
FenceOrdering = SI->getOrdering();
|
||||
SI->setOrdering(Monotonic);
|
||||
SI->setOrdering(AtomicOrdering::Monotonic);
|
||||
IsStore = true;
|
||||
IsLoad = false;
|
||||
} else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
|
||||
isAtLeastAcquire(RMWI->getOrdering()))) {
|
||||
} else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
|
||||
isAcquireOrStronger(RMWI->getOrdering()))) {
|
||||
FenceOrdering = RMWI->getOrdering();
|
||||
RMWI->setOrdering(Monotonic);
|
||||
RMWI->setOrdering(AtomicOrdering::Monotonic);
|
||||
IsStore = IsLoad = true;
|
||||
} else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
|
||||
(isAtLeastRelease(CASI->getSuccessOrdering()) ||
|
||||
isAtLeastAcquire(CASI->getSuccessOrdering()))) {
|
||||
(isReleaseOrStronger(CASI->getSuccessOrdering()) ||
|
||||
isAcquireOrStronger(CASI->getSuccessOrdering()))) {
|
||||
// If a compare and swap is lowered to LL/SC, we can do smarter fence
|
||||
// insertion, with a stronger one on the success path than on the
|
||||
// failure path. As a result, fence insertion is directly done by
|
||||
// expandAtomicCmpXchg in that case.
|
||||
FenceOrdering = CASI->getSuccessOrdering();
|
||||
CASI->setSuccessOrdering(Monotonic);
|
||||
CASI->setFailureOrdering(Monotonic);
|
||||
CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
|
||||
CASI->setFailureOrdering(AtomicOrdering::Monotonic);
|
||||
IsStore = IsLoad = true;
|
||||
}
|
||||
|
||||
if (FenceOrdering != Monotonic) {
|
||||
if (FenceOrdering != AtomicOrdering::Monotonic) {
|
||||
MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
|
||||
}
|
||||
}
|
||||
@ -520,7 +520,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
|
||||
// should preserve the ordering.
|
||||
bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
|
||||
AtomicOrdering MemOpOrder =
|
||||
ShouldInsertFencesForAtomic ? Monotonic : SuccessOrder;
|
||||
ShouldInsertFencesForAtomic ? AtomicOrdering::Monotonic : SuccessOrder;
|
||||
|
||||
// In implementations which use a barrier to achieve release semantics, we can
|
||||
// delay emitting this barrier until we know a store is actually going to be
|
||||
@ -532,8 +532,9 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
|
||||
// minimal loop. Unfortunately, this puts too much stress on later
|
||||
// optimisations so we avoid emitting the extra logic in those cases too.
|
||||
bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
|
||||
SuccessOrder != Monotonic &&
|
||||
SuccessOrder != Acquire && !F->optForMinSize();
|
||||
SuccessOrder != AtomicOrdering::Monotonic &&
|
||||
SuccessOrder != AtomicOrdering::Acquire &&
|
||||
!F->optForMinSize();
|
||||
|
||||
// There's no overhead for sinking the release barrier in a weak cmpxchg, so
|
||||
// do it even on minsize.
|
||||
@ -767,8 +768,9 @@ bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
|
||||
CreateCmpXchgInstFun CreateCmpXchg) {
|
||||
assert(AI);
|
||||
|
||||
AtomicOrdering MemOpOrder =
|
||||
AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
|
||||
AtomicOrdering MemOpOrder = AI->getOrdering() == AtomicOrdering::Unordered
|
||||
? AtomicOrdering::Monotonic
|
||||
: AI->getOrdering();
|
||||
Value *Addr = AI->getPointerOperand();
|
||||
BasicBlock *BB = AI->getParent();
|
||||
Function *F = BB->getParent();
|
||||
|
@ -3903,7 +3903,7 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) {
|
||||
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
|
||||
SDValue Ops[3];
|
||||
Ops[0] = getRoot();
|
||||
Ops[1] = DAG.getConstant(I.getOrdering(), dl,
|
||||
Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
|
||||
TLI.getPointerTy(DAG.getDataLayout()));
|
||||
Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
|
||||
TLI.getPointerTy(DAG.getDataLayout()));
|
||||
|
@ -2110,7 +2110,7 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
|
||||
|
||||
void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
|
||||
SynchronizationScope SynchScope) {
|
||||
if (Ordering == NotAtomic)
|
||||
if (Ordering == AtomicOrdering::NotAtomic)
|
||||
return;
|
||||
|
||||
switch (SynchScope) {
|
||||
@ -2118,46 +2118,22 @@ void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
|
||||
case CrossThread: break;
|
||||
}
|
||||
|
||||
switch (Ordering) {
|
||||
default: Out << " <bad ordering " << int(Ordering) << ">"; break;
|
||||
case Unordered: Out << " unordered"; break;
|
||||
case Monotonic: Out << " monotonic"; break;
|
||||
case Acquire: Out << " acquire"; break;
|
||||
case Release: Out << " release"; break;
|
||||
case AcquireRelease: Out << " acq_rel"; break;
|
||||
case SequentiallyConsistent: Out << " seq_cst"; break;
|
||||
}
|
||||
Out << " " << toIRString(Ordering);
|
||||
}
|
||||
|
||||
void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
|
||||
AtomicOrdering FailureOrdering,
|
||||
SynchronizationScope SynchScope) {
|
||||
assert(SuccessOrdering != NotAtomic && FailureOrdering != NotAtomic);
|
||||
assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
|
||||
FailureOrdering != AtomicOrdering::NotAtomic);
|
||||
|
||||
switch (SynchScope) {
|
||||
case SingleThread: Out << " singlethread"; break;
|
||||
case CrossThread: break;
|
||||
}
|
||||
|
||||
switch (SuccessOrdering) {
|
||||
default: Out << " <bad ordering " << int(SuccessOrdering) << ">"; break;
|
||||
case Unordered: Out << " unordered"; break;
|
||||
case Monotonic: Out << " monotonic"; break;
|
||||
case Acquire: Out << " acquire"; break;
|
||||
case Release: Out << " release"; break;
|
||||
case AcquireRelease: Out << " acq_rel"; break;
|
||||
case SequentiallyConsistent: Out << " seq_cst"; break;
|
||||
}
|
||||
|
||||
switch (FailureOrdering) {
|
||||
default: Out << " <bad ordering " << int(FailureOrdering) << ">"; break;
|
||||
case Unordered: Out << " unordered"; break;
|
||||
case Monotonic: Out << " monotonic"; break;
|
||||
case Acquire: Out << " acquire"; break;
|
||||
case Release: Out << " release"; break;
|
||||
case AcquireRelease: Out << " acq_rel"; break;
|
||||
case SequentiallyConsistent: Out << " seq_cst"; break;
|
||||
}
|
||||
Out << " " << toIRString(SuccessOrdering);
|
||||
Out << " " << toIRString(FailureOrdering);
|
||||
}
|
||||
|
||||
void AssemblyWriter::writeParamOperand(const Value *Operand,
|
||||
|
@ -2602,14 +2602,15 @@ LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val,
|
||||
|
||||
static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
|
||||
switch (Ordering) {
|
||||
case LLVMAtomicOrderingNotAtomic: return NotAtomic;
|
||||
case LLVMAtomicOrderingUnordered: return Unordered;
|
||||
case LLVMAtomicOrderingMonotonic: return Monotonic;
|
||||
case LLVMAtomicOrderingAcquire: return Acquire;
|
||||
case LLVMAtomicOrderingRelease: return Release;
|
||||
case LLVMAtomicOrderingAcquireRelease: return AcquireRelease;
|
||||
case LLVMAtomicOrderingNotAtomic: return AtomicOrdering::NotAtomic;
|
||||
case LLVMAtomicOrderingUnordered: return AtomicOrdering::Unordered;
|
||||
case LLVMAtomicOrderingMonotonic: return AtomicOrdering::Monotonic;
|
||||
case LLVMAtomicOrderingAcquire: return AtomicOrdering::Acquire;
|
||||
case LLVMAtomicOrderingRelease: return AtomicOrdering::Release;
|
||||
case LLVMAtomicOrderingAcquireRelease:
|
||||
return AtomicOrdering::AcquireRelease;
|
||||
case LLVMAtomicOrderingSequentiallyConsistent:
|
||||
return SequentiallyConsistent;
|
||||
return AtomicOrdering::SequentiallyConsistent;
|
||||
}
|
||||
|
||||
llvm_unreachable("Invalid LLVMAtomicOrdering value!");
|
||||
@ -2617,13 +2618,14 @@ static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
|
||||
|
||||
static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
|
||||
switch (Ordering) {
|
||||
case NotAtomic: return LLVMAtomicOrderingNotAtomic;
|
||||
case Unordered: return LLVMAtomicOrderingUnordered;
|
||||
case Monotonic: return LLVMAtomicOrderingMonotonic;
|
||||
case Acquire: return LLVMAtomicOrderingAcquire;
|
||||
case Release: return LLVMAtomicOrderingRelease;
|
||||
case AcquireRelease: return LLVMAtomicOrderingAcquireRelease;
|
||||
case SequentiallyConsistent:
|
||||
case AtomicOrdering::NotAtomic: return LLVMAtomicOrderingNotAtomic;
|
||||
case AtomicOrdering::Unordered: return LLVMAtomicOrderingUnordered;
|
||||
case AtomicOrdering::Monotonic: return LLVMAtomicOrderingMonotonic;
|
||||
case AtomicOrdering::Acquire: return LLVMAtomicOrderingAcquire;
|
||||
case AtomicOrdering::Release: return LLVMAtomicOrderingRelease;
|
||||
case AtomicOrdering::AcquireRelease:
|
||||
return LLVMAtomicOrderingAcquireRelease;
|
||||
case AtomicOrdering::SequentiallyConsistent:
|
||||
return LLVMAtomicOrderingSequentiallyConsistent;
|
||||
}
|
||||
|
||||
|
@ -461,9 +461,9 @@ bool Instruction::isAtomic() const {
|
||||
case Instruction::Fence:
|
||||
return true;
|
||||
case Instruction::Load:
|
||||
return cast<LoadInst>(this)->getOrdering() != NotAtomic;
|
||||
return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
|
||||
case Instruction::Store:
|
||||
return cast<StoreInst>(this)->getOrdering() != NotAtomic;
|
||||
return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1209,13 +1209,13 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
|
||||
|
||||
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
|
||||
unsigned Align, Instruction *InsertBef)
|
||||
: LoadInst(Ty, Ptr, Name, isVolatile, Align, NotAtomic, CrossThread,
|
||||
InsertBef) {}
|
||||
: LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
|
||||
CrossThread, InsertBef) {}
|
||||
|
||||
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
|
||||
unsigned Align, BasicBlock *InsertAE)
|
||||
: LoadInst(Ptr, Name, isVolatile, Align, NotAtomic, CrossThread, InsertAE) {
|
||||
}
|
||||
: LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
|
||||
CrossThread, InsertAE) {}
|
||||
|
||||
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
|
||||
unsigned Align, AtomicOrdering Order,
|
||||
@ -1247,7 +1247,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
|
||||
Load, Ptr, InsertBef) {
|
||||
setVolatile(false);
|
||||
setAlignment(0);
|
||||
setAtomic(NotAtomic);
|
||||
setAtomic(AtomicOrdering::NotAtomic);
|
||||
AssertOK();
|
||||
if (Name && Name[0]) setName(Name);
|
||||
}
|
||||
@ -1257,7 +1257,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
|
||||
Load, Ptr, InsertAE) {
|
||||
setVolatile(false);
|
||||
setAlignment(0);
|
||||
setAtomic(NotAtomic);
|
||||
setAtomic(AtomicOrdering::NotAtomic);
|
||||
AssertOK();
|
||||
if (Name && Name[0]) setName(Name);
|
||||
}
|
||||
@ -1268,7 +1268,7 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
|
||||
assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
|
||||
setVolatile(isVolatile);
|
||||
setAlignment(0);
|
||||
setAtomic(NotAtomic);
|
||||
setAtomic(AtomicOrdering::NotAtomic);
|
||||
AssertOK();
|
||||
if (Name && Name[0]) setName(Name);
|
||||
}
|
||||
@ -1279,7 +1279,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
|
||||
Load, Ptr, InsertAE) {
|
||||
setVolatile(isVolatile);
|
||||
setAlignment(0);
|
||||
setAtomic(NotAtomic);
|
||||
setAtomic(AtomicOrdering::NotAtomic);
|
||||
AssertOK();
|
||||
if (Name && Name[0]) setName(Name);
|
||||
}
|
||||
@ -1324,13 +1324,13 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
|
||||
|
||||
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
|
||||
Instruction *InsertBefore)
|
||||
: StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
|
||||
InsertBefore) {}
|
||||
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
|
||||
CrossThread, InsertBefore) {}
|
||||
|
||||
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
|
||||
BasicBlock *InsertAtEnd)
|
||||
: StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
|
||||
InsertAtEnd) {}
|
||||
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
|
||||
CrossThread, InsertAtEnd) {}
|
||||
|
||||
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
|
||||
unsigned Align, AtomicOrdering Order,
|
||||
@ -1398,13 +1398,15 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
|
||||
assert(getOperand(2)->getType() ==
|
||||
cast<PointerType>(getOperand(0)->getType())->getElementType()
|
||||
&& "Ptr must be a pointer to NewVal type!");
|
||||
assert(SuccessOrdering != NotAtomic &&
|
||||
assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
|
||||
"AtomicCmpXchg instructions must be atomic!");
|
||||
assert(FailureOrdering != NotAtomic &&
|
||||
assert(FailureOrdering != AtomicOrdering::NotAtomic &&
|
||||
"AtomicCmpXchg instructions must be atomic!");
|
||||
assert(SuccessOrdering >= FailureOrdering &&
|
||||
"AtomicCmpXchg success ordering must be at least as strong as fail");
|
||||
assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
|
||||
assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
|
||||
"AtomicCmpXchg failure argument shall be no stronger than the success "
|
||||
"argument");
|
||||
assert(FailureOrdering != AtomicOrdering::Release &&
|
||||
FailureOrdering != AtomicOrdering::AcquireRelease &&
|
||||
"AtomicCmpXchg failure ordering cannot include release semantics");
|
||||
}
|
||||
|
||||
@ -1454,7 +1456,7 @@ void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
|
||||
assert(getOperand(1)->getType() ==
|
||||
cast<PointerType>(getOperand(0)->getType())->getElementType()
|
||||
&& "Ptr must be a pointer to Val type!");
|
||||
assert(Ordering != NotAtomic &&
|
||||
assert(Ordering != AtomicOrdering::NotAtomic &&
|
||||
"AtomicRMW instructions must be atomic!");
|
||||
}
|
||||
|
||||
|
@ -2919,7 +2919,8 @@ void Verifier::visitLoadInst(LoadInst &LI) {
|
||||
Assert(LI.getAlignment() <= Value::MaximumAlignment,
|
||||
"huge alignment values are unsupported", &LI);
|
||||
if (LI.isAtomic()) {
|
||||
Assert(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
|
||||
Assert(LI.getOrdering() != AtomicOrdering::Release &&
|
||||
LI.getOrdering() != AtomicOrdering::AcquireRelease,
|
||||
"Load cannot have Release ordering", &LI);
|
||||
Assert(LI.getAlignment() != 0,
|
||||
"Atomic load must specify explicit alignment", &LI);
|
||||
@ -2946,7 +2947,8 @@ void Verifier::visitStoreInst(StoreInst &SI) {
|
||||
Assert(SI.getAlignment() <= Value::MaximumAlignment,
|
||||
"huge alignment values are unsupported", &SI);
|
||||
if (SI.isAtomic()) {
|
||||
Assert(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
|
||||
Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
|
||||
SI.getOrdering() != AtomicOrdering::AcquireRelease,
|
||||
"Store cannot have Acquire ordering", &SI);
|
||||
Assert(SI.getAlignment() != 0,
|
||||
"Atomic store must specify explicit alignment", &SI);
|
||||
@ -3022,19 +3024,20 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
|
||||
void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
|
||||
|
||||
// FIXME: more conditions???
|
||||
Assert(CXI.getSuccessOrdering() != NotAtomic,
|
||||
Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
|
||||
"cmpxchg instructions must be atomic.", &CXI);
|
||||
Assert(CXI.getFailureOrdering() != NotAtomic,
|
||||
Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
|
||||
"cmpxchg instructions must be atomic.", &CXI);
|
||||
Assert(CXI.getSuccessOrdering() != Unordered,
|
||||
Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
|
||||
"cmpxchg instructions cannot be unordered.", &CXI);
|
||||
Assert(CXI.getFailureOrdering() != Unordered,
|
||||
Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
|
||||
"cmpxchg instructions cannot be unordered.", &CXI);
|
||||
Assert(CXI.getSuccessOrdering() >= CXI.getFailureOrdering(),
|
||||
"cmpxchg instructions be at least as constrained on success as fail",
|
||||
Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
|
||||
"cmpxchg instructions failure argument shall be no stronger than the "
|
||||
"success argument",
|
||||
&CXI);
|
||||
Assert(CXI.getFailureOrdering() != Release &&
|
||||
CXI.getFailureOrdering() != AcquireRelease,
|
||||
Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
|
||||
CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
|
||||
"cmpxchg failure ordering cannot include release semantics", &CXI);
|
||||
|
||||
PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
|
||||
@ -3053,9 +3056,9 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
|
||||
}
|
||||
|
||||
void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
|
||||
Assert(RMWI.getOrdering() != NotAtomic,
|
||||
Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
|
||||
"atomicrmw instructions must be atomic.", &RMWI);
|
||||
Assert(RMWI.getOrdering() != Unordered,
|
||||
Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
|
||||
"atomicrmw instructions cannot be unordered.", &RMWI);
|
||||
PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
|
||||
Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
|
||||
@ -3074,10 +3077,12 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
|
||||
|
||||
void Verifier::visitFenceInst(FenceInst &FI) {
|
||||
const AtomicOrdering Ordering = FI.getOrdering();
|
||||
Assert(Ordering == Acquire || Ordering == Release ||
|
||||
Ordering == AcquireRelease || Ordering == SequentiallyConsistent,
|
||||
"fence instructions may only have "
|
||||
"acquire, release, acq_rel, or seq_cst ordering.",
|
||||
Assert(Ordering == AtomicOrdering::Acquire ||
|
||||
Ordering == AtomicOrdering::Release ||
|
||||
Ordering == AtomicOrdering::AcquireRelease ||
|
||||
Ordering == AtomicOrdering::SequentiallyConsistent,
|
||||
"fence instructions may only have acquire, release, acq_rel, or "
|
||||
"seq_cst ordering.",
|
||||
&FI);
|
||||
visitInstruction(FI);
|
||||
}
|
||||
|
@ -608,7 +608,7 @@ static bool isWorthFoldingADDlow(SDValue N) {
|
||||
|
||||
// ldar and stlr have much more restrictive addressing modes (just a
|
||||
// register).
|
||||
if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
|
||||
if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -10132,7 +10132,7 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
|
||||
AtomicOrdering Ord) const {
|
||||
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
|
||||
Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
|
||||
bool IsAcquire = isAtLeastAcquire(Ord);
|
||||
bool IsAcquire = isAcquireOrStronger(Ord);
|
||||
|
||||
// Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd
|
||||
// intrinsic must return {i64, i64} and we have to recombine them into a
|
||||
@ -10174,7 +10174,7 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder,
|
||||
Value *Val, Value *Addr,
|
||||
AtomicOrdering Ord) const {
|
||||
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
|
||||
bool IsRelease = isAtLeastRelease(Ord);
|
||||
bool IsRelease = isReleaseOrStronger(Ord);
|
||||
|
||||
// Since the intrinsics must have legal type, the i128 intrinsics take two
|
||||
// parameters: "i64, i64". We must marshal Val into the appropriate form
|
||||
|
@ -29,7 +29,7 @@ def : Pat<(atomic_fence (imm), (imm)), (DMB (i32 0xb))>;
|
||||
class acquiring_load<PatFrag base>
|
||||
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
|
||||
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
|
||||
return isAtLeastAcquire(Ordering);
|
||||
return isAcquireOrStronger(Ordering);
|
||||
}]>;
|
||||
|
||||
// An atomic load operation that does not need either acquire or release
|
||||
@ -37,7 +37,7 @@ class acquiring_load<PatFrag base>
|
||||
class relaxed_load<PatFrag base>
|
||||
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
|
||||
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
|
||||
return !isAtLeastAcquire(Ordering);
|
||||
return !isAcquireOrStronger(Ordering);
|
||||
}]>;
|
||||
|
||||
// 8-bit loads
|
||||
@ -112,15 +112,16 @@ def : Pat<(relaxed_load<atomic_load_64>
|
||||
class releasing_store<PatFrag base>
|
||||
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
|
||||
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
|
||||
assert(Ordering != AcquireRelease && "unexpected store ordering");
|
||||
return isAtLeastRelease(Ordering);
|
||||
assert(Ordering != AtomicOrdering::AcquireRelease &&
|
||||
"unexpected store ordering");
|
||||
return isReleaseOrStronger(Ordering);
|
||||
}]>;
|
||||
|
||||
// An atomic store operation that doesn't actually need to be atomic on AArch64.
|
||||
class relaxed_store<PatFrag base>
|
||||
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
|
||||
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
|
||||
return !isAtLeastRelease(Ordering);
|
||||
return !isReleaseOrStronger(Ordering);
|
||||
}]>;
|
||||
|
||||
// 8-bit stores
|
||||
|
@ -3011,7 +3011,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
|
||||
if (Subtarget->isMClass()) {
|
||||
// Only a full system barrier exists in the M-class architectures.
|
||||
Domain = ARM_MB::SY;
|
||||
} else if (Subtarget->isSwift() && Ord == Release) {
|
||||
} else if (Subtarget->isSwift() && Ord == AtomicOrdering::Release) {
|
||||
// Swift happens to implement ISHST barriers in a way that's compatible with
|
||||
// Release semantics but weaker than ISH so we'd be fools not to use
|
||||
// it. Beware: other processors probably don't!
|
||||
@ -6932,13 +6932,13 @@ void ARMTargetLowering::ExpandDIV_Windows(
|
||||
}
|
||||
|
||||
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
|
||||
// Monotonic load/store is legal for all targets
|
||||
if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
|
||||
return Op;
|
||||
if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
|
||||
// Acquire/Release load/store is not legal for targets without a dmb or
|
||||
// equivalent available.
|
||||
return SDValue();
|
||||
|
||||
// Acquire/Release load/store is not legal for targets without a
|
||||
// dmb or equivalent available.
|
||||
return SDValue();
|
||||
// Monotonic load/store is legal for all targets.
|
||||
return Op;
|
||||
}
|
||||
|
||||
static void ReplaceREADCYCLECOUNTER(SDNode *N,
|
||||
@ -12076,18 +12076,18 @@ Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
|
||||
AtomicOrdering Ord, bool IsStore,
|
||||
bool IsLoad) const {
|
||||
switch (Ord) {
|
||||
case NotAtomic:
|
||||
case Unordered:
|
||||
case AtomicOrdering::NotAtomic:
|
||||
case AtomicOrdering::Unordered:
|
||||
llvm_unreachable("Invalid fence: unordered/non-atomic");
|
||||
case Monotonic:
|
||||
case Acquire:
|
||||
case AtomicOrdering::Monotonic:
|
||||
case AtomicOrdering::Acquire:
|
||||
return nullptr; // Nothing to do
|
||||
case SequentiallyConsistent:
|
||||
case AtomicOrdering::SequentiallyConsistent:
|
||||
if (!IsStore)
|
||||
return nullptr; // Nothing to do
|
||||
/*FALLTHROUGH*/
|
||||
case Release:
|
||||
case AcquireRelease:
|
||||
case AtomicOrdering::Release:
|
||||
case AtomicOrdering::AcquireRelease:
|
||||
if (Subtarget->isSwift())
|
||||
return makeDMB(Builder, ARM_MB::ISHST);
|
||||
// FIXME: add a comment with a link to documentation justifying this.
|
||||
@ -12101,15 +12101,15 @@ Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
|
||||
AtomicOrdering Ord, bool IsStore,
|
||||
bool IsLoad) const {
|
||||
switch (Ord) {
|
||||
case NotAtomic:
|
||||
case Unordered:
|
||||
case AtomicOrdering::NotAtomic:
|
||||
case AtomicOrdering::Unordered:
|
||||
llvm_unreachable("Invalid fence: unordered/not-atomic");
|
||||
case Monotonic:
|
||||
case Release:
|
||||
case AtomicOrdering::Monotonic:
|
||||
case AtomicOrdering::Release:
|
||||
return nullptr; // Nothing to do
|
||||
case Acquire:
|
||||
case AcquireRelease:
|
||||
case SequentiallyConsistent:
|
||||
case AtomicOrdering::Acquire:
|
||||
case AtomicOrdering::AcquireRelease:
|
||||
case AtomicOrdering::SequentiallyConsistent:
|
||||
return makeDMB(Builder, ARM_MB::ISH);
|
||||
}
|
||||
llvm_unreachable("Unknown fence ordering in emitTrailingFence");
|
||||
@ -12204,7 +12204,7 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
|
||||
AtomicOrdering Ord) const {
|
||||
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
|
||||
Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
|
||||
bool IsAcquire = isAtLeastAcquire(Ord);
|
||||
bool IsAcquire = isAcquireOrStronger(Ord);
|
||||
|
||||
// Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
|
||||
// intrinsic must return {i32, i32} and we have to recombine them into a
|
||||
@ -12248,7 +12248,7 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
|
||||
Value *Addr,
|
||||
AtomicOrdering Ord) const {
|
||||
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
|
||||
bool IsRelease = isAtLeastRelease(Ord);
|
||||
bool IsRelease = isReleaseOrStronger(Ord);
|
||||
|
||||
// Since the intrinsics must have legal type, the i64 intrinsics take two
|
||||
// parameters: "i32, i32". We must marshal Val into the appropriate form
|
||||
|
@ -4761,7 +4761,7 @@ def : ARMPat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
class acquiring_load<PatFrag base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
return isAtLeastAcquire(Ordering);
return isAcquireOrStronger(Ordering);
}]>;

def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;

@ -4771,7 +4771,7 @@ def atomic_load_acquire_32 : acquiring_load<atomic_load_32>;
class releasing_store<PatFrag base>
: PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
return isAtLeastRelease(Ordering);
return isReleaseOrStronger(Ordering);
}]>;

def atomic_store_release_8 : releasing_store<atomic_store_8>;

@ -1091,13 +1091,14 @@ std::string CppWriter::getOpName(const Value* V) {

static StringRef ConvertAtomicOrdering(AtomicOrdering Ordering) {
switch (Ordering) {
case NotAtomic: return "NotAtomic";
case Unordered: return "Unordered";
case Monotonic: return "Monotonic";
case Acquire: return "Acquire";
case Release: return "Release";
case AcquireRelease: return "AcquireRelease";
case SequentiallyConsistent: return "SequentiallyConsistent";
case AtomicOrdering::NotAtomic: return "NotAtomic";
case AtomicOrdering::Unordered: return "Unordered";
case AtomicOrdering::Monotonic: return "Monotonic";
case AtomicOrdering::Acquire: return "Acquire";
case AtomicOrdering::Release: return "Release";
case AtomicOrdering::AcquireRelease: return "AcquireRelease";
case AtomicOrdering::SequentiallyConsistent:
return "SequentiallyConsistent";
}
llvm_unreachable("Unknown ordering");
}
@ -8323,9 +8323,9 @@ static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
if (Ord == SequentiallyConsistent)
if (Ord == AtomicOrdering::SequentiallyConsistent)
return callIntrinsic(Builder, Intrinsic::ppc_sync);
if (isAtLeastRelease(Ord))
if (isReleaseOrStronger(Ord))
return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
return nullptr;
}

@ -8333,7 +8333,7 @@ Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
if (IsLoad && isAtLeastAcquire(Ord))
if (IsLoad && isAcquireOrStronger(Ord))
return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
// FIXME: this is too conservative, a dependent branch + isync is enough.
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
@ -2929,12 +2929,12 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
}

static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
// Monotonic load/stores are legal.
if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
return Op;

// Otherwise, expand with a fence.
if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
// Expand with a fence.
return SDValue();

// Monotonic load/stores are legal.
return Op;
}

SDValue SparcTargetLowering::
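The Sparc hunk also inverts the control flow: the fence expansion is now guarded by isStrongerThanMonotonic, and the monotonic-or-weaker case falls through to return Op. A small, self-contained check (illustrative only; the predicate body is an assumed stand-in, not the LLVM header) showing that the new guard fires exactly for the orderings the old numeric "<= Monotonic" test did not already treat as legal:

#include <cassert>

// Enumerator values as in the patch; the predicate is a stand-in for the
// real isStrongerThanMonotonic and is assumed, not copied from LLVM.
enum class AtomicOrdering { NotAtomic = 0, Unordered = 1, Monotonic = 2,
                            Acquire = 4, Release = 5, AcquireRelease = 6,
                            SequentiallyConsistent = 7 };

static bool isStrongerThanMonotonic(AtomicOrdering O) {
  return static_cast<unsigned>(O) >
         static_cast<unsigned>(AtomicOrdering::Monotonic);
}

int main() {
  // The rewritten lowering expands exactly the orderings that the old
  // "getOrdering() <= Monotonic" test rejected as needing a fence.
  for (unsigned V : {0u, 1u, 2u, 4u, 5u, 6u, 7u}) {
    auto O = static_cast<AtomicOrdering>(V);
    bool OldLegal = V <= static_cast<unsigned>(AtomicOrdering::Monotonic);
    assert(OldLegal == !isStrongerThanMonotonic(O));
  }
  return 0;
}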
@ -3130,9 +3130,11 @@ SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,

// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
FenceScope == CrossThread) {
return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
Op.getOperand(0)), 0);
Op.getOperand(0)),
0);
}

// MEMBARRIER is a compiler barrier; it codegens to a no-op.

@ -20464,7 +20464,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
// lowered to just a load without a fence. A mfence flushes the store buffer,
// making the optimization clearly correct.
// FIXME: it is required if isAtLeastRelease(Order) but it is not clear
// FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
// otherwise, we might be able to be more aggressive on relaxed idempotent
// rmw. In practice, they do not look useful, so we don't try to be
// especially clever.

@ -20503,7 +20503,8 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,

// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
FenceScope == CrossThread) {
if (Subtarget.hasMFence())
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));

@ -20986,7 +20987,8 @@ static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
// FIXME: On 32-bit, store -> fist or movq would be more efficient
// (The only way to get a 16-byte store is cmpxchg16b)
// FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
if (cast<AtomicSDNode>(Node)->getOrdering() ==
AtomicOrdering::SequentiallyConsistent ||
!DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
cast<AtomicSDNode>(Node)->getMemoryVT(),
@ -970,8 +970,9 @@ SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
AtomicSDNode *N = cast<AtomicSDNode>(Op);
assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
assert(N->getOrdering() <= Monotonic &&
"setInsertFencesForAtomic(true) and yet greater than Monotonic");
assert((N->getOrdering() == AtomicOrdering::Unordered ||
N->getOrdering() == AtomicOrdering::Monotonic) &&
"setInsertFencesForAtomic(true) expects unordered / monotonic");
if (N->getMemoryVT() == MVT::i32) {
if (N->getAlignment() < 4)
report_fatal_error("atomic load must be aligned");

@ -1000,8 +1001,9 @@ SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
AtomicSDNode *N = cast<AtomicSDNode>(Op);
assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
assert(N->getOrdering() <= Monotonic &&
"setInsertFencesForAtomic(true) and yet greater than Monotonic");
assert((N->getOrdering() == AtomicOrdering::Unordered ||
N->getOrdering() == AtomicOrdering::Monotonic) &&
"setInsertFencesForAtomic(true) expects unordered / monotonic");
if (N->getMemoryVT() == MVT::i32) {
if (N->getAlignment() < 4)
report_fatal_error("atomic store must be aligned");

@ -1503,7 +1503,7 @@ static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
// into multiple malloc'd arrays, one for each field. This is basically
// SRoA for malloc'd memory.

if (Ordering != NotAtomic)
if (Ordering != AtomicOrdering::NotAtomic)
return false;

// If this is an allocation of a fixed size array of structs, analyze as a

@ -1982,7 +1982,7 @@ bool GlobalOpt::processInternalGlobal(GlobalVariable *GV,
// Otherwise, if the global was not a boolean, we can shrink it to be a
// boolean.
if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
if (GS.Ordering == NotAtomic) {
if (GS.Ordering == AtomicOrdering::NotAtomic) {
if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
++NumShrunkToBool;
return true;

@ -2581,4 +2581,3 @@ bool GlobalOpt::runOnModule(Module &M) {

return Changed;
}
@ -401,6 +401,7 @@ private:
int cmpTypes(Type *TyL, Type *TyR) const;

int cmpNumbers(uint64_t L, uint64_t R) const;
int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
int cmpAPInts(const APInt &L, const APInt &R) const;
int cmpAPFloats(const APFloat &L, const APFloat &R) const;
int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;

@ -477,6 +478,12 @@ int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
return 0;
}

int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
if ((int)L < (int)R) return -1;
if ((int)L > (int)R) return 1;
return 0;
}

int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
return Res;

@ -939,7 +946,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
return Res;
if (int Res =
cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
return Res;
if (int Res =
cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))

@ -955,7 +962,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
return Res;
if (int Res =
cmpNumbers(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
return Res;
return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
}
@ -1009,7 +1016,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
}
if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
if (int Res =
cmpNumbers(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
return Res;
return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
}

@ -1021,11 +1028,13 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res = cmpNumbers(CXI->isWeak(),
cast<AtomicCmpXchgInst>(R)->isWeak()))
return Res;
if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
if (int Res =
cmpOrderings(CXI->getSuccessOrdering(),
cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
return Res;
if (int Res = cmpNumbers(CXI->getFailureOrdering(),
cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
if (int Res =
cmpOrderings(CXI->getFailureOrdering(),
cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
return Res;
return cmpNumbers(CXI->getSynchScope(),
cast<AtomicCmpXchgInst>(R)->getSynchScope());

@ -1037,7 +1046,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res = cmpNumbers(RMWI->isVolatile(),
cast<AtomicRMWInst>(R)->isVolatile()))
return Res;
if (int Res = cmpNumbers(RMWI->getOrdering(),
if (int Res = cmpOrderings(RMWI->getOrdering(),
cast<AtomicRMWInst>(R)->getOrdering()))
return Res;
return cmpNumbers(RMWI->getSynchScope(),
@ -1222,34 +1222,34 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {

AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
switch (a) {
case NotAtomic:
return NotAtomic;
case Unordered:
case Monotonic:
case Release:
return Release;
case Acquire:
case AcquireRelease:
return AcquireRelease;
case SequentiallyConsistent:
return SequentiallyConsistent;
case AtomicOrdering::NotAtomic:
return AtomicOrdering::NotAtomic;
case AtomicOrdering::Unordered:
case AtomicOrdering::Monotonic:
case AtomicOrdering::Release:
return AtomicOrdering::Release;
case AtomicOrdering::Acquire:
case AtomicOrdering::AcquireRelease:
return AtomicOrdering::AcquireRelease;
case AtomicOrdering::SequentiallyConsistent:
return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}

AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
switch (a) {
case NotAtomic:
return NotAtomic;
case Unordered:
case Monotonic:
case Acquire:
return Acquire;
case Release:
case AcquireRelease:
return AcquireRelease;
case SequentiallyConsistent:
return SequentiallyConsistent;
case AtomicOrdering::NotAtomic:
return AtomicOrdering::NotAtomic;
case AtomicOrdering::Unordered:
case AtomicOrdering::Monotonic:
case AtomicOrdering::Acquire:
return AtomicOrdering::Acquire;
case AtomicOrdering::Release:
case AtomicOrdering::AcquireRelease:
return AtomicOrdering::AcquireRelease;
case AtomicOrdering::SequentiallyConsistent:
return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
@ -551,7 +551,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
IRB.CreateCall(SanCovWithCheckFunction, GuardP);
} else {
LoadInst *Load = IRB.CreateLoad(GuardP);
Load->setAtomic(Monotonic);
Load->setAtomic(AtomicOrdering::Monotonic);
Load->setAlignment(4);
SetNoSanitizeMetadata(Load);
Value *Cmp =

@ -480,14 +480,16 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
uint32_t v = 0;
switch (ord) {
case NotAtomic: llvm_unreachable("unexpected atomic ordering!");
case Unordered: // Fall-through.
case Monotonic: v = 0; break;
// case Consume: v = 1; break; // Not specified yet.
case Acquire: v = 2; break;
case Release: v = 3; break;
case AcquireRelease: v = 4; break;
case SequentiallyConsistent: v = 5; break;
case AtomicOrdering::NotAtomic:
llvm_unreachable("unexpected atomic ordering!");
case AtomicOrdering::Unordered: // Fall-through.
case AtomicOrdering::Monotonic: v = 0; break;
// Not specified yet:
// case AtomicOrdering::Consume: v = 1; break;
case AtomicOrdering::Acquire: v = 2; break;
case AtomicOrdering::Release: v = 3; break;
case AtomicOrdering::AcquireRelease: v = 4; break;
case AtomicOrdering::SequentiallyConsistent: v = 5; break;
}
return IRB->getInt32(v);
}
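The integer codes emitted by createOrdering are unchanged by this patch; they appear to follow the conventional C++11 memory_order numbering that the tsan runtime expects (relaxed = 0, acquire = 2, release = 3, acq_rel = 4, seq_cst = 5). That correspondence is an observation about common practice, not something this diff states. A quick check against the standard library:

#include <atomic>
#include <cassert>

int main() {
  // On common implementations these enumerators take the values 0..5,
  // matching the constants built by createOrdering above (Consume, value 1,
  // is skipped there because LLVM does not model it yet).
  assert(static_cast<int>(std::memory_order_relaxed) == 0);
  assert(static_cast<int>(std::memory_order_acquire) == 2);
  assert(static_cast<int>(std::memory_order_release) == 3);
  assert(static_cast<int>(std::memory_order_acq_rel) == 4);
  assert(static_cast<int>(std::memory_order_seq_cst) == 5);
  return 0;
}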
@ -673,7 +673,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// to advance the generation. We do need to prevent DSE across the fence,
// but that's handled above.
if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
if (FI->getOrdering() == Release) {
if (FI->getOrdering() == AtomicOrdering::Release) {
assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
continue;
}

@ -100,12 +100,12 @@ static bool LowerFenceInst(FenceInst *FI) {
}

static bool LowerLoadInst(LoadInst *LI) {
LI->setAtomic(NotAtomic);
LI->setAtomic(AtomicOrdering::NotAtomic);
return true;
}

static bool LowerStoreInst(StoreInst *SI) {
SI->setAtomic(NotAtomic);
SI->setAtomic(AtomicOrdering::NotAtomic);
return true;
}

@ -20,11 +20,11 @@ using namespace llvm;
/// and release, then return AcquireRelease.
///
static AtomicOrdering strongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
if (X == Acquire && Y == Release)
return AcquireRelease;
if (Y == Acquire && X == Release)
return AcquireRelease;
return (AtomicOrdering)std::max(X, Y);
if (X == AtomicOrdering::Acquire && Y == AtomicOrdering::Release)
return AtomicOrdering::AcquireRelease;
if (Y == AtomicOrdering::Acquire && X == AtomicOrdering::Release)
return AtomicOrdering::AcquireRelease;
return (AtomicOrdering)std::max((unsigned)X, (unsigned)Y);
}

/// It is safe to destroy a constant iff it is only used by constants itself.
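strongerOrdering keeps its numeric-max fallback; with the enum class the max is simply taken over the underlying values, and the Acquire/Release pair is still special-cased to AcquireRelease since neither is stronger than the other. A small usage sketch follows; the function body is repeated here only so the example compiles on its own, with the enumerator values from the patch.

#include <algorithm>
#include <cassert>

enum class AtomicOrdering { NotAtomic = 0, Unordered = 1, Monotonic = 2,
                            Acquire = 4, Release = 5, AcquireRelease = 6,
                            SequentiallyConsistent = 7 };

static AtomicOrdering strongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
  if (X == AtomicOrdering::Acquire && Y == AtomicOrdering::Release)
    return AtomicOrdering::AcquireRelease;
  if (Y == AtomicOrdering::Acquire && X == AtomicOrdering::Release)
    return AtomicOrdering::AcquireRelease;
  return (AtomicOrdering)std::max((unsigned)X, (unsigned)Y);
}

int main() {
  // Acquire and Release merge into AcquireRelease; otherwise the larger
  // underlying value wins.
  assert(strongerOrdering(AtomicOrdering::Acquire, AtomicOrdering::Release) ==
         AtomicOrdering::AcquireRelease);
  assert(strongerOrdering(AtomicOrdering::Monotonic,
                          AtomicOrdering::SequentiallyConsistent) ==
         AtomicOrdering::SequentiallyConsistent);
  return 0;
}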
@ -185,4 +185,4 @@ GlobalStatus::GlobalStatus()
: IsCompared(false), IsLoaded(false), StoredType(NotStored),
StoredOnceValue(nullptr), AccessingFunction(nullptr),
HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
Ordering(NotAtomic) {}
Ordering(AtomicOrdering::NotAtomic) {}

@ -179,12 +179,12 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
auto *Load1 = new LoadInst(Addr, "load", BB);
auto *Add1 = BinaryOperator::CreateAdd(Value, Value, "add", BB);
auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
auto *CmpXChg1 = new AtomicCmpXchgInst(Addr, ConstantInt::get(IntType, 0),
ConstantInt::get(IntType, 1),
Monotonic, Monotonic, CrossThread, BB);
auto *CmpXChg1 = new AtomicCmpXchgInst(
Addr, ConstantInt::get(IntType, 0), ConstantInt::get(IntType, 1),
AtomicOrdering::Monotonic, AtomicOrdering::Monotonic, CrossThread, BB);
auto *AtomicRMW =
new AtomicRMWInst(AtomicRMWInst::Xchg, Addr, ConstantInt::get(IntType, 1),
Monotonic, CrossThread, BB);
AtomicOrdering::Monotonic, CrossThread, BB);

ReturnInst::Create(C, nullptr, BB);