
[CodeGen] Refactor TLI/AtomicExpand interface to make LLSC explicit.

We used to have this magic "hasLoadLinkedStoreConditional()" callback,
which really meant two things:
- expand cmpxchg (to ll/sc).
- expand atomic loads using ll/sc (rather than cmpxchg).

Remove it, and, instead, introduce explicit callbacks:
- bool shouldExpandAtomicCmpXchgInIR(inst)
- AtomicExpansionKind shouldExpandAtomicLoadInIR(inst)
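
For a backend author, the practical difference is which hooks get overridden. The sketch below is illustrative only ("MyTargetLowering" is a hypothetical target, not code from this commit, and the 64-bit cutoff is an arbitrary example); it shows how a target that used to answer true from hasLoadLinkedStoreConditional() can opt in through the new per-instruction callbacks.

// Illustrative sketch only: "MyTargetLowering" is a hypothetical target,
// shown to demonstrate the hooks introduced by this commit.
#include "llvm/IR/Instructions.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

class MyTargetLowering : public TargetLowering {
public:
  explicit MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {}

  // Replaces the old blanket hasLoadLinkedStoreConditional(): cmpxchg
  // expansion to an LL/SC loop is now requested per instruction.
  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override {
    return true;
  }

  // Was a plain bool; now the expansion strategy is named explicitly, so one
  // target can pick LL/SC while another picks CmpXChg.
  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override {
    // 64 bits is an arbitrary threshold chosen for the example.
    return LI->getType()->getPrimitiveSizeInBits() > 64
               ? AtomicExpansionKind::LLSC
               : AtomicExpansionKind::None;
  }
};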

Differential Revision: http://reviews.llvm.org/D12557

llvm-svn: 247429
Ahmed Bougacha 2015-09-11 17:08:28 +00:00
parent aa999c6622
commit 3d9f4e715f
11 changed files with 72 additions and 57 deletions

View File

@@ -446,7 +446,7 @@ It is often easiest for backends to use AtomicExpandPass to lower some of the
 atomic constructs. Here are some lowerings it can do:

 * cmpxchg -> loop with load-linked/store-conditional
-  by overriding ``hasLoadLinkedStoreConditional()``, ``emitLoadLinked()``,
+  by overriding ``shouldExpandAtomicCmpXchgInIR()``, ``emitLoadLinked()``,
   ``emitStoreConditional()``
 * large loads/stores -> ll-sc/cmpxchg
   by overriding ``shouldExpandAtomicStoreInIR()``/``shouldExpandAtomicLoadInIR()``
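
For context on the "large loads/stores -> ll-sc/cmpxchg" lowering above: when a target answers CmpXChg for an atomic load, AtomicExpandPass replaces the load with a compare-exchange whose expected and new values are both zero, and keeps the loaded half of the result. A simplified sketch of that idea follows (not the pass's exact code; it assumes an integer-typed load and omits the pass's bookkeeping).

// Simplified sketch of the CmpXChg strategy for an atomic load; the real
// AtomicExpandPass::expandAtomicLoadToCmpXchg() differs in details.
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static bool expandLoadToCmpXchgSketch(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType(); // assumed to be an integer type in this sketch

  // "cmpxchg addr, 0, 0": the returned pair carries the current memory value,
  // and the store only fires if that value was already 0, so memory is left
  // unchanged either way.
  Constant *DummyVal = Constant::getNullValue(Ty);
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();
  return true;
}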

View File

@@ -124,15 +124,14 @@ public:
   // mask (ex: x86 blends).
 };

-/// Enum that specifies what a AtomicRMWInst is expanded to, if at all. Exists
-/// because different targets have different levels of support for these
-/// atomic RMW instructions, and also have different options w.r.t. what they
-/// should expand to.
+/// Enum that specifies what an atomic load/AtomicRMWInst is expanded
+/// to, if at all. Exists because different targets have different levels of
+/// support for these atomic instructions, and also have different options
+/// w.r.t. what they should expand to.
 enum class AtomicExpansionKind {
   None,    // Don't expand the instruction.
   LLSC,    // Expand the instruction into loadlinked/storeconditional; used
-           // by ARM/AArch64. Implies `hasLoadLinkedStoreConditional`
-           // returns true.
+           // by ARM/AArch64.
   CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
 };

@@ -1027,10 +1026,6 @@ public:
   /// \name Helpers for atomic expansion.
   /// @{

-  /// True if AtomicExpandPass should use emitLoadLinked/emitStoreConditional
-  /// and expand AtomicCmpXchgInst.
-  virtual bool hasLoadLinkedStoreConditional() const { return false; }
-
   /// Perform a load-linked operation on Addr, returning a "Value *" with the
   /// corresponding pointee type. This may entail some non-trivial operations to
   /// truncate or reconstruct types that will be illegal in the backend. See

@@ -1111,12 +1106,20 @@ public:
   /// Returns true if arguments should be sign-extended in lib calls.
   virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
     return IsSigned;
   }

-  /// Returns true if the given (atomic) load should be expanded by the
-  /// IR-level AtomicExpand pass into a load-linked instruction
-  /// (through emitLoadLinked()).
-  virtual bool shouldExpandAtomicLoadInIR(LoadInst *LI) const { return false; }
+  /// Returns how the given (atomic) load should be expanded by the
+  /// IR-level AtomicExpand pass.
+  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+    return AtomicExpansionKind::None;
+  }
+
+  /// Returns true if the given atomic cmpxchg should be expanded by the
+  /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
+  /// (through emitLoadLinked() and emitStoreConditional()).
+  virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+    return false;
+  }

   /// Returns how the IR-level AtomicExpand pass should expand the given
   /// AtomicRMW, if at all. Default is to never expand.

View File

@@ -46,7 +46,7 @@ namespace {
   private:
     bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                bool IsStore, bool IsLoad);
-    bool expandAtomicLoad(LoadInst *LI);
+    bool tryExpandAtomicLoad(LoadInst *LI);
     bool expandAtomicLoadToLL(LoadInst *LI);
     bool expandAtomicLoadToCmpXchg(LoadInst *LI);
     bool expandAtomicStore(StoreInst *SI);

@@ -109,7 +109,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
-      } else if (CASI && !TLI->hasLoadLinkedStoreConditional() &&
+      } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
                  (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                   isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence

@@ -127,8 +127,8 @@ bool AtomicExpand::runOnFunction(Function &F) {
      }
    }

-    if (LI && TLI->shouldExpandAtomicLoadInIR(LI)) {
-      MadeChange |= expandAtomicLoad(LI);
+    if (LI) {
+      MadeChange |= tryExpandAtomicLoad(LI);
     } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
       MadeChange |= expandAtomicStore(SI);
     } else if (RMWI) {

@@ -142,7 +142,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
       } else {
         MadeChange |= tryExpandAtomicRMW(RMWI);
       }
-    } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
+    } else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
       MadeChange |= expandAtomicCmpXchg(CASI);
     }
   }

@@ -170,11 +170,18 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
   return (LeadingFence || TrailingFence);
 }

-bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
-  if (TLI->hasLoadLinkedStoreConditional())
+bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
+  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
+  case TargetLoweringBase::AtomicExpansionKind::None:
+    return false;
+  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
     return expandAtomicLoadToLL(LI);
-  else
+  }
+  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
     return expandAtomicLoadToCmpXchg(LI);
+  }
+  }
+  llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
 }

 bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {

@@ -243,11 +250,6 @@ bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
   case TargetLoweringBase::AtomicExpansionKind::LLSC: {
-    assert(TLI->hasLoadLinkedStoreConditional() &&
-           "TargetLowering requested we expand AtomicRMW instruction into "
-           "load-linked/store-conditional combos, but such instructions aren't "
-           "supported");
-
     return expandAtomicRMWToLLSC(AI);
   }
   case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {

@@ -503,11 +505,8 @@ bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
 }

 bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
-  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
-    if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
-      expandAtomicLoad(ResultingLoad);
-    return true;
-  }
+  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI))
+    return tryExpandAtomicLoad(ResultingLoad);
   return false;
 }

View File

@@ -9492,19 +9492,21 @@ bool AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
 // Loads and stores less than 128-bits are already atomic; ones above that
 // are doomed anyway, so defer to the default libcall and blame the OS when
 // things go wrong.
-bool AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+TargetLowering::AtomicExpansionKind
+AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
-  return Size == 128;
+  return Size == 128 ? AtomicExpansionKind::LLSC : AtomicExpansionKind::None;
 }

 // For the real atomic operations, we have ldxr/stxr up to 128 bits,
-TargetLoweringBase::AtomicExpansionKind
+TargetLowering::AtomicExpansionKind
 AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
   return Size <= 128 ? AtomicExpansionKind::LLSC : AtomicExpansionKind::None;
 }

-bool AArch64TargetLowering::hasLoadLinkedStoreConditional() const {
+bool AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
+    AtomicCmpXchgInst *AI) const {
   return true;
 }

View File

@@ -343,17 +343,19 @@ public:
   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                          Type *Ty) const override;

-  bool hasLoadLinkedStoreConditional() const override;
   Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                         AtomicOrdering Ord) const override;
   Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                               Value *Addr, AtomicOrdering Ord) const override;

-  bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
+  TargetLoweringBase::AtomicExpansionKind
+  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
   bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
   TargetLoweringBase::AtomicExpansionKind
   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

   bool useLoadStackGuardNode() const override;
   TargetLoweringBase::LegalizeTypeAction
   getPreferredVectorAction(EVT VT) const override;

View File

@@ -11430,8 +11430,6 @@ bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
   return true;
 }

-bool ARMTargetLowering::hasLoadLinkedStoreConditional() const { return true; }
-
 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
                                         ARM_MB::MemBOpt Domain) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();

@@ -11527,14 +11525,16 @@ bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
 // guarantee, see DDI0406C ARM architecture reference manual,
 // sections A8.8.72-74 LDRD)
-bool ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+TargetLowering::AtomicExpansionKind
+ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
-  return (Size == 64) && !Subtarget->isMClass();
+  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLSC
+                                                  : AtomicExpansionKind::None;
 }

 // For the real atomic operations, we have ldrex/strex up to 32 bits,
 // and up to 64 bits on the non-M profiles
-TargetLoweringBase::AtomicExpansionKind
+TargetLowering::AtomicExpansionKind
 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
   return (Size <= (Subtarget->isMClass() ? 32U : 64U))

@@ -11542,6 +11542,11 @@ ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
              : AtomicExpansionKind::None;
 }

+bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(
+    AtomicCmpXchgInst *AI) const {
+  return true;
+}
+
 // This has so far only been implemented for MachO.
 bool ARMTargetLowering::useLoadStackGuardNode() const {
   return Subtarget->isTargetMachO();

View File

@@ -415,7 +415,6 @@ namespace llvm {
     bool functionArgumentNeedsConsecutiveRegisters(
         Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;

-    bool hasLoadLinkedStoreConditional() const override;
     Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const;
     Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                           AtomicOrdering Ord) const override;

@@ -436,10 +435,12 @@
     bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                                unsigned Factor) const override;

-    bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
+    TargetLoweringBase::AtomicExpansionKind
+    shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
     bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+    bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

     bool useLoadStackGuardNode() const override;

View File

@@ -2499,9 +2499,12 @@ Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
   return Ext;
 }

-bool HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+TargetLowering::AtomicExpansionKind
+HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
   // Do not expand loads and stores that don't exceed 64 bits.
-  return LI->getType()->getPrimitiveSizeInBits() > 64;
+  return LI->getType()->getPrimitiveSizeInBits() > 64
+             ? AtomicExpansionKind::LLSC
+             : AtomicExpansionKind::None;
 }

 bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {

View File

@@ -209,14 +209,11 @@ bool isPositiveHalfWord(SDNode *N);
     bool isLegalICmpImmediate(int64_t Imm) const override;

     // Handling of atomic RMW instructions.
-    bool hasLoadLinkedStoreConditional() const override {
-      return true;
-    }
     Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                           AtomicOrdering Ord) const override;
     Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                 Value *Addr, AtomicOrdering Ord) const override;
-    bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
+    AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
     bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
     AtomicExpansionKind
     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {

View File

@@ -18406,12 +18406,14 @@ bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
 // Note: this turns large loads into lock cmpxchg8b/16b.
 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
-bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+TargetLowering::AtomicExpansionKind
+X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
   auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
-  return needsCmpXchgNb(PTy->getElementType());
+  return needsCmpXchgNb(PTy->getElementType()) ? AtomicExpansionKind::CmpXChg
+                                               : AtomicExpansionKind::None;
 }

-TargetLoweringBase::AtomicExpansionKind
+TargetLowering::AtomicExpansionKind
 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
   Type *MemType = AI->getType();

View File

@@ -1053,7 +1053,8 @@ namespace llvm {
     const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

-    bool shouldExpandAtomicLoadInIR(LoadInst *SI) const override;
+    TargetLoweringBase::AtomicExpansionKind
+    shouldExpandAtomicLoadInIR(LoadInst *SI) const override;
     bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;