2014-04-17 20:22:47 +02:00
|
|
|
//===-- AtomicExpandLoadLinkedPass.cpp - Expand atomic instructions -------===//
|
2014-04-03 13:44:58 +02:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file contains a pass (at IR level) to replace atomic instructions with
|
|
|
|
// appropriate (intrinsic-based) ldrex/strex loops.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "llvm/CodeGen/Passes.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/IRBuilder.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
|
|
|
#include "llvm/IR/Module.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Target/TargetLowering.h"
|
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2014-06-19 23:03:04 +02:00
|
|
|
#include "llvm/Target/TargetSubtargetInfo.h"
|
|
|
|
|
2014-04-03 13:44:58 +02:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 04:02:50 +02:00
|
|
|
#define DEBUG_TYPE "arm-atomic-expand"
|
|
|
|
|
2014-04-03 13:44:58 +02:00
|
|
|
namespace {
|
2014-04-17 20:22:47 +02:00
|
|
|
class AtomicExpandLoadLinked : public FunctionPass {
|
2014-06-19 23:03:04 +02:00
|
|
|
const TargetMachine *TM;
|
2014-04-03 13:44:58 +02:00
|
|
|
public:
|
|
|
|
static char ID; // Pass identification, replacement for typeid
|
2014-04-24 08:44:33 +02:00
|
|
|
explicit AtomicExpandLoadLinked(const TargetMachine *TM = nullptr)
|
2014-06-19 23:03:04 +02:00
|
|
|
: FunctionPass(ID), TM(TM) {
|
2014-04-17 20:22:47 +02:00
|
|
|
initializeAtomicExpandLoadLinkedPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
2014-04-03 13:44:58 +02:00
|
|
|
|
|
|
|
bool runOnFunction(Function &F) override;
|
|
|
|
bool expandAtomicInsts(Function &F);
|
|
|
|
|
|
|
|
bool expandAtomicLoad(LoadInst *LI);
|
|
|
|
bool expandAtomicStore(StoreInst *LI);
|
|
|
|
bool expandAtomicRMW(AtomicRMWInst *AI);
|
|
|
|
bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
|
|
|
|
|
|
|
|
AtomicOrdering insertLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
|
|
|
|
void insertTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
|
2014-04-17 20:22:47 +02:00
|
|
|
};
|
|
|
|
}
|
2014-04-03 13:44:58 +02:00
|
|
|
|
2014-04-17 20:22:47 +02:00
|
|
|
// Pass identification: the address of ID is what uniquely identifies the pass.
char AtomicExpandLoadLinked::ID = 0;
// Exported handle so other code can refer to this pass by ID.
char &llvm::AtomicExpandLoadLinkedID = AtomicExpandLoadLinked::ID;
// TM-aware initialization: registers the pass so -atomic-ll-sc works and the
// TargetMachine can be threaded through pass construction.
INITIALIZE_TM_PASS(AtomicExpandLoadLinked, "atomic-ll-sc",
    "Expand Atomic calls in terms of load-linked & store-conditional",
    false, false)
|
2014-04-03 13:44:58 +02:00
|
|
|
|
2014-04-17 20:22:47 +02:00
|
|
|
/// Create an instance of this pass for \p TM. A null TM is accepted but
/// makes the pass a no-op (checked in runOnFunction).
FunctionPass *llvm::createAtomicExpandLoadLinkedPass(const TargetMachine *TM) {
  return new AtomicExpandLoadLinked(TM);
}
|
|
|
|
|
2014-04-17 20:22:47 +02:00
|
|
|
/// Walk the function, gather every atomic memory operation, and expand each
/// one the target asks us to. Returns true if anything was rewritten.
bool AtomicExpandLoadLinked::runOnFunction(Function &F) {
  // Without a target (or with one that doesn't want LL/SC expansion at the
  // IR level) there is nothing for us to do.
  if (!TM || !TM->getSubtargetImpl()->enableAtomicExpandLoadLinked())
    return false;

  // Expansion rewrites control flow, so collect every atomic operation up
  // front rather than mutating the function while iterating over it.
  SmallVector<Instruction *, 1> Worklist;
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      bool IsAtomicMemOp;
      if (auto *Load = dyn_cast<LoadInst>(&I))
        IsAtomicMemOp = Load->isAtomic();
      else if (auto *Store = dyn_cast<StoreInst>(&I))
        IsAtomicMemOp = Store->isAtomic();
      else
        IsAtomicMemOp = isa<AtomicRMWInst>(&I) || isa<AtomicCmpXchgInst>(&I);

      if (IsAtomicMemOp)
        Worklist.push_back(&I);
    }
  }

  bool Changed = false;
  for (Instruction *I : Worklist) {
    // The target gets the final say on each individual instruction.
    if (!TM->getTargetLowering()->shouldExpandAtomicInIR(I))
      continue;

    if (auto *RMW = dyn_cast<AtomicRMWInst>(I))
      Changed |= expandAtomicRMW(RMW);
    else if (auto *CAS = dyn_cast<AtomicCmpXchgInst>(I))
      Changed |= expandAtomicCmpXchg(CAS);
    else if (auto *Load = dyn_cast<LoadInst>(I))
      Changed |= expandAtomicLoad(Load);
    else if (auto *Store = dyn_cast<StoreInst>(I))
      Changed |= expandAtomicStore(Store);
    else
      llvm_unreachable("Unknown atomic instruction");
  }

  return Changed;
}
|
|
|
|
|
2014-04-17 20:22:47 +02:00
|
|
|
/// Replace an atomic load with a target load-linked intrinsic (plus a
/// trailing fence when the target wants explicit fences). Always returns
/// true since the load is unconditionally rewritten.
bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
  const TargetLowering *TLI = TM->getTargetLowering();

  // A load never needs a leading fence, even for SequentiallyConsistent;
  // if the target inserts explicit fences, the memory op itself can be
  // Monotonic.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : LI->getOrdering();

  // On ARM, ldrexd is the only 64-bit load guaranteed to be single-copy
  // atomic (A3.5.3), hence the load-linked form.
  IRBuilder<> Builder(LI);
  Value *Loaded =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), MemOpOrder);

  insertTrailingFence(Builder, LI->getOrdering());

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
|
|
|
|
|
2014-04-17 20:22:47 +02:00
|
|
|
/// Replace an atomic store by rewriting it as an "atomicrmw xchg" whose
/// result is ignored, then expanding that swap as usual. Returns the result
/// of the swap expansion (always true in practice).
bool AtomicExpandLoadLinked::expandAtomicStore(StoreInst *SI) {
  // On ARM the only atomic 64-bit store is a successful strexd, so the whole
  // operation is really an LL/SC loop — i.e. an exchange that discards the
  // loaded value.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *Swap =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Lower the equivalent swap through the standard RMW path.
  return expandAtomicRMW(Swap);
}
|
|
|
|
|
2014-04-17 20:22:47 +02:00
|
|
|
bool AtomicExpandLoadLinked::expandAtomicRMW(AtomicRMWInst *AI) {
|
2014-04-03 13:44:58 +02:00
|
|
|
AtomicOrdering Order = AI->getOrdering();
|
|
|
|
Value *Addr = AI->getPointerOperand();
|
|
|
|
BasicBlock *BB = AI->getParent();
|
|
|
|
Function *F = BB->getParent();
|
|
|
|
LLVMContext &Ctx = F->getContext();
|
|
|
|
|
|
|
|
// Given: atomicrmw some_op iN* %addr, iN %incr ordering
|
|
|
|
//
|
|
|
|
// The standard expansion we produce is:
|
|
|
|
// [...]
|
|
|
|
// fence?
|
|
|
|
// atomicrmw.start:
|
|
|
|
// %loaded = @load.linked(%addr)
|
|
|
|
// %new = some_op iN %loaded, %incr
|
|
|
|
// %stored = @store_conditional(%new, %addr)
|
|
|
|
// %try_again = icmp i32 ne %stored, 0
|
|
|
|
// br i1 %try_again, label %loop, label %atomicrmw.end
|
|
|
|
// atomicrmw.end:
|
|
|
|
// fence?
|
|
|
|
// [...]
|
|
|
|
BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
|
|
|
|
BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
|
|
|
|
|
|
|
|
// This grabs the DebugLoc from AI.
|
|
|
|
IRBuilder<> Builder(AI);
|
|
|
|
|
|
|
|
// The split call above "helpfully" added a branch at the end of BB (to the
|
|
|
|
// wrong place), but we might want a fence too. It's easiest to just remove
|
|
|
|
// the branch entirely.
|
|
|
|
std::prev(BB->end())->eraseFromParent();
|
|
|
|
Builder.SetInsertPoint(BB);
|
|
|
|
AtomicOrdering MemOpOrder = insertLeadingFence(Builder, Order);
|
|
|
|
Builder.CreateBr(LoopBB);
|
|
|
|
|
|
|
|
// Start the main loop block now that we've taken care of the preliminaries.
|
|
|
|
Builder.SetInsertPoint(LoopBB);
|
2014-06-19 23:03:04 +02:00
|
|
|
Value *Loaded =
|
|
|
|
TM->getTargetLowering()->emitLoadLinked(Builder, Addr, MemOpOrder);
|
2014-04-03 13:44:58 +02:00
|
|
|
|
|
|
|
Value *NewVal;
|
|
|
|
switch (AI->getOperation()) {
|
|
|
|
case AtomicRMWInst::Xchg:
|
|
|
|
NewVal = AI->getValOperand();
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::Add:
|
|
|
|
NewVal = Builder.CreateAdd(Loaded, AI->getValOperand(), "new");
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::Sub:
|
|
|
|
NewVal = Builder.CreateSub(Loaded, AI->getValOperand(), "new");
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::And:
|
|
|
|
NewVal = Builder.CreateAnd(Loaded, AI->getValOperand(), "new");
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::Nand:
|
|
|
|
NewVal = Builder.CreateAnd(Loaded, Builder.CreateNot(AI->getValOperand()),
|
|
|
|
"new");
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::Or:
|
|
|
|
NewVal = Builder.CreateOr(Loaded, AI->getValOperand(), "new");
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::Xor:
|
|
|
|
NewVal = Builder.CreateXor(Loaded, AI->getValOperand(), "new");
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::Max:
|
|
|
|
NewVal = Builder.CreateICmpSGT(Loaded, AI->getValOperand());
|
|
|
|
NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::Min:
|
|
|
|
NewVal = Builder.CreateICmpSLE(Loaded, AI->getValOperand());
|
|
|
|
NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::UMax:
|
|
|
|
NewVal = Builder.CreateICmpUGT(Loaded, AI->getValOperand());
|
|
|
|
NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
|
|
|
|
break;
|
|
|
|
case AtomicRMWInst::UMin:
|
|
|
|
NewVal = Builder.CreateICmpULE(Loaded, AI->getValOperand());
|
|
|
|
NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unknown atomic op");
|
|
|
|
}
|
|
|
|
|
2014-06-19 23:03:04 +02:00
|
|
|
Value *StoreSuccess = TM->getTargetLowering()->emitStoreConditional(
|
|
|
|
Builder, NewVal, Addr, MemOpOrder);
|
2014-04-03 13:44:58 +02:00
|
|
|
Value *TryAgain = Builder.CreateICmpNE(
|
|
|
|
StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
|
|
|
|
Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
|
|
|
|
insertTrailingFence(Builder, Order);
|
|
|
|
|
|
|
|
AI->replaceAllUsesWith(Loaded);
|
|
|
|
AI->eraseFromParent();
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-04-17 20:22:47 +02:00
|
|
|
/// Expand a cmpxchg into an explicit load-linked/store-conditional loop with
/// distinct success and failure paths (so the failure ordering can be weaker
/// than the success ordering), then rewrite users of the { iN, i1 } result.
/// Always returns true since the instruction is unconditionally rewritten.
bool AtomicExpandLoadLinked::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  AtomicOrdering MemOpOrder = insertLeadingFence(Builder, SuccessOrder);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded =
      TM->getTargetLowering()->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TM->getTargetLowering()->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  // Store-conditional returns 0 on success.
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  // A weak cmpxchg is allowed to fail on a spurious SC failure, so it goes
  // straight to the failure block instead of retrying the loop.
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if necessary.
  Builder.SetInsertPoint(SuccessBB);
  insertTrailingFence(Builder, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(FailureBB);
  insertTrailingFence(Builder, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.

  // Setup the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    // Index 0 is the loaded value, index 1 is the success flag.
    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has happened,
    // so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
|
|
|
|
|
2014-04-17 20:22:47 +02:00
|
|
|
/// Emit a leading fence when the target wants explicit fences instead of
/// ordered atomic operations, and return the ordering the memory operation
/// itself should then use.
AtomicOrdering AtomicExpandLoadLinked::insertLeadingFence(IRBuilder<> &Builder,
                                                          AtomicOrdering Ord) {
  // Target handles ordering on the atomic op itself: pass it through.
  if (!TM->getTargetLowering()->getInsertFencesForAtomic())
    return Ord;

  bool NeedsReleaseFence =
      Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent;
  if (NeedsReleaseFence)
    Builder.CreateFence(Release);

  // The exclusive operations don't need any barrier if we're adding separate
  // fences.
  return Monotonic;
}
|
|
|
|
|
2014-04-17 20:22:47 +02:00
|
|
|
void AtomicExpandLoadLinked::insertTrailingFence(IRBuilder<> &Builder,
|
2014-04-03 13:44:58 +02:00
|
|
|
AtomicOrdering Ord) {
|
2014-06-19 23:03:04 +02:00
|
|
|
if (!TM->getTargetLowering()->getInsertFencesForAtomic())
|
2014-04-03 13:44:58 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (Ord == Acquire || Ord == AcquireRelease)
|
|
|
|
Builder.CreateFence(Acquire);
|
|
|
|
else if (Ord == SequentiallyConsistent)
|
|
|
|
Builder.CreateFence(SequentiallyConsistent);
|
|
|
|
}
|