
Update instcombine for atomic load/store.

llvm-svn: 137664
Eli Friedman 2011-08-15 22:09:40 +00:00
parent 8fe83ecaf3
commit 36ef5fd140
4 changed files with 54 additions and 31 deletions
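Every hunk below follows the same pattern: transforms that used to bail only on volatile accesses now bail on anything that is not "simple". A minimal sketch of the predicate involved, assuming only the LoadInst accessors the patch itself uses (the real isSimple() lives in the instruction classes, not in this commit):

#include "llvm/Instructions.h"  // header path as of 2011; later llvm/IR/Instructions.h
using namespace llvm;

// A "simple" access is neither volatile nor atomic. Only simple loads and
// stores are fair game for the forwarding, CSE, and dead-store transforms
// touched below; everything else is conservatively left alone.
static bool isSimpleAccess(const LoadInst &LI) {
  return !LI.isVolatile() && !LI.isAtomic();
}

The FIXMEs in the diff record that this is deliberately overconservative: several of the transforms are in fact legal for some atomic orderings, and can be re-enabled once each one is audited.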

View File

@@ -132,6 +132,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
       LoadInst *NewLoad =
         IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
       NewLoad->setAlignment(LI.getAlignment());
+      NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
       // Now cast the result of the load.
       return new BitCastInst(NewLoad, LI.getType());
     }
@@ -162,8 +163,9 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
   if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
     return Res;
 
-  // None of the following transforms are legal for volatile loads.
-  if (LI.isVolatile()) return 0;
+  // None of the following transforms are legal for volatile/atomic loads.
+  // FIXME: Some of it is okay for atomic loads; needs refactoring.
+  if (!LI.isSimple()) return 0;
 
   // Do really simple store-to-load forwarding and load CSE, to catch cases
   // where there are several consecutive memory accesses to the same location,
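The "really simple store-to-load forwarding" mentioned above is the first transform the new gate protects. A source-level analog, in plain C++ rather than IR (an illustrative example, not from the commit):

// Within one block, a load that follows a store to the same address can
// simply reuse the stored value, so the load folds away.
int forward(int *p, int v) {
  *p = v;      // store
  return *p;   // load of the same location; folded to: return v
}

This is only valid when both accesses are simple: a volatile load must actually execute, and an atomic load carries ordering obligations that each such fold has to be audited against, which is exactly what the FIXME defers.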
@@ -368,21 +370,6 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
   Value *Val = SI.getOperand(0);
   Value *Ptr = SI.getOperand(1);
 
-  // If the RHS is an alloca with a single use, zapify the store, making the
-  // alloca dead.
-  if (!SI.isVolatile()) {
-    if (Ptr->hasOneUse()) {
-      if (isa<AllocaInst>(Ptr))
-        return EraseInstFromFunction(SI);
-      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
-        if (isa<AllocaInst>(GEP->getOperand(0))) {
-          if (GEP->getOperand(0)->hasOneUse())
-            return EraseInstFromFunction(SI);
-        }
-      }
-    }
-  }
-
   // Attempt to improve the alignment.
   if (TD) {
     unsigned KnownAlign =
@@ -398,6 +385,23 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
       SI.setAlignment(EffectiveStoreAlign);
   }
 
+  // Don't hack volatile/atomic stores.
+  // FIXME: Some bits are legal for atomic stores; needs refactoring.
+  if (!SI.isSimple()) return 0;
+
+  // If the RHS is an alloca with a single use, zapify the store, making the
+  // alloca dead.
+  if (Ptr->hasOneUse()) {
+    if (isa<AllocaInst>(Ptr))
+      return EraseInstFromFunction(SI);
+    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
+      if (isa<AllocaInst>(GEP->getOperand(0))) {
+        if (GEP->getOperand(0)->hasOneUse())
+          return EraseInstFromFunction(SI);
+      }
+    }
+  }
+
   // Do really simple DSE, to catch cases where there are several consecutive
   // stores to the same location, separated by a few arithmetic operations. This
   // situation often occurs with bitfield accesses.
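The relocated "zapify" block is the same conservatism applied to stores: the alloca zap is now attempted only after the isSimple() check. A C++-level analog of the transform itself (illustrative, not from the commit):

// A store to a local that is never read again cannot be observed (absent
// volatile/atomic semantics), so the store is dead, and the local with it.
int dead_local_store(int v) {
  int local;
  local = v;   // no later read of 'local': instcombine's analog of this
               // store gets erased, leaving the alloca unused
  return v;
}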
@@ -415,8 +419,8 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
     if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
       // Prev store isn't volatile, and stores to the same location?
-      if (!PrevSI->isVolatile() &&equivalentAddressValues(PrevSI->getOperand(1),
-                                                          SI.getOperand(1))) {
+      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
+                                                        SI.getOperand(1))) {
         ++NumDeadStore;
         ++BBI;
         EraseInstFromFunction(*PrevSI);
@@ -430,7 +434,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
     // then *this* store is dead (X = load P; store X -> P).
     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
       if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
-          !SI.isVolatile())
+          LI->isSimple())
         return EraseInstFromFunction(SI);
 
       // Otherwise, this is a load from some other location.  Stores before it
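The guarded pattern here is the store-back of a freshly loaded value. Another illustrative C++ analog (again an assumption for exposition, not commit code):

// "X = load P; store X -> P" is a no-op when both accesses are simple,
// so the store is erased rather than kept.
void store_back(int *p) {
  int x = *p;
  *p = x;   // removable; for volatile/atomic accesses it must stay
}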
@@ -443,9 +447,6 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
       break;
   }
 
-  if (SI.isVolatile()) return 0;       // Don't hack volatile stores.
-
   // store X, null    -> turns into 'unreachable' in SimplifyCFG
   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
     if (!isa<UndefValue>(Val)) {
@@ -547,11 +548,11 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
         return false;
       --BBI;
     }
-    // If this isn't a store, isn't a store to the same location, or if the
-    // alignments differ, bail out.
+    // If this isn't a store, isn't a store to the same location, or is not the
+    // right kind of store, bail out.
     OtherStore = dyn_cast<StoreInst>(BBI);
     if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
-        OtherStore->getAlignment() != SI.getAlignment())
+        !SI.isSameOperationAs(OtherStore))
       return false;
   } else {
     // Otherwise, the other block ended with a conditional branch. If one of the
@@ -567,7 +568,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
       // Check to see if we find the matching store.
       if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
         if (OtherStore->getOperand(1) != SI.getOperand(1) ||
-            OtherStore->getAlignment() != SI.getAlignment())
+            !SI.isSameOperationAs(OtherStore))
           return false;
         break;
       }
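Both bail-outs above replace a bare alignment comparison with SI.isSameOperationAs(OtherStore). A hedged expansion of what that generic check has to cover for a pair of stores once atomics exist (an assumption for illustration; the patch calls the generic Instruction helper, not this function):

#include "llvm/Instructions.h"
using namespace llvm;

// Comparing alignment alone, as the old code did, would happily merge a
// release store with a plain one. "Same operation" must also agree on
// value type, volatility, ordering, and synchronization scope.
static bool storesMatch(const StoreInst &A, const StoreInst &B) {
  return A.getValueOperand()->getType() == B.getValueOperand()->getType() &&
         A.isVolatile()    == B.isVolatile() &&
         A.getAlignment()  == B.getAlignment() &&
         A.getOrdering()   == B.getOrdering() &&
         A.getSynchScope() == B.getSynchScope();
}

That guarantee is also what lets the next hunk build the merged store from SI's attributes alone: the merge only happens when OtherStore agrees on all of them.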
@@ -601,8 +602,10 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
   // insert it.
   BBI = DestBB->getFirstNonPHI();
   StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
-                                   OtherStore->isVolatile(),
-                                   SI.getAlignment());
+                                   SI.isVolatile(),
+                                   SI.getAlignment(),
+                                   SI.getOrdering(),
+                                   SI.getSynchScope());
   InsertNewInstBefore(NewSI, *BBI);
   NewSI->setDebugLoc(OtherStore->getDebugLoc());

View File

@@ -287,6 +287,11 @@ static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
 Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
   LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
 
+  // FIXME: This is overconservative; this transform is allowed in some cases
+  // for atomic operations.
+  if (FirstLI->isAtomic())
+    return 0;
+
   // When processing loads, we need to propagate two bits of information to the
   // sunk load: whether it is volatile, and what its alignment is.  We currently
   // don't sink loads when some have their alignment specified and some don't.

View File

@@ -1379,7 +1379,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
     // load from a GEP. This reduces the size of the load.
     // FIXME: If a load is used only by extractvalue instructions then this
     // could be done regardless of having multiple uses.
-    if (!L->isVolatile() && L->hasOneUse()) {
+    if (L->isSimple() && L->hasOneUse()) {
       // extractvalue has integer indices, getelementptr has Value*s. Convert.
       SmallVector<Value*, 4> Indices;
       // Prefix an i32 0 since we need the first element.

View File

@@ -0,0 +1,15 @@
+; RUN: opt -S < %s -instcombine | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-macosx10.7.0"
+
+; Check transforms involving atomic operations
+
+define i32* @test1(i8** %p) {
+; CHECK: define i32* @test1
+; CHECK: load atomic i8** %p monotonic, align 8
+  %c = bitcast i8** %p to i32**
+  %r = load atomic i32** %c monotonic, align 8
+  ret i32* %r
+}
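The single test exercises the InstCombineLoadCast change from the first hunk: instcombine folds the bitcast into the load, and the CHECK line verifies that the rewritten load keeps its atomic ordering (monotonic) and alignment instead of degrading into a simple load.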