
Teach all of the loads, stores, memsets and memcpys created by the
rewriter in SROA to carry a proper alignment. This involves
interrogating various sources of alignment, etc. This is a more complete
and principled fix to PR13920 as well as related bugs pointed out by Eli
in review and by inspection in the area.

Also by inspection fix the integer and vector promotion paths to create
aligned loads and stores. I still need to work up test cases for
these... Sorry for the delay, they were found purely by inspection.

llvm-svn: 164689
Chandler Carruth 2012-09-26 10:27:46 +00:00
parent 8d729fba5a
commit 0254cf6d85
3 changed files with 95 additions and 47 deletions
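
A note for readers of the diff below: the recurring computation is that an
access rewritten to land at some byte offset inside the new alloca can only
claim whatever alignment survives that offset, and the patch computes this
with llvm::MinAlign from llvm/Support/MathExtras.h. The following is a
minimal standalone C++ sketch of that arithmetic, not part of the patch;
the helper mirrors LLVM's MinAlign, and the numbers are taken from the
tests in this commit:

    #include <cassert>
    #include <cstdint>

    // Mirrors llvm::MinAlign (llvm/Support/MathExtras.h): the largest
    // power of two that divides both A and B, i.e. the alignment still
    // guaranteed after offsetting an A-aligned pointer by B bytes. An
    // offset of 0 preserves the full alignment.
    static uint64_t MinAlign(uint64_t A, uint64_t B) {
      return (A | B) & (1 + ~(A | B));
    }

    int main() {
      assert(MinAlign(16, 0) == 16); // slice at the start of an align-16 alloca
      assert(MinAlign(16, 1) == 1);  // one byte in: only align 1 (@test1's i8 ops)
      assert(MinAlign(16, 4) == 4);  // four bytes in: align 4 (the memset path)
      return 0;
    }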

lib/Transforms/Scalar/SROA.cpp

@@ -1896,7 +1896,8 @@ private:
   Value *extractInteger(IRBuilder<> &IRB, IntegerType *TargetTy,
                         uint64_t Offset) {
     assert(IntPromotionTy && "Alloca is not an integer we can extract from");
-    Value *V = IRB.CreateLoad(&NewAI, getName(".load"));
+    Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+                                     getName(".load"));
     assert(Offset >= NewAllocaBeginOffset && "Out of bounds offset");
     uint64_t RelOffset = Offset - NewAllocaBeginOffset;
     if (RelOffset)
@@ -1912,7 +1913,7 @@ private:
   StoreInst *insertInteger(IRBuilder<> &IRB, Value *V, uint64_t Offset) {
     IntegerType *Ty = cast<IntegerType>(V->getType());
     if (Ty == IntPromotionTy)
-      return IRB.CreateStore(V, &NewAI);
+      return IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
 
     assert(Ty->getBitWidth() < IntPromotionTy->getBitWidth() &&
            "Cannot insert a larger integer!");
@@ -1924,10 +1925,12 @@ private:
 
     APInt Mask = ~Ty->getMask().zext(IntPromotionTy->getBitWidth())
                    .shl(RelOffset*8);
-    Value *Old = IRB.CreateAnd(IRB.CreateLoad(&NewAI, getName(".oldload")),
+    Value *Old = IRB.CreateAnd(IRB.CreateAlignedLoad(&NewAI,
+                                                     NewAI.getAlignment(),
+                                                     getName(".oldload")),
                                Mask, getName(".mask"));
-    return IRB.CreateStore(IRB.CreateOr(Old, V, getName(".insert")),
-                           &NewAI);
+    return IRB.CreateAlignedStore(IRB.CreateOr(Old, V, getName(".insert")),
+                                  &NewAI, NewAI.getAlignment());
   }
 
   void deleteIfTriviallyDead(Value *V) {
@@ -1949,12 +1952,12 @@ private:
     Value *Result;
     if (LI.getType() == VecTy->getElementType() ||
         BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
-      Result
-        = IRB.CreateExtractElement(IRB.CreateLoad(&NewAI, getName(".load")),
-                                   getIndex(IRB, BeginOffset),
-                                   getName(".extract"));
+      Result = IRB.CreateExtractElement(
+        IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")),
+        getIndex(IRB, BeginOffset), getName(".extract"));
     } else {
-      Result = IRB.CreateLoad(&NewAI, getName(".load"));
+      Result = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+                                     getName(".load"));
     }
     if (Result->getType() != LI.getType())
       Result = getValueCast(IRB, Result, LI.getType());
@@ -2002,13 +2005,14 @@ private:
         BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
       if (V->getType() != ElementTy)
         V = getValueCast(IRB, V, ElementTy);
-      V = IRB.CreateInsertElement(IRB.CreateLoad(&NewAI, getName(".load")), V,
-                                  getIndex(IRB, BeginOffset),
+      LoadInst *LI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+                                           getName(".load"));
+      V = IRB.CreateInsertElement(LI, V, getIndex(IRB, BeginOffset),
                                   getName(".insert"));
     } else if (V->getType() != VecTy) {
       V = getValueCast(IRB, V, VecTy);
     }
-    StoreInst *Store = IRB.CreateStore(V, &NewAI);
+    StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
     Pass.DeadInsts.push_back(&SI);
     (void)Store;
     DEBUG(dbgs() << " to: " << *Store << "\n");
@@ -2073,11 +2077,15 @@ private:
          !TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)))) {
       Type *SizeTy = II.getLength()->getType();
       Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
+      unsigned Align = 1;
+      if (NewAI.getAlignment())
+        Align = MinAlign(NewAI.getAlignment(),
+                         BeginOffset - NewAllocaBeginOffset);
       CallInst *New
         = IRB.CreateMemSet(getAdjustedAllocaPtr(IRB,
                                                 II.getRawDest()->getType()),
-                           II.getValue(), Size, II.getAlignment(),
+                           II.getValue(), Size, Align,
                            II.isVolatile());
       (void)New;
       DEBUG(dbgs() << " to: " << *New << "\n");
       return !II.isVolatile();
@@ -2115,11 +2123,13 @@ private:
     // If this is an element-wide memset of a vectorizable alloca, insert it.
     if (VecTy && (BeginOffset > NewAllocaBeginOffset ||
                   EndOffset < NewAllocaEndOffset)) {
-      StoreInst *Store = IRB.CreateStore(
-        IRB.CreateInsertElement(IRB.CreateLoad(&NewAI, getName(".load")), V,
-                                getIndex(IRB, BeginOffset),
+      StoreInst *Store = IRB.CreateAlignedStore(
+        IRB.CreateInsertElement(IRB.CreateAlignedLoad(&NewAI,
+                                                      NewAI.getAlignment(),
+                                                      getName(".load")),
+                                V, getIndex(IRB, BeginOffset),
                                 getName(".insert")),
-        &NewAI);
+        &NewAI, NewAI.getAlignment());
       (void)Store;
       DEBUG(dbgs() << " to: " << *Store << "\n");
       return true;
@@ -2137,7 +2147,8 @@ private:
       assert(V->getType() == VecTy);
     }
 
-    Value *New = IRB.CreateStore(V, &NewAI, II.isVolatile());
+    Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
+                                        II.isVolatile());
     (void)New;
     DEBUG(dbgs() << " to: " << *New << "\n");
     return !II.isVolatile();
@@ -2227,6 +2238,11 @@ private:
     OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy,
                               getName("." + OtherPtr->getName()));
 
+    unsigned Align = II.getAlignment();
+    if (Align > 1)
+      Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
+                       MinAlign(II.getAlignment(), NewAI.getAlignment()));
+
     // Strip all inbounds GEPs and pointer casts to try to dig out any root
     // alloca that should be re-examined after rewriting this instruction.
     if (AllocaInst *AI
@@ -2242,8 +2258,7 @@ private:
 
       CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
                                        IsDest ? OtherPtr : OurPtr,
-                                       Size, II.getAlignment(),
-                                       II.isVolatile());
+                                       Size, Align, II.isVolatile());
       (void)New;
       DEBUG(dbgs() << " to: " << *New << "\n");
       return false;
@@ -2257,24 +2272,26 @@ private:
     Value *Src;
     if (IsVectorElement && !IsDest) {
       // We have to extract rather than load.
-      Src = IRB.CreateExtractElement(IRB.CreateLoad(SrcPtr,
-                                                    getName(".copyload")),
-                                     getIndex(IRB, BeginOffset),
-                                     getName(".copyextract"));
+      Src = IRB.CreateExtractElement(
+        IRB.CreateAlignedLoad(SrcPtr, Align, getName(".copyload")),
+        getIndex(IRB, BeginOffset),
+        getName(".copyextract"));
     } else {
-      Src = IRB.CreateLoad(SrcPtr, II.isVolatile(), getName(".copyload"));
+      Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
+                                  getName(".copyload"));
     }
 
     if (IsVectorElement && IsDest) {
       // We have to insert into a loaded copy before storing.
-      Src = IRB.CreateInsertElement(IRB.CreateLoad(&NewAI, getName(".load")),
-                                    Src, getIndex(IRB, BeginOffset),
-                                    getName(".insert"));
+      Src = IRB.CreateInsertElement(
+        IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")),
+        Src, getIndex(IRB, BeginOffset),
+        getName(".insert"));
     }
-    StoreInst *Store = cast<StoreInst>(IRB.CreateStore(Src, DstPtr,
-                                                       II.isVolatile()));
-    Store->setAlignment(II.getAlignment());
+
+    StoreInst *Store = cast<StoreInst>(
+      IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
     (void)Store;
     DEBUG(dbgs() << " to: " << *Store << "\n");
     return !II.isVolatile();
   }

test/Transforms/SROA/alignment.ll

@@ -0,0 +1,46 @@
+; RUN: opt < %s -sroa -S | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
+
+define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
+; CHECK: @test1
+; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 0
+; CHECK: %[[a0:.*]] = load i8* %[[gep_a0]], align 16
+; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 1
+; CHECK: %[[a1:.*]] = load i8* %[[gep_a1]], align 1
+; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 0
+; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
+; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 1
+; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
+; CHECK: ret void
+
+entry:
+  %alloca = alloca { i8, i8 }, align 16
+  %gep_a = getelementptr { i8, i8 }* %a, i32 0, i32 0
+  %gep_alloca = getelementptr { i8, i8 }* %alloca, i32 0, i32 0
+  %gep_b = getelementptr { i8, i8 }* %b, i32 0, i32 0
+
+  store i8 420, i8* %gep_alloca, align 16
+
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_alloca, i8* %gep_a, i32 2, i32 16, i1 false)
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_b, i8* %gep_alloca, i32 2, i32 16, i1 false)
+  ret void
+}
+
+define void @PR13920(<2 x i64>* %a, i16* %b) {
+; Test that alignments on memcpy intrinsics get propagated to loads and stores.
+; CHECK: @PR13920
+; CHECK: load <2 x i64>* %a, align 2
+; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
+; CHECK: ret void
+
+entry:
+  %aa = alloca <2 x i64>, align 16
+  %aptr = bitcast <2 x i64>* %a to i8*
+  %aaptr = bitcast <2 x i64>* %aa to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %aaptr, i8* %aptr, i32 16, i32 2, i1 false)
+  %bptr = bitcast i16* %b to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
+  ret void
+}
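
As a sanity check on the @PR13920 expectations above (not part of the
patch): the memcpy promises only align 2 (its i32 2 argument), the alloca
is align 16, and the rewritten slice starts at relative offset 0, so the
memcpy path's MinAlign(RelOffset, MinAlign(II.getAlignment(),
NewAI.getAlignment())) comes out to 2, exactly what the CHECK lines
demand. A tiny self-contained sketch of that arithmetic:

    #include <cassert>
    #include <cstdint>

    // Same helper as in the sketch above (mirrors llvm::MinAlign).
    static uint64_t MinAlign(uint64_t A, uint64_t B) {
      return (A | B) & (1 + ~(A | B));
    }

    int main() {
      uint64_t RelOffset = 0;    // the slice begins at the alloca's base
      uint64_t MemcpyAlign = 2;  // the memcpy intrinsic's alignment argument
      uint64_t AllocaAlign = 16; // from "alloca <2 x i64>, align 16"
      assert(MinAlign(RelOffset, MinAlign(MemcpyAlign, AllocaAlign)) == 2);
      return 0;
    }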

test/Transforms/SROA/basictest.ll

@@ -897,18 +897,3 @@ if.end:
   %tmp2 = load i8* %gep
   ret void
 }
-
-define void @test23(<2 x i64> %a, i16* %b) {
-; CHECK: @test23
-; CHECK: store {{.*}}, align 2
-; CHECK: ret void
-; PR13920
-entry:
-  %a.addr = alloca <2 x i64>, align 16
-  store <2 x i64> %a, <2 x i64>* %a.addr, align 16
-  %0 = bitcast i16* %b to i8*
-  %1 = bitcast <2 x i64>* %a.addr to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 16, i32 2, i1 false)
-  ret void
-}
-