
Make instcombine set explicit alignments on load or store

instructions with alignment 0, so that subsequent passes don't
need to bother checking the TargetData ABI size manually.

llvm-svn: 110128
Dan Gohman 2010-08-03 18:20:32 +00:00
parent 7186be986b
commit a80f89dbc7
3 changed files with 45 additions and 20 deletions
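
To illustrate the policy this change adopts, here is a minimal standalone C++ sketch, not the InstCombine code itself; the helper name resolveAlignment and its parameters are illustrative assumptions. An alignment of 0 is treated as the ABI type alignment from TargetData, and the resolved value is returned explicitly so later passes never have to look it up themselves.

// A sketch of the alignment-resolution rule; resolveAlignment is a
// hypothetical helper, not an LLVM API.
#include <algorithm>

// ExistingAlign == 0 means the instruction carries no explicit alignment.
// ABITypeAlign is what TargetData's ABI type alignment query would report.
// KnownAlign is the alignment proven by pointer analysis.
unsigned resolveAlignment(unsigned ExistingAlign, unsigned ABITypeAlign,
                          unsigned KnownAlign) {
  unsigned Effective = ExistingAlign != 0 ? ExistingAlign : ABITypeAlign;
  // Always return an explicit, non-zero alignment: the larger of what is
  // proven and what the instruction already (implicitly) had.
  return std::max(KnownAlign, Effective);
}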


@@ -146,10 +146,14 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
   if (TD) {
     unsigned KnownAlign =
       GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
-    if (KnownAlign >
-        (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
-                                  LI.getAlignment()))
+    unsigned LoadAlign = LI.getAlignment();
+    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
+      TD->getABITypeAlignment(LI.getType());
+
+    if (KnownAlign > EffectiveLoadAlign)
       LI.setAlignment(KnownAlign);
+    else if (LoadAlign == 0)
+      LI.setAlignment(EffectiveLoadAlign);
   }
 
   // load (cast X) --> cast (load X) iff safe.
@@ -411,10 +415,14 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
   if (TD) {
     unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
-    if (KnownAlign >
-        (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
-                                  SI.getAlignment()))
+    unsigned StoreAlign = SI.getAlignment();
+    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
+      TD->getABITypeAlignment(Val->getType());
+
+    if (KnownAlign > EffectiveStoreAlign)
       SI.setAlignment(KnownAlign);
+    else if (StoreAlign == 0)
+      SI.setAlignment(EffectiveStoreAlign);
   }
 
   // Do really simple DSE, to catch cases where there are several consecutive


@@ -1,10 +1,13 @@
-; RUN: opt < %s -instcombine -S | grep {align 16} | count 1
+; RUN: opt < %s -instcombine -S | FileCheck %s
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
 ; Instcombine should be able to prove vector alignment in the
 ; presence of a few mild address computation tricks.
 
-define void @foo(i8* %b, i64 %n, i64 %u, i64 %y) nounwind {
+; CHECK: @test0(
+; CHECK: align 16
+
+define void @test0(i8* %b, i64 %n, i64 %u, i64 %y) nounwind {
 entry:
   %c = ptrtoint i8* %b to i64
   %d = and i64 %c, -16
@@ -29,3 +32,29 @@ return:
   ret void
 }
 
+; When we see an unaligned load from an insufficiently aligned global or
+; alloca, increase the alignment of the load, turning it into an aligned load.
+
+; CHECK: @test1(
+; CHECK: tmp = load
+; CHECK: GLOBAL{{.*}}align 16
+
+@GLOBAL = internal global [4 x i32] zeroinitializer
+
+define <16 x i8> @test1(<2 x i64> %x) {
+entry:
+  %tmp = load <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
+  ret <16 x i8> %tmp
+}
+
+; When a load or store lacks an explicit alignment, add one.
+
+; CHECK: @test2(
+; CHECK: load double* %p, align 8
+; CHECK: store double %n, double* %p, align 8
+
+define double @test2(double* %p, double %n) nounwind {
+  %t = load double* %p
+  store double %n, double* %p
+  ret double %t
+}


@ -1,12 +0,0 @@
; RUN: opt < %s -instcombine -S | grep {GLOBAL.*align 16}
; RUN: opt < %s -instcombine -S | grep {tmp = load}
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
@GLOBAL = internal global [4 x i32] zeroinitializer
define <16 x i8> @foo(<2 x i64> %x) {
entry:
%tmp = load <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
ret <16 x i8> %tmp
}