; RUN: llc < %s -relocation-model=static -realign-stack=1 -mcpu=yonah | FileCheck %s

; The double argument is at 4(esp) which is 16-byte aligned, allowing us to
; fold the load into the andpd.

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin8"

; External double; loads from it cannot be proven 16-byte aligned by the
; compiler, so @test's `align 16` annotation is what permits the andpd fold.
@G = external global double
; fabs(load @G) + fabs(load %z.first) stored through %P.
; The byval aggregate %z lands at 4(%esp); because -realign-stack guarantees a
; 16-byte-aligned frame, the backend may raise the load's alignment and fold it
; directly into andpd (the CHECK below). fabs is lowered to andpd with a
; sign-bit-clearing mask, which is why andpd appears at all.
define void @test({ double, double }* byval %z, double* %P) {
entry:
	%tmp3 = load double* @G, align 16		; <double> [#uses=1]
	%tmp4 = tail call double @fabs( double %tmp3 )		; <double> [#uses=1]
	; volatile store keeps the first result from being sunk past the second load
	volatile store double %tmp4, double* %P
	%tmp = getelementptr { double, double }* %z, i32 0, i32 0		; <double*> [#uses=1]
	%tmp1 = volatile load double* %tmp, align 8		; <double> [#uses=1]
	%tmp2 = tail call double @fabs( double %tmp1 )		; <double> [#uses=1]
; CHECK: andpd{{.*}}4(%esp), %xmm
	%tmp6 = fadd double %tmp4, %tmp2		; <double> [#uses=1]
	volatile store double %tmp6, double* %P, align 8
	ret void
}
; alignstack(16) must force an explicit stack realignment in the prologue.
define void @test2() alignstack(16) {
entry:
; CHECK: andl{{.*}}$-16, %esp
	ret void
}

; Use a call to force a spill.
; alignstack(32) with a call in the body: the vector temporaries live across
; the call to @test2, so they must be spilled to the (32-byte-realigned) frame.
define <2 x double> @test3(<2 x double> %x, <2 x double> %y) alignstack(32) {
entry:
; CHECK: andl{{.*}}$-32, %esp
	call void @test2()
	%A = fmul <2 x double> %x, %y
	ret <2 x double> %A
}
|
|
|
|
|
Infer alignment of loads and increase their alignment when we can tell they are
from the stack. This allows us to compile stack-align.ll to:
_test:
movsd LCPI1_0, %xmm0
movapd %xmm0, %xmm1
*** andpd 4(%esp), %xmm1
andpd _G, %xmm0
addsd %xmm1, %xmm0
movl 20(%esp), %eax
movsd %xmm0, (%eax)
ret
instead of:
_test:
movsd LCPI1_0, %xmm0
** movsd 4(%esp), %xmm1
** andpd %xmm0, %xmm1
andpd _G, %xmm0
addsd %xmm1, %xmm0
movl 20(%esp), %eax
movsd %xmm0, (%eax)
ret
llvm-svn: 46401
2008-01-26 20:45:50 +01:00
|
|
|
; libm fabs; llc lowers it to andpd with a sign-mask constant (see @test's CHECK).
declare double @fabs(double)