Mirror of https://github.com/RPCS3/llvm-mirror.git — synced 2024-10-30 23:42:52 +01:00, commit 4b0d66f924.
The current Intel Atom microarchitecture has a feature whereby when a function returns early then it is slightly faster to execute a sequence of NOP instructions to wait until the return address is ready, as opposed to simply stalling on the ret instruction until the return address is ready. When compiling for X86 Atom only, this patch will run a pass, called "X86PadShortFunction" which will add NOP instructions where less than four cycles elapse between function entry and return. It includes tests. This patch has been updated to address Nadav's review comments - Optimize only at >= O1 and don't do optimization if -Os is set - Stores MachineBasicBlock* instead of BBNum - Uses DenseMap instead of std::map - Fixes placement of braces Patch by Andy Zhang. llvm-svn: 171879
40 lines · 806 B · LLVM
; RUN: llc < %s -mtriple=x86_64-apple-darwin11 -mcpu=core2 -mattr=+mmx,+sse2 | FileCheck %s

; rdar://6602459
; Verifies lowering of <1 x i64> / <2 x i32> values on x86-64 Darwin:
; MMX-sized vectors must travel through integer registers (%rax), not
; MMX registers, per the x86-64 calling convention.

; External MMX-sized global; @t1 below stores a returned <1 x i64> into it.
@g_v1di = external global <1 x i64>
; A <1 x i64> returned from a call comes back in %rax (integer register,
; not an MMX register), so the store to @g_v1di must be a plain 64-bit
; integer store immediately after the call.
define void @t1() nounwind {
entry:
  %call = call <1 x i64> @return_v1di() ; <<1 x i64>> [#uses=0]
  store <1 x i64> %call, <1 x i64>* @g_v1di
  ret void
; CHECK: t1:
; CHECK: callq
; CHECK-NEXT: movq _g_v1di
; CHECK-NEXT: movq %rax,
}
|
|
|
|
declare <1 x i64> @return_v1di()
|
|
|
|
; Returning a constant <1 x i64> should materialize the immediate in an
; integer register and return directly — no MMX instructions.
define <1 x i64> @t2() nounwind {
  ret <1 x i64> <i64 1>
; CHECK: t2:
; CHECK: movl $1
; CHECK-NEXT: ret
}
; A <2 x i32> return is widened into an XMM register: the constant is
; built in a GPR and moved into %xmm0 with movd.
define <2 x i32> @t3() nounwind {
  ret <2 x i32> <i32 1, i32 0>
; CHECK: t3:
; CHECK: movl $1
; CHECK: movd {{.*}}, %xmm0
}
; Same bit pattern as @t3 but returned as a double: the bitcast constant
; still goes GPR -> %xmm0 via movd, matching the FP return convention.
define double @t4() nounwind {
  ret double bitcast (<2 x i32> <i32 1, i32 0> to double)
; CHECK: t4:
; CHECK: movl $1
; CHECK: movd {{.*}}, %xmm0
}