
Allow more tailcall optimization: calls with inputs that are all passed in registers.

llvm-svn: 94873
Author: Evan Cheng
Date:   2010-01-30 01:22:00 +00:00
Commit: 40ae22e14d
Parent: 5acba193ef

3 changed files with 56 additions and 12 deletions
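
As context for the diffs below, here is a minimal C sketch (not from the commit; the function names are illustrative) of the kind of call this change newly optimizes. Under the SysV x86-64 ABI the first six integer arguments are passed in %rdi, %rsi, %rdx, %rcx, %r8 and %r9, so a tail-position call whose arguments all fit in registers needs no stack adjustment and can be emitted as a plain jmp:

/* Hypothetical example: both operands of callee() travel in registers,
 * so the call needs no stack space and the tail call below can be
 * lowered to "jmp callee" instead of "call callee". */
void callee(int a, int b);

void caller(int b, int a) {
  callee(a + 15, b);  /* tail position; all arguments in registers */
}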


@@ -2258,9 +2258,18 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
       return true;
 
   // Look for obvious safe cases to perform tail call optimization.
-  // For now, only consider callees which take no arguments.
-  if (!Outs.empty())
-    return false;
+  // If the callee takes no arguments then go on to check the results of the
+  // call.
+  if (!Outs.empty()) {
+    // Check if stack adjustment is needed. For now, do not do this if any
+    // argument is passed on the stack.
+    SmallVector<CCValAssign, 16> ArgLocs;
+    CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
+                   ArgLocs, *DAG.getContext());
+    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
+    if (CCInfo.getNextStackOffset())
+      return false;
+  }
 
   // If the caller does not return a value, then this is obviously safe.
   // This is one case where it's safe to perform this optimization even
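
The hunk above runs the calling-convention analysis over the outgoing arguments and bails out as soon as any of them would need stack memory: AnalyzeCallOperands assigns each argument a register or a stack slot, and a non-zero getNextStackOffset() means at least one slot was allocated on the stack. A hedged C illustration of a call the check still rejects (function names are made up; assumes the SysV x86-64 ABI with six integer argument registers):

/* Hypothetical: seven integer arguments, but SysV x86-64 passes only
 * six in registers, so the seventh lands on the stack.  For this call
 * CCInfo.getNextStackOffset() is non-zero, the new check returns
 * false, and the call is emitted as "call", not "jmp". */
void seven(long a, long b, long c, long d, long e, long f, long g);

void forward(long a, long b, long c, long d, long e, long f, long g) {
  seven(a, b, c, d, e, f, g);  /* 7th argument needs stack space */
}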


@@ -5,7 +5,7 @@ define void @bar(i32 %b, i32 %a) nounwind optsize ssp {
 entry:
 ; CHECK: leal 15(%rsi), %edi
 ; CHECK-NOT: movl
-; CHECK: callq _foo
+; CHECK: jmp _foo
   %0 = add i32 %a, 15          ; <i32> [#uses=1]
   %1 = zext i32 %0 to i64      ; <i64> [#uses=1]
   tail call void @foo(i64 %1) nounwind


@@ -1,10 +1,13 @@
-; RUN: llc < %s -march=x86 -asm-verbose=false | FileCheck %s
-; RUN: llc < %s -march=x86-64 -asm-verbose=false | FileCheck %s
+; RUN: llc < %s -march=x86 -asm-verbose=false | FileCheck %s -check-prefix=32
+; RUN: llc < %s -march=x86-64 -asm-verbose=false | FileCheck %s -check-prefix=64
 
 define void @t1(i32 %x) nounwind ssp {
 entry:
-; CHECK: t1:
-; CHECK: jmp {{_?}}foo
+; 32: t1:
+; 32: jmp {{_?}}foo
+; 64: t1:
+; 64: jmp {{_?}}foo
   tail call void @foo() nounwind
   ret void
 }
@@ -13,8 +16,11 @@ declare void @foo()
 
 define void @t2() nounwind ssp {
 entry:
-; CHECK: t2:
-; CHECK: jmp {{_?}}foo2
+; 32: t2:
+; 32: jmp {{_?}}foo2
+; 64: t2:
+; 64: jmp {{_?}}foo2
   %0 = tail call i32 @foo2() nounwind
   ret void
 }
@@ -23,10 +29,39 @@ declare i32 @foo2()
 
 define void @t3() nounwind ssp {
 entry:
-; CHECK: t3:
-; CHECK: jmp {{_?}}foo3
+; 32: t3:
+; 32: jmp {{_?}}foo3
+; 64: t3:
+; 64: jmp {{_?}}foo3
   %0 = tail call i32 @foo3() nounwind
   ret void
 }
 
 declare i32 @foo3()
+
+define void @t4(void (i32)* nocapture %x) nounwind ssp {
+entry:
+; 32: t4:
+; 32: call *
+; FIXME: gcc can generate a tailcall for this. But it's tricky.
+; 64: t4:
+; 64-NOT: call
+; 64: jmpq *
+  tail call void %x(i32 0) nounwind
+  ret void
+}
+
+define void @t5(void ()* nocapture %x) nounwind ssp {
+entry:
+; 32: t5:
+; 32-NOT: call
+; 32: jmpl *
+; 64: t5:
+; 64-NOT: call
+; 64: jmpq *
+  tail call void %x() nounwind
+  ret void
+}
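
The new t4 and t5 functions cover indirect tail calls through a function pointer. On x86-64 both the pointer and the i32 argument live in registers, so both tests expect "jmpq *"; on 32-bit x86 the argument of t4 must be passed on the stack, so it keeps "call *" (hence the FIXME), while the argument-less t5 gets "jmpl *". A rough C analogue of the two tests (illustrative, not part of the commit):

/* C analogue of t4/t5: indirect calls are eligible for tail call
 * optimization whenever no argument needs the stack. */
void t4(void (*x)(int)) {
  x(0);  /* x86-64: jmpq *; 32-bit: i32 arg on the stack, stays call * */
}

void t5(void (*x)(void)) {
  x();   /* no arguments: indirect jmp on both 32- and 64-bit */
}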