diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index ccccc2ad02e..10b0ee40360 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -448,3 +448,88 @@ ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
     return ISD::SETNE;
   }
 }
+
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
+                                const TargetLowering &TLI) {
+  const Instruction *I = CS.getInstruction();
+  const BasicBlock *ExitBB = I->getParent();
+  const TerminatorInst *Term = ExitBB->getTerminator();
+  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
+  const Function *F = ExitBB->getParent();
+
+  // The block must end in a return statement or unreachable.
+  //
+  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
+  // an unreachable, for now. The way tailcall optimization is currently
+  // implemented means it will add an epilogue followed by a jump. That is
+  // not profitable. Also, if the callee is a special function (e.g.
+  // longjmp on x86), it can end up causing miscompilation that has not
+  // been fully understood.
+  if (!Ret &&
+      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;
+
+  // If I will have a chain, make sure no other instruction that will have a
+  // chain interposes between I and the return.
+  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
+      !I->isSafeToSpeculativelyExecute())
+    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
+         --BBI) {
+      if (&*BBI == I)
+        break;
+      // Debug info intrinsics do not get in the way of tail call optimization.
+      if (isa<DbgInfoIntrinsic>(BBI))
+        continue;
+      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
+          !BBI->isSafeToSpeculativelyExecute())
+        return false;
+    }
+
+  // If the block ends with a void return or unreachable, it doesn't matter
+  // what the call's return type is.
+  if (!Ret || Ret->getNumOperands() == 0) return true;
+
+  // If the return value is undef, it doesn't matter what the call's
+  // return type is.
+  if (isa<UndefValue>(Ret->getOperand(0))) return true;
+
+  // Conservatively require the attributes of the call to match those of
+  // the return. Ignore noalias because it doesn't affect the call sequence.
+  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
+  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
+    return false;
+
+  // It's not safe to eliminate the sign / zero extension of the return value.
+  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+    return false;
+
+  // Otherwise, make sure the unmodified return value of I is the return value.
+  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
+       U = dyn_cast<Instruction>(U->getOperand(0))) {
+    if (!U)
+      return false;
+    if (!U->hasOneUse())
+      return false;
+    if (U == I)
+      break;
+    // Check for a truly no-op truncate.
+    if (isa<TruncInst>(U) &&
+        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
+      continue;
+    // Check for a truly no-op bitcast.
+    if (isa<BitCastInst>(U) &&
+        (U->getOperand(0)->getType() == U->getType() ||
+         (U->getOperand(0)->getType()->isPointerTy() &&
+          U->getType()->isPointerTy())))
+      continue;
+    // Otherwise it's not a true no-op.
+    return false;
+  }
+
+  return true;
+}
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
index 34c8da62f7f..0d9af1a9bf3 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
@@ -24,6 +24,7 @@
 #endif
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/Support/CallSite.h"
 #include <vector>
 
 namespace llvm {
@@ -166,6 +167,15 @@ ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);
 ///
 ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
 
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+bool isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
+                          const TargetLowering &TLI);
+
 } // end namespace llvm
 
 #endif
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index fea8377a0bd..9011610d745 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4190,92 +4190,6 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
   }
 }
 
-/// Test if the given instruction is in a position to be optimized
-/// with a tail-call. This roughly means that it's in a block with
-/// a return and there's nothing that needs to be scheduled
-/// between it and the return.
-///
-/// This function only tests target-independent requirements.
-static bool
-isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
-                     const TargetLowering &TLI) {
-  const Instruction *I = CS.getInstruction();
-  const BasicBlock *ExitBB = I->getParent();
-  const TerminatorInst *Term = ExitBB->getTerminator();
-  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
-  const Function *F = ExitBB->getParent();
-
-  // The block must end in a return statement or unreachable.
-  //
-  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
-  // an unreachable, for now. The way tailcall optimization is currently
-  // implemented means it will add an epilogue followed by a jump. That is
-  // not profitable. Also, if the callee is a special function (e.g.
-  // longjmp on x86), it can end up causing miscompilation that has not
-  // been fully understood.
-  if (!Ret &&
-      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;
-
-  // If I will have a chain, make sure no other instruction that will have a
-  // chain interposes between I and the return.
-  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
-      !I->isSafeToSpeculativelyExecute())
-    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
-         --BBI) {
-      if (&*BBI == I)
-        break;
-      // Debug info intrinsics do not get in the way of tail call optimization.
-      if (isa<DbgInfoIntrinsic>(BBI))
-        continue;
-      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
-          !BBI->isSafeToSpeculativelyExecute())
-        return false;
-    }
-
-  // If the block ends with a void return or unreachable, it doesn't matter
-  // what the call's return type is.
-  if (!Ret || Ret->getNumOperands() == 0) return true;
-
-  // If the return value is undef, it doesn't matter what the call's
-  // return type is.
-  if (isa<UndefValue>(Ret->getOperand(0))) return true;
-
-  // Conservatively require the attributes of the call to match those of
-  // the return. Ignore noalias because it doesn't affect the call sequence.
-  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
-  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
-    return false;
-
-  // It's not safe to eliminate the sign / zero extension of the return value.
-  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
-    return false;
-
-  // Otherwise, make sure the unmodified return value of I is the return value.
-  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
-       U = dyn_cast<Instruction>(U->getOperand(0))) {
-    if (!U)
-      return false;
-    if (!U->hasOneUse())
-      return false;
-    if (U == I)
-      break;
-    // Check for a truly no-op truncate.
-    if (isa<TruncInst>(U) &&
-        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
-      continue;
-    // Check for a truly no-op bitcast.
-    if (isa<BitCastInst>(U) &&
-        (U->getOperand(0)->getType() == U->getType() ||
-         (U->getOperand(0)->getType()->isPointerTy() &&
-          U->getType()->isPointerTy())))
-      continue;
-    // Otherwise it's not a true no-op.
-    return false;
-  }
-
-  return true;
-}
-
 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
                                       bool isTailCall,
                                       MachineBasicBlock *LandingPad) {