diff --git a/include/llvm/Transforms/Utils/InlineCost.h b/include/llvm/Transforms/Utils/InlineCost.h index 1698a819400..415fc1e91b8 100644 --- a/include/llvm/Transforms/Utils/InlineCost.h +++ b/include/llvm/Transforms/Utils/InlineCost.h @@ -78,6 +78,9 @@ namespace llvm { /// caller. bool NeverInline; + /// usesDynamicAlloca - True if this function calls alloca (in the C sense). + bool usesDynamicAlloca; + /// NumInsts, NumBlocks - Keep track of how large each function is, which /// is used to estimate the code size cost of inlining it. unsigned NumInsts, NumBlocks; @@ -93,8 +96,8 @@ namespace llvm { /// entry here. std::vector ArgumentWeights; - FunctionInfo() : NeverInline(false), NumInsts(0), NumBlocks(0), - NumVectorInsts(0) {} + FunctionInfo() : NeverInline(false), usesDynamicAlloca(false), NumInsts(0), + NumBlocks(0), NumVectorInsts(0) {} /// analyzeFunction - Fill in the current structure with information /// gleaned from the specified function. diff --git a/lib/Transforms/Utils/InlineCost.cpp b/lib/Transforms/Utils/InlineCost.cpp index 29d4f797325..82e310b38c4 100644 --- a/lib/Transforms/Utils/InlineCost.cpp +++ b/lib/Transforms/Utils/InlineCost.cpp @@ -126,6 +126,11 @@ void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) { NumInsts += 5; } + if (const AllocaInst *AI = dyn_cast(II)) { + if (!isa(AI->getArraySize())) + this->usesDynamicAlloca = true; + } + if (isa(II) || isa(II->getType())) ++NumVectorInsts; @@ -173,7 +178,7 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, SmallPtrSet &NeverInline) { Instruction *TheCall = CS.getInstruction(); Function *Callee = CS.getCalledFunction(); - const Function *Caller = TheCall->getParent()->getParent(); + Function *Caller = TheCall->getParent()->getParent(); // Don't inline a directly recursive call. if (Caller == Callee || @@ -219,11 +224,24 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, // If we haven't calculated this information yet, do so now. 
if (CalleeFI.NumBlocks == 0) CalleeFI.analyzeFunction(Callee); - + // If we should never inline this, return a huge cost. if (CalleeFI.NeverInline) return InlineCost::getNever(); + // Get information about the caller... + FunctionInfo &CallerFI = CachedFunctionInfo[Caller]; + + // If we haven't calculated this information yet, do so now. + if (CallerFI.NumBlocks == 0) + CallerFI.analyzeFunction(Caller); + + // Don't inline a callee with dynamic alloca into a caller without them. + // Functions containing dynamic alloca's are inefficient in various ways; + // don't create more inefficiency. + if (CalleeFI.usesDynamicAlloca && !CallerFI.usesDynamicAlloca) + return InlineCost::getNever(); + // FIXME: It would be nice to kill off CalleeFI.NeverInline. Then we // could move this up and avoid computing the FunctionInfo for // things we are going to just return always inline for. This diff --git a/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll b/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll new file mode 100644 index 00000000000..14840bac676 --- /dev/null +++ b/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll @@ -0,0 +1,36 @@ +; RUN: llvm-as < %s | opt -inline | llvm-dis | grep call +; Do not inline calls to variable-sized alloca. 
+ +@q = common global i8* null ; [#uses=1] + +define i8* @a(i32 %i) nounwind { +entry: + %i_addr = alloca i32 ; [#uses=2] + %retval = alloca i8* ; [#uses=1] + %p = alloca i8* ; [#uses=2] + %"alloca point" = bitcast i32 0 to i32 ; [#uses=0] + store i32 %i, i32* %i_addr + %0 = load i32* %i_addr, align 4 ; [#uses=1] + %1 = alloca i8, i32 %0 ; [#uses=1] + store i8* %1, i8** %p, align 4 + %2 = load i8** %p, align 4 ; [#uses=1] + store i8* %2, i8** @q, align 4 + br label %return + +return: ; preds = %entry + %retval1 = load i8** %retval ; [#uses=1] + ret i8* %retval1 +} + +define void @b(i32 %i) nounwind { +entry: + %i_addr = alloca i32 ; [#uses=2] + %"alloca point" = bitcast i32 0 to i32 ; [#uses=0] + store i32 %i, i32* %i_addr + %0 = load i32* %i_addr, align 4 ; [#uses=1] + %1 = call i8* @a(i32 %0) nounwind ; [#uses=0] + br label %return + +return: ; preds = %entry + ret void +} diff --git a/test/Transforms/Inline/dynamic_alloca_test.ll b/test/Transforms/Inline/dynamic_alloca_test.ll index 87707120e68..b8ff7dedc49 100644 --- a/test/Transforms/Inline/dynamic_alloca_test.ll +++ b/test/Transforms/Inline/dynamic_alloca_test.ll @@ -1,5 +1,7 @@ ; Test that functions with dynamic allocas get inlined in a case where ; naively inlining it would result in a miscompilation. +; Functions with dynamic allocas can only be inlined into functions that +; already have dynamic allocas. ; RUN: llvm-as < %s | opt -inline | llvm-dis | \ ; RUN: grep llvm.stacksave @@ -16,6 +18,8 @@ define internal void @callee(i32 %N) { define void @foo(i32 %N) { ;