diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h index 74c170928f0..353cab20b2c 100644 --- a/include/llvm-c/Core.h +++ b/include/llvm-c/Core.h @@ -457,7 +457,6 @@ void LLVMDisposeTypeHandle(LLVMTypeHandleRef TypeHandle); macro(UnaryInstruction) \ macro(AllocationInst) \ macro(AllocaInst) \ - macro(MallocInst) \ macro(CastInst) \ macro(BitCastInst) \ macro(FPExtInst) \ diff --git a/include/llvm/InstrTypes.h b/include/llvm/InstrTypes.h index cc923dec298..45d366f1ef2 100644 --- a/include/llvm/InstrTypes.h +++ b/include/llvm/InstrTypes.h @@ -116,8 +116,7 @@ public: // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const UnaryInstruction *) { return true; } static inline bool classof(const Instruction *I) { - return I->getOpcode() == Instruction::Malloc || - I->getOpcode() == Instruction::Alloca || + return I->getOpcode() == Instruction::Alloca || I->getOpcode() == Instruction::Free || I->getOpcode() == Instruction::Load || I->getOpcode() == Instruction::VAArg || diff --git a/include/llvm/Instruction.def b/include/llvm/Instruction.def index e603c1257ef..5c8fe3eaccd 100644 --- a/include/llvm/Instruction.def +++ b/include/llvm/Instruction.def @@ -128,49 +128,48 @@ HANDLE_BINARY_INST(24, Xor , BinaryOperator) // Memory operators... 
FIRST_MEMORY_INST(25) -HANDLE_MEMORY_INST(25, Malloc, MallocInst) // Heap management instructions -HANDLE_MEMORY_INST(26, Free , FreeInst ) -HANDLE_MEMORY_INST(27, Alloca, AllocaInst) // Stack management -HANDLE_MEMORY_INST(28, Load , LoadInst ) // Memory manipulation instrs -HANDLE_MEMORY_INST(29, Store , StoreInst ) -HANDLE_MEMORY_INST(30, GetElementPtr, GetElementPtrInst) - LAST_MEMORY_INST(30) +HANDLE_MEMORY_INST(25, Free , FreeInst ) // Heap management instructions +HANDLE_MEMORY_INST(26, Alloca, AllocaInst) // Stack management +HANDLE_MEMORY_INST(27, Load , LoadInst ) // Memory manipulation instrs +HANDLE_MEMORY_INST(28, Store , StoreInst ) +HANDLE_MEMORY_INST(29, GetElementPtr, GetElementPtrInst) + LAST_MEMORY_INST(29) // Cast operators ... // NOTE: The order matters here because CastInst::isEliminableCastPair // NOTE: (see Instructions.cpp) encodes a table based on this ordering. - FIRST_CAST_INST(31) -HANDLE_CAST_INST(31, Trunc , TruncInst ) // Truncate integers -HANDLE_CAST_INST(32, ZExt , ZExtInst ) // Zero extend integers -HANDLE_CAST_INST(33, SExt , SExtInst ) // Sign extend integers -HANDLE_CAST_INST(34, FPToUI , FPToUIInst ) // floating point -> UInt -HANDLE_CAST_INST(35, FPToSI , FPToSIInst ) // floating point -> SInt -HANDLE_CAST_INST(36, UIToFP , UIToFPInst ) // UInt -> floating point -HANDLE_CAST_INST(37, SIToFP , SIToFPInst ) // SInt -> floating point -HANDLE_CAST_INST(38, FPTrunc , FPTruncInst ) // Truncate floating point -HANDLE_CAST_INST(39, FPExt , FPExtInst ) // Extend floating point -HANDLE_CAST_INST(40, PtrToInt, PtrToIntInst) // Pointer -> Integer -HANDLE_CAST_INST(41, IntToPtr, IntToPtrInst) // Integer -> Pointer -HANDLE_CAST_INST(42, BitCast , BitCastInst ) // Type cast - LAST_CAST_INST(42) + FIRST_CAST_INST(30) +HANDLE_CAST_INST(30, Trunc , TruncInst ) // Truncate integers +HANDLE_CAST_INST(31, ZExt , ZExtInst ) // Zero extend integers +HANDLE_CAST_INST(32, SExt , SExtInst ) // Sign extend integers +HANDLE_CAST_INST(33, FPToUI , 
FPToUIInst ) // floating point -> UInt +HANDLE_CAST_INST(34, FPToSI , FPToSIInst ) // floating point -> SInt +HANDLE_CAST_INST(35, UIToFP , UIToFPInst ) // UInt -> floating point +HANDLE_CAST_INST(36, SIToFP , SIToFPInst ) // SInt -> floating point +HANDLE_CAST_INST(37, FPTrunc , FPTruncInst ) // Truncate floating point +HANDLE_CAST_INST(38, FPExt , FPExtInst ) // Extend floating point +HANDLE_CAST_INST(39, PtrToInt, PtrToIntInst) // Pointer -> Integer +HANDLE_CAST_INST(40, IntToPtr, IntToPtrInst) // Integer -> Pointer +HANDLE_CAST_INST(41, BitCast , BitCastInst ) // Type cast + LAST_CAST_INST(41) // Other operators... - FIRST_OTHER_INST(43) -HANDLE_OTHER_INST(43, ICmp , ICmpInst ) // Integer comparison instruction -HANDLE_OTHER_INST(44, FCmp , FCmpInst ) // Floating point comparison instr. -HANDLE_OTHER_INST(45, PHI , PHINode ) // PHI node instruction -HANDLE_OTHER_INST(46, Call , CallInst ) // Call a function -HANDLE_OTHER_INST(47, Select , SelectInst ) // select instruction -HANDLE_OTHER_INST(48, UserOp1, Instruction) // May be used internally in a pass -HANDLE_OTHER_INST(49, UserOp2, Instruction) // Internal to passes only -HANDLE_OTHER_INST(50, VAArg , VAArgInst ) // vaarg instruction -HANDLE_OTHER_INST(51, ExtractElement, ExtractElementInst)// extract from vector -HANDLE_OTHER_INST(52, InsertElement, InsertElementInst) // insert into vector -HANDLE_OTHER_INST(53, ShuffleVector, ShuffleVectorInst) // shuffle two vectors. -HANDLE_OTHER_INST(54, ExtractValue, ExtractValueInst)// extract from aggregate -HANDLE_OTHER_INST(55, InsertValue, InsertValueInst) // insert into aggregate + FIRST_OTHER_INST(42) +HANDLE_OTHER_INST(42, ICmp , ICmpInst ) // Integer comparison instruction +HANDLE_OTHER_INST(43, FCmp , FCmpInst ) // Floating point comparison instr. 
+HANDLE_OTHER_INST(44, PHI , PHINode ) // PHI node instruction +HANDLE_OTHER_INST(45, Call , CallInst ) // Call a function +HANDLE_OTHER_INST(46, Select , SelectInst ) // select instruction +HANDLE_OTHER_INST(47, UserOp1, Instruction) // May be used internally in a pass +HANDLE_OTHER_INST(48, UserOp2, Instruction) // Internal to passes only +HANDLE_OTHER_INST(49, VAArg , VAArgInst ) // vaarg instruction +HANDLE_OTHER_INST(50, ExtractElement, ExtractElementInst)// extract from vector +HANDLE_OTHER_INST(51, InsertElement, InsertElementInst) // insert into vector +HANDLE_OTHER_INST(52, ShuffleVector, ShuffleVectorInst) // shuffle two vectors. +HANDLE_OTHER_INST(53, ExtractValue, ExtractValueInst)// extract from aggregate +HANDLE_OTHER_INST(54, InsertValue, InsertValueInst) // insert into aggregate - LAST_OTHER_INST(55) + LAST_OTHER_INST(54) #undef FIRST_TERM_INST #undef HANDLE_TERM_INST diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h index d9e59ed3a2b..438c220b2ce 100644 --- a/include/llvm/Instructions.h +++ b/include/llvm/Instructions.h @@ -37,8 +37,7 @@ class DominatorTree; // AllocationInst Class //===----------------------------------------------------------------------===// -/// AllocationInst - This class is the common base class of MallocInst and -/// AllocaInst. +/// AllocationInst - This class is the base class of AllocaInst. 
/// class AllocationInst : public UnaryInstruction { protected: @@ -85,56 +84,7 @@ public: // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const AllocationInst *) { return true; } static inline bool classof(const Instruction *I) { - return I->getOpcode() == Instruction::Alloca || - I->getOpcode() == Instruction::Malloc; - } - static inline bool classof(const Value *V) { - return isa(V) && classof(cast(V)); - } -}; - - -//===----------------------------------------------------------------------===// -// MallocInst Class -//===----------------------------------------------------------------------===// - -/// MallocInst - an instruction to allocated memory on the heap -/// -class MallocInst : public AllocationInst { -public: - explicit MallocInst(const Type *Ty, Value *ArraySize = 0, - const Twine &NameStr = "", - Instruction *InsertBefore = 0) - : AllocationInst(Ty, ArraySize, Malloc, - 0, NameStr, InsertBefore) {} - MallocInst(const Type *Ty, Value *ArraySize, - const Twine &NameStr, BasicBlock *InsertAtEnd) - : AllocationInst(Ty, ArraySize, Malloc, 0, NameStr, InsertAtEnd) {} - - MallocInst(const Type *Ty, const Twine &NameStr, - Instruction *InsertBefore = 0) - : AllocationInst(Ty, 0, Malloc, 0, NameStr, InsertBefore) {} - MallocInst(const Type *Ty, const Twine &NameStr, - BasicBlock *InsertAtEnd) - : AllocationInst(Ty, 0, Malloc, 0, NameStr, InsertAtEnd) {} - - MallocInst(const Type *Ty, Value *ArraySize, - unsigned Align, const Twine &NameStr, - BasicBlock *InsertAtEnd) - : AllocationInst(Ty, ArraySize, Malloc, - Align, NameStr, InsertAtEnd) {} - MallocInst(const Type *Ty, Value *ArraySize, - unsigned Align, const Twine &NameStr = "", - Instruction *InsertBefore = 0) - : AllocationInst(Ty, ArraySize, - Malloc, Align, NameStr, InsertBefore) {} - - virtual MallocInst *clone() const; - - // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const MallocInst *) { return true; 
} - static inline bool classof(const Instruction *I) { - return (I->getOpcode() == Instruction::Malloc); + return I->getOpcode() == Instruction::Alloca; } static inline bool classof(const Value *V) { return isa(V) && classof(cast(V)); diff --git a/include/llvm/Support/IRBuilder.h b/include/llvm/Support/IRBuilder.h index 1f659787eb7..f4d1101cdfa 100644 --- a/include/llvm/Support/IRBuilder.h +++ b/include/llvm/Support/IRBuilder.h @@ -429,10 +429,6 @@ public: // Instruction creation methods: Memory Instructions //===--------------------------------------------------------------------===// - MallocInst *CreateMalloc(const Type *Ty, Value *ArraySize = 0, - const Twine &Name = "") { - return Insert(new MallocInst(Ty, ArraySize), Name); - } AllocaInst *CreateAlloca(const Type *Ty, Value *ArraySize = 0, const Twine &Name = "") { return Insert(new AllocaInst(Ty, ArraySize), Name); diff --git a/include/llvm/Support/InstVisitor.h b/include/llvm/Support/InstVisitor.h index 5d7c2f72ba7..440657cfef7 100644 --- a/include/llvm/Support/InstVisitor.h +++ b/include/llvm/Support/InstVisitor.h @@ -46,17 +46,17 @@ namespace llvm { /// /// Declare the class. Note that we derive from InstVisitor instantiated /// /// with _our new subclasses_ type. /// /// -/// struct CountMallocVisitor : public InstVisitor { +/// struct CountAllocaVisitor : public InstVisitor { /// unsigned Count; -/// CountMallocVisitor() : Count(0) {} +/// CountAllocaVisitor() : Count(0) {} /// -/// void visitMallocInst(MallocInst &MI) { ++Count; } +/// void visitAllocaInst(AllocaInst &AI) { ++Count; } /// }; /// /// And this class would be used like this: -/// CountMallocVistor CMV; -/// CMV.visit(function); -/// NumMallocs = CMV.Count; +/// CountAllocaVisitor CAV; +/// CAV.visit(function); +/// NumAllocas = CAV.Count; /// /// The defined has 'visit' methods for Instruction, and also for BasicBlock, /// Function, and Module, which recursively process all contained instructions. 
@@ -165,7 +165,6 @@ public: RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);} RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);} RetTy visitFCmpInst(FCmpInst &I) { DELEGATE(CmpInst);} - RetTy visitMallocInst(MallocInst &I) { DELEGATE(AllocationInst);} RetTy visitAllocaInst(AllocaInst &I) { DELEGATE(AllocationInst);} RetTy visitFreeInst(FreeInst &I) { DELEGATE(Instruction); } RetTy visitLoadInst(LoadInst &I) { DELEGATE(Instruction); } diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index 2483768ead5..fee4e659381 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -225,12 +225,11 @@ extern const PassInfo *const LoopSimplifyID; //===----------------------------------------------------------------------===// // -// LowerAllocations - Turn malloc and free instructions into @malloc and @free -// calls. +// LowerAllocations - Turn free instructions into @free calls. // // AU.addRequiredID(LowerAllocationsID); // -Pass *createLowerAllocationsPass(bool LowerMallocArgToInteger = false); +Pass *createLowerAllocationsPass(); extern const PassInfo *const LowerAllocationsID; //===----------------------------------------------------------------------===// diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp index f5c11084129..7949288340a 100644 --- a/lib/Analysis/IPA/GlobalsModRef.cpp +++ b/lib/Analysis/IPA/GlobalsModRef.cpp @@ -303,7 +303,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) { // Check the value being stored. Value *Ptr = SI->getOperand(0)->getUnderlyingObject(); - if (isa(Ptr) || isMalloc(Ptr)) { + if (isMalloc(Ptr)) { // Okay, easy case. } else if (CallInst *CI = dyn_cast(Ptr)) { Function *F = CI->getCalledFunction(); @@ -439,8 +439,7 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) { if (cast(*II).isVolatile()) // Treat volatile stores as reading memory somewhere. 
FunctionEffect |= Ref; - } else if (isa(*II) || isa(*II) || - isMalloc(&cast(*II))) { + } else if (isMalloc(&cast(*II)) || isa(*II)) { FunctionEffect |= ModRef; } diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp index 3b0d2c90aeb..b833baacede 100644 --- a/lib/Analysis/InlineCost.cpp +++ b/lib/Analysis/InlineCost.cpp @@ -131,7 +131,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) { } // These, too, are calls. - if (isa(II) || isa(II)) + if (isa(II)) NumInsts += InlineConstants::CallPenalty; if (const AllocaInst *AI = dyn_cast(II)) { diff --git a/lib/Analysis/InstCount.cpp b/lib/Analysis/InstCount.cpp index 83724caf521..4cde7935772 100644 --- a/lib/Analysis/InstCount.cpp +++ b/lib/Analysis/InstCount.cpp @@ -76,11 +76,11 @@ FunctionPass *llvm::createInstCountPass() { return new InstCount(); } bool InstCount::runOnFunction(Function &F) { unsigned StartMemInsts = NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst + - NumInvokeInst + NumAllocaInst + NumMallocInst + NumFreeInst; + NumInvokeInst + NumAllocaInst + NumFreeInst; visit(F); unsigned EndMemInsts = NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst + - NumInvokeInst + NumAllocaInst + NumMallocInst + NumFreeInst; + NumInvokeInst + NumAllocaInst + NumFreeInst; TotalMemInst += EndMemInsts-StartMemInsts; return false; } diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index baa347a6638..dc0d4890473 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -469,26 +469,11 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask, break; } - case Instruction::Alloca: - case Instruction::Malloc: { + case Instruction::Alloca: { AllocationInst *AI = cast(V); unsigned Align = AI->getAlignment(); - if (Align == 0 && TD) { - if (isa(AI)) - Align = TD->getABITypeAlignment(AI->getType()->getElementType()); - else if (isa(AI)) { - // Malloc returns maximally aligned memory. 
- Align = TD->getABITypeAlignment(AI->getType()->getElementType()); - Align = - std::max(Align, - (unsigned)TD->getABITypeAlignment( - Type::getDoubleTy(V->getContext()))); - Align = - std::max(Align, - (unsigned)TD->getABITypeAlignment( - Type::getInt64Ty(V->getContext()))); - } - } + if (Align == 0 && TD) + Align = TD->getABITypeAlignment(AI->getType()->getElementType()); if (Align > 0) KnownZero = Mask & APInt::getLowBitsSet(BitWidth, diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp index 001d56d5385..749ce9360f2 100644 --- a/lib/AsmParser/LLParser.cpp +++ b/lib/AsmParser/LLParser.cpp @@ -3315,7 +3315,7 @@ bool LLParser::ParseShuffleVector(Instruction *&Inst, PerFunctionState &PFS) { } /// ParsePHI -/// ::= 'phi' Type '[' Value ',' Value ']' (',' '[' Value ',' Valueß ']')* +/// ::= 'phi' Type '[' Value ',' Value ']' (',' '[' Value ',' Value ']')* bool LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) { PATypeHolder Ty(Type::getVoidTy(Context)); Value *Op0, *Op1; diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp index 12a1f5ea5dc..304900f7115 100644 --- a/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -1054,13 +1054,6 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, Vals.push_back(VE.getValueID(I.getOperand(i))); break; - case Instruction::Malloc: - Code = bitc::FUNC_CODE_INST_MALLOC; - Vals.push_back(VE.getTypeID(I.getType())); - Vals.push_back(VE.getValueID(I.getOperand(0))); // size.
- Vals.push_back(Log2_32(cast(I).getAlignment())+1); - break; - case Instruction::Free: Code = bitc::FUNC_CODE_INST_FREE; PushValueAndType(I.getOperand(0), InstID, Vals, VE); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp index 9017e435962..adcc5322721 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp @@ -5485,48 +5485,6 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { DAG.setRoot(Chain); } - -void SelectionDAGLowering::visitMalloc(MallocInst &I) { - SDValue Src = getValue(I.getOperand(0)); - - // Scale up by the type size in the original i32 type width. Various - // mid-level optimizers may make assumptions about demanded bits etc from the - // i32-ness of the optimizer: we do not want to promote to i64 and then - // multiply on 64-bit targets. - // FIXME: Malloc inst should go away: PR715. - uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType()); - if (ElementSize != 1) { - // Src is always 32-bits, make sure the constant fits. 
- assert(Src.getValueType() == MVT::i32); - ElementSize = (uint32_t)ElementSize; - Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(), - Src, DAG.getConstant(ElementSize, Src.getValueType())); - } - - EVT IntPtr = TLI.getPointerTy(); - - Src = DAG.getZExtOrTrunc(Src, getCurDebugLoc(), IntPtr); - - TargetLowering::ArgListTy Args; - TargetLowering::ArgListEntry Entry; - Entry.Node = Src; - Entry.Ty = TLI.getTargetData()->getIntPtrType(*DAG.getContext()); - Args.push_back(Entry); - - bool isTailCall = PerformTailCallOpt && - isInTailCallPosition(&I, Attribute::None, TLI); - std::pair Result = - TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false, - 0, CallingConv::C, isTailCall, - /*isReturnValueUsed=*/true, - DAG.getExternalSymbol("malloc", IntPtr), - Args, DAG, getCurDebugLoc()); - if (Result.first.getNode()) - setValue(&I, Result.first); // Pointers always fit in registers - if (Result.second.getNode()) - DAG.setRoot(Result.second); -} - void SelectionDAGLowering::visitFree(FreeInst &I) { TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.h index 06acc8a6bfa..722b1d82551 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.h +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.h @@ -60,7 +60,6 @@ class MachineFunction; class MachineInstr; class MachineModuleInfo; class MachineRegisterInfo; -class MallocInst; class PHINode; class PtrToIntInst; class ReturnInst; @@ -529,7 +528,6 @@ private: void visitGetElementPtr(User &I); void visitSelect(User &I); - void visitMalloc(MallocInst &I); void visitFree(FreeInst &I); void visitAlloca(AllocaInst &I); void visitLoad(LoadInst &I); diff --git a/lib/Target/CBackend/CBackend.cpp b/lib/Target/CBackend/CBackend.cpp index fe63edf3ff6..cbf769bf015 100644 --- a/lib/Target/CBackend/CBackend.cpp +++ b/lib/Target/CBackend/CBackend.cpp @@ -302,7 +302,6 @@ namespace { void 
visitInlineAsm(CallInst &I); bool visitBuiltinCall(CallInst &I, Intrinsic::ID ID, bool &WroteCallee); - void visitMallocInst(MallocInst &I); void visitAllocaInst(AllocaInst &I); void visitFreeInst (FreeInst &I); void visitLoadInst (LoadInst &I); @@ -3405,10 +3404,6 @@ void CWriter::visitInlineAsm(CallInst &CI) { Out << ")"; } -void CWriter::visitMallocInst(MallocInst &I) { - llvm_unreachable("lowerallocations pass didn't work!"); -} - void CWriter::visitAllocaInst(AllocaInst &I) { Out << '('; printType(Out, I.getType()); @@ -3690,7 +3685,7 @@ bool CTargetMachine::addPassesToEmitWholeFile(PassManager &PM, if (FileType != TargetMachine::AssemblyFile) return true; PM.add(createGCLoweringPass()); - PM.add(createLowerAllocationsPass(true)); + PM.add(createLowerAllocationsPass()); PM.add(createLowerInvokePass()); PM.add(createCFGSimplificationPass()); // clean up after lower invoke. PM.add(new CBackendNameAllUsedStructsAndMergeFunctions()); diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp index 14ad451074a..45c2a7b1e33 100644 --- a/lib/Target/CppBackend/CPPBackend.cpp +++ b/lib/Target/CppBackend/CPPBackend.cpp @@ -1258,20 +1258,6 @@ namespace { Out << "\");"; break; } - case Instruction::Malloc: { - const MallocInst* mallocI = cast(I); - Out << "MallocInst* " << iName << " = new MallocInst(" - << getCppName(mallocI->getAllocatedType()) << ", "; - if (mallocI->isArrayAllocation()) - Out << opNames[0] << ", " ; - Out << "\""; - printEscapedString(mallocI->getName()); - Out << "\", " << bbname << ");"; - if (mallocI->getAlignment()) - nl(Out) << iName << "->setAlignment(" - << mallocI->getAlignment() << ");"; - break; - } case Instruction::Free: { Out << "FreeInst* " << iName << " = new FreeInst(" << getCppName(I->getOperand(0)) << ", " << bbname << ");"; diff --git a/lib/Target/MSIL/MSILWriter.cpp b/lib/Target/MSIL/MSILWriter.cpp index 26d637b4347..cf08a97a957 100644 --- a/lib/Target/MSIL/MSILWriter.cpp +++ 
b/lib/Target/MSIL/MSILWriter.cpp @@ -1191,9 +1191,6 @@ void MSILWriter::printInstruction(const Instruction* Inst) { case Instruction::Alloca: printAllocaInstruction(cast(Inst)); break; - case Instruction::Malloc: - llvm_unreachable("LowerAllocationsPass used"); - break; case Instruction::Free: llvm_unreachable("LowerAllocationsPass used"); break; @@ -1702,7 +1699,7 @@ bool MSILTarget::addPassesToEmitWholeFile(PassManager &PM, if (FileType != TargetMachine::AssemblyFile) return true; MSILWriter* Writer = new MSILWriter(o); PM.add(createGCLoweringPass()); - PM.add(createLowerAllocationsPass(true)); + PM.add(createLowerAllocationsPass()); // FIXME: Handle switch trougth native IL instruction "switch" PM.add(createLowerSwitchPass()); PM.add(createCFGSimplificationPass()); diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp index 7edaa7fbef5..563d594c7b0 100644 --- a/lib/Transforms/IPO/FunctionAttrs.cpp +++ b/lib/Transforms/IPO/FunctionAttrs.cpp @@ -153,7 +153,7 @@ bool FunctionAttrs::AddReadAttrs(const std::vector &SCC) { // Writes memory. Just give up. return false; - if (isa(I)) + if (isMalloc(I)) // malloc claims not to write memory! PR3754. return false; @@ -267,7 +267,6 @@ bool FunctionAttrs::IsFunctionMallocLike(Function *F, // Check whether the pointer came from an allocation. case Instruction::Alloca: - case Instruction::Malloc: break; case Instruction::Call: if (isMalloc(RVI)) diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp index 3d10649c95b..9ced2e89a7e 100644 --- a/lib/Transforms/IPO/GlobalOpt.cpp +++ b/lib/Transforms/IPO/GlobalOpt.cpp @@ -816,130 +816,6 @@ static void ConstantPropUsersOf(Value *V, LLVMContext &Context) { } } -/// OptimizeGlobalAddressOfMalloc - This function takes the specified global -/// variable, and transforms the program as if it always contained the result of -/// the specified malloc. 
Because it is always the result of the specified -/// malloc, there is no reason to actually DO the malloc. Instead, turn the -/// malloc into a global, and any loads of GV as uses of the new global. -static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, - MallocInst *MI, - LLVMContext &Context) { - DEBUG(errs() << "PROMOTING MALLOC GLOBAL: " << *GV << " MALLOC = " << *MI); - ConstantInt *NElements = cast(MI->getArraySize()); - - if (NElements->getZExtValue() != 1) { - // If we have an array allocation, transform it to a single element - // allocation to make the code below simpler. - Type *NewTy = ArrayType::get(MI->getAllocatedType(), - NElements->getZExtValue()); - MallocInst *NewMI = - new MallocInst(NewTy, Constant::getNullValue(Type::getInt32Ty(Context)), - MI->getAlignment(), MI->getName(), MI); - Value* Indices[2]; - Indices[0] = Indices[1] = Constant::getNullValue(Type::getInt32Ty(Context)); - Value *NewGEP = GetElementPtrInst::Create(NewMI, Indices, Indices + 2, - NewMI->getName()+".el0", MI); - MI->replaceAllUsesWith(NewGEP); - MI->eraseFromParent(); - MI = NewMI; - } - - // Create the new global variable. The contents of the malloc'd memory is - // undefined, so initialize with an undef value. - // FIXME: This new global should have the alignment returned by malloc. Code - // could depend on malloc returning large alignment (on the mac, 16 bytes) but - // this would only guarantee some lower alignment. - Constant *Init = UndefValue::get(MI->getAllocatedType()); - GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(), - MI->getAllocatedType(), false, - GlobalValue::InternalLinkage, Init, - GV->getName()+".body", - GV, - GV->isThreadLocal()); - - // Anything that used the malloc now uses the global directly. 
- MI->replaceAllUsesWith(NewGV); - - Constant *RepValue = NewGV; - if (NewGV->getType() != GV->getType()->getElementType()) - RepValue = ConstantExpr::getBitCast(RepValue, - GV->getType()->getElementType()); - - // If there is a comparison against null, we will insert a global bool to - // keep track of whether the global was initialized yet or not. - GlobalVariable *InitBool = - new GlobalVariable(Context, Type::getInt1Ty(Context), false, - GlobalValue::InternalLinkage, - ConstantInt::getFalse(Context), GV->getName()+".init", - GV->isThreadLocal()); - bool InitBoolUsed = false; - - // Loop over all uses of GV, processing them in turn. - std::vector Stores; - while (!GV->use_empty()) - if (LoadInst *LI = dyn_cast(GV->use_back())) { - while (!LI->use_empty()) { - Use &LoadUse = LI->use_begin().getUse(); - if (!isa(LoadUse.getUser())) - LoadUse = RepValue; - else { - ICmpInst *CI = cast(LoadUse.getUser()); - // Replace the cmp X, 0 with a use of the bool value. - Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", CI); - InitBoolUsed = true; - switch (CI->getPredicate()) { - default: llvm_unreachable("Unknown ICmp Predicate!"); - case ICmpInst::ICMP_ULT: - case ICmpInst::ICMP_SLT: - LV = ConstantInt::getFalse(Context); // X < null -> always false - break; - case ICmpInst::ICMP_ULE: - case ICmpInst::ICMP_SLE: - case ICmpInst::ICMP_EQ: - LV = BinaryOperator::CreateNot(LV, "notinit", CI); - break; - case ICmpInst::ICMP_NE: - case ICmpInst::ICMP_UGE: - case ICmpInst::ICMP_SGE: - case ICmpInst::ICMP_UGT: - case ICmpInst::ICMP_SGT: - break; // no change. - } - CI->replaceAllUsesWith(LV); - CI->eraseFromParent(); - } - } - LI->eraseFromParent(); - } else { - StoreInst *SI = cast(GV->use_back()); - // The global is initialized when the store to it occurs. - new StoreInst(ConstantInt::getTrue(Context), InitBool, SI); - SI->eraseFromParent(); - } - - // If the initialization boolean was used, insert it, otherwise delete it. 
- if (!InitBoolUsed) { - while (!InitBool->use_empty()) // Delete initializations - cast(InitBool->use_back())->eraseFromParent(); - delete InitBool; - } else - GV->getParent()->getGlobalList().insert(GV, InitBool); - - - // Now the GV is dead, nuke it and the malloc. - GV->eraseFromParent(); - MI->eraseFromParent(); - - // To further other optimizations, loop over all users of NewGV and try to - // constant prop them. This will promote GEP instructions with constant - // indices into GEP constant-exprs, which will allow global-opt to hack on it. - ConstantPropUsersOf(NewGV, Context); - if (RepValue != NewGV) - ConstantPropUsersOf(RepValue, Context); - - return NewGV; -} - /// OptimizeGlobalAddressOfMalloc - This function takes the specified global /// variable, and transforms the program as if it always contained the result of /// the specified malloc. Because it is always the result of the specified @@ -1397,185 +1273,6 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, } } -/// PerformHeapAllocSRoA - MI is an allocation of an array of structures. Break -/// it up into multiple allocations of arrays of the fields. -static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI, - LLVMContext &Context){ - DEBUG(errs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *MI); - const StructType *STy = cast(MI->getAllocatedType()); - - // There is guaranteed to be at least one use of the malloc (storing - // it into GV). If there are other uses, change them to be uses of - // the global to simplify later code. This also deletes the store - // into GV. - ReplaceUsesOfMallocWithGlobal(MI, GV); - - // Okay, at this point, there are no users of the malloc. Insert N - // new mallocs at the same place as MI, and N globals. 
- std::vector FieldGlobals; - std::vector FieldMallocs; - - for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){ - const Type *FieldTy = STy->getElementType(FieldNo); - const Type *PFieldTy = PointerType::getUnqual(FieldTy); - - GlobalVariable *NGV = - new GlobalVariable(*GV->getParent(), - PFieldTy, false, GlobalValue::InternalLinkage, - Constant::getNullValue(PFieldTy), - GV->getName() + ".f" + Twine(FieldNo), GV, - GV->isThreadLocal()); - FieldGlobals.push_back(NGV); - - MallocInst *NMI = new MallocInst(FieldTy, MI->getArraySize(), - MI->getName() + ".f" + Twine(FieldNo), MI); - FieldMallocs.push_back(NMI); - new StoreInst(NMI, NGV, MI); - } - - // The tricky aspect of this transformation is handling the case when malloc - // fails. In the original code, malloc failing would set the result pointer - // of malloc to null. In this case, some mallocs could succeed and others - // could fail. As such, we emit code that looks like this: - // F0 = malloc(field0) - // F1 = malloc(field1) - // F2 = malloc(field2) - // if (F0 == 0 || F1 == 0 || F2 == 0) { - // if (F0) { free(F0); F0 = 0; } - // if (F1) { free(F1); F1 = 0; } - // if (F2) { free(F2); F2 = 0; } - // } - Value *RunningOr = 0; - for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) { - Value *Cond = new ICmpInst(MI, ICmpInst::ICMP_EQ, FieldMallocs[i], - Constant::getNullValue(FieldMallocs[i]->getType()), - "isnull"); - if (!RunningOr) - RunningOr = Cond; // First seteq - else - RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", MI); - } - - // Split the basic block at the old malloc. - BasicBlock *OrigBB = MI->getParent(); - BasicBlock *ContBB = OrigBB->splitBasicBlock(MI, "malloc_cont"); - - // Create the block to check the first condition. Put all these blocks at the - // end of the function as they are unlikely to be executed. 
- BasicBlock *NullPtrBlock = BasicBlock::Create(Context, "malloc_ret_null", - OrigBB->getParent()); - - // Remove the uncond branch from OrigBB to ContBB, turning it into a cond - // branch on RunningOr. - OrigBB->getTerminator()->eraseFromParent(); - BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB); - - // Within the NullPtrBlock, we need to emit a comparison and branch for each - // pointer, because some may be null while others are not. - for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { - Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock); - Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal, - Constant::getNullValue(GVVal->getType()), - "tmp"); - BasicBlock *FreeBlock = BasicBlock::Create(Context, "free_it", - OrigBB->getParent()); - BasicBlock *NextBlock = BasicBlock::Create(Context, "next", - OrigBB->getParent()); - BranchInst::Create(FreeBlock, NextBlock, Cmp, NullPtrBlock); - - // Fill in FreeBlock. - new FreeInst(GVVal, FreeBlock); - new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i], - FreeBlock); - BranchInst::Create(NextBlock, FreeBlock); - - NullPtrBlock = NextBlock; - } - - BranchInst::Create(ContBB, NullPtrBlock); - - // MI is no longer needed, remove it. - MI->eraseFromParent(); - - /// InsertedScalarizedLoads - As we process loads, if we can't immediately - /// update all uses of the load, keep track of what scalarized loads are - /// inserted for a given load. - DenseMap > InsertedScalarizedValues; - InsertedScalarizedValues[GV] = FieldGlobals; - - std::vector > PHIsToRewrite; - - // Okay, the malloc site is completely handled. All of the uses of GV are now - // loads, and all uses of those loads are simple. Rewrite them to use loads - // of the per-field globals instead. 
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) { - Instruction *User = cast<Instruction>(*UI++); - - if (LoadInst *LI = dyn_cast<LoadInst>(User)) { - RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite, - Context); - continue; - } - - // Must be a store of null. - StoreInst *SI = cast<StoreInst>(User); - assert(isa<ConstantPointerNull>(SI->getOperand(0)) && - "Unexpected heap-sra user!"); - - // Insert a store of null into each global. - for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { - const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType()); - Constant *Null = Constant::getNullValue(PT->getElementType()); - new StoreInst(Null, FieldGlobals[i], SI); - } - // Erase the original store. - SI->eraseFromParent(); - } - - // While we have PHIs that are interesting to rewrite, do it. - while (!PHIsToRewrite.empty()) { - PHINode *PN = PHIsToRewrite.back().first; - unsigned FieldNo = PHIsToRewrite.back().second; - PHIsToRewrite.pop_back(); - PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]); - assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi"); - - // Add all the incoming values. This can materialize more phis. - for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { - Value *InVal = PN->getIncomingValue(i); - InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues, - PHIsToRewrite, Context); - FieldPN->addIncoming(InVal, PN->getIncomingBlock(i)); - } - } - - // Drop all inter-phi links and any loads that made it this far. - for (DenseMap<Value*, std::vector<Value*> >::iterator - I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); - I != E; ++I) { - if (PHINode *PN = dyn_cast<PHINode>(I->first)) - PN->dropAllReferences(); - else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) - LI->dropAllReferences(); - } - - // Delete all the phis and loads now that inter-references are dead.
- for (DenseMap<Value*, std::vector<Value*> >::iterator - I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); - I != E; ++I) { - if (PHINode *PN = dyn_cast<PHINode>(I->first)) - PN->eraseFromParent(); - else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) - LI->eraseFromParent(); - } - - // The old global is now dead, remove it. - GV->eraseFromParent(); - - ++NumHeapSRA; - return cast<GlobalVariable>(FieldGlobals[0]); -} - /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break /// it up into multiple allocations of arrays of the fields. static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, @@ -1763,95 +1460,6 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, return cast<GlobalVariable>(FieldGlobals[0]); } -/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a -/// pointer global variable with a single value stored it that is a malloc or -/// cast of malloc. -static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, - MallocInst *MI, - Module::global_iterator &GVI, - TargetData *TD, - LLVMContext &Context) { - // If this is a malloc of an abstract type, don't touch it. - if (!MI->getAllocatedType()->isSized()) - return false; - - // We can't optimize this global unless all uses of it are *known* to be - // of the malloc value, not of the null initializer value (consider a use - // that compares the global's value against zero to see if the malloc has - // been reached). To do this, we check to see if all uses of the global - // would trap if the global were null: this proves that they must all - // happen after the malloc. - if (!AllUsesOfLoadedValueWillTrapIfNull(GV)) - return false; - - // We can't optimize this if the malloc itself is used in a complex way, - // for example, being stored into multiple globals. This allows the - // malloc to be stored into the specified global, loaded setcc'd, and - // GEP'd. These are all things we could transform to using the global - // for.
- { - SmallPtrSet<PHINode*, 8> PHIs; - if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(MI, GV, PHIs)) - return false; - } - - - // If we have a global that is only initialized with a fixed size malloc, - // transform the program to use global memory instead of malloc'd memory. - // This eliminates dynamic allocation, avoids an indirection accessing the - // data, and exposes the resultant global to further GlobalOpt. - if (ConstantInt *NElements = dyn_cast<ConstantInt>(MI->getArraySize())) { - // Restrict this transformation to only working on small allocations - // (2048 bytes currently), as we don't want to introduce a 16M global or - // something. - if (TD && - NElements->getZExtValue()* - TD->getTypeAllocSize(MI->getAllocatedType()) < 2048) { - GVI = OptimizeGlobalAddressOfMalloc(GV, MI, Context); - return true; - } - } - - // If the allocation is an array of structures, consider transforming this - // into multiple malloc'd arrays, one for each field. This is basically - // SRoA for malloc'd memory. - const Type *AllocTy = MI->getAllocatedType(); - - // If this is an allocation of a fixed size array of structs, analyze as a - // variable size array. malloc [100 x struct],1 -> malloc struct, 100 - if (!MI->isArrayAllocation()) - if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy)) - AllocTy = AT->getElementType(); - - if (const StructType *AllocSTy = dyn_cast<StructType>(AllocTy)) { - // This the structure has an unreasonable number of fields, leave it - // alone. - if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 && - AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, MI)) { - - // If this is a fixed size array, transform the Malloc to be an alloc of - // structs.
malloc [100 x struct],1 -> malloc struct, 100 - if (const ArrayType *AT = dyn_cast<ArrayType>(MI->getAllocatedType())) { - MallocInst *NewMI = - new MallocInst(AllocSTy, - ConstantInt::get(Type::getInt32Ty(Context), - AT->getNumElements()), - "", MI); - NewMI->takeName(MI); - Value *Cast = new BitCastInst(NewMI, MI->getType(), "tmp", MI); - MI->replaceAllUsesWith(Cast); - MI->eraseFromParent(); - MI = NewMI; - } - - GVI = PerformHeapAllocSRoA(GV, MI, Context); - return true; - } - } - - return false; -} - /// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a /// pointer global variable with a single value stored it that is a malloc or /// cast of malloc. @@ -1970,9 +1578,6 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, // Optimize away any trapping uses of the loaded value. if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, Context)) return true; - } else if (MallocInst *MI = dyn_cast<MallocInst>(StoredOnceVal)) { - if (TryToOptimizeStoreOfMallocToGlobal(GV, MI, GVI, TD, Context)) - return true; } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) { if (getMallocAllocatedType(CI)) { BitCastInst* BCI = NULL; diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp index f635af3974d..ea2164f6587 100644 --- a/lib/Transforms/Scalar/InstructionCombining.cpp +++ b/lib/Transforms/Scalar/InstructionCombining.cpp @@ -6300,16 +6300,6 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { return SelectInst::Create(LHSI->getOperand(0), Op1, Op2); break; } - case Instruction::Malloc: - // If we have (malloc != null), and if the malloc has a single use, we - // can assume it is successful and remove the malloc.
- if (LHSI->hasOneUse() && isa<ConstantPointerNull>(RHSC)) { - Worklist.Add(LHSI); - return ReplaceInstUsesWith(I, - ConstantInt::get(Type::getInt1Ty(*Context), - !I.isTrueWhenEqual())); - } - break; case Instruction::Call: // If we have (malloc != null), and if the malloc has a single use, we // can assume it is successful and remove the malloc. @@ -7809,11 +7799,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI, Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp"); } - AllocationInst *New; - if (isa<MallocInst>(AI)) - New = AllocaBuilder.CreateMalloc(CastElTy, Amt); - else - New = AllocaBuilder.CreateAlloca(CastElTy, Amt); + AllocationInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt); New->setAlignment(AI.getAlignment()); New->takeName(&AI); @@ -11213,15 +11199,8 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) { if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) { const Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue()); - AllocationInst *New = 0; - - // Create and insert the replacement instruction... - if (isa<MallocInst>(AI)) - New = Builder->CreateMalloc(NewTy, 0, AI.getName()); - else { - assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!"); - New = Builder->CreateAlloca(NewTy, 0, AI.getName()); - } + assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!"); + AllocationInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName()); New->setAlignment(AI.getAlignment()); // Scan to the end of the allocation instructions, to skip over a block of @@ -11294,12 +11273,6 @@ Instruction *InstCombiner::visitFreeInst(FreeInst &FI) { } } - // Change free(malloc) into nothing, if the malloc has a single use.
- if (MallocInst *MI = dyn_cast<MallocInst>(Op)) - if (MI->hasOneUse()) { - EraseInstFromFunction(FI); - return EraseInstFromFunction(*MI); - } if (isMalloc(Op)) { if (CallInst* CI = extractMallocCallFromBitCast(Op)) { if (Op->hasOneUse() && CI->hasOneUse()) { diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp index 9160654c3d8..00d450812c0 100644 --- a/lib/Transforms/Scalar/Reassociate.cpp +++ b/lib/Transforms/Scalar/Reassociate.cpp @@ -122,7 +122,7 @@ static bool isUnmovableInstruction(Instruction *I) { if (I->getOpcode() == Instruction::PHI || I->getOpcode() == Instruction::Alloca || I->getOpcode() == Instruction::Load || - I->getOpcode() == Instruction::Malloc || isMalloc(I) || + isMalloc(I) || I->getOpcode() == Instruction::Invoke || (I->getOpcode() == Instruction::Call && !isa<DbgInfoIntrinsic>(I)) || diff --git a/lib/Transforms/Utils/LowerAllocations.cpp b/lib/Transforms/Utils/LowerAllocations.cpp index f26d7c146ee..9c9113daa93 100644 --- a/lib/Transforms/Utils/LowerAllocations.cpp +++ b/lib/Transforms/Utils/LowerAllocations.cpp @@ -1,4 +1,4 @@ -//===- LowerAllocations.cpp - Reduce malloc & free insts to calls ---------===// +//===- LowerAllocations.cpp - Reduce free insts to calls ------------------===// // // The LLVM Compiler Infrastructure // @@ -29,18 +29,15 @@ using namespace llvm; STATISTIC(NumLowered, "Number of allocations lowered"); namespace { - /// LowerAllocations - Turn malloc and free instructions into @malloc and - /// @free calls. + /// LowerAllocations - Turn free instructions into @free calls.
/// class VISIBILITY_HIDDEN LowerAllocations : public BasicBlockPass { Constant *FreeFunc; // Functions in the module we are processing // Initialized by doInitialization - bool LowerMallocArgToInteger; public: static char ID; // Pass ID, replacement for typeid - explicit LowerAllocations(bool LowerToInt = false) - : BasicBlockPass(&ID), FreeFunc(0), - LowerMallocArgToInteger(LowerToInt) {} + explicit LowerAllocations() + : BasicBlockPass(&ID), FreeFunc(0) {} virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<TargetData>(); @@ -54,7 +51,7 @@ namespace { } /// doPassInitialization - For the lower allocations pass, this ensures that - /// a module contains a declaration for a malloc and a free function. + /// a module contains a declaration for a free function. /// bool doInitialization(Module &M); @@ -76,13 +73,13 @@ X("lowerallocs", "Lower allocations from instructions to calls"); // Publically exposed interface to pass... const PassInfo *const llvm::LowerAllocationsID = &X; // createLowerAllocationsPass - Interface to this file... -Pass *llvm::createLowerAllocationsPass(bool LowerMallocArgToInteger) { - return new LowerAllocations(LowerMallocArgToInteger); +Pass *llvm::createLowerAllocationsPass() { + return new LowerAllocations(); } // doInitialization - For the lower allocations pass, this ensures that a -// module contains a declaration for a malloc and a free function. +// module contains a declaration for a free function. // // This function is always successful.
// @@ -102,25 +99,9 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) { BasicBlock::InstListType &BBIL = BB.getInstList(); - const TargetData &TD = getAnalysis<TargetData>(); - const Type *IntPtrTy = TD.getIntPtrType(BB.getContext()); - - // Loop over all of the instructions, looking for malloc or free instructions + // Loop over all of the instructions, looking for free instructions for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) { - if (MallocInst *MI = dyn_cast<MallocInst>(I)) { - Value *ArraySize = MI->getOperand(0); - if (ArraySize->getType() != IntPtrTy) - ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, - false /*ZExt*/, "", I); - Value *MCast = CallInst::CreateMalloc(I, IntPtrTy, - MI->getAllocatedType(), ArraySize); - - // Replace all uses of the old malloc inst with the cast inst - MI->replaceAllUsesWith(MCast); - I = --BBIL.erase(I); // remove and delete the malloc instr... - Changed = true; - ++NumLowered; - } else if (FreeInst *FI = dyn_cast<FreeInst>(I)) { + if (FreeInst *FI = dyn_cast<FreeInst>(I)) { Value *PtrCast = new BitCastInst(FI->getOperand(0), Type::getInt8PtrTy(BB.getContext()), "", I); diff --git a/lib/VMCore/Instruction.cpp b/lib/VMCore/Instruction.cpp index 4df536e68b4..dd8a5430502 100644 --- a/lib/VMCore/Instruction.cpp +++ b/lib/VMCore/Instruction.cpp @@ -127,7 +127,6 @@ const char *Instruction::getOpcodeName(unsigned OpCode) { case Xor: return "xor"; // Memory instructions... - case Malloc: return "malloc"; case Free: return "free"; case Alloca: return "alloca"; case Load: return "load"; @@ -442,7 +441,6 @@ bool Instruction::isSafeToSpeculativelyExecute() const { // overflow-checking arithmetic, etc.)
case VAArg: case Alloca: - case Malloc: case Invoke: case PHI: case Store: diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp index 4ff253a7dbe..e36da135154 100644 --- a/lib/VMCore/Instructions.cpp +++ b/lib/VMCore/Instructions.cpp @@ -847,7 +847,7 @@ static Value *getAISize(LLVMContext &Context, Value *Amt) { assert(!isa<BasicBlock>(Amt) && "Passed basic block into allocation size parameter! Use other ctor"); assert(Amt->getType() == Type::getInt32Ty(Context) && - "Malloc/Allocation array size is not a 32-bit integer!"); + "Allocation array size is not a 32-bit integer!"); } return Amt; } @@ -3083,18 +3083,6 @@ InsertValueInst *InsertValueInst::clone() const { return New; } -MallocInst *MallocInst::clone() const { - MallocInst *New = new MallocInst(getAllocatedType(), - (Value*)getOperand(0), - getAlignment()); - New->SubclassOptionalData = SubclassOptionalData; - if (hasMetadata()) { - LLVMContext &Context = getContext(); - Context.pImpl->TheMetadata.ValueIsCloned(this, New); - } - return New; -} - AllocaInst *AllocaInst::clone() const { AllocaInst *New = new AllocaInst(getAllocatedType(), (Value*)getOperand(0),