
Remove MallocInst from LLVM Instructions.

llvm-svn: 84299
Victor Hernandez 2009-10-17 01:18:07 +00:00
parent f5225573b7
commit a25a2890fa
25 changed files with 73 additions and 677 deletions
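The practical upshot: heap allocation is no longer a first-class IR instruction. Below is a minimal migration sketch, using only APIs that appear in the hunks of this diff (CallInst::CreateMalloc, isMalloc, extractMallocCall, getMallocAllocatedType); the variable names are illustrative, and the declaring header for the malloc helpers (llvm/Analysis/MallocHelper.h at this revision) is an assumption:

    // Before this commit, malloc was an Instruction subclass:
    //   Value *P = new MallocInst(AllocTy, ArraySize, "p", InsertPt);
    // After it, emit a call to @malloc instead, as the LowerAllocations.cpp
    // hunk below does for the instructions it lowers:
    Value *P = CallInst::CreateMalloc(InsertPt, IntPtrTy, AllocTy, ArraySize);

    // Detection now pattern-matches the call rather than the opcode:
    //   isa<MallocInst>(V)        ->  isMalloc(V)
    //   dyn_cast<MallocInst>(V)   ->  extractMallocCall(V)
    //   MI->getAllocatedType()    ->  getMallocAllocatedType(CI)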

View File

@@ -457,7 +457,6 @@ void LLVMDisposeTypeHandle(LLVMTypeHandleRef TypeHandle);
macro(UnaryInstruction) \
macro(AllocationInst) \
macro(AllocaInst) \
macro(MallocInst) \
macro(CastInst) \
macro(BitCastInst) \
macro(FPExtInst) \

View File

@@ -116,8 +116,7 @@ public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const UnaryInstruction *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Malloc ||
I->getOpcode() == Instruction::Alloca ||
return I->getOpcode() == Instruction::Alloca ||
I->getOpcode() == Instruction::Free ||
I->getOpcode() == Instruction::Load ||
I->getOpcode() == Instruction::VAArg ||

View File

@@ -128,49 +128,48 @@ HANDLE_BINARY_INST(24, Xor , BinaryOperator)
// Memory operators...
FIRST_MEMORY_INST(25)
HANDLE_MEMORY_INST(25, Malloc, MallocInst) // Heap management instructions
HANDLE_MEMORY_INST(26, Free , FreeInst )
HANDLE_MEMORY_INST(27, Alloca, AllocaInst) // Stack management
HANDLE_MEMORY_INST(28, Load , LoadInst ) // Memory manipulation instrs
HANDLE_MEMORY_INST(29, Store , StoreInst )
HANDLE_MEMORY_INST(30, GetElementPtr, GetElementPtrInst)
LAST_MEMORY_INST(30)
HANDLE_MEMORY_INST(25, Free , FreeInst ) // Heap management instructions
HANDLE_MEMORY_INST(26, Alloca, AllocaInst) // Stack management
HANDLE_MEMORY_INST(27, Load , LoadInst ) // Memory manipulation instrs
HANDLE_MEMORY_INST(28, Store , StoreInst )
HANDLE_MEMORY_INST(29, GetElementPtr, GetElementPtrInst)
LAST_MEMORY_INST(29)
// Cast operators ...
// NOTE: The order matters here because CastInst::isEliminableCastPair
// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
FIRST_CAST_INST(31)
HANDLE_CAST_INST(31, Trunc , TruncInst ) // Truncate integers
HANDLE_CAST_INST(32, ZExt , ZExtInst ) // Zero extend integers
HANDLE_CAST_INST(33, SExt , SExtInst ) // Sign extend integers
HANDLE_CAST_INST(34, FPToUI , FPToUIInst ) // floating point -> UInt
HANDLE_CAST_INST(35, FPToSI , FPToSIInst ) // floating point -> SInt
HANDLE_CAST_INST(36, UIToFP , UIToFPInst ) // UInt -> floating point
HANDLE_CAST_INST(37, SIToFP , SIToFPInst ) // SInt -> floating point
HANDLE_CAST_INST(38, FPTrunc , FPTruncInst ) // Truncate floating point
HANDLE_CAST_INST(39, FPExt , FPExtInst ) // Extend floating point
HANDLE_CAST_INST(40, PtrToInt, PtrToIntInst) // Pointer -> Integer
HANDLE_CAST_INST(41, IntToPtr, IntToPtrInst) // Integer -> Pointer
HANDLE_CAST_INST(42, BitCast , BitCastInst ) // Type cast
LAST_CAST_INST(42)
FIRST_CAST_INST(30)
HANDLE_CAST_INST(30, Trunc , TruncInst ) // Truncate integers
HANDLE_CAST_INST(31, ZExt , ZExtInst ) // Zero extend integers
HANDLE_CAST_INST(32, SExt , SExtInst ) // Sign extend integers
HANDLE_CAST_INST(33, FPToUI , FPToUIInst ) // floating point -> UInt
HANDLE_CAST_INST(34, FPToSI , FPToSIInst ) // floating point -> SInt
HANDLE_CAST_INST(35, UIToFP , UIToFPInst ) // UInt -> floating point
HANDLE_CAST_INST(36, SIToFP , SIToFPInst ) // SInt -> floating point
HANDLE_CAST_INST(37, FPTrunc , FPTruncInst ) // Truncate floating point
HANDLE_CAST_INST(38, FPExt , FPExtInst ) // Extend floating point
HANDLE_CAST_INST(39, PtrToInt, PtrToIntInst) // Pointer -> Integer
HANDLE_CAST_INST(40, IntToPtr, IntToPtrInst) // Integer -> Pointer
HANDLE_CAST_INST(41, BitCast , BitCastInst ) // Type cast
LAST_CAST_INST(41)
// Other operators...
FIRST_OTHER_INST(43)
HANDLE_OTHER_INST(43, ICmp , ICmpInst ) // Integer comparison instruction
HANDLE_OTHER_INST(44, FCmp , FCmpInst ) // Floating point comparison instr.
HANDLE_OTHER_INST(45, PHI , PHINode ) // PHI node instruction
HANDLE_OTHER_INST(46, Call , CallInst ) // Call a function
HANDLE_OTHER_INST(47, Select , SelectInst ) // select instruction
HANDLE_OTHER_INST(48, UserOp1, Instruction) // May be used internally in a pass
HANDLE_OTHER_INST(49, UserOp2, Instruction) // Internal to passes only
HANDLE_OTHER_INST(50, VAArg , VAArgInst ) // vaarg instruction
HANDLE_OTHER_INST(51, ExtractElement, ExtractElementInst)// extract from vector
HANDLE_OTHER_INST(52, InsertElement, InsertElementInst) // insert into vector
HANDLE_OTHER_INST(53, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
HANDLE_OTHER_INST(54, ExtractValue, ExtractValueInst)// extract from aggregate
HANDLE_OTHER_INST(55, InsertValue, InsertValueInst) // insert into aggregate
FIRST_OTHER_INST(42)
HANDLE_OTHER_INST(42, ICmp , ICmpInst ) // Integer comparison instruction
HANDLE_OTHER_INST(43, FCmp , FCmpInst ) // Floating point comparison instr.
HANDLE_OTHER_INST(44, PHI , PHINode ) // PHI node instruction
HANDLE_OTHER_INST(45, Call , CallInst ) // Call a function
HANDLE_OTHER_INST(46, Select , SelectInst ) // select instruction
HANDLE_OTHER_INST(47, UserOp1, Instruction) // May be used internally in a pass
HANDLE_OTHER_INST(48, UserOp2, Instruction) // Internal to passes only
HANDLE_OTHER_INST(49, VAArg , VAArgInst ) // vaarg instruction
HANDLE_OTHER_INST(50, ExtractElement, ExtractElementInst)// extract from vector
HANDLE_OTHER_INST(51, InsertElement, InsertElementInst) // insert into vector
HANDLE_OTHER_INST(52, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
HANDLE_OTHER_INST(53, ExtractValue, ExtractValueInst)// extract from aggregate
HANDLE_OTHER_INST(54, InsertValue, InsertValueInst) // insert into aggregate
LAST_OTHER_INST(55)
LAST_OTHER_INST(54)
#undef FIRST_TERM_INST
#undef HANDLE_TERM_INST

View File

@@ -37,8 +37,7 @@ class DominatorTree;
// AllocationInst Class
//===----------------------------------------------------------------------===//
/// AllocationInst - This class is the common base class of MallocInst and
/// AllocaInst.
/// AllocationInst - This class is the base class of AllocaInst.
///
class AllocationInst : public UnaryInstruction {
protected:
@@ -85,56 +84,7 @@ public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const AllocationInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Alloca ||
I->getOpcode() == Instruction::Malloc;
}
static inline bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
//===----------------------------------------------------------------------===//
// MallocInst Class
//===----------------------------------------------------------------------===//
/// MallocInst - an instruction to allocated memory on the heap
///
class MallocInst : public AllocationInst {
public:
explicit MallocInst(const Type *Ty, Value *ArraySize = 0,
const Twine &NameStr = "",
Instruction *InsertBefore = 0)
: AllocationInst(Ty, ArraySize, Malloc,
0, NameStr, InsertBefore) {}
MallocInst(const Type *Ty, Value *ArraySize,
const Twine &NameStr, BasicBlock *InsertAtEnd)
: AllocationInst(Ty, ArraySize, Malloc, 0, NameStr, InsertAtEnd) {}
MallocInst(const Type *Ty, const Twine &NameStr,
Instruction *InsertBefore = 0)
: AllocationInst(Ty, 0, Malloc, 0, NameStr, InsertBefore) {}
MallocInst(const Type *Ty, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: AllocationInst(Ty, 0, Malloc, 0, NameStr, InsertAtEnd) {}
MallocInst(const Type *Ty, Value *ArraySize,
unsigned Align, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: AllocationInst(Ty, ArraySize, Malloc,
Align, NameStr, InsertAtEnd) {}
MallocInst(const Type *Ty, Value *ArraySize,
unsigned Align, const Twine &NameStr = "",
Instruction *InsertBefore = 0)
: AllocationInst(Ty, ArraySize,
Malloc, Align, NameStr, InsertBefore) {}
virtual MallocInst *clone() const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MallocInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Malloc);
return I->getOpcode() == Instruction::Alloca;
}
static inline bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));

View File

@@ -429,10 +429,6 @@ public:
// Instruction creation methods: Memory Instructions
//===--------------------------------------------------------------------===//
MallocInst *CreateMalloc(const Type *Ty, Value *ArraySize = 0,
const Twine &Name = "") {
return Insert(new MallocInst(Ty, ArraySize), Name);
}
AllocaInst *CreateAlloca(const Type *Ty, Value *ArraySize = 0,
const Twine &Name = "") {
return Insert(new AllocaInst(Ty, ArraySize), Name);

View File

@@ -46,17 +46,17 @@ namespace llvm {
/// /// Declare the class. Note that we derive from InstVisitor instantiated
/// /// with _our new subclasses_ type.
/// ///
/// struct CountMallocVisitor : public InstVisitor<CountMallocVisitor> {
/// struct CountAllocaVisitor : public InstVisitor<CountAllocaVisitor> {
/// unsigned Count;
/// CountMallocVisitor() : Count(0) {}
/// CountAllocaVisitor() : Count(0) {}
///
/// void visitMallocInst(MallocInst &MI) { ++Count; }
/// void visitAllocaInst(AllocaInst &AI) { ++Count; }
/// };
///
/// And this class would be used like this:
/// CountMallocVistor CMV;
/// CMV.visit(function);
/// NumMallocs = CMV.Count;
/// CountAllocaVisitor CAV;
/// CAV.visit(function);
/// NumAllocas = CAV.Count;
///
/// The defined has 'visit' methods for Instruction, and also for BasicBlock,
/// Function, and Module, which recursively process all contained instructions.
@@ -165,7 +165,6 @@ public:
RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);}
RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);}
RetTy visitFCmpInst(FCmpInst &I) { DELEGATE(CmpInst);}
RetTy visitMallocInst(MallocInst &I) { DELEGATE(AllocationInst);}
RetTy visitAllocaInst(AllocaInst &I) { DELEGATE(AllocationInst);}
RetTy visitFreeInst(FreeInst &I) { DELEGATE(Instruction); }
RetTy visitLoadInst(LoadInst &I) { DELEGATE(Instruction); }

View File

@@ -225,12 +225,11 @@ extern const PassInfo *const LoopSimplifyID;
//===----------------------------------------------------------------------===//
//
// LowerAllocations - Turn malloc and free instructions into @malloc and @free
// calls.
// LowerAllocations - Turn free instructions into @free calls.
//
// AU.addRequiredID(LowerAllocationsID);
//
Pass *createLowerAllocationsPass(bool LowerMallocArgToInteger = false);
Pass *createLowerAllocationsPass();
extern const PassInfo *const LowerAllocationsID;
//===----------------------------------------------------------------------===//

View File

@@ -303,7 +303,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
// Check the value being stored.
Value *Ptr = SI->getOperand(0)->getUnderlyingObject();
if (isa<MallocInst>(Ptr) || isMalloc(Ptr)) {
if (isMalloc(Ptr)) {
// Okay, easy case.
} else if (CallInst *CI = dyn_cast<CallInst>(Ptr)) {
Function *F = CI->getCalledFunction();
@@ -439,8 +439,7 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
if (cast<StoreInst>(*II).isVolatile())
// Treat volatile stores as reading memory somewhere.
FunctionEffect |= Ref;
} else if (isa<MallocInst>(*II) || isa<FreeInst>(*II) ||
isMalloc(&cast<Instruction>(*II))) {
} else if (isMalloc(&cast<Instruction>(*II)) || isa<FreeInst>(*II)) {
FunctionEffect |= ModRef;
}

View File

@@ -131,7 +131,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
}
// These, too, are calls.
if (isa<MallocInst>(II) || isa<FreeInst>(II))
if (isa<FreeInst>(II))
NumInsts += InlineConstants::CallPenalty;
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {

View File

@@ -76,11 +76,11 @@ FunctionPass *llvm::createInstCountPass() { return new InstCount(); }
bool InstCount::runOnFunction(Function &F) {
unsigned StartMemInsts =
NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst +
NumInvokeInst + NumAllocaInst + NumMallocInst + NumFreeInst;
NumInvokeInst + NumAllocaInst + NumFreeInst;
visit(F);
unsigned EndMemInsts =
NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst +
NumInvokeInst + NumAllocaInst + NumMallocInst + NumFreeInst;
NumInvokeInst + NumAllocaInst + NumFreeInst;
TotalMemInst += EndMemInsts-StartMemInsts;
return false;
}

View File

@@ -469,26 +469,11 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
break;
}
case Instruction::Alloca:
case Instruction::Malloc: {
case Instruction::Alloca: {
AllocationInst *AI = cast<AllocationInst>(V);
unsigned Align = AI->getAlignment();
if (Align == 0 && TD) {
if (isa<AllocaInst>(AI))
if (Align == 0 && TD)
Align = TD->getABITypeAlignment(AI->getType()->getElementType());
else if (isa<MallocInst>(AI)) {
// Malloc returns maximally aligned memory.
Align = TD->getABITypeAlignment(AI->getType()->getElementType());
Align =
std::max(Align,
(unsigned)TD->getABITypeAlignment(
Type::getDoubleTy(V->getContext())));
Align =
std::max(Align,
(unsigned)TD->getABITypeAlignment(
Type::getInt64Ty(V->getContext())));
}
}
if (Align > 0)
KnownZero = Mask & APInt::getLowBitsSet(BitWidth,

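(Example of the surviving logic: an alloca with 16-byte ABI alignment yields a pointer whose low four bits are zero, so those bits can be set in KnownZero.)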
View File

@@ -3315,7 +3315,7 @@ bool LLParser::ParseShuffleVector(Instruction *&Inst, PerFunctionState &PFS) {
}
/// ParsePHI
/// ::= 'phi' Type '[' Value ',' Value ']' (',' '[' Value ',' Valueß ']')*
/// ::= 'phi' Type '[' Value ',' Value ']' (',' '[' Value ',' Value ']')*
bool LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) {
PATypeHolder Ty(Type::getVoidTy(Context));
Value *Op0, *Op1;

View File

@@ -1054,13 +1054,6 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
Vals.push_back(VE.getValueID(I.getOperand(i)));
break;
case Instruction::Malloc:
Code = bitc::FUNC_CODE_INST_MALLOC;
Vals.push_back(VE.getTypeID(I.getType()));
Vals.push_back(VE.getValueID(I.getOperand(0))); // size.
Vals.push_back(Log2_32(cast<MallocInst>(I).getAlignment())+1);
break;
case Instruction::Free:
Code = bitc::FUNC_CODE_INST_FREE;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);

View File

@@ -5485,48 +5485,6 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
DAG.setRoot(Chain);
}
void SelectionDAGLowering::visitMalloc(MallocInst &I) {
SDValue Src = getValue(I.getOperand(0));
// Scale up by the type size in the original i32 type width. Various
// mid-level optimizers may make assumptions about demanded bits etc from the
// i32-ness of the optimizer: we do not want to promote to i64 and then
// multiply on 64-bit targets.
// FIXME: Malloc inst should go away: PR715.
uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
if (ElementSize != 1) {
// Src is always 32-bits, make sure the constant fits.
assert(Src.getValueType() == MVT::i32);
ElementSize = (uint32_t)ElementSize;
Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
Src, DAG.getConstant(ElementSize, Src.getValueType()));
}
EVT IntPtr = TLI.getPointerTy();
Src = DAG.getZExtOrTrunc(Src, getCurDebugLoc(), IntPtr);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Src;
Entry.Ty = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
Args.push_back(Entry);
bool isTailCall = PerformTailCallOpt &&
isInTailCallPosition(&I, Attribute::None, TLI);
std::pair<SDValue,SDValue> Result =
TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
0, CallingConv::C, isTailCall,
/*isReturnValueUsed=*/true,
DAG.getExternalSymbol("malloc", IntPtr),
Args, DAG, getCurDebugLoc());
if (Result.first.getNode())
setValue(&I, Result.first); // Pointers always fit in registers
if (Result.second.getNode())
DAG.setRoot(Result.second);
}
void SelectionDAGLowering::visitFree(FreeInst &I) {
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;

View File

@@ -60,7 +60,6 @@ class MachineFunction;
class MachineInstr;
class MachineModuleInfo;
class MachineRegisterInfo;
class MallocInst;
class PHINode;
class PtrToIntInst;
class ReturnInst;
@@ -529,7 +528,6 @@ private:
void visitGetElementPtr(User &I);
void visitSelect(User &I);
void visitMalloc(MallocInst &I);
void visitFree(FreeInst &I);
void visitAlloca(AllocaInst &I);
void visitLoad(LoadInst &I);

View File

@@ -302,7 +302,6 @@ namespace {
void visitInlineAsm(CallInst &I);
bool visitBuiltinCall(CallInst &I, Intrinsic::ID ID, bool &WroteCallee);
void visitMallocInst(MallocInst &I);
void visitAllocaInst(AllocaInst &I);
void visitFreeInst (FreeInst &I);
void visitLoadInst (LoadInst &I);
@@ -3405,10 +3404,6 @@ void CWriter::visitInlineAsm(CallInst &CI) {
Out << ")";
}
void CWriter::visitMallocInst(MallocInst &I) {
llvm_unreachable("lowerallocations pass didn't work!");
}
void CWriter::visitAllocaInst(AllocaInst &I) {
Out << '(';
printType(Out, I.getType());
@@ -3690,7 +3685,7 @@ bool CTargetMachine::addPassesToEmitWholeFile(PassManager &PM,
if (FileType != TargetMachine::AssemblyFile) return true;
PM.add(createGCLoweringPass());
PM.add(createLowerAllocationsPass(true));
PM.add(createLowerAllocationsPass());
PM.add(createLowerInvokePass());
PM.add(createCFGSimplificationPass()); // clean up after lower invoke.
PM.add(new CBackendNameAllUsedStructsAndMergeFunctions());

View File

@@ -1258,20 +1258,6 @@ namespace {
Out << "\");";
break;
}
case Instruction::Malloc: {
const MallocInst* mallocI = cast<MallocInst>(I);
Out << "MallocInst* " << iName << " = new MallocInst("
<< getCppName(mallocI->getAllocatedType()) << ", ";
if (mallocI->isArrayAllocation())
Out << opNames[0] << ", " ;
Out << "\"";
printEscapedString(mallocI->getName());
Out << "\", " << bbname << ");";
if (mallocI->getAlignment())
nl(Out) << iName << "->setAlignment("
<< mallocI->getAlignment() << ");";
break;
}
case Instruction::Free: {
Out << "FreeInst* " << iName << " = new FreeInst("
<< getCppName(I->getOperand(0)) << ", " << bbname << ");";

View File

@@ -1191,9 +1191,6 @@ void MSILWriter::printInstruction(const Instruction* Inst) {
case Instruction::Alloca:
printAllocaInstruction(cast<AllocaInst>(Inst));
break;
case Instruction::Malloc:
llvm_unreachable("LowerAllocationsPass used");
break;
case Instruction::Free:
llvm_unreachable("LowerAllocationsPass used");
break;
@@ -1702,7 +1699,7 @@ bool MSILTarget::addPassesToEmitWholeFile(PassManager &PM,
if (FileType != TargetMachine::AssemblyFile) return true;
MSILWriter* Writer = new MSILWriter(o);
PM.add(createGCLoweringPass());
PM.add(createLowerAllocationsPass(true));
PM.add(createLowerAllocationsPass());
// FIXME: Handle switch trougth native IL instruction "switch"
PM.add(createLowerSwitchPass());
PM.add(createCFGSimplificationPass());

View File

@@ -153,7 +153,7 @@ bool FunctionAttrs::AddReadAttrs(const std::vector<CallGraphNode *> &SCC) {
// Writes memory. Just give up.
return false;
if (isa<MallocInst>(I))
if (isMalloc(I))
// malloc claims not to write memory! PR3754.
return false;
@@ -267,7 +267,6 @@ bool FunctionAttrs::IsFunctionMallocLike(Function *F,
// Check whether the pointer came from an allocation.
case Instruction::Alloca:
case Instruction::Malloc:
break;
case Instruction::Call:
if (isMalloc(RVI))

View File

@@ -816,130 +816,6 @@ static void ConstantPropUsersOf(Value *V, LLVMContext &Context) {
}
}
/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc. Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc. Instead, turn the
/// malloc into a global, and any loads of GV as uses of the new global.
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
MallocInst *MI,
LLVMContext &Context) {
DEBUG(errs() << "PROMOTING MALLOC GLOBAL: " << *GV << " MALLOC = " << *MI);
ConstantInt *NElements = cast<ConstantInt>(MI->getArraySize());
if (NElements->getZExtValue() != 1) {
// If we have an array allocation, transform it to a single element
// allocation to make the code below simpler.
Type *NewTy = ArrayType::get(MI->getAllocatedType(),
NElements->getZExtValue());
MallocInst *NewMI =
new MallocInst(NewTy, Constant::getNullValue(Type::getInt32Ty(Context)),
MI->getAlignment(), MI->getName(), MI);
Value* Indices[2];
Indices[0] = Indices[1] = Constant::getNullValue(Type::getInt32Ty(Context));
Value *NewGEP = GetElementPtrInst::Create(NewMI, Indices, Indices + 2,
NewMI->getName()+".el0", MI);
MI->replaceAllUsesWith(NewGEP);
MI->eraseFromParent();
MI = NewMI;
}
// Create the new global variable. The contents of the malloc'd memory is
// undefined, so initialize with an undef value.
// FIXME: This new global should have the alignment returned by malloc. Code
// could depend on malloc returning large alignment (on the mac, 16 bytes) but
// this would only guarantee some lower alignment.
Constant *Init = UndefValue::get(MI->getAllocatedType());
GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
MI->getAllocatedType(), false,
GlobalValue::InternalLinkage, Init,
GV->getName()+".body",
GV,
GV->isThreadLocal());
// Anything that used the malloc now uses the global directly.
MI->replaceAllUsesWith(NewGV);
Constant *RepValue = NewGV;
if (NewGV->getType() != GV->getType()->getElementType())
RepValue = ConstantExpr::getBitCast(RepValue,
GV->getType()->getElementType());
// If there is a comparison against null, we will insert a global bool to
// keep track of whether the global was initialized yet or not.
GlobalVariable *InitBool =
new GlobalVariable(Context, Type::getInt1Ty(Context), false,
GlobalValue::InternalLinkage,
ConstantInt::getFalse(Context), GV->getName()+".init",
GV->isThreadLocal());
bool InitBoolUsed = false;
// Loop over all uses of GV, processing them in turn.
std::vector<StoreInst*> Stores;
while (!GV->use_empty())
if (LoadInst *LI = dyn_cast<LoadInst>(GV->use_back())) {
while (!LI->use_empty()) {
Use &LoadUse = LI->use_begin().getUse();
if (!isa<ICmpInst>(LoadUse.getUser()))
LoadUse = RepValue;
else {
ICmpInst *CI = cast<ICmpInst>(LoadUse.getUser());
// Replace the cmp X, 0 with a use of the bool value.
Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", CI);
InitBoolUsed = true;
switch (CI->getPredicate()) {
default: llvm_unreachable("Unknown ICmp Predicate!");
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_SLT:
LV = ConstantInt::getFalse(Context); // X < null -> always false
break;
case ICmpInst::ICMP_ULE:
case ICmpInst::ICMP_SLE:
case ICmpInst::ICMP_EQ:
LV = BinaryOperator::CreateNot(LV, "notinit", CI);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGE:
case ICmpInst::ICMP_SGE:
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_SGT:
break; // no change.
}
CI->replaceAllUsesWith(LV);
CI->eraseFromParent();
}
}
LI->eraseFromParent();
} else {
StoreInst *SI = cast<StoreInst>(GV->use_back());
// The global is initialized when the store to it occurs.
new StoreInst(ConstantInt::getTrue(Context), InitBool, SI);
SI->eraseFromParent();
}
// If the initialization boolean was used, insert it, otherwise delete it.
if (!InitBoolUsed) {
while (!InitBool->use_empty()) // Delete initializations
cast<Instruction>(InitBool->use_back())->eraseFromParent();
delete InitBool;
} else
GV->getParent()->getGlobalList().insert(GV, InitBool);
// Now the GV is dead, nuke it and the malloc.
GV->eraseFromParent();
MI->eraseFromParent();
// To further other optimizations, loop over all users of NewGV and try to
// constant prop them. This will promote GEP instructions with constant
// indices into GEP constant-exprs, which will allow global-opt to hack on it.
ConstantPropUsersOf(NewGV, Context);
if (RepValue != NewGV)
ConstantPropUsersOf(RepValue, Context);
return NewGV;
}
/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc. Because it is always the result of the specified
@@ -1397,185 +1273,6 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
}
}
/// PerformHeapAllocSRoA - MI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,
LLVMContext &Context){
DEBUG(errs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *MI);
const StructType *STy = cast<StructType>(MI->getAllocatedType());
// There is guaranteed to be at least one use of the malloc (storing
// it into GV). If there are other uses, change them to be uses of
// the global to simplify later code. This also deletes the store
// into GV.
ReplaceUsesOfMallocWithGlobal(MI, GV);
// Okay, at this point, there are no users of the malloc. Insert N
// new mallocs at the same place as MI, and N globals.
std::vector<Value*> FieldGlobals;
std::vector<MallocInst*> FieldMallocs;
for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
const Type *FieldTy = STy->getElementType(FieldNo);
const Type *PFieldTy = PointerType::getUnqual(FieldTy);
GlobalVariable *NGV =
new GlobalVariable(*GV->getParent(),
PFieldTy, false, GlobalValue::InternalLinkage,
Constant::getNullValue(PFieldTy),
GV->getName() + ".f" + Twine(FieldNo), GV,
GV->isThreadLocal());
FieldGlobals.push_back(NGV);
MallocInst *NMI = new MallocInst(FieldTy, MI->getArraySize(),
MI->getName() + ".f" + Twine(FieldNo), MI);
FieldMallocs.push_back(NMI);
new StoreInst(NMI, NGV, MI);
}
// The tricky aspect of this transformation is handling the case when malloc
// fails. In the original code, malloc failing would set the result pointer
// of malloc to null. In this case, some mallocs could succeed and others
// could fail. As such, we emit code that looks like this:
// F0 = malloc(field0)
// F1 = malloc(field1)
// F2 = malloc(field2)
// if (F0 == 0 || F1 == 0 || F2 == 0) {
// if (F0) { free(F0); F0 = 0; }
// if (F1) { free(F1); F1 = 0; }
// if (F2) { free(F2); F2 = 0; }
// }
Value *RunningOr = 0;
for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
Value *Cond = new ICmpInst(MI, ICmpInst::ICMP_EQ, FieldMallocs[i],
Constant::getNullValue(FieldMallocs[i]->getType()),
"isnull");
if (!RunningOr)
RunningOr = Cond; // First seteq
else
RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", MI);
}
// Split the basic block at the old malloc.
BasicBlock *OrigBB = MI->getParent();
BasicBlock *ContBB = OrigBB->splitBasicBlock(MI, "malloc_cont");
// Create the block to check the first condition. Put all these blocks at the
// end of the function as they are unlikely to be executed.
BasicBlock *NullPtrBlock = BasicBlock::Create(Context, "malloc_ret_null",
OrigBB->getParent());
// Remove the uncond branch from OrigBB to ContBB, turning it into a cond
// branch on RunningOr.
OrigBB->getTerminator()->eraseFromParent();
BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);
// Within the NullPtrBlock, we need to emit a comparison and branch for each
// pointer, because some may be null while others are not.
for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
Constant::getNullValue(GVVal->getType()),
"tmp");
BasicBlock *FreeBlock = BasicBlock::Create(Context, "free_it",
OrigBB->getParent());
BasicBlock *NextBlock = BasicBlock::Create(Context, "next",
OrigBB->getParent());
BranchInst::Create(FreeBlock, NextBlock, Cmp, NullPtrBlock);
// Fill in FreeBlock.
new FreeInst(GVVal, FreeBlock);
new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
FreeBlock);
BranchInst::Create(NextBlock, FreeBlock);
NullPtrBlock = NextBlock;
}
BranchInst::Create(ContBB, NullPtrBlock);
// MI is no longer needed, remove it.
MI->eraseFromParent();
/// InsertedScalarizedLoads - As we process loads, if we can't immediately
/// update all uses of the load, keep track of what scalarized loads are
/// inserted for a given load.
DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
InsertedScalarizedValues[GV] = FieldGlobals;
std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;
// Okay, the malloc site is completely handled. All of the uses of GV are now
// loads, and all uses of those loads are simple. Rewrite them to use loads
// of the per-field globals instead.
for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite,
Context);
continue;
}
// Must be a store of null.
StoreInst *SI = cast<StoreInst>(User);
assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
"Unexpected heap-sra user!");
// Insert a store of null into each global.
for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
Constant *Null = Constant::getNullValue(PT->getElementType());
new StoreInst(Null, FieldGlobals[i], SI);
}
// Erase the original store.
SI->eraseFromParent();
}
// While we have PHIs that are interesting to rewrite, do it.
while (!PHIsToRewrite.empty()) {
PHINode *PN = PHIsToRewrite.back().first;
unsigned FieldNo = PHIsToRewrite.back().second;
PHIsToRewrite.pop_back();
PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");
// Add all the incoming values. This can materialize more phis.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *InVal = PN->getIncomingValue(i);
InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
PHIsToRewrite, Context);
FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
}
}
// Drop all inter-phi links and any loads that made it this far.
for (DenseMap<Value*, std::vector<Value*> >::iterator
I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
I != E; ++I) {
if (PHINode *PN = dyn_cast<PHINode>(I->first))
PN->dropAllReferences();
else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
LI->dropAllReferences();
}
// Delete all the phis and loads now that inter-references are dead.
for (DenseMap<Value*, std::vector<Value*> >::iterator
I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
I != E; ++I) {
if (PHINode *PN = dyn_cast<PHINode>(I->first))
PN->eraseFromParent();
else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
LI->eraseFromParent();
}
// The old global is now dead, remove it.
GV->eraseFromParent();
++NumHeapSRA;
return cast<GlobalVariable>(FieldGlobals[0]);
}
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
@@ -1763,95 +1460,6 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
return cast<GlobalVariable>(FieldGlobals[0]);
}
/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored it that is a malloc or
/// cast of malloc.
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
MallocInst *MI,
Module::global_iterator &GVI,
TargetData *TD,
LLVMContext &Context) {
// If this is a malloc of an abstract type, don't touch it.
if (!MI->getAllocatedType()->isSized())
return false;
// We can't optimize this global unless all uses of it are *known* to be
// of the malloc value, not of the null initializer value (consider a use
// that compares the global's value against zero to see if the malloc has
// been reached). To do this, we check to see if all uses of the global
// would trap if the global were null: this proves that they must all
// happen after the malloc.
if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
return false;
// We can't optimize this if the malloc itself is used in a complex way,
// for example, being stored into multiple globals. This allows the
// malloc to be stored into the specified global, loaded setcc'd, and
// GEP'd. These are all things we could transform to using the global
// for.
{
SmallPtrSet<PHINode*, 8> PHIs;
if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(MI, GV, PHIs))
return false;
}
// If we have a global that is only initialized with a fixed size malloc,
// transform the program to use global memory instead of malloc'd memory.
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
if (ConstantInt *NElements = dyn_cast<ConstantInt>(MI->getArraySize())) {
// Restrict this transformation to only working on small allocations
// (2048 bytes currently), as we don't want to introduce a 16M global or
// something.
if (TD &&
NElements->getZExtValue()*
TD->getTypeAllocSize(MI->getAllocatedType()) < 2048) {
GVI = OptimizeGlobalAddressOfMalloc(GV, MI, Context);
return true;
}
}
// If the allocation is an array of structures, consider transforming this
// into multiple malloc'd arrays, one for each field. This is basically
// SRoA for malloc'd memory.
const Type *AllocTy = MI->getAllocatedType();
// If this is an allocation of a fixed size array of structs, analyze as a
// variable size array. malloc [100 x struct],1 -> malloc struct, 100
if (!MI->isArrayAllocation())
if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
AllocTy = AT->getElementType();
if (const StructType *AllocSTy = dyn_cast<StructType>(AllocTy)) {
// This the structure has an unreasonable number of fields, leave it
// alone.
if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, MI)) {
// If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
if (const ArrayType *AT = dyn_cast<ArrayType>(MI->getAllocatedType())) {
MallocInst *NewMI =
new MallocInst(AllocSTy,
ConstantInt::get(Type::getInt32Ty(Context),
AT->getNumElements()),
"", MI);
NewMI->takeName(MI);
Value *Cast = new BitCastInst(NewMI, MI->getType(), "tmp", MI);
MI->replaceAllUsesWith(Cast);
MI->eraseFromParent();
MI = NewMI;
}
GVI = PerformHeapAllocSRoA(GV, MI, Context);
return true;
}
}
return false;
}
/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored it that is a malloc or
/// cast of malloc.
@@ -1970,9 +1578,6 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
// Optimize away any trapping uses of the loaded value.
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, Context))
return true;
} else if (MallocInst *MI = dyn_cast<MallocInst>(StoredOnceVal)) {
if (TryToOptimizeStoreOfMallocToGlobal(GV, MI, GVI, TD, Context))
return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
if (getMallocAllocatedType(CI)) {
BitCastInst* BCI = NULL;

View File

@@ -6300,16 +6300,6 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
break;
}
case Instruction::Malloc:
// If we have (malloc != null), and if the malloc has a single use, we
// can assume it is successful and remove the malloc.
if (LHSI->hasOneUse() && isa<ConstantPointerNull>(RHSC)) {
Worklist.Add(LHSI);
return ReplaceInstUsesWith(I,
ConstantInt::get(Type::getInt1Ty(*Context),
!I.isTrueWhenEqual()));
}
break;
case Instruction::Call:
// If we have (malloc != null), and if the malloc has a single use, we
// can assume it is successful and remove the malloc.
@@ -7809,11 +7799,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
}
AllocationInst *New;
if (isa<MallocInst>(AI))
New = AllocaBuilder.CreateMalloc(CastElTy, Amt);
else
New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
AllocationInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
New->setAlignment(AI.getAlignment());
New->takeName(&AI);
@@ -11213,15 +11199,8 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
const Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
AllocationInst *New = 0;
// Create and insert the replacement instruction...
if (isa<MallocInst>(AI))
New = Builder->CreateMalloc(NewTy, 0, AI.getName());
else {
assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
New = Builder->CreateAlloca(NewTy, 0, AI.getName());
}
AllocationInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
New->setAlignment(AI.getAlignment());
// Scan to the end of the allocation instructions, to skip over a block of
@@ -11294,12 +11273,6 @@ Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
}
}
// Change free(malloc) into nothing, if the malloc has a single use.
if (MallocInst *MI = dyn_cast<MallocInst>(Op))
if (MI->hasOneUse()) {
EraseInstFromFunction(FI);
return EraseInstFromFunction(*MI);
}
if (isMalloc(Op)) {
if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
if (Op->hasOneUse() && CI->hasOneUse()) {

View File

@@ -122,7 +122,7 @@ static bool isUnmovableInstruction(Instruction *I) {
if (I->getOpcode() == Instruction::PHI ||
I->getOpcode() == Instruction::Alloca ||
I->getOpcode() == Instruction::Load ||
I->getOpcode() == Instruction::Malloc || isMalloc(I) ||
isMalloc(I) ||
I->getOpcode() == Instruction::Invoke ||
(I->getOpcode() == Instruction::Call &&
!isa<DbgInfoIntrinsic>(I)) ||

View File

@@ -1,4 +1,4 @@
//===- LowerAllocations.cpp - Reduce malloc & free insts to calls ---------===//
//===- LowerAllocations.cpp - Reduce free insts to calls ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -29,18 +29,15 @@ using namespace llvm;
STATISTIC(NumLowered, "Number of allocations lowered");
namespace {
/// LowerAllocations - Turn malloc and free instructions into @malloc and
/// @free calls.
/// LowerAllocations - Turn free instructions into @free calls.
///
class VISIBILITY_HIDDEN LowerAllocations : public BasicBlockPass {
Constant *FreeFunc; // Functions in the module we are processing
// Initialized by doInitialization
bool LowerMallocArgToInteger;
public:
static char ID; // Pass ID, replacement for typeid
explicit LowerAllocations(bool LowerToInt = false)
: BasicBlockPass(&ID), FreeFunc(0),
LowerMallocArgToInteger(LowerToInt) {}
explicit LowerAllocations()
: BasicBlockPass(&ID), FreeFunc(0) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetData>();
@@ -54,7 +51,7 @@ namespace {
}
/// doPassInitialization - For the lower allocations pass, this ensures that
/// a module contains a declaration for a malloc and a free function.
/// a module contains a declaration for a free function.
///
bool doInitialization(Module &M);
@@ -76,13 +73,13 @@ X("lowerallocs", "Lower allocations from instructions to calls");
// Publically exposed interface to pass...
const PassInfo *const llvm::LowerAllocationsID = &X;
// createLowerAllocationsPass - Interface to this file...
Pass *llvm::createLowerAllocationsPass(bool LowerMallocArgToInteger) {
return new LowerAllocations(LowerMallocArgToInteger);
Pass *llvm::createLowerAllocationsPass() {
return new LowerAllocations();
}
// doInitialization - For the lower allocations pass, this ensures that a
// module contains a declaration for a malloc and a free function.
// module contains a declaration for a free function.
//
// This function is always successful.
//
@@ -102,25 +99,9 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
BasicBlock::InstListType &BBIL = BB.getInstList();
const TargetData &TD = getAnalysis<TargetData>();
const Type *IntPtrTy = TD.getIntPtrType(BB.getContext());
// Loop over all of the instructions, looking for malloc or free instructions
// Loop over all of the instructions, looking for free instructions
for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
if (MallocInst *MI = dyn_cast<MallocInst>(I)) {
Value *ArraySize = MI->getOperand(0);
if (ArraySize->getType() != IntPtrTy)
ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy,
false /*ZExt*/, "", I);
Value *MCast = CallInst::CreateMalloc(I, IntPtrTy,
MI->getAllocatedType(), ArraySize);
// Replace all uses of the old malloc inst with the cast inst
MI->replaceAllUsesWith(MCast);
I = --BBIL.erase(I); // remove and delete the malloc instr...
Changed = true;
++NumLowered;
} else if (FreeInst *FI = dyn_cast<FreeInst>(I)) {
if (FreeInst *FI = dyn_cast<FreeInst>(I)) {
Value *PtrCast =
new BitCastInst(FI->getOperand(0),
Type::getInt8PtrTy(BB.getContext()), "", I);

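The hunk above is truncated after the bitcast; here is a sketch of how the free lowering plausibly continues, reusing the cached FreeFunc and the erase idiom from the deleted malloc path (an assumption, not the author's verbatim code):

    // Call @free on the bitcast i8* pointer, then delete the FreeInst.
    CallInst::Create(FreeFunc, PtrCast, "", I);
    I = --BBIL.erase(I);   // remove and delete the free instr (assumed)
    Changed = true;
    ++NumLowered;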
View File

@@ -127,7 +127,6 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case Xor: return "xor";
// Memory instructions...
case Malloc: return "malloc";
case Free: return "free";
case Alloca: return "alloca";
case Load: return "load";
@@ -442,7 +441,6 @@ bool Instruction::isSafeToSpeculativelyExecute() const {
// overflow-checking arithmetic, etc.)
case VAArg:
case Alloca:
case Malloc:
case Invoke:
case PHI:
case Store:

View File

@@ -847,7 +847,7 @@ static Value *getAISize(LLVMContext &Context, Value *Amt) {
assert(!isa<BasicBlock>(Amt) &&
"Passed basic block into allocation size parameter! Use other ctor");
assert(Amt->getType() == Type::getInt32Ty(Context) &&
"Malloc/Allocation array size is not a 32-bit integer!");
"Allocation array size is not a 32-bit integer!");
}
return Amt;
}
@@ -3083,18 +3083,6 @@ InsertValueInst *InsertValueInst::clone() const {
return New;
}
MallocInst *MallocInst::clone() const {
MallocInst *New = new MallocInst(getAllocatedType(),
(Value*)getOperand(0),
getAlignment());
New->SubclassOptionalData = SubclassOptionalData;
if (hasMetadata()) {
LLVMContext &Context = getContext();
Context.pImpl->TheMetadata.ValueIsCloned(this, New);
}
return New;
}
AllocaInst *AllocaInst::clone() const {
AllocaInst *New = new AllocaInst(getAllocatedType(),
(Value*)getOperand(0),