
Remove trailing whitespace

llvm-svn: 21427
Author: Misha Brukman, 2005-04-21 23:48:37 +00:00
Commit: 53e199440e (parent bf3f6181fd)
123 changed files with 1863 additions and 1863 deletions

View File

@ -1,10 +1,10 @@
//===- ExprTypeConvert.cpp - Code to change an LLVM Expr Type -------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the part of level raising that checks to see if it is
@ -69,7 +69,7 @@ static bool MallocConvertibleToType(MallocInst *MI, const Type *Ty,
// here...
uint64_t Offset = OffsetVal * OldTypeSize;
uint64_t Scale = ScaleVal * OldTypeSize;
// In order to be successful, both the scale and the offset must be a multiple
// of the requested data type's size.
//
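[Editor's note: the divisibility requirement stated in the comment above is the whole test being made here. A minimal standalone sketch of it, with illustrative names that are not part of this commit:]

#include <cstdint>

// Sketch only: a raised malloc can be retyped when both the scaled allocation
// size and the byte offset are exact multiples of the new element type's size.
static bool SizesConvertible(uint64_t Scale, uint64_t Offset,
                             uint64_t NewTypeSize) {
  return NewTypeSize != 0 && Scale % NewTypeSize == 0 &&
         Offset % NewTypeSize == 0;
}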
@ -145,7 +145,7 @@ bool llvm::ExpressionConvertibleToType(Value *V, const Type *Ty,
// Expression type must be holdable in a register.
if (!Ty->isFirstClassType())
return false;
ValueTypeCache::iterator CTMI = CTMap.find(V);
if (CTMI != CTMap.end()) return CTMI->second == Ty;
@ -154,7 +154,7 @@ bool llvm::ExpressionConvertibleToType(Value *V, const Type *Ty,
//
if (isa<Constant>(V) && !isa<GlobalValue>(V))
return true;
CTMap[V] = Ty;
if (V->getType() == Ty) return true; // Expression already correct type!
@ -170,7 +170,7 @@ bool llvm::ExpressionConvertibleToType(Value *V, const Type *Ty,
// We also do not allow conversion of a cast that casts from a ptr to array
// of X to a *X. For example: cast [4 x %List *] * %val to %List * *
//
if (const PointerType *SPT =
if (const PointerType *SPT =
dyn_cast<PointerType>(I->getOperand(0)->getType()))
if (const PointerType *DPT = dyn_cast<PointerType>(I->getType()))
if (const ArrayType *AT = dyn_cast<ArrayType>(SPT->getElementType()))
@ -200,7 +200,7 @@ bool llvm::ExpressionConvertibleToType(Value *V, const Type *Ty,
if (!ExpressionConvertibleToType(LI->getPointerOperand(),
PointerType::get(Ty), CTMap, TD))
return false;
break;
break;
}
case Instruction::PHI: {
PHINode *PN = cast<PHINode>(I);
@ -227,7 +227,7 @@ bool llvm::ExpressionConvertibleToType(Value *V, const Type *Ty,
// %t2 = cast %List * * %t1 to %List *
// into
// %t2 = getelementptr %Hosp * %hosp, ubyte 4 ; <%List *>
//
//
GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
const PointerType *PTy = dyn_cast<PointerType>(Ty);
if (!PTy) return false; // GEP must always return a pointer...
@ -283,9 +283,9 @@ bool llvm::ExpressionConvertibleToType(Value *V, const Type *Ty,
// and want to convert it into something like this:
// getelemenptr [[int] *] * %reg115, long %reg138 ; [int]**
//
if (GEP->getNumOperands() == 2 &&
if (GEP->getNumOperands() == 2 &&
PTy->getElementType()->isSized() &&
TD.getTypeSize(PTy->getElementType()) ==
TD.getTypeSize(PTy->getElementType()) ==
TD.getTypeSize(GEP->getType()->getElementType())) {
const PointerType *NewSrcTy = PointerType::get(PVTy);
if (!ExpressionConvertibleToType(I->getOperand(0), NewSrcTy, CTMap, TD))
@ -329,7 +329,7 @@ bool llvm::ExpressionConvertibleToType(Value *V, const Type *Ty,
}
Value *llvm::ConvertExpressionToType(Value *V, const Type *Ty,
Value *llvm::ConvertExpressionToType(Value *V, const Type *Ty,
ValueMapCache &VMC, const TargetData &TD) {
if (V->getType() == Ty) return V; // Already where we need to be?
@ -364,7 +364,7 @@ Value *llvm::ConvertExpressionToType(Value *V, const Type *Ty,
Instruction *Res; // Result of conversion
ValueHandle IHandle(VMC, I); // Prevent I from being removed!
Constant *Dummy = Constant::getNullValue(Ty);
switch (I->getOpcode()) {
@ -373,7 +373,7 @@ Value *llvm::ConvertExpressionToType(Value *V, const Type *Ty,
Res = new CastInst(I->getOperand(0), Ty, Name);
VMC.NewCasts.insert(ValueHandle(VMC, Res));
break;
case Instruction::Add:
case Instruction::Sub:
Res = BinaryOperator::create(cast<BinaryOperator>(I)->getOpcode(),
@ -436,7 +436,7 @@ Value *llvm::ConvertExpressionToType(Value *V, const Type *Ty,
// %t2 = cast %List * * %t1 to %List *
// into
// %t2 = getelementptr %Hosp * %hosp, ubyte 4 ; <%List *>
//
//
GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
// Check to see if there are zero elements that we can remove from the
@ -461,7 +461,7 @@ Value *llvm::ConvertExpressionToType(Value *V, const Type *Ty,
if (Res == 0 && GEP->getNumOperands() == 2 &&
GEP->getType() == PointerType::get(Type::SByteTy)) {
// Otherwise, we can convert a GEP from one form to the other iff the
// current gep is of the form 'getelementptr sbyte*, unsigned N
// and we could convert this to an appropriate GEP for the new type.
@ -475,7 +475,7 @@ Value *llvm::ConvertExpressionToType(Value *V, const Type *Ty,
std::vector<Value*> Indices;
const Type *ElTy = ConvertibleToGEP(NewSrcTy, I->getOperand(1),
Indices, TD, &It);
if (ElTy) {
if (ElTy) {
assert(ElTy == PVTy && "Internal error, setup wrong!");
Res = new GetElementPtrInst(Constant::getNullValue(NewSrcTy),
Indices, Name);
@ -625,7 +625,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
// We also do not allow conversion of a cast that casts from a ptr to array
// of X to a *X. For example: cast [4 x %List *] * %val to %List * *
//
if (const PointerType *SPT =
if (const PointerType *SPT =
dyn_cast<PointerType>(I->getOperand(0)->getType()))
if (const PointerType *DPT = dyn_cast<PointerType>(I->getType()))
if (const ArrayType *AT = dyn_cast<ArrayType>(SPT->getElementType()))
@ -645,7 +645,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
CTMap[I] = RetTy;
return true;
}
// We have to return failure here because ValueConvertibleToType could
// We have to return failure here because ValueConvertibleToType could
// have polluted our map
return false;
}
@ -681,7 +681,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
if (const PointerType *PT = dyn_cast<PointerType>(Ty)) {
LoadInst *LI = cast<LoadInst>(I);
const Type *LoadedTy = PT->getElementType();
// They could be loading the first element of a composite type...
@ -733,7 +733,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
assert(Offset == 0 && "Offset changed!");
if (ElTy == 0) // Element at offset zero in struct doesn't exist!
return false; // Can only happen for {}*
if (ElTy == Ty) // Looks like the 0th element of structure is
return true; // compatible! Accept now!
@ -763,7 +763,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
}
// Must move the same amount of data...
if (!ElTy->isSized() ||
if (!ElTy->isSized() ||
TD.getTypeSize(ElTy) != TD.getTypeSize(I->getOperand(0)->getType()))
return false;
@ -801,7 +801,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
CST = ConstantSInt::get(Index->getType(), DataSize);
else
CST = ConstantUInt::get(Index->getType(), DataSize);
TempScale = BinaryOperator::create(Instruction::Mul, Index, CST);
Index = TempScale;
}
@ -854,7 +854,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
// the call provides...
//
if (NumArgs < FTy->getNumParams()) return false;
// Unless this is a vararg function type, we cannot provide more arguments
// than are desired...
//
@ -878,7 +878,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
//
return ValueConvertibleToType(I, FTy->getReturnType(), CTMap, TD);
}
const PointerType *MPtr = cast<PointerType>(I->getOperand(0)->getType());
const FunctionType *FTy = cast<FunctionType>(MPtr->getElementType());
if (!FTy->isVarArg()) return false;
@ -941,7 +941,7 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal,
ValueHandle IHandle(VMC, I);
const Type *NewTy = NewVal->getType();
Constant *Dummy = (NewTy != Type::VoidTy) ?
Constant *Dummy = (NewTy != Type::VoidTy) ?
Constant::getNullValue(NewTy) : 0;
switch (I->getOpcode()) {
@ -1025,7 +1025,7 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal,
Src = new GetElementPtrInst(Src, Indices, Name+".idx", I);
}
}
Res = new LoadInst(Src, Name);
assert(Res->getType()->isFirstClassType() && "Load of structure or array!");
break;
@ -1042,13 +1042,13 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal,
//
const Type *ElTy =
cast<PointerType>(VMCI->second->getType())->getElementType();
Value *SrcPtr = VMCI->second;
if (ElTy != NewTy) {
// We check that this is a struct in the initial scan...
const StructType *SElTy = cast<StructType>(ElTy);
std::vector<Value*> Indices;
Indices.push_back(Constant::getNullValue(Type::UIntTy));
@ -1135,7 +1135,7 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal,
// anything that is a pointer type...
//
BasicBlock::iterator It = I;
// Check to see if the second argument is an expression that can
// be converted to the appropriate size... if so, allow it.
//
@ -1143,7 +1143,7 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal,
const Type *ElTy = ConvertibleToGEP(NewVal->getType(), I->getOperand(1),
Indices, TD, &It);
assert(ElTy != 0 && "GEP Conversion Failure!");
Res = new GetElementPtrInst(NewVal, Indices, Name);
} else {
// Convert a getelementptr ulong * %reg123, uint %N
@ -1271,7 +1271,7 @@ static void RecursiveDelete(ValueMapCache &Cache, Instruction *I) {
//DEBUG(std::cerr << "VH DELETING: " << (void*)I << " " << I);
for (User::op_iterator OI = I->op_begin(), OE = I->op_end();
for (User::op_iterator OI = I->op_begin(), OE = I->op_end();
OI != OE; ++OI)
if (Instruction *U = dyn_cast<Instruction>(OI)) {
*OI = 0;

View File

@ -1,10 +1,10 @@
//===- Hello.cpp - Example code from "Writing an LLVM Pass" ---------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements two versions of the LLVM "Hello World" pass described
@ -24,7 +24,7 @@ namespace {
std::cerr << "Hello: " << F.getName() << "\n";
return false;
}
};
};
RegisterOpt<Hello> X("hello", "Hello World Pass");
// Hello2 - The second implementation with getAnalysisUsage implemented.
@ -38,6 +38,6 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
};
};
};
RegisterOpt<Hello2> Y("hello2", "Hello World Pass (with getAnalysisUsage implemented)");
}

View File

@ -1,10 +1,10 @@
//===-- ArgumentPromotion.cpp - Promote by-reference arguments ------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass promotes "by reference" arguments to be "by value" arguments. In
@ -67,7 +67,7 @@ namespace {
virtual bool runOnSCC(const std::vector<CallGraphNode *> &SCC);
private:
bool PromoteArguments(CallGraphNode *CGN);
bool isSafeToPromoteArgument(Argument *Arg) const;
bool isSafeToPromoteArgument(Argument *Arg) const;
Function *DoPromotion(Function *F, std::vector<Argument*> &ArgsToPromote);
};
@ -89,7 +89,7 @@ bool ArgPromotion::runOnSCC(const std::vector<CallGraphNode *> &SCC) {
LocalChange |= PromoteArguments(SCC[i]);
Changed |= LocalChange; // Remember that we changed something.
} while (LocalChange);
return Changed;
}
@ -306,7 +306,7 @@ namespace {
unsigned idx = 0;
for (; idx < LHS.size() && idx < RHS.size(); ++idx) {
if (LHS[idx] != RHS[idx]) {
return cast<ConstantInt>(LHS[idx])->getRawValue() <
return cast<ConstantInt>(LHS[idx])->getRawValue() <
cast<ConstantInt>(RHS[idx])->getRawValue();
}
}
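[Editor's note: the comparator in the hunk above orders two GEP index lists by their first differing element. A rough standalone equivalent over plain integers; the tie-break on length is an assumption, since that part of the function is not shown in this hunk:]

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch only: lexicographic order on index vectors, shorter list wins ties.
static bool IndicesLess(const std::vector<uint64_t> &LHS,
                        const std::vector<uint64_t> &RHS) {
  for (std::size_t i = 0; i < LHS.size() && i < RHS.size(); ++i)
    if (LHS[i] != RHS[i])
      return LHS[i] < RHS[i];
  return LHS.size() < RHS.size();
}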
@ -325,7 +325,7 @@ namespace {
Function *ArgPromotion::DoPromotion(Function *F,
std::vector<Argument*> &Args2Prom) {
std::set<Argument*> ArgsToPromote(Args2Prom.begin(), Args2Prom.end());
// Start by computing a new prototype for the function, which is the same as
// the old function, but has modified arguments.
const FunctionType *FTy = F->getFunctionType();
@ -391,7 +391,7 @@ Function *ArgPromotion::DoPromotion(Function *F,
Params.push_back(Type::IntTy);
}
FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());
// Create the new function body and insert it into the module...
Function *NF = new Function(NFTy, F->getLinkage(), F->getName());
F->getParent()->getFunctionList().insert(F, NF);
@ -456,7 +456,7 @@ Function *ArgPromotion::DoPromotion(Function *F,
Call->setName("");
New->setName(Name);
}
// Finally, remove the old call from the program, reducing the use-count of
// F.
Call->getParent()->getInstList().erase(Call);

View File

@ -1,10 +1,10 @@
//===- ConstantMerge.cpp - Merge duplicate global constants ---------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface to a pass that merges duplicate global
@ -60,10 +60,10 @@ bool ConstantMerge::runOnModule(Module &M) {
// Only process constants with initializers
if (GV->isConstant() && GV->hasInitializer()) {
Constant *Init = GV->getInitializer();
// Check to see if the initializer is already known...
std::map<Constant*, GlobalVariable*>::iterator I = CMap.find(Init);
if (I == CMap.end()) { // Nope, add it to the map
CMap.insert(I, std::make_pair(Init, GV));
} else if (GV->hasInternalLinkage()) { // Yup, this is a duplicate!
@ -75,22 +75,22 @@ bool ConstantMerge::runOnModule(Module &M) {
I->second = GV;
}
}
if (Replacements.empty())
return MadeChange;
CMap.clear();
// Now that we have figured out which replacements must be made, do them all
// now. This avoid invalidating the pointers in CMap, which are unneeded
// now.
for (unsigned i = 0, e = Replacements.size(); i != e; ++i) {
// Eliminate any uses of the dead global...
Replacements[i].first->replaceAllUsesWith(Replacements[i].second);
// Delete the global value from the module...
M.getGlobalList().erase(Replacements[i].first);
}
NumMerged += Replacements.size();
Replacements.clear();
}
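[Editor's note: taken together, these hunks show ConstantMerge's two-phase shape: first record a canonical global per initializer and queue internal duplicates, then apply all replacements after the scan so the pointers held in CMap stay valid. A condensed sketch of that pattern, using hypothetical stand-in types rather than the real LLVM classes:]

#include <map>
#include <utility>
#include <vector>

struct Constant;                                  // stand-in for llvm::Constant
struct Global { Constant *Init; bool Internal; }; // stand-in for GlobalVariable

static bool MergeDuplicateConstants(std::vector<Global *> &Globals) {
  std::map<Constant *, Global *> CMap;                     // initializer -> canonical
  std::vector<std::pair<Global *, Global *>> Replacements; // duplicate -> canonical
  for (Global *GV : Globals) {
    auto It = CMap.find(GV->Init);
    if (It == CMap.end())
      CMap.emplace(GV->Init, GV);      // first global seen with this initializer
    else if (GV->Internal)
      Replacements.emplace_back(GV, It->second);
  }
  // Phase two: rewriting nothing until the scan is done keeps the pointers
  // stored in CMap valid, as the comment in the hunk above notes.
  for (auto &R : Replacements) {
    (void)R; // the pass would replaceAllUsesWith(R.second) and erase R.first here
  }
  return !Replacements.empty();
}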

View File

@ -1,10 +1,10 @@
//===-- DeadArgumentElimination.cpp - Eliminate dead arguments ------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass deletes dead arguments from internal functions. Dead argument
@ -88,7 +88,7 @@ namespace {
void MarkArgumentLive(Argument *Arg);
void MarkRetValLive(Function *F);
void MarkReturnInstArgumentLive(ReturnInst *RI);
void RemoveDeadArgumentsFromFunction(Function *F);
};
RegisterOpt<DAE> X("deadargelim", "Dead Argument Elimination");
@ -168,7 +168,7 @@ void DAE::SurveyFunction(Function &F) {
if (!F.hasInternalLinkage() &&
(!ShouldHackArguments() || F.getIntrinsicID()))
FunctionIntrinsicallyLive = true;
else
else
for (Value::use_iterator I = F.use_begin(), E = F.use_end(); I != E; ++I) {
// If this use is anything other than a call site, the function is alive.
CallSite CS = CallSite::get(*I);
@ -197,7 +197,7 @@ void DAE::SurveyFunction(Function &F) {
RetValLiveness = Live;
break;
}
// If the function is PASSED IN as an argument, its address has been taken
for (CallSite::arg_iterator AI = CS.arg_begin(), E = CS.arg_end();
AI != E; ++AI)
@ -300,11 +300,11 @@ bool DAE::isMaybeLiveArgumentNowLive(Argument *Arg) {
void DAE::MarkArgumentLive(Argument *Arg) {
std::set<Argument*>::iterator It = MaybeLiveArguments.lower_bound(Arg);
if (It == MaybeLiveArguments.end() || *It != Arg) return;
DEBUG(std::cerr << " MaybeLive argument now live: " << Arg->getName()<<"\n");
MaybeLiveArguments.erase(It);
LiveArguments.insert(Arg);
// Loop over all of the call sites of the function, making any arguments
// passed in to provide a value for this argument live as necessary.
//
@ -440,7 +440,7 @@ void DAE::RemoveDeadArgumentsFromFunction(Function *F) {
New->setName(Name);
}
}
// Finally, remove the old call from the program, reducing the use-count of
// F.
Call->getParent()->getInstList().erase(Call);
@ -499,7 +499,7 @@ bool DAE::runOnModule(Module &M) {
while (!InstructionsToInspect.empty()) {
Instruction *I = InstructionsToInspect.back();
InstructionsToInspect.pop_back();
if (ReturnInst *RI = dyn_cast<ReturnInst>(I)) {
// For return instructions, we just have to check to see if the return
// value for the current function is known now to be alive. If so, any
@ -513,7 +513,7 @@ bool DAE::runOnModule(Module &M) {
assert(CS.getInstruction() && "Unknown instruction for the I2I list!");
Function *Callee = CS.getCalledFunction();
// If we found a call or invoke instruction on this list, that means that
// an argument of the function is a call instruction. If the argument is
// live, then the return value of the called instruction is now live.
@ -556,7 +556,7 @@ bool DAE::runOnModule(Module &M) {
if (MaybeLiveArguments.empty() && DeadArguments.empty() &&
MaybeLiveRetVal.empty() && DeadRetVal.empty())
return false;
// Otherwise, compact into one set, and start eliminating the arguments from
// the functions.
DeadArguments.insert(MaybeLiveArguments.begin(), MaybeLiveArguments.end());

View File

@ -1,10 +1,10 @@
//===- DeadTypeElimination.cpp - Eliminate unused types for symbol table --===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass is used to cleanup the output of GCC. It eliminate names for types

View File

@ -1,10 +1,10 @@
//===-- ExtractFunction.cpp - Function extraction pass --------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass extracts
@ -25,7 +25,7 @@ namespace {
/// specified function. Otherwise, it deletes as much of the module as
/// possible, except for the function specified.
///
FunctionExtractorPass(Function *F = 0, bool deleteFn = true)
FunctionExtractorPass(Function *F = 0, bool deleteFn = true)
: Named(F), deleteFunc(deleteFn) {}
bool runOnModule(Module &M) {
@ -36,7 +36,7 @@ namespace {
if (deleteFunc)
return deleteFunction();
else
else
return isolateFunction(M);
}
@ -57,31 +57,31 @@ namespace {
I->setInitializer(0); // Make all variables external
I->setLinkage(GlobalValue::ExternalLinkage);
}
// All of the functions may be used by global variables or the named
// function. Loop through them and create a new, external functions that
// can be "used", instead of ones with bodies.
std::vector<Function*> NewFunctions;
Function *Last = --M.end(); // Figure out where the last real fn is.
for (Module::iterator I = M.begin(); ; ++I) {
if (&*I != Named) {
Function *New = new Function(I->getFunctionType(),
GlobalValue::ExternalLinkage,
I->getName());
I->setName(""); // Remove Old name
// If it's not the named function, delete the body of the function
I->dropAllReferences();
M.getFunctionList().push_back(New);
NewFunctions.push_back(New);
}
if (&*I == Last) break; // Stop after processing the last function
}
// Now that we have replacements all set up, loop through the module,
// deleting the old functions, replacing them with the newly created
// functions.
@ -92,19 +92,19 @@ namespace {
if (&*I != Named) {
// Make everything that uses the old function use the new dummy fn
I->replaceAllUsesWith(NewFunctions[FuncNum++]);
Function *Old = I;
++I; // Move the iterator to the new function
// Delete the old function!
M.getFunctionList().erase(Old);
} else {
++I; // Skip the function we are extracting
}
} while (&*I != NewFunctions[0]);
}
return true;
}
};

View File

@ -1,10 +1,10 @@
//===- FunctionResolution.cpp - Resolve declarations to implementations ---===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Loop over the functions that are in the module and look for functions that
@ -57,7 +57,7 @@ static bool ResolveFunctions(Module &M, std::vector<GlobalValue*> &Globals,
Function *Old = cast<Function>(Globals[i]);
const FunctionType *OldMT = Old->getFunctionType();
const FunctionType *ConcreteMT = Concrete->getFunctionType();
if (OldMT->getNumParams() > ConcreteMT->getNumParams() &&
!ConcreteMT->isVarArg())
if (!Old->use_empty()) {
@ -69,7 +69,7 @@ static bool ResolveFunctions(Module &M, std::vector<GlobalValue*> &Globals,
WriteAsOperand(std::cerr, Concrete);
std::cerr << "\n";
}
// Check to make sure that if there are specified types, that they
// match...
//
@ -79,7 +79,7 @@ static bool ResolveFunctions(Module &M, std::vector<GlobalValue*> &Globals,
if (!Old->use_empty() && !Concrete->use_empty())
for (unsigned i = 0; i < NumArguments; ++i)
if (OldMT->getParamType(i) != ConcreteMT->getParamType(i))
if (OldMT->getParamType(i)->getTypeID() !=
if (OldMT->getParamType(i)->getTypeID() !=
ConcreteMT->getParamType(i)->getTypeID()) {
std::cerr << "WARNING: Function [" << Old->getName()
<< "]: Parameter types conflict for: '";
@ -89,7 +89,7 @@ static bool ResolveFunctions(Module &M, std::vector<GlobalValue*> &Globals,
std::cerr << "'\n";
return Changed;
}
// Attempt to convert all of the uses of the old function to the concrete
// form of the function. If there is a use of the fn that we don't
// understand here we punt to avoid making a bad transformation.
@ -174,11 +174,11 @@ static bool ProcessGlobalsWithSameName(Module &M, TargetData &TD,
if (!F->isExternal()) {
if (Concrete && !Concrete->isExternal())
return false; // Found two different functions types. Can't choose!
Concrete = Globals[i];
} else if (Concrete) {
if (Concrete->isExternal()) // If we have multiple external symbols...
if (F->getFunctionType()->getNumParams() >
if (F->getFunctionType()->getNumParams() >
cast<Function>(Concrete)->getFunctionType()->getNumParams())
Concrete = F; // We are more concrete than "Concrete"!
@ -213,7 +213,7 @@ static bool ProcessGlobalsWithSameName(Module &M, TargetData &TD,
else if (!Globals[i]->hasInternalLinkage())
NumInstancesWithExternalLinkage++;
}
if (!HasExternal && NumInstancesWithExternalLinkage <= 1)
return false; // Nothing to do? Must have multiple internal definitions.
@ -231,7 +231,7 @@ static bool ProcessGlobalsWithSameName(Module &M, TargetData &TD,
OtherF->getFunctionType()->isVarArg() &&
OtherF->getFunctionType()->getNumParams() == 0)
DontPrintWarning = true;
// Otherwise, if the non-concrete global is a global array variable with a
// size of 0, and the concrete global is an array with a real size, don't
// warn. This occurs due to declaring 'extern int A[];'.

View File

@ -1,10 +1,10 @@
//===-- GlobalDCE.cpp - DCE unreachable internal functions ----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This transform is designed to eliminate unreachable internal globals from the
@ -111,7 +111,7 @@ bool GlobalDCE::runOnModule(Module &M) {
NumVariables += DeadGlobalVars.size();
Changed = true;
}
// Make sure that all memory is released
AliveGlobals.clear();
return Changed;
@ -148,7 +148,7 @@ void GlobalDCE::GlobalIsNeeded(GlobalValue *G) {
if (GlobalValue *GV = dyn_cast<GlobalValue>(*U))
GlobalIsNeeded(GV);
else if (Constant *C = dyn_cast<Constant>(*U))
MarkUsedGlobalsAsNeeded(C);
MarkUsedGlobalsAsNeeded(C);
}
}
@ -174,7 +174,7 @@ bool GlobalDCE::RemoveUnusedGlobalValue(GlobalValue &GV) {
GV.removeDeadConstantUsers();
return GV.use_empty();
}
// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used
// by constants itself. Note that constants cannot be cyclic, so this test is
// pretty easy to implement recursively.

View File

@ -1,10 +1,10 @@
//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
@ -47,7 +47,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetData>();
}
bool runOnModule(Module &M);
private:
@ -201,7 +201,7 @@ static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
// If the first two indices are constants, this can be SRA'd.
if (isa<GlobalVariable>(I->getOperand(0))) {
if (I->getNumOperands() < 3 || !isa<Constant>(I->getOperand(1)) ||
!cast<Constant>(I->getOperand(1))->isNullValue() ||
!cast<Constant>(I->getOperand(1))->isNullValue() ||
!isa<ConstantInt>(I->getOperand(2)))
GS.isNotSuitableForSRA = true;
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I->getOperand(0))){
@ -304,7 +304,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
bool Changed = false;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
User *U = *UI++;
if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (Init) {
// Replace the load with the initializer.
@ -367,7 +367,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV) {
assert(GV->hasInternalLinkage() && !GV->isConstant());
Constant *Init = GV->getInitializer();
const Type *Ty = Init->getType();
std::vector<GlobalVariable*> NewGlobals;
Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
@ -422,7 +422,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV) {
assert(((isa<ConstantExpr>(GEP) &&
cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");
// Ignore the 1th operand, which has to be zero or else the program is quite
// broken (undefined). Get the 2nd operand, which is the structure or array
// index.
@ -499,7 +499,7 @@ static bool AllUsesOfValueWillTrapIfNull(Value *V) {
if (!AllUsesOfValueWillTrapIfNull(CI)) return false;
} else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI)) {
if (!AllUsesOfValueWillTrapIfNull(GEPI)) return false;
} else if (isa<SetCondInst>(*UI) &&
} else if (isa<SetCondInst>(*UI) &&
isa<ConstantPointerNull>(UI->getOperand(1))) {
// Ignore setcc X, null
} else {
@ -681,7 +681,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
MI->eraseFromParent();
MI = NewMI;
}
// Create the new global variable. The contents of the malloc'd memory is
// undefined, so initialize with an undef value.
Constant *Init = UndefValue::get(MI->getAllocatedType());
@ -689,7 +689,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
GlobalValue::InternalLinkage, Init,
GV->getName()+".body");
GV->getParent()->getGlobalList().insert(GV, NewGV);
// Anything that used the malloc now uses the global directly.
MI->replaceAllUsesWith(NewGV);
@ -699,8 +699,8 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// If there is a comparison against null, we will insert a global bool to
// keep track of whether the global was initialized yet or not.
GlobalVariable *InitBool =
new GlobalVariable(Type::BoolTy, false, GlobalValue::InternalLinkage,
GlobalVariable *InitBool =
new GlobalVariable(Type::BoolTy, false, GlobalValue::InternalLinkage,
ConstantBool::False, GV->getName()+".init");
bool InitBoolUsed = false;
@ -817,7 +817,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
if (GV->getInitializer()->getType() != SOVC->getType())
SOVC = ConstantExpr::getCast(SOVC, GV->getInitializer()->getType());
// Optimize away any trapping uses of the loaded value.
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC))
return true;
@ -846,7 +846,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
}
/// ShrinkGlobalToBoolean - At this point, we have learned that the only two
/// values ever stored into GV are its initializer and OtherVal.
/// values ever stored into GV are its initializer and OtherVal.
static void ShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// Create the new global, initializing it to false.
GlobalVariable *NewGV = new GlobalVariable(Type::BoolTy, false,
@ -895,13 +895,13 @@ static void ShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
} else if (!UI->use_empty()) {
// Change the load into a load of bool then a select.
LoadInst *LI = cast<LoadInst>(UI);
std::string Name = LI->getName(); LI->setName("");
LoadInst *NLI = new LoadInst(NewGV, Name+".b", LI);
Value *NSI;
if (IsOneZero)
NSI = new CastInst(NLI, LI->getType(), Name, LI);
else
else
NSI = new SelectInst(NLI, OtherVal, InitVal, Name, LI);
LI->replaceAllUsesWith(NSI);
}
@ -947,7 +947,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), FirstI);
if (!isa<UndefValue>(GV->getInitializer()))
new StoreInst(GV->getInitializer(), Alloca, FirstI);
GV->replaceAllUsesWith(Alloca);
GV->eraseFromParent();
++NumLocalized;
@ -969,14 +969,14 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
Changed = true;
}
return Changed;
} else if (GS.StoredType <= GlobalStatus::isInitializerStored) {
DEBUG(std::cerr << "MARKING CONSTANT: " << *GV);
GV->setConstant(true);
// Clean up any obviously simplifiable users now.
CleanupConstantGlobalUsers(GV, GV->getInitializer());
// If the global is dead now, just nuke it.
if (GV->use_empty()) {
DEBUG(std::cerr << " *** Marking constant allowed us to simplify "
@ -984,7 +984,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->eraseFromParent();
++NumDeleted;
}
++NumMarked;
return true;
} else if (!GS.isNotSuitableForSRA &&
@ -1002,10 +1002,10 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
if (isa<UndefValue>(GV->getInitializer())) {
// Change the initial value here.
GV->setInitializer(SOVConstant);
// Clean up any obviously simplifiable users now.
CleanupConstantGlobalUsers(GV, GV->getInitializer());
if (GV->use_empty()) {
DEBUG(std::cerr << " *** Substituting initializer allowed us to "
"simplify all users and delete global!\n");

View File

@ -1,10 +1,10 @@
//===-- IPConstantPropagation.cpp - Propagate constants through calls -----===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass implements an _extremely_ simple interprocedural constant
@ -81,10 +81,10 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
return false; // Used by a non-instruction, do not transform
else {
CallSite CS = CallSite::get(cast<Instruction>(*I));
if (CS.getInstruction() == 0 ||
if (CS.getInstruction() == 0 ||
CS.getCalledFunction() != &F)
return false; // Not a direct call site?
// Check out all of the potentially constant arguments
CallSite::arg_iterator AI = CS.arg_begin();
Function::arg_iterator Arg = F.arg_begin();
@ -163,7 +163,7 @@ bool IPCP::PropagateConstantReturn(Function &F) {
ReplacedAllUsers = false;
else {
CallSite CS = CallSite::get(cast<Instruction>(*I));
if (CS.getInstruction() == 0 ||
if (CS.getInstruction() == 0 ||
CS.getCalledFunction() != &F) {
ReplacedAllUsers = false;
} else {

View File

@ -1,10 +1,10 @@
//===- InlineSimple.cpp - Code to perform simple function inlining --------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements bottom-up inlining of functions into callees.
@ -101,7 +101,7 @@ static unsigned CountCodeReductionForConstant(Value *V) {
if (AllOperandsConstant) {
// We will get to remove this instruction...
Reduction += 7;
// And any other instructions that use it which become constants
// themselves.
Reduction += CountCodeReductionForConstant(&Inst);

View File

@ -1,10 +1,10 @@
//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
@ -53,18 +53,18 @@ static bool InlineCallIfPossible(CallSite CS, CallGraph &CG,
for (CallGraphNode::iterator I = CalleeNode->begin(),
E = CalleeNode->end(); I != E; ++I)
CallerNode->addCalledFunction(*I);
// If we inlined the last possible call site to the function, delete the
// function body now.
if (Callee->use_empty() && Callee->hasInternalLinkage() &&
!SCCFunctions.count(Callee)) {
DEBUG(std::cerr << " -> Deleting dead function: "
<< Callee->getName() << "\n");
// Remove any call graph edges from the callee to its callees.
while (CalleeNode->begin() != CalleeNode->end())
CalleeNode->removeCallEdgeTo(*(CalleeNode->end()-1));
// Removing the node for callee from the call graph and delete it.
delete CG.removeFunctionFromModule(CalleeNode);
++NumDeleted;
@ -99,7 +99,7 @@ bool Inliner::runOnSCC(const std::vector<CallGraphNode*> &SCC) {
}
DEBUG(std::cerr << ": " << CallSites.size() << " call sites.\n");
// Now that we have all of the call sites, move the ones to functions in the
// current SCC to the end of the list.
unsigned FirstCallInSCC = CallSites.size();
@ -107,7 +107,7 @@ bool Inliner::runOnSCC(const std::vector<CallGraphNode*> &SCC) {
if (Function *F = CallSites[i].getCalledFunction())
if (SCCFunctions.count(F))
std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);
// Now that we have all of the call sites, loop over them and inline them if
// it looks profitable to do so.
bool Changed = false;
@ -137,7 +137,7 @@ bool Inliner::runOnSCC(const std::vector<CallGraphNode*> &SCC) {
} else {
DEBUG(std::cerr << " Inlining: cost=" << InlineCost
<< ", Call: " << *CS.getInstruction());
Function *Caller = CS.getInstruction()->getParent()->getParent();
// Attempt to inline the function...
@ -178,12 +178,12 @@ bool Inliner::doFinalization(CallGraph &CG) {
// Remove any call graph edges from the function to its callees.
while (CGN->begin() != CGN->end())
CGN->removeCallEdgeTo(*(CGN->end()-1));
// Remove any edges from the external node to the function's call graph
// node. These edges might have been made irrelegant due to
// optimization of the program.
CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);
// Removing the node for callee from the call graph and delete it.
FunctionsToRemove.insert(CGN);
}

View File

@ -1,10 +1,10 @@
//===- InlineCommon.h - Code common to all inliners -------------*- C++ -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple policy-based bottom-up inliner. This file

View File

@ -1,10 +1,10 @@
//===-- Internalize.cpp - Mark functions internal -------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass loops over all of the functions in the input module, looking for a
@ -38,7 +38,7 @@ namespace {
APIList("internalize-public-api-list", cl::value_desc("list"),
cl::desc("A list of symbol names to preserve"),
cl::CommaSeparated);
class InternalizePass : public ModulePass {
std::set<std::string> ExternalNames;
public:
@ -80,7 +80,7 @@ namespace {
}
bool Changed = false;
// Found a main function, mark all functions not named main as internal.
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
if (!I->isExternal() && // Function must be defined here
@ -109,7 +109,7 @@ namespace {
++NumGlobals;
DEBUG(std::cerr << "Internalizing gvar " << I->getName() << "\n");
}
return Changed;
}
};
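[Editor's note: the rule driving this hunk is simple: once a main function exists, every other symbol defined in the module is given internal linkage unless it appears on the user's preserve list. A minimal sketch of that filter with hypothetical stand-in types, not the pass's real interface:]

#include <set>
#include <string>
#include <vector>

struct Symbol { std::string Name; bool Defined; bool Internal; };

// Sketch only: internalize everything defined here except "main" and any name
// the user asked to keep public (the -internalize-public-api-list option above).
static bool Internalize(std::vector<Symbol> &Syms,
                        const std::set<std::string> &Preserve) {
  bool Changed = false;
  for (Symbol &S : Syms)
    if (S.Defined && !S.Internal && S.Name != "main" && !Preserve.count(S.Name)) {
      S.Internal = true;
      Changed = true;
    }
  return Changed;
}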

View File

@ -1,10 +1,10 @@
//===- LoopExtractor.cpp - Extract each loop into a new function ----------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// A pass wrapper around the ExtractLoop() scalar transformation to extract each
@ -27,7 +27,7 @@ using namespace llvm;
namespace {
Statistic<> NumExtracted("loop-extract", "Number of loops extracted");
// FIXME: This is not a function pass, but the PassManager doesn't allow
// Module passes to require FunctionPasses, so we can't get loop info if we're
// not a function pass.
@ -37,7 +37,7 @@ namespace {
LoopExtractor(unsigned numLoops = ~0) : NumLoops(numLoops) {}
virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredID(BreakCriticalEdgesID);
AU.addRequiredID(LoopSimplifyID);
@ -46,7 +46,7 @@ namespace {
}
};
RegisterOpt<LoopExtractor>
RegisterOpt<LoopExtractor>
X("loop-extract", "Extract loops into new functions");
/// SingleLoopExtractor - For bugpoint.
@ -54,9 +54,9 @@ namespace {
SingleLoopExtractor() : LoopExtractor(1) {}
};
RegisterOpt<SingleLoopExtractor>
RegisterOpt<SingleLoopExtractor>
Y("loop-extract-single", "Extract at most one loop into a new function");
} // End anonymous namespace
} // End anonymous namespace
// createLoopExtractorPass - This pass extracts all natural loops from the
// program into a function if it can.
@ -87,11 +87,11 @@ bool LoopExtractor::runOnFunction(Function &F) {
// than a minimal wrapper around the loop, extract the loop.
Loop *TLL = *LI.begin();
bool ShouldExtractLoop = false;
// Extract the loop if the entry block doesn't branch to the loop header.
TerminatorInst *EntryTI = F.getEntryBlock().getTerminator();
if (!isa<BranchInst>(EntryTI) ||
!cast<BranchInst>(EntryTI)->isUnconditional() ||
!cast<BranchInst>(EntryTI)->isUnconditional() ||
EntryTI->getSuccessor(0) != TLL->getHeader())
ShouldExtractLoop = true;
else {
@ -105,7 +105,7 @@ bool LoopExtractor::runOnFunction(Function &F) {
break;
}
}
if (ShouldExtractLoop) {
if (NumLoops == 0) return Changed;
--NumLoops;
@ -184,6 +184,6 @@ bool BlockExtractorPass::runOnModule(Module &M) {
for (unsigned i = 0, e = BlocksToExtract.size(); i != e; ++i)
ExtractBasicBlock(BlocksToExtract[i]);
return !BlocksToExtract.empty();
}

View File

@ -1,10 +1,10 @@
//===- LowerSetJmp.cpp - Code pertaining to lowering set/long jumps -------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the lowering of setjmp and longjmp to use the
@ -204,7 +204,7 @@ bool LowerSetJmp::doInitialization(Module& M)
// void __llvm_sjljeh_init_setjmpmap(void**)
InitSJMap = M.getOrInsertFunction("__llvm_sjljeh_init_setjmpmap",
Type::VoidTy, SBPPTy, 0);
Type::VoidTy, SBPPTy, 0);
// void __llvm_sjljeh_destroy_setjmpmap(void**)
DestroySJMap = M.getOrInsertFunction("__llvm_sjljeh_destroy_setjmpmap",
Type::VoidTy, SBPPTy, 0);
@ -386,7 +386,7 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
// instructions after the call.
for (BasicBlock::iterator I = ++BasicBlock::iterator(Inst), E = ABlock->end();
I != E; ++I)
InstrsAfterCall.insert(I);
InstrsAfterCall.insert(I);
for (BasicBlock::iterator II = ABlock->begin();
II != BasicBlock::iterator(Inst); ++II)
@ -460,7 +460,7 @@ void LowerSetJmp::visitCallInst(CallInst& CI)
std::vector<Value*> Params(CI.op_begin() + 1, CI.op_end());
InvokeInst* II = new
InvokeInst(CI.getCalledValue(), NewBB, PrelimBBMap[Func],
Params, CI.getName(), Term);
Params, CI.getName(), Term);
// Replace the old call inst with the invoke inst and remove the call.
CI.replaceAllUsesWith(II);

View File

@ -1,10 +1,10 @@
//===- PruneEH.cpp - Pass which deletes unused exception handlers ---------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements a simple interprocedural pass which walks the
@ -77,7 +77,7 @@ bool PruneEH::runOnSCC(const std::vector<CallGraphNode *> &SCC) {
SCCMightThrow = true;
break;
}
} else {
// Indirect call, it might throw.
SCCMightThrow = true;
@ -109,24 +109,24 @@ bool PruneEH::runOnSCC(const std::vector<CallGraphNode *> &SCC) {
std::vector<Value*>(II->op_begin()+3,
II->op_end()),
Name, II);
// Anything that used the value produced by the invoke instruction
// now uses the value produced by the call instruction.
II->replaceAllUsesWith(Call);
II->getUnwindDest()->removePredecessor(II->getParent());
// Insert a branch to the normal destination right before the
// invoke.
new BranchInst(II->getNormalDest(), II);
// Finally, delete the invoke instruction!
I->getInstList().pop_back();
++NumRemoved;
MadeChange = true;
}
}
return MadeChange;
return MadeChange;
}

View File

@ -1,10 +1,10 @@
//===- RaiseAllocations.cpp - Convert %malloc & %free calls to insts ------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines the RaiseAllocations pass which convert malloc and free
@ -33,17 +33,17 @@ namespace {
Function *FreeFunc; // Initialized by doPassInitializationVirt
public:
RaiseAllocations() : MallocFunc(0), FreeFunc(0) {}
// doPassInitialization - For the raise allocations pass, this finds a
// declaration for malloc and free if they exist.
//
void doInitialization(Module &M);
// run - This method does the actual work of converting instructions over.
//
bool runOnModule(Module &M);
};
RegisterOpt<RaiseAllocations>
X("raiseallocs", "Raise allocations from calls to instructions");
} // end anonymous namespace
@ -134,14 +134,14 @@ bool RaiseAllocations::runOnModule(Module &M) {
(CS.getCalledFunction() == MallocFunc ||
std::find(EqPointers.begin(), EqPointers.end(),
CS.getCalledValue()) != EqPointers.end())) {
Value *Source = *CS.arg_begin();
// If no prototype was provided for malloc, we may need to cast the
// source size.
if (Source->getType() != Type::UIntTy)
Source = new CastInst(Source, Type::UIntTy, "MallocAmtCast", I);
std::string Name(I->getName()); I->setName("");
MallocInst *MI = new MallocInst(Type::SByteTy, Source, Name, I);
I->replaceAllUsesWith(MI);
@ -183,7 +183,7 @@ bool RaiseAllocations::runOnModule(Module &M) {
(CS.getCalledFunction() == FreeFunc ||
std::find(EqPointers.begin(), EqPointers.end(),
CS.getCalledValue()) != EqPointers.end())) {
// If no prototype was provided for free, we may need to cast the
// source pointer. This should be really uncommon, but it's necessary
// just in case we are dealing with weird code like this:

View File

@ -1,10 +1,10 @@
//===- StripSymbols.cpp - Strip symbols and debug info from a module ------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements stripping symbols out of symbol tables.
@ -17,7 +17,7 @@
//
// Notice that:
// * This pass makes code much less readable, so it should only be used in
// situations where the 'strip' utility would be used (such as reducing
// situations where the 'strip' utility would be used (such as reducing
// code size, and making it harder to reverse engineer code).
//
//===----------------------------------------------------------------------===//
@ -63,7 +63,7 @@ static void RemoveDeadConstant(Constant *C) {
}
else if (!isa<Function>(C))
C->destroyConstant();
// If the constant referenced anything, see if we can delete it as well.
while (!Operands.empty()) {
RemoveDeadConstant(Operands.back());
@ -144,5 +144,5 @@ bool StripSymbols::runOnModule(Module &M) {
RemoveDeadConstant(GV);
}
return true;
return true;
}

View File

@ -1,10 +1,10 @@
//===- BlockProfiling.cpp - Insert counters for block profiling -----------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass instruments the specified program with counters for basic block or
@ -51,7 +51,7 @@ bool FunctionProfiler::runOnModule(Module &M) {
}
unsigned NumFunctions = 0;
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
if (!I->isExternal())
++NumFunctions;
@ -62,7 +62,7 @@ bool FunctionProfiler::runOnModule(Module &M) {
// Instrument all of the functions...
unsigned i = 0;
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
if (!I->isExternal())
// Insert counter at the start of the function
IncrementCounterInBlock(I->begin(), i++, Counters);
@ -93,7 +93,7 @@ bool BlockProfiler::runOnModule(Module &M) {
}
unsigned NumBlocks = 0;
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
NumBlocks += I->size();
const Type *ATy = ArrayType::get(Type::UIntTy, NumBlocks);
@ -103,7 +103,7 @@ bool BlockProfiler::runOnModule(Module &M) {
// Instrument all of the blocks...
unsigned i = 0;
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
for (Function::iterator BB = I->begin(), E = I->end(); BB != E; ++BB)
// Insert counter at the start of the block
IncrementCounterInBlock(BB, i++, Counters);

View File

@ -1,10 +1,10 @@
//===- EdgeProfiling.cpp - Insert counters for edge profiling -------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass instruments the specified program with counters for edge profiling.

View File

@ -1,10 +1,10 @@
//===-- EmitFunctions.cpp - interface to insert instrumentation -----------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This inserts into the input module three new global constants containing
@ -27,7 +27,7 @@
#include "llvm/Transforms/Instrumentation.h"
using namespace llvm;
namespace llvm {
namespace llvm {
namespace {
enum Color{
@ -35,11 +35,11 @@ namespace {
GREY,
BLACK
};
struct EmitFunctionTable : public ModulePass {
bool runOnModule(Module &M);
};
RegisterOpt<EmitFunctionTable>
X("emitfuncs", "Emit a function table for the reoptimizer");
}
@ -48,9 +48,9 @@ static char doDFS(BasicBlock * node,std::map<BasicBlock *, Color > &color){
color[node] = GREY;
for(succ_iterator vl = succ_begin(node), ve = succ_end(node); vl != ve; ++vl){
BasicBlock *BB = *vl;
BasicBlock *BB = *vl;
if(color[BB]!=GREY && color[BB]!=BLACK){
if(!doDFS(BB, color)){
return 0;
@ -75,7 +75,7 @@ static char hasBackEdge(Function *F){
// Per Module pass for inserting function table
bool EmitFunctionTable::runOnModule(Module &M){
std::vector<const Type*> vType;
std::vector<Constant *> vConsts;
std::vector<Constant *> sBCons;
@ -83,24 +83,24 @@ bool EmitFunctionTable::runOnModule(Module &M){
for(Module::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
if (!MI->isExternal()) {
vType.push_back(MI->getType());
//std::cerr<<MI;
vConsts.push_back(MI);
sBCons.push_back(ConstantInt::get(Type::SByteTy, hasBackEdge(MI)));
counter++;
}
StructType *sttype = StructType::get(vType);
Constant *cstruct = ConstantStruct::get(sttype, vConsts);
GlobalVariable *gb = new GlobalVariable(cstruct->getType(), true,
GlobalValue::ExternalLinkage,
GlobalValue::ExternalLinkage,
cstruct, "llvmFunctionTable");
M.getGlobalList().push_back(gb);
Constant *constArray = ConstantArray::get(ArrayType::get(Type::SByteTy,
Constant *constArray = ConstantArray::get(ArrayType::get(Type::SByteTy,
sBCons.size()),
sBCons);
@ -110,9 +110,9 @@ bool EmitFunctionTable::runOnModule(Module &M){
M.getGlobalList().push_back(funcArray);
ConstantInt *cnst = ConstantSInt::get(Type::IntTy, counter);
GlobalVariable *fnCount = new GlobalVariable(Type::IntTy, true,
GlobalValue::ExternalLinkage,
ConstantInt *cnst = ConstantSInt::get(Type::IntTy, counter);
GlobalVariable *fnCount = new GlobalVariable(Type::IntTy, true,
GlobalValue::ExternalLinkage,
cnst, "llvmFunctionCount");
M.getGlobalList().push_back(fnCount);
return true; // Always modifies program

View File

@ -1,10 +1,10 @@
//===-- CombineBranch.cpp -------------------------------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Combine multiple back-edges going to the same sink into a single
@ -31,14 +31,14 @@ namespace {
void getBackEdgesVisit(BasicBlock *u,
std::map<BasicBlock *, Color > &color,
std::map<BasicBlock *, int > &d,
std::map<BasicBlock *, int > &d,
int &time,
std::map<BasicBlock *, BasicBlock *> &be);
void removeRedundant(std::map<BasicBlock *, BasicBlock *> &be);
public:
bool runOnFunction(Function &F);
};
RegisterOpt<CombineBranches>
X("branch-combine", "Multiple backedges going to same target are merged");
}
@ -53,10 +53,10 @@ namespace {
///
void CombineBranches::getBackEdgesVisit(BasicBlock *u,
std::map<BasicBlock *, Color > &color,
std::map<BasicBlock *, int > &d,
std::map<BasicBlock *, int > &d,
int &time,
std::map<BasicBlock *, BasicBlock *> &be) {
color[u]=GREY;
time++;
d[u]=time;
@ -66,7 +66,7 @@ void CombineBranches::getBackEdgesVisit(BasicBlock *u,
if(color[BB]!=GREY && color[BB]!=BLACK)
getBackEdgesVisit(BB, color, d, time, be);
//now checking for d and f vals
else if(color[BB]==GREY){
//so v is ancestor of u if time of u > time of v
@ -83,29 +83,29 @@ void CombineBranches::getBackEdgesVisit(BasicBlock *u,
void CombineBranches::removeRedundant(std::map<BasicBlock *, BasicBlock *> &be){
std::vector<BasicBlock *> toDelete;
std::map<BasicBlock *, int> seenBB;
for(std::map<BasicBlock *, BasicBlock *>::iterator MI = be.begin(),
for(std::map<BasicBlock *, BasicBlock *>::iterator MI = be.begin(),
ME = be.end(); MI != ME; ++MI){
if(seenBB[MI->second])
continue;
seenBB[MI->second] = 1;
std::vector<BasicBlock *> sameTarget;
sameTarget.clear();
for(std::map<BasicBlock *, BasicBlock *>::iterator MMI = be.begin(),
for(std::map<BasicBlock *, BasicBlock *>::iterator MMI = be.begin(),
MME = be.end(); MMI != MME; ++MMI){
if(MMI->first == MI->first)
continue;
if(MMI->second == MI->second)
sameTarget.push_back(MMI->first);
}
//so more than one branch to same target
if(sameTarget.size()){
@ -126,9 +126,9 @@ void CombineBranches::removeRedundant(std::map<BasicBlock *, BasicBlock *> &be){
ti->setSuccessor(index, newBB);
for(BasicBlock::iterator BB2Inst = MI->second->begin(),
for(BasicBlock::iterator BB2Inst = MI->second->begin(),
BBend = MI->second->end(); BB2Inst != BBend; ++BB2Inst){
if (PHINode *phiInst = dyn_cast<PHINode>(BB2Inst)){
int bbIndex;
bbIndex = phiInst->getBasicBlockIndex(*VBI);
@ -178,7 +178,7 @@ bool CombineBranches::runOnFunction(Function &F){
int time = 0;
getBackEdgesVisit (F.begin (), color, d, time, be);
removeRedundant (be);
return true; // FIXME: assumes a modification was always made.
}

View File

@ -1,16 +1,16 @@
//===-- EdgeCode.cpp - generate LLVM instrumentation code -----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//It implements the class EdgeCode: which provides
//It implements the class EdgeCode: which provides
//support for inserting "appropriate" instrumentation at
//designated points in the graph
//
//It also has methods to insert initialization code in
//It also has methods to insert initialization code in
//top block of cfg
//===----------------------------------------------------------------------===//
@ -29,8 +29,8 @@ using std::vector;
namespace llvm {
static void getTriggerCode(Module *M, BasicBlock *BB, int MethNo, Value *pathNo,
Value *cnt, Instruction *rInst){
Value *cnt, Instruction *rInst){
vector<Value *> tmpVec;
tmpVec.push_back(Constant::getNullValue(Type::LongTy));
tmpVec.push_back(Constant::getNullValue(Type::LongTy));
@ -38,7 +38,7 @@ static void getTriggerCode(Module *M, BasicBlock *BB, int MethNo, Value *pathNo,
BB->getInstList().push_back(Idx);
const Type *PIntTy = PointerType::get(Type::IntTy);
Function *trigMeth = M->getOrInsertFunction("trigger", Type::VoidTy,
Function *trigMeth = M->getOrInsertFunction("trigger", Type::VoidTy,
Type::IntTy, Type::IntTy,
PIntTy, PIntTy, 0);
assert(trigMeth && "trigger method could not be inserted!");
@ -58,18 +58,18 @@ static void getTriggerCode(Module *M, BasicBlock *BB, int MethNo, Value *pathNo,
//get the code to be inserted on the edge
//This is determined from cond (1-6)
void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
Function *M, BasicBlock *BB,
void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
Function *M, BasicBlock *BB,
vector<Value *> &retVec){
//Instruction *InsertPos = BB->getInstList().begin();
//now check for cdIn and cdOut
//first put cdOut
if(cdOut!=NULL){
cdOut->getCode(rInst, countInst, M, BB, retVec);
}
if(cdIn!=NULL){
cdIn->getCode(rInst, countInst, M, BB, retVec);
}
@ -93,7 +93,7 @@ void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
#endif
break;
}
//r+=k
case 3:{
Instruction *ldInst = new LoadInst(rInst, "ti1");//, InsertPos);
@ -116,33 +116,33 @@ void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
tmpVec.push_back(ConstantSInt::get(Type::LongTy, inc));
Instruction *Idx = new GetElementPtrInst(countInst, tmpVec, "");//,
//Instruction *Idx = new GetElementPtrInst(countInst,
//Instruction *Idx = new GetElementPtrInst(countInst,
// vector<Value*>(1,ConstantSInt::get(Type::LongTy, inc)),
// "");//, InsertPos);
BB->getInstList().push_back(Idx);
Instruction *ldInst=new LoadInst(Idx, "ti1");//, InsertPos);
BB->getInstList().push_back(ldInst);
Value *val = ConstantSInt::get(Type::IntTy, 1);
//Instruction *addIn =
Instruction *newCount =
BinaryOperator::create(Instruction::Add, ldInst, val,"ti2");
BB->getInstList().push_back(newCount);
#ifdef INSERT_STORE
//Instruction *stInst=new StoreInst(addIn, Idx, InsertPos);
Instruction *stInst=new StoreInst(newCount, Idx);//, InsertPos);
BB->getInstList().push_back(stInst);
#endif
Value *trAddIndex = ConstantSInt::get(Type::IntTy,inc);
retVec.push_back(newCount);
retVec.push_back(trAddIndex);
//insert trigger
//getTriggerCode(M->getParent(), BB, MethNo,
//getTriggerCode(M->getParent(), BB, MethNo,
// ConstantSInt::get(Type::IntTy,inc), newCount, triggerInst);
//end trigger code
@ -152,7 +152,7 @@ void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
//case: count[r+inc]++
case 5:{
//ti1=inc+r
Instruction *ldIndex=new LoadInst(rInst, "ti1");//, InsertPos);
BB->getInstList().push_back(ldIndex);
@ -161,9 +161,9 @@ void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
Instruction *addIndex=BinaryOperator::
create(Instruction::Add, ldIndex, val,"ti2");//, InsertPos);
BB->getInstList().push_back(addIndex);
//now load count[addIndex]
Instruction *castInst=new CastInst(addIndex,
Instruction *castInst=new CastInst(addIndex,
Type::LongTy,"ctin");//, InsertPos);
BB->getInstList().push_back(castInst);
@ -180,10 +180,10 @@ void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
Value *cons=ConstantSInt::get(Type::IntTy,1);
//count[addIndex]++
//std::cerr<<"Type ldInst:"<<ldInst->getType()<<"\t cons:"<<cons->getType()<<"\n";
Instruction *newCount = BinaryOperator::create(Instruction::Add, ldInst,
Instruction *newCount = BinaryOperator::create(Instruction::Add, ldInst,
cons,"");
BB->getInstList().push_back(newCount);
#ifdef INSERT_STORE
Instruction *stInst = new StoreInst(newCount, Idx);//, InsertPos);
BB->getInstList().push_back(stInst);
@ -213,11 +213,11 @@ void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
tmpVec.push_back(castInst2);
Instruction *Idx = new GetElementPtrInst(countInst, tmpVec, "");//,
//Instruction *Idx = new GetElementPtrInst(countInst,
//Instruction *Idx = new GetElementPtrInst(countInst,
// vector<Value*>(1,castInst2), "");
BB->getInstList().push_back(Idx);
Instruction *ldInst=new LoadInst(Idx, "ti2");//, InsertPos);
BB->getInstList().push_back(ldInst);
@ -237,7 +237,7 @@ void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
retVec.push_back(ldIndex);
break;
}
}
}
@ -245,25 +245,25 @@ void getEdgeCode::getCode(Instruction *rInst, Value *countInst,
//Insert the initialization code in the top BB
//this includes initializing r, and count
//r is like an accumulator, that
//r is like an accumulator, that
//keeps on adding increments as we traverse along a path
//and at the end of the path, r contains the path
//number of that path
//Count is an array, where Count[k] represents
//the number of executions of path k
void insertInTopBB(BasicBlock *front,
int k,
void insertInTopBB(BasicBlock *front,
int k,
Instruction *rVar, Value *threshold){
//rVar is variable r,
//rVar is variable r,
//countVar is count[]
Value *Int0 = ConstantInt::get(Type::IntTy, 0);
//now push all instructions in front of the BB
BasicBlock::iterator here=front->begin();
front->getInstList().insert(here, rVar);
//front->getInstList().insert(here,countVar);
//Initialize Count[...] with 0
//for (int i=0;i<k; i++){
@ -281,22 +281,22 @@ void insertInTopBB(BasicBlock *front,
//insert a basic block with appropriate code
//along a given edge
void insertBB(Edge ed,
getEdgeCode *edgeCode,
Instruction *rInst,
Value *countInst,
getEdgeCode *edgeCode,
Instruction *rInst,
Value *countInst,
int numPaths, int Methno, Value *threshold){
BasicBlock* BB1=ed.getFirst()->getElement();
BasicBlock* BB2=ed.getSecond()->getElement();
#ifdef DEBUG_PATH_PROFILES
//debugging info
cerr<<"Edges with codes ######################\n";
cerr<<BB1->getName()<<"->"<<BB2->getName()<<"\n";
cerr<<"########################\n";
#endif
//We need to insert a BB between BB1 and BB2
//We need to insert a BB between BB1 and BB2
TerminatorInst *TI=BB1->getTerminator();
BasicBlock *newBB=new BasicBlock("counter", BB1->getParent());
@ -316,7 +316,7 @@ void insertBB(Edge ed,
else{
if(BI->getSuccessor(0)==BB2)
BI->setSuccessor(0, newBB);
if(BI->getSuccessor(1)==BB2)
BI->setSuccessor(1, newBB);
}
@ -324,21 +324,21 @@ void insertBB(Edge ed,
BasicBlock *triggerBB = NULL;
if(retVec.size()>0){
triggerBB = new BasicBlock("trigger", BB1->getParent());
getTriggerCode(BB1->getParent()->getParent(), triggerBB, Methno,
getTriggerCode(BB1->getParent()->getParent(), triggerBB, Methno,
retVec[1], countInst, rInst);//retVec[0]);
//Instruction *castInst = new CastInst(retVec[0], Type::IntTy, "");
Instruction *etr = new LoadInst(threshold, "threshold");
//std::cerr<<"type1: "<<etr->getType()<<" type2: "<<retVec[0]->getType()<<"\n";
Instruction *cmpInst = new SetCondInst(Instruction::SetLE, etr,
//std::cerr<<"type1: "<<etr->getType()<<" type2: "<<retVec[0]->getType()<<"\n";
Instruction *cmpInst = new SetCondInst(Instruction::SetLE, etr,
retVec[0], "");
Instruction *newBI2 = new BranchInst(triggerBB, BB2, cmpInst);
//newBB->getInstList().push_back(castInst);
newBB->getInstList().push_back(etr);
newBB->getInstList().push_back(cmpInst);
newBB->getInstList().push_back(newBI2);
//triggerBB->getInstList().push_back(triggerInst);
new BranchInst(BB2, 0, 0, triggerBB);
}
@ -347,9 +347,9 @@ void insertBB(Edge ed,
}
//now iterate over BB2, and set its Phi nodes right
for(BasicBlock::iterator BB2Inst = BB2->begin(), BBend = BB2->end();
for(BasicBlock::iterator BB2Inst = BB2->begin(), BBend = BB2->end();
BB2Inst != BBend; ++BB2Inst){
if(PHINode *phiInst=dyn_cast<PHINode>(BB2Inst)){
int bbIndex=phiInst->getBasicBlockIndex(BB1);
assert(bbIndex>=0);

View File

@ -1,10 +1,10 @@
//===-- Graph.cpp - Implements Graph class --------------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This implements Graph for helping in trace generation. This graph gets used by
@ -23,7 +23,7 @@ namespace llvm {
const graphListElement *findNodeInList(const Graph::nodeList &NL,
Node *N) {
for(Graph::nodeList::const_iterator NI = NL.begin(), NE=NL.end(); NI != NE;
for(Graph::nodeList::const_iterator NI = NL.begin(), NE=NL.end(); NI != NE;
++NI)
if (*NI->element== *N)
return &*NI;
@ -38,7 +38,7 @@ graphListElement *findNodeInList(Graph::nodeList &NL, Node *N) {
}
//graph constructor with root and exit specified
Graph::Graph(std::vector<Node*> n, std::vector<Edge> e,
Graph::Graph(std::vector<Node*> n, std::vector<Edge> e,
Node *rt, Node *lt){
strt=rt;
ext=lt;
@ -49,17 +49,17 @@ Graph::Graph(std::vector<Node*> n, std::vector<Edge> e,
for(vector<Edge >::iterator x=e.begin(), en=e.end(); x!=en; ++x){
Edge ee=*x;
int w=ee.getWeight();
//nodes[ee.getFirst()].push_front(graphListElement(ee.getSecond(),w, ee.getRandId()));
//nodes[ee.getFirst()].push_front(graphListElement(ee.getSecond(),w, ee.getRandId()));
nodes[ee.getFirst()].push_back(graphListElement(ee.getSecond(),w, ee.getRandId()));
}
}
//sorting edgelist, called by backEdgeVist ONLY!!!
Graph::nodeList &Graph::sortNodeList(Node *par, nodeList &nl, vector<Edge> &be){
assert(par && "null node pointer");
BasicBlock *bbPar = par->getElement();
if(nl.size()<=1) return nl;
if(getExit() == par) return nl;
@ -79,7 +79,7 @@ Graph::nodeList &Graph::sortNodeList(Node *par, nodeList &nl, vector<Edge> &be){
assert(ti && "not a branch");
assert(ti->getNumSuccessors()==2 && "less successors!");
BasicBlock *tB = ti->getSuccessor(0);
BasicBlock *fB = ti->getSuccessor(1);
//so one of LI or min must be back edge!
@ -109,24 +109,24 @@ Graph::nodeList &Graph::sortNodeList(Node *par, nodeList &nl, vector<Edge> &be){
}
}
}
else if (min->element->getElement() != LI->element->getElement()){
TerminatorInst *tti = par->getElement()->getTerminator();
BranchInst *ti = cast<BranchInst>(tti);
assert(ti && "not a branch");
if(ti->getNumSuccessors()<=1) continue;
assert(ti->getNumSuccessors()==2 && "less successors!");
BasicBlock *tB = ti->getSuccessor(0);
BasicBlock *fB = ti->getSuccessor(1);
if(tB == LI->element->getElement() || fB == min->element->getElement())
min = LI;
}
}
graphListElement tmpElmnt = *min;
*min = *NLI;
*NLI = tmpElmnt;
@ -159,11 +159,11 @@ bool Graph::hasEdgeAndWt(Edge ed){
Node *nd2=ed.getSecond();
nodeList &nli = nodes[ed.getFirst()];//getNodeList(ed.getFirst());
for(nodeList::iterator NI=nli.begin(), NE=nli.end(); NI!=NE; ++NI)
if(*NI->element == *nd2 && ed.getWeight()==NI->weight)
return true;
return false;
}
@ -180,9 +180,9 @@ void Graph::addNode(Node *nd){
}
//add an edge
//this adds an edge ONLY when
//this adds an edge ONLY when
//the edge to be added does not already exist
//we "equate" two edges here only with their
//we "equate" two edges here only with their
//end points
void Graph::addEdge(Edge ed, int w){
nodeList &ndList = nodes[ed.getFirst()];
@ -190,7 +190,7 @@ void Graph::addEdge(Edge ed, int w){
if(findNodeInList(nodes[ed.getFirst()], nd2))
return;
//ndList.push_front(graphListElement(nd2,w, ed.getRandId()));
ndList.push_back(graphListElement(nd2,w, ed.getRandId()));//chng
//sortNodeList(ed.getFirst(), ndList);
@ -296,7 +296,7 @@ int Graph::getNumberOfIncomingEdges(Node *nd){
for(nodeMapTy::const_iterator EI=nodes.begin(), EE=nodes.end(); EI!=EE ;++EI){
Node *lnode=EI->first;
const nodeList &nl = getNodeList(lnode);
for(Graph::nodeList::const_iterator NI = nl.begin(), NE=nl.end(); NI != NE;
for(Graph::nodeList::const_iterator NI = nl.begin(), NE=nl.end(); NI != NE;
++NI)
if (*NI->element== *nd)
count++;
@ -340,19 +340,19 @@ static void printNode(Node *nd){
//of the graph
Graph* Graph::getMaxSpanningTree(){
//assume connected graph
Graph *st=new Graph();//max spanning tree, undirected edges
int inf=9999999;//largest key
vector<Node *> lt = getAllNodes();
//initially put all vertices in vector vt
//assign wt(root)=0
//wt(others)=infinity
//
//now:
//pull out u: a vertex from vt of min wt
//for all vertices w in vt,
//if wt(w) greater than
//for all vertices w in vt,
//if wt(w) greater than
//the wt(u->w), then assign
//wt(w) to be wt(u->w).
//
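The loop sketched above is essentially Prim's algorithm; the real code negates the edge weights and runs the min-spanning variant on the Graph/Node classes. As a standalone illustration only, the max-spanning variant on a small hard-coded undirected graph looks like this:

#include <cstdio>
#include <vector>

int main() {
  const int INF = 9999999;                 // "largest key", as above
  const int N = 5;
  // adjacency matrix of an undirected weighted graph; 0 means no edge
  int w[N][N] = {{0,4,0,6,0},
                 {4,0,3,0,0},
                 {0,3,0,2,7},
                 {6,0,2,0,5},
                 {0,0,7,5,0}};
  std::vector<int>  key(N, -INF);          // maximizing, so start at -infinity
  std::vector<int>  parent(N, -1);
  std::vector<bool> inTree(N, false);
  key[0] = 0;                              // wt(root) = 0

  for (int iter = 0; iter != N; ++iter) {
    int u = -1;                            // pull out the best vertex not yet in the tree
    for (int v = 0; v != N; ++v)
      if (!inTree[v] && (u == -1 || key[v] > key[u]))
        u = v;
    inTree[u] = true;
    for (int v = 0; v != N; ++v)           // update keys of neighbours still outside
      if (w[u][v] && !inTree[v] && w[u][v] > key[v]) {
        key[v] = w[u][v];
        parent[v] = u;
      }
  }
  for (int v = 1; v != N; ++v)
    std::printf("tree edge %d - %d (weight %d)\n", parent[v], v, w[parent[v]][v]);
  return 0;
}

On this five-node example the loop keeps the edges 0-1, 0-3, 3-4 and 2-4, i.e. the maximum spanning tree of total weight 22.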
@ -360,7 +360,7 @@ Graph* Graph::getMaxSpanningTree(){
//keep pulling out vertices from vt till it is empty
vector<Node *> vt;
std::map<Node*, Node* > parent;
std::map<Node*, int > ed_weight;
@ -373,7 +373,7 @@ Graph* Graph::getMaxSpanningTree(){
parent[thisNode]=NULL;
ed_weight[thisNode]=0;
}
else{
else{
thisNode->setWeight(inf);
}
st->addNode(thisNode);//add all nodes to spanning tree
@ -396,7 +396,7 @@ Graph* Graph::getMaxSpanningTree(){
}
//vt.erase(u);
//remove u from vt
for(vector<Node *>::iterator VI=vt.begin(), VE=vt.end(); VI!=VE; ++VI){
if(**VI==*u){
@ -404,7 +404,7 @@ Graph* Graph::getMaxSpanningTree(){
break;
}
}
//assign wt(v) to all adjacent vertices v of u
//only if v is in vt
Graph::nodeList &nl = getNodeList(u);
@ -438,7 +438,7 @@ Graph* Graph::getMaxSpanningTree(){
return st;
}
//print the graph (for debugging)
//print the graph (for debugging)
void Graph::printGraph(){
vector<Node *> lt=getAllNodes();
std::cerr<<"Graph---------------------\n";
@ -469,7 +469,7 @@ vector<Node *> Graph::reverseTopologicalSort(){
}
//a private method for doing DFS traversal of graph
//this is used in determining the reverse topological sort
//this is used in determining the reverse topological sort
//of the graph
void Graph::DFS_Visit(Node *nd, vector<Node *> &toReturn){
nd->setWeight(GREY);
@ -482,13 +482,13 @@ void Graph::DFS_Visit(Node *nd, vector<Node *> &toReturn){
}
//Ordinarily, the graph is directional
//this converts the graph into an
//this converts the graph into an
//undirectional graph
//This is done by adding an edge
//v->u for all existing edges u->v
void Graph::makeUnDirectional(){
vector<Node* > allNodes=getAllNodes();
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
++NI) {
nodeList &nl = getNodeList(*NI);
for(nodeList::iterator NLI=nl.begin(), NLE=nl.end(); NLI!=NLE; ++NLI){
@ -507,10 +507,10 @@ void Graph::makeUnDirectional(){
//using min-spanning tree, and vice versa
void Graph::reverseWts(){
vector<Node *> allNodes=getAllNodes();
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
++NI) {
nodeList &node_list = getNodeList(*NI);
for(nodeList::iterator NLI=nodes[*NI].begin(), NLE=nodes[*NI].end();
for(nodeList::iterator NLI=nodes[*NI].begin(), NLE=nodes[*NI].end();
NLI!=NLE; ++NLI)
NLI->weight=-NLI->weight;
}
@ -535,7 +535,7 @@ void Graph::getBackEdges(vector<Edge > &be, std::map<Node *, int> &d){
getBackEdgesVisit(getRoot(), be, color, d, time);
}
//helper function to get back edges: it is called by
//helper function to get back edges: it is called by
//the "getBackEdges" function above
void Graph::getBackEdgesVisit(Node *u, vector<Edge > &be,
std::map<Node *, Color > &color,
@ -545,14 +545,14 @@ void Graph::getBackEdgesVisit(Node *u, vector<Edge > &be,
d[u]=time;
vector<graphListElement> &succ_list = getNodeList(u);
for(vector<graphListElement>::iterator vl=succ_list.begin(),
for(vector<graphListElement>::iterator vl=succ_list.begin(),
ve=succ_list.end(); vl!=ve; ++vl){
Node *v=vl->element;
if(color[v]!=GREY && color[v]!=BLACK){
getBackEdgesVisit(v, be, color, d, time);
}
//now checking for d and f vals
if(color[v]==GREY){
//so v is ancestor of u if time of u > time of v

View File

@ -1,10 +1,10 @@
//===-- Graph.h -------------------------------------------------*- C++ -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Header file for Graph: This Graph is used by PathProfiles class, and is used
@ -58,13 +58,13 @@ public:
randId=rand();
isnull=false;
}
inline Edge(Node *f,Node *s, int wt, double rd){
first=f;
second=s;
weight=wt;
randId=rd;
isnull=false;
isnull=false;
}
inline Edge() { isnull = true; }
@ -73,22 +73,22 @@ public:
inline Node* const getFirst() const { assert(!isNull()); return first; }
inline Node* getSecond() { assert(!isNull()); return second; }
inline Node* const getSecond() const { assert(!isNull()); return second; }
inline int getWeight() { assert(!isNull()); return weight; }
inline void setWeight(int n) { assert(!isNull()); weight=n; }
inline void setFirst(Node *&f) { assert(!isNull()); first=f; }
inline void setSecond(Node *&s) { assert(!isNull()); second=s; }
inline bool isNull() const { return isnull;}
inline bool isNull() const { return isnull;}
inline bool operator<(const Edge& ed) const{
// Can't be the same if one is null and the other isn't
if (isNull() != ed.isNull())
return true;
return (*first<*(ed.getFirst()))||
return (*first<*(ed.getFirst()))||
(*first==*(ed.getFirst()) && *second<*(ed.getSecond()));
}
@ -96,19 +96,19 @@ public:
return !(*this<ed) && !(ed<*this);
}
inline bool operator!=(const Edge& ed) const{return !(*this==ed);}
inline bool operator!=(const Edge& ed) const{return !(*this==ed);}
};
//graphListElement
//This forms the "adjacency list element" of a
//This forms the "adjacency list element" of a
//vertex adjacency list in graph
struct graphListElement{
Node *element;
int weight;
double randId;
inline graphListElement(Node *n, int w, double rand){
element=n;
inline graphListElement(Node *n, int w, double rand){
element=n;
weight=w;
randId=rand;
}
@ -127,12 +127,12 @@ using namespace llvm;
return n1->getElement() < n2->getElement();
}
};
template<>
struct less<Edge> : public binary_function<Edge,Edge,bool> {
bool operator()(Edge e1, Edge e2) const {
assert(!e1.isNull() && !e2.isNull());
Node *x1=e1.getFirst();
Node *x2=e1.getSecond();
Node *y1=e2.getFirst();
@ -210,7 +210,7 @@ public:
private:
//the adjacency list of a vertex or node
nodeMapTy nodes;
//the start or root node
Node *strt;
@ -218,7 +218,7 @@ private:
Node *ext;
//a private method for doing DFS traversal of graph
//this is used in determining the reverse topological sort
//this is used in determining the reverse topological sort
//of the graph
void DFS_Visit(Node *nd, std::vector<Node *> &toReturn);
@ -232,10 +232,10 @@ private:
//have been visited
//So we have a back edge when we meet a successor of
//a node with smaller time, and GREY color
void getBackEdgesVisit(Node *u,
void getBackEdgesVisit(Node *u,
std::vector<Edge > &be,
std::map<Node *, Color> &clr,
std::map<Node *, int> &d,
std::map<Node *, int> &d,
int &time);
public:
@ -248,18 +248,18 @@ public:
//empty constructor: then add edges and nodes later on
Graph() {}
//constructor with root and exit node specified
Graph(std::vector<Node*> n,
Graph(std::vector<Node*> n,
std::vector<Edge> e, Node *rt, Node *lt);
//add a node
void addNode(Node *nd);
//add an edge
//this adds an edge ONLY when
//this adds an edge ONLY when
//the edge to be added does not already exist
//we "equate" two edges here only with their
//we "equate" two edges here only with their
//end points
void addEdge(Edge ed, int w);
@ -310,14 +310,14 @@ public:
//in r-topological sorted order
//note that we assumed graph to be connected
std::vector<Node *> reverseTopologicalSort();
//reverse the sign of weights on edges
//this way, max-spanning tree could be obtained
//using min-spanning tree, and vice versa
void reverseWts();
//Ordinarily, the graph is directional
//this converts the graph into an
//this converts the graph into an
//undirectional graph
//This is done by adding an edge
//v->u for all existing edges u->v
@ -325,31 +325,31 @@ public:
//print graph: for debugging
void printGraph();
//get a vector of back edges in the graph
void getBackEdges(std::vector<Edge> &be, std::map<Node *, int> &d);
nodeList &sortNodeList(Node *par, nodeList &nl, std::vector<Edge> &be);
//Get the Maximal spanning tree (also a graph)
//of the graph
Graph* getMaxSpanningTree();
//get the nodeList adjacent to a node
//a nodeList element contains a node, and the weight
//a nodeList element contains a node, and the weight
//corresponding to the edge for that element
inline nodeList &getNodeList(Node *nd) {
elementIterator nli = nodes.find(nd);
assert(nli != nodes.end() && "Node must be in nodes map");
return nodes[nd];//sortNodeList(nd, nli->second);
}
nodeList &getSortedNodeList(Node *nd, std::vector<Edge> &be) {
elementIterator nli = nodes.find(nd);
assert(nli != nodes.end() && "Node must be in nodes map");
return sortNodeList(nd, nodes[nd], be);
}
//get the root of the graph
inline Node *getRoot() {return strt; }
inline Node * const getRoot() const {return strt; }
@ -365,7 +365,7 @@ public:
inline bool isLeaf(Node *n) const {return (*n==*ext); }
};
//This class is used to generate
//This class is used to generate
//"appropriate" code to be inserted
//along an edge
//The code to be inserted can be of six different types
@ -378,13 +378,13 @@ public:
//6: Count[r]++
class getEdgeCode{
private:
//cond implies which
//cond implies which
//"kind" of code is to be inserted
//(from 1-6 above)
int cond;
//inc is the increment: eg k, or 0
int inc;
//A backedge must carry the code
//of both incoming "dummy" edge
//and outgoing "dummy" edge
@ -420,23 +420,23 @@ public:
//set CdIn (only used for backedges)
inline void setCdIn(getEdgeCode *gd){ cdIn=gd;}
//set CdOut (only used for backedges)
inline void setCdOut(getEdgeCode *gd){ cdOut=gd;}
//get the code to be inserted on the edge
//This is determined from cond (1-6)
void getCode(Instruction *a, Value *b, Function *M, BasicBlock *BB,
void getCode(Instruction *a, Value *b, Function *M, BasicBlock *BB,
std::vector<Value *> &retVec);
};
//auxiliary functions on graph
//print a given edge in the form BB1Label->BB2Label
//print a given edge in the form BB1Label->BB2Label
void printEdge(Edge ed);
//Do graph processing: to determine minimal edge increments,
//Do graph processing: to determine minimal edge increments,
//appropriate code insertions etc and insert the code at
//appropriate locations
void processGraph(Graph &g, Instruction *rInst, Value *countInst, std::vector<Edge> &be, std::vector<Edge> &stDummy, std::vector<Edge> &exDummy, int n, int MethNo, Value *threshold);
@ -452,7 +452,7 @@ void insertBB(Edge ed, getEdgeCode *edgeCode, Instruction *rInst, Value *countIn
//Insert the initialization code in the top BB
//this includes initializing r, and count
//r is like an accumulator, that
//r is like an accumulator, that
//keeps on adding increments as we traverse along a path
//and at the end of the path, r contains the path
//number of that path
@ -470,7 +470,7 @@ void addDummyEdges(std::vector<Edge> &stDummy, std::vector<Edge> &exDummy, Graph
//such that if we traverse along any path from root to exit, and
//add up the edge values, we get a path number that uniquely
//refers to the path we travelled
int valueAssignmentToEdges(Graph& g, std::map<Node *, int> nodePriority,
int valueAssignmentToEdges(Graph& g, std::map<Node *, int> nodePriority,
std::vector<Edge> &be);
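valueAssignmentToEdges follows the numbering scheme of the Ball/Larus path-profiling paper: visit nodes in reverse topological order, give each leaf one path, and label each outgoing edge with the number of paths already accounted for at its source, so that the edge values along any root-to-exit path sum to a unique number. Below is a standalone toy version (hard-coded four-node DAG and ordering, ignoring the priority and back-edge tie-breaking done by the real implementation):

#include <cstdio>
#include <vector>

int main() {
  // Toy DAG: 0 is the root, 3 is the exit; edges 0->1, 0->2, 1->3, 1->2, 2->3
  // give three root-to-exit paths.
  const int N = 4;
  std::vector<std::vector<int>> succ = {{1, 2}, {3, 2}, {3}, {}};
  int revTopo[N] = {3, 2, 1, 0};          // a reverse topological order, hard-coded

  long numPaths[N] = {0};
  std::vector<std::vector<long>> val(N);  // val[v][i]: value of the i-th edge out of v
  for (int k = 0; k != N; ++k) {
    int v = revTopo[k];
    if (succ[v].empty()) { numPaths[v] = 1; continue; }
    for (int w : succ[v]) {
      val[v].push_back(numPaths[v]);      // edge value = paths counted so far at v
      numPaths[v] += numPaths[w];
    }
  }
  std::printf("paths from root: %ld\n", numPaths[0]);
  for (int v = 0; v != N; ++v)
    for (unsigned i = 0; i != succ[v].size(); ++i)
      std::printf("edge %d->%d gets value %ld\n", v, succ[v][i], val[v][i]);
  return 0;
}

Summing the values along 0->1->3, 0->1->2->3 and 0->2->3 gives 0, 1 and 2, one number per acyclic path.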
void getBBtrace(std::vector<BasicBlock *> &vBB, int pathNo, Function *M);

View File

@ -1,10 +1,10 @@
//===- GraphAuxiliary.cpp - Auxiliary functions on graph ------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// auxiliary functions associated with the graph: they all operate on the graph, and help
@ -36,10 +36,10 @@ static void getChords(vector<Edge > &chords, Graph &g, Graph st){
//make sure the spanning tree is directional
//iterate over ALL the edges of the graph
vector<Node *> allNodes=g.getAllNodes();
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
++NI){
Graph::nodeList node_list=g.getNodeList(*NI);
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
NLI!=NLE; ++NLI){
Edge f(*NI, NLI->element,NLI->weight, NLI->randId);
if(!(st.hasEdgeAndWt(f)))//addnl
@ -51,13 +51,13 @@ static void getChords(vector<Edge > &chords, Graph &g, Graph st){
//Given a tree t, and a "directed graph" g
//replace the edges in the tree t with edges that exist in graph
//The tree is formed from "undirectional" copy of graph
//So whatever edges the tree has, the undirectional graph
//would have too. This function corrects some of the directions in
//So whatever edges the tree has, the undirectional graph
//would have too. This function corrects some of the directions in
//the tree so that now, all edge directions in the tree match
//the edge directions of corresponding edges in the directed graph
static void removeTreeEdges(Graph &g, Graph& t){
vector<Node* > allNodes=t.getAllNodes();
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
++NI){
Graph::nodeList nl=t.getNodeList(*NI);
for(Graph::nodeList::iterator NLI=nl.begin(), NLE=nl.end(); NLI!=NLE;++NLI){
@ -72,11 +72,11 @@ static void removeTreeEdges(Graph &g, Graph& t){
//such that if we traverse along any path from root to exit, and
//add up the edge values, we get a path number that uniquely
//refers to the path we travelled
int valueAssignmentToEdges(Graph& g, map<Node *, int> nodePriority,
int valueAssignmentToEdges(Graph& g, map<Node *, int> nodePriority,
vector<Edge> &be){
vector<Node *> revtop=g.reverseTopologicalSort();
map<Node *,int > NumPaths;
for(vector<Node *>::iterator RI=revtop.begin(), RE=revtop.end();
for(vector<Node *>::iterator RI=revtop.begin(), RE=revtop.end();
RI!=RE; ++RI){
if(g.isLeaf(*RI))
NumPaths[*RI]=1;
@ -87,47 +87,47 @@ int valueAssignmentToEdges(Graph& g, map<Node *, int> nodePriority,
Graph::nodeList &nlist=g.getSortedNodeList(*RI, be);
//sort nodelist by increasing order of numpaths
int sz=nlist.size();
for(int i=0;i<sz-1; i++){
int min=i;
for(int j=i+1; j<sz; j++){
BasicBlock *bb1 = nlist[j].element->getElement();
BasicBlock *bb2 = nlist[min].element->getElement();
if(bb1 == bb2) continue;
if(*RI == g.getRoot()){
assert(nodePriority[nlist[min].element]!=
nodePriority[nlist[j].element]
assert(nodePriority[nlist[min].element]!=
nodePriority[nlist[j].element]
&& "priorities can't be same!");
if(nodePriority[nlist[j].element] <
if(nodePriority[nlist[j].element] <
nodePriority[nlist[min].element])
min = j;
min = j;
}
else{
TerminatorInst *tti = (*RI)->getElement()->getTerminator();
BranchInst *ti = cast<BranchInst>(tti);
assert(ti && "not a branch");
assert(ti->getNumSuccessors()==2 && "less successors!");
BasicBlock *tB = ti->getSuccessor(0);
BasicBlock *fB = ti->getSuccessor(1);
if(tB == bb1 || fB == bb2)
min = j;
}
}
graphListElement tempEl=nlist[min];
nlist[min]=nlist[i];
nlist[i]=tempEl;
}
//sorted now!
for(Graph::nodeList::iterator GLI=nlist.begin(), GLE=nlist.end();
GLI!=GLE; ++GLI){
@ -148,35 +148,35 @@ int valueAssignmentToEdges(Graph& g, map<Node *, int> nodePriority,
//refers to the path we travelled
//inc_Dir tells whether 2 edges are in same, or in different directions
//if same direction, return 1, else -1
static int inc_Dir(Edge e, Edge f){
if(e.isNull())
static int inc_Dir(Edge e, Edge f){
if(e.isNull())
return 1;
//check that the edges must have at least one common endpoint
assert(*(e.getFirst())==*(f.getFirst()) ||
*(e.getFirst())==*(f.getSecond()) ||
*(e.getFirst())==*(f.getSecond()) ||
*(e.getSecond())==*(f.getFirst()) ||
*(e.getSecond())==*(f.getSecond()));
if(*(e.getFirst())==*(f.getSecond()) ||
if(*(e.getFirst())==*(f.getSecond()) ||
*(e.getSecond())==*(f.getFirst()))
return 1;
return -1;
}
//used for getting edge increments (read comments above in inc_Dir)
//inc_DFS is a modification of DFS
static void inc_DFS(Graph& g,Graph& t,map<Edge, int, EdgeCompare2>& Increment,
//inc_DFS is a modification of DFS
static void inc_DFS(Graph& g,Graph& t,map<Edge, int, EdgeCompare2>& Increment,
int events, Node *v, Edge e){
vector<Node *> allNodes=t.getAllNodes();
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
++NI){
Graph::nodeList node_list=t.getNodeList(*NI);
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
NLI!= NLE; ++NLI){
Edge f(*NI, NLI->element,NLI->weight, NLI->randId);
if(!edgesEqual(f,e) && *v==*(f.getSecond())){
@ -187,29 +187,29 @@ static void inc_DFS(Graph& g,Graph& t,map<Edge, int, EdgeCompare2>& Increment,
}
}
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
++NI){
Graph::nodeList node_list=t.getNodeList(*NI);
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
NLI!=NLE; ++NLI){
Edge f(*NI, NLI->element,NLI->weight, NLI->randId);
if(!edgesEqual(f,e) && *v==*(f.getFirst())){
int dir_count=inc_Dir(e,f);
int wt=f.getWeight();
inc_DFS(g,t, Increment, dir_count*events+wt,
inc_DFS(g,t, Increment, dir_count*events+wt,
f.getSecond(), f);
}
}
}
allNodes=g.getAllNodes();
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
++NI){
Graph::nodeList node_list=g.getNodeList(*NI);
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
NLI!=NLE; ++NLI){
Edge f(*NI, NLI->element,NLI->weight, NLI->randId);
if(!(t.hasEdgeAndWt(f)) && (*v==*(f.getSecond()) ||
if(!(t.hasEdgeAndWt(f)) && (*v==*(f.getSecond()) ||
*v==*(f.getFirst()))){
int dir_count=inc_Dir(e,f);
Increment[f]+=dir_count*events;
@ -219,21 +219,21 @@ static void inc_DFS(Graph& g,Graph& t,map<Edge, int, EdgeCompare2>& Increment,
}
//Now we select a subset of all edges
//and assign them some values such that
//and assign them some values such that
//if we consider just this subset, it still represents
//the path sum along any path in the graph
static map<Edge, int, EdgeCompare2> getEdgeIncrements(Graph& g, Graph& t,
static map<Edge, int, EdgeCompare2> getEdgeIncrements(Graph& g, Graph& t,
vector<Edge> &be){
//get all edges in g-t
map<Edge, int, EdgeCompare2> Increment;
vector<Node *> allNodes=g.getAllNodes();
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
++NI){
Graph::nodeList node_list=g.getSortedNodeList(*NI, be);
//modified g.getNodeList(*NI);
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
NLI!=NLE; ++NLI){
Edge ed(*NI, NLI->element,NLI->weight,NLI->randId);
if(!(t.hasEdgeAndWt(ed))){
@ -245,11 +245,11 @@ static map<Edge, int, EdgeCompare2> getEdgeIncrements(Graph& g, Graph& t,
Edge *ed=new Edge();
inc_DFS(g,t,Increment, 0, g.getRoot(), *ed);
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
for(vector<Node *>::iterator NI=allNodes.begin(), NE=allNodes.end(); NI!=NE;
++NI){
Graph::nodeList node_list=g.getSortedNodeList(*NI, be);
//modified g.getNodeList(*NI);
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
for(Graph::nodeList::iterator NLI=node_list.begin(), NLE=node_list.end();
NLI!=NLE; ++NLI){
Edge ed(*NI, NLI->element,NLI->weight, NLI->randId);
if(!(t.hasEdgeAndWt(ed))){
@ -274,7 +274,7 @@ graphListElement *findNodeInList(Graph::nodeList &NL, Node *N);
//The idea here is to minimize the computation
//by inserting only the needed code
static void getCodeInsertions(Graph &g, map<Edge, getEdgeCode *, EdgeCompare2> &instr,
vector<Edge > &chords,
vector<Edge > &chords,
map<Edge,int, EdgeCompare2> &edIncrements){
//Register initialization code
@ -285,7 +285,7 @@ static void getCodeInsertions(Graph &g, map<Edge, getEdgeCode *, EdgeCompare2> &
ws.pop_back();
//for each edge v->w
Graph::nodeList succs=g.getNodeList(v);
for(Graph::nodeList::iterator nl=succs.begin(), ne=succs.end();
nl!=ne; ++nl){
int edgeWt=nl->weight;
@ -320,7 +320,7 @@ static void getCodeInsertions(Graph &g, map<Edge, getEdgeCode *, EdgeCompare2> &
/////Memory increment code
ws.push_back(g.getExit());
while(!ws.empty()) {
Node *w=ws.back();
ws.pop_back();
@ -333,11 +333,11 @@ static void getCodeInsertions(Graph &g, map<Edge, getEdgeCode *, EdgeCompare2> &
Node *lnode=*EII;
Graph::nodeList &nl = g.getNodeList(lnode);
//graphListElement *N = findNodeInList(nl, w);
for(Graph::nodeList::const_iterator N = nl.begin(),
for(Graph::nodeList::const_iterator N = nl.begin(),
NNEN = nl.end(); N!= NNEN; ++N){
if (*N->element == *w){
Node *v=lnode;
//if chords has v->w
Edge ed(v,w, N->weight, N->randId);
getEdgeCode *edCd=new getEdgeCode();
@ -359,7 +359,7 @@ static void getCodeInsertions(Graph &g, map<Edge, getEdgeCode *, EdgeCompare2> &
edCd->setInc(edIncrements[ed]);
instr[ed]=edCd;
}
}
else if(g.getNumberOfOutgoingEdges(v)==1)
ws.push_back(v);
@ -387,8 +387,8 @@ static void getCodeInsertions(Graph &g, map<Edge, getEdgeCode *, EdgeCompare2> &
//then incoming dummy edge is root->b
//and outgoing dummy edge is a->exit
//changed
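In other words, each back edge a->b contributes, for numbering purposes, a dummy entry edge root->b and a dummy exit edge a->exit. A minimal standalone sketch of that bookkeeping (blocks are plain ints, fixed root and exit ids, no Graph or Edge classes):

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Blocks are just ints here; 0 is the root and 9 is the exit.
  const int root = 0, exit_ = 9;
  std::vector<std::pair<int,int>> backEdges = {{7, 2}, {5, 5}};

  std::vector<std::pair<int,int>> stDummy, exDummy;   // dummy entry / exit edges
  for (const auto &be : backEdges) {
    stDummy.push_back({root, be.second});   // incoming dummy edge root->b
    exDummy.push_back({be.first, exit_});   // outgoing dummy edge a->exit
  }

  for (const auto &e : stDummy) std::printf("dummy entry edge %d->%d\n", e.first, e.second);
  for (const auto &e : exDummy) std::printf("dummy exit edge %d->%d\n", e.first, e.second);
  return 0;
}

The stDummy/exDummy vectors here play the same role as the parameters of the real routine.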
void addDummyEdges(vector<Edge > &stDummy,
vector<Edge > &exDummy,
void addDummyEdges(vector<Edge > &stDummy,
vector<Edge > &exDummy,
Graph &g, vector<Edge> &be){
for(vector<Edge >::iterator VI=be.begin(), VE=be.end(); VI!=VE; ++VI){
Edge ed=*VI;
@ -420,17 +420,17 @@ void printEdge(Edge ed){
//Move the incoming dummy edge code and outgoing dummy
//edge code over to the corresponding back edge
static void moveDummyCode(vector<Edge> &stDummy,
vector<Edge> &exDummy,
vector<Edge> &be,
map<Edge, getEdgeCode *, EdgeCompare2> &insertions,
static void moveDummyCode(vector<Edge> &stDummy,
vector<Edge> &exDummy,
vector<Edge> &be,
map<Edge, getEdgeCode *, EdgeCompare2> &insertions,
Graph &g){
typedef vector<Edge >::iterator vec_iter;
map<Edge,getEdgeCode *, EdgeCompare2> temp;
//iterate over edges with code
std::vector<Edge> toErase;
for(map<Edge,getEdgeCode *, EdgeCompare2>::iterator MI=insertions.begin(),
for(map<Edge,getEdgeCode *, EdgeCompare2>::iterator MI=insertions.begin(),
ME=insertions.end(); MI!=ME; ++MI){
Edge ed=MI->first;
getEdgeCode *edCd=MI->second;
@ -462,18 +462,18 @@ static void moveDummyCode(vector<Edge> &stDummy,
}
}
}
for(vector<Edge >::iterator vmi=toErase.begin(), vme=toErase.end(); vmi!=vme;
for(vector<Edge >::iterator vmi=toErase.begin(), vme=toErase.end(); vmi!=vme;
++vmi){
insertions.erase(*vmi);
g.removeEdgeWithWt(*vmi);
}
for(map<Edge,getEdgeCode *, EdgeCompare2>::iterator MI=temp.begin(),
for(map<Edge,getEdgeCode *, EdgeCompare2>::iterator MI=temp.begin(),
ME=temp.end(); MI!=ME; ++MI){
insertions[MI->first]=MI->second;
}
#ifdef DEBUG_PATH_PROFILES
cerr<<"size of deletions: "<<toErase.size()<<"\n";
cerr<<"SIZE OF INSERTIONS AFTER DEL "<<insertions.size()<<"\n";
@ -481,16 +481,16 @@ static void moveDummyCode(vector<Edge> &stDummy,
}
//Do graph processing: to determine minimal edge increments,
//Do graph processing: to determine minimal edge increments,
//appropriate code insertions etc and insert the code at
//appropriate locations
void processGraph(Graph &g,
Instruction *rInst,
Value *countInst,
vector<Edge >& be,
vector<Edge >& stDummy,
vector<Edge >& exDummy,
int numPaths, int MethNo,
void processGraph(Graph &g,
Instruction *rInst,
Value *countInst,
vector<Edge >& be,
vector<Edge >& stDummy,
vector<Edge >& exDummy,
int numPaths, int MethNo,
Value *threshold){
//Given a graph: with exit->root edge, do the following in seq:
@ -505,11 +505,11 @@ void processGraph(Graph &g,
//5. Get edge increments
//6. Get code insertions
//7. move code on dummy edges over to the back edges
//This is used as maximum "weight" for
//This is used as maximum "weight" for
//priority queue
//This would hold all
//This would hold all
//right as long as number of paths in the graph
//is less than this
const int Infinity=99999999;
@ -524,7 +524,7 @@ void processGraph(Graph &g,
//if its there earlier, remove it!
//assign it weight Infinity
//so that this edge IS ALWAYS IN spanning tree
//Note that edges in spanning tree do not get
//Note that edges in spanning tree do not get
//instrumented: and we do not want the
//edge exit->root to get instrumented
//as it MAY BE a dummy edge
@ -544,13 +544,13 @@ void processGraph(Graph &g,
#endif
//now edges of tree t have weights reversed
//(negative) because the algorithm used
//to find max spanning tree is
//to find max spanning tree is
//actually for finding min spanning tree
//so get back the original weights
t->reverseWts();
//Ordinarily, the graph is directional
//let's convert the graph into an
//let's convert the graph into an
//undirectional graph
//This is done by adding an edge
//v->u for all existing edges u->v
@ -559,8 +559,8 @@ void processGraph(Graph &g,
//Given a tree t, and a "directed graph" g
//replace the edges in the tree t with edges that exist in graph
//The tree is formed from "undirectional" copy of graph
//So whatever edges the tree has, the undirectional graph
//would have too. This function corrects some of the directions in
//So whatever edges the tree has, the undirectional graph
//would have too. This function corrects some of the directions in
//the tree so that now, all edge directions in the tree match
//the edge directions of corresponding edges in the directed graph
removeTreeEdges(g, *t);
@ -588,7 +588,7 @@ void processGraph(Graph &g,
//step 5: Get edge increments
//Now we select a subset of all edges
//and assign them some values such that
//and assign them some values such that
//if we consider just this subset, it still represents
//the path sum along any path in the graph
@ -603,9 +603,9 @@ void processGraph(Graph &g,
std::cerr<<"-------end of edge increments\n";
#endif
//step 6: Get code insertions
//Based on edgeIncrements (above), now obtain
//the kind of code to be inserted along an edge
//The idea here is to minimize the computation
@ -616,11 +616,11 @@ void processGraph(Graph &g,
map<Edge, getEdgeCode *, EdgeCompare2> codeInsertions;
getCodeInsertions(g, codeInsertions, chords,increment);
#ifdef DEBUG_PATH_PROFILES
//print edges with code for debugging
cerr<<"Code inserted in following---------------\n";
for(map<Edge, getEdgeCode *, EdgeCompare2>::iterator cd_i=codeInsertions.begin(),
for(map<Edge, getEdgeCode *, EdgeCompare2>::iterator cd_i=codeInsertions.begin(),
cd_e=codeInsertions.end(); cd_i!=cd_e; ++cd_i){
printEdge(cd_i->first);
cerr<<cd_i->second->getCond()<<":"<<cd_i->second->getInc()<<"\n";
@ -634,11 +634,11 @@ void processGraph(Graph &g,
//edge code over to the corresponding back edge
moveDummyCode(stDummy, exDummy, be, codeInsertions, g);
#ifdef DEBUG_PATH_PROFILES
//debugging info
cerr<<"After moving dummy code\n";
for(map<Edge, getEdgeCode *,EdgeCompare2>::iterator cd_i=codeInsertions.begin(),
for(map<Edge, getEdgeCode *,EdgeCompare2>::iterator cd_i=codeInsertions.begin(),
cd_e=codeInsertions.end(); cd_i != cd_e; ++cd_i){
printEdge(cd_i->first);
cerr<<cd_i->second->getCond()<<":"
@ -650,22 +650,22 @@ void processGraph(Graph &g,
//see what it looks like...
//now insert code along edges which have codes on them
for(map<Edge, getEdgeCode *,EdgeCompare2>::iterator MI=codeInsertions.begin(),
for(map<Edge, getEdgeCode *,EdgeCompare2>::iterator MI=codeInsertions.begin(),
ME=codeInsertions.end(); MI!=ME; ++MI){
Edge ed=MI->first;
insertBB(ed, MI->second, rInst, countInst, numPaths, MethNo, threshold);
}
}
}
//print the graph (for debugging)
void printGraph(Graph &g){
vector<Node *> lt=g.getAllNodes();
cerr<<"Graph---------------------\n";
for(vector<Node *>::iterator LI=lt.begin();
for(vector<Node *>::iterator LI=lt.begin();
LI!=lt.end(); ++LI){
cerr<<((*LI)->getElement())->getName()<<"->";
Graph::nodeList nl=g.getNodeList(*LI);
for(Graph::nodeList::iterator NI=nl.begin();
for(Graph::nodeList::iterator NI=nl.begin();
NI!=nl.end(); ++NI){
cerr<<":"<<"("<<(NI->element->getElement())
->getName()<<":"<<NI->element->getWeight()<<","<<NI->weight<<","

View File

@ -1,10 +1,10 @@
//===-- InstLoops.cpp -----------------------------------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This is the first-level instrumentation pass for the Reoptimizer. It
@ -46,7 +46,7 @@ namespace {
DominatorSet *DS;
void getBackEdgesVisit(BasicBlock *u,
std::map<BasicBlock *, Color > &color,
std::map<BasicBlock *, int > &d,
std::map<BasicBlock *, int > &d,
int &time, BBMap &be);
void removeRedundant(BBMap &be);
void findAndInstrumentBackEdges(Function &F);
@ -54,15 +54,15 @@ namespace {
bool doInitialization(Module &M);
bool runOnFunction(Function &F);
};
RegisterOpt<InstLoops> X("instloops", "Instrument backedges for profiling");
}
//helper function to get back edges: it is called by
//helper function to get back edges: it is called by
//the "getBackEdges" function below
void InstLoops::getBackEdgesVisit(BasicBlock *u,
std::map<BasicBlock *, Color > &color,
std::map<BasicBlock *, int > &d,
std::map<BasicBlock *, int > &d,
int &time, BBMap &be) {
color[u]=GREY;
time++;
@ -74,7 +74,7 @@ void InstLoops::getBackEdgesVisit(BasicBlock *u,
if(color[BB]!=GREY && color[BB]!=BLACK){
getBackEdgesVisit(BB, color, d, time, be);
}
//now checking for d and f vals
else if(color[BB]==GREY){
//so v is ancestor of u if time of u > time of v
@ -91,13 +91,13 @@ void InstLoops::getBackEdgesVisit(BasicBlock *u,
//set
void InstLoops::removeRedundant(BBMap &be) {
std::vector<BasicBlock *> toDelete;
for(std::map<BasicBlock *, BasicBlock *>::iterator MI = be.begin(),
for(std::map<BasicBlock *, BasicBlock *>::iterator MI = be.begin(),
ME = be.end(); MI != ME; ++MI)
for(BBMap::iterator MMI = be.begin(), MME = be.end(); MMI != MME; ++MMI)
if(DS->properlyDominates(MI->first, MMI->first))
toDelete.push_back(MMI->first);
// Remove all the back-edges we found from be.
for(std::vector<BasicBlock *>::iterator VI = toDelete.begin(),
for(std::vector<BasicBlock *>::iterator VI = toDelete.begin(),
VE = toDelete.end(); VI != VE; ++VI)
be.erase(*VI);
}
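removeRedundant keeps only the outermost back edges: whenever the source block of one recorded back edge properly dominates the source of another, the dominated entry is dropped. A standalone sketch of that filtering step (blocks are ints, and the properlyDominates matrix is filled in by hand purely for illustration; in the pass this information comes from the DominatorSet analysis):

#include <cstdio>
#include <map>
#include <vector>

int main() {
  // Three back edges, keyed by their source block: source -> target.
  std::map<int, int> backEdges = {{1, 0}, {2, 0}, {4, 3}};
  bool properlyDominates[5][5] = {};         // hand-filled dominance relation
  properlyDominates[1][2] = true;            // block 1 properly dominates block 2

  std::vector<int> toDelete;
  for (auto &a : backEdges)
    for (auto &b : backEdges)
      if (a.first != b.first && properlyDominates[a.first][b.first])
        toDelete.push_back(b.first);         // b is redundant: a's source dominates it
  for (int k : toDelete)
    backEdges.erase(k);

  for (auto &e : backEdges)
    std::printf("kept back edge from block %d to block %d\n", e.first, e.second);
  return 0;
}

With this toy relation the entry keyed by block 2 is erased, because block 1, which also carries a back edge, properly dominates it.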
@ -137,14 +137,14 @@ void InstLoops::findAndInstrumentBackEdges(Function &F){
assert(ti->getNumSuccessors() > index && "Not enough successors!");
ti->setSuccessor(index, newBB);
BasicBlock::InstListType &lt = newBB->getInstList();
lt.push_back(new CallInst(inCountMth));
new BranchInst(BB, newBB);
// Now, set the sources of Phi nodes corresponding to the back-edge
// in BB to come from the instrumentation block instead.
for(BasicBlock::iterator BB2Inst = BB->begin(), BBend = BB->end();
for(BasicBlock::iterator BB2Inst = BB->begin(), BBend = BB->end();
BB2Inst != BBend; ++BB2Inst) {
if (PHINode *phiInst = dyn_cast<PHINode>(BB2Inst)) {
int bbIndex = phiInst->getBasicBlockIndex(u);

View File

@ -1,17 +1,17 @@
//===-- ProfilePaths.cpp - interface to insert instrumentation --*- C++ -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This inserts instrumentation for counting execution of paths through a given
// function. It is implemented as a "Function" Pass, and called using opt
//
// This pass is implemented by using algorithms similar to
// 1."Efficient Path Profiling": Ball, T. and Larus, J. R.,
// This pass is implemented by using algorithms similar to
// 1."Efficient Path Profiling": Ball, T. and Larus, J. R.,
// Proceedings of Micro-29, Dec 1996, Paris, France.
// 2."Efficiently Counting Program events with support for on-line
// "queries": Ball T., ACM Transactions on Programming Languages
@ -22,7 +22,7 @@
// (implementation in Graph.cpp and GraphAuxiliary.cpp) and finally, appropriate
// instrumentation is placed over suitable edges. (code inserted through
// EdgeCode.cpp).
//
//
// The algorithm inserts code such that every acyclic path in the CFG of a
// function is identified through a unique number. The code insertion is optimal
// in the sense that it is inserted over a minimal set of edges. Also, the
@ -47,7 +47,7 @@ namespace llvm {
struct ProfilePaths : public FunctionPass {
bool runOnFunction(Function &F);
// Before this pass, make sure that there is only one
// Before this pass, make sure that there is only one
// entry and only one exit node for the function in the CFG of the function
//
void getAnalysisUsage(AnalysisUsage &AU) const {
@ -76,13 +76,13 @@ bool ProfilePaths::runOnFunction(Function &F){
if(F.isExternal()) {
return false;
}
//increment counter for instrumented functions. mn is now function#
mn++;
// Transform the cfg s.t. we have just one exit node
BasicBlock *ExitNode =
getAnalysis<UnifyFunctionExitNodes>().getReturnBlock();
BasicBlock *ExitNode =
getAnalysis<UnifyFunctionExitNodes>().getReturnBlock();
//iterating over BBs and making graph
std::vector<Node *> nodes;
@ -92,10 +92,10 @@ bool ProfilePaths::runOnFunction(Function &F){
// The nodes must be uniquely identified:
// That is, no two nodes must have the same BB*
for (Function::iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB) {
Node *nd=new Node(BB);
nodes.push_back(nd);
nodes.push_back(nd);
if(&*BB == ExitNode)
exitNode=nd;
if(BB==F.begin())
@ -114,22 +114,22 @@ bool ProfilePaths::runOnFunction(Function &F){
edges.push_back(ed);
}
}
Graph g(nodes,edges, startNode, exitNode);
#ifdef DEBUG_PATH_PROFILES
#ifdef DEBUG_PATH_PROFILES
std::cerr<<"Original graph\n";
printGraph(g);
#endif
BasicBlock *fr = &F.front();
// The graph is made acyclic: this is done
// by removing back edges for now, and adding them later on
std::vector<Edge> be;
std::map<Node *, int> nodePriority; //it ranks nodes in depth first order traversal
g.getBackEdges(be, nodePriority);
#ifdef DEBUG_PATH_PROFILES
std::cerr<<"BackEdges-------------\n";
for (std::vector<Edge>::iterator VI=be.begin(); VI!=be.end(); ++VI){
@ -190,7 +190,7 @@ bool ProfilePaths::runOnFunction(Function &F){
Function *initialize =
F.getParent()->getOrInsertFunction("reoptimizerInitialize", Type::VoidTy,
PointerType::get(Type::IntTy), 0);
std::vector<Value *> trargs;
trargs.push_back(threshold);
new CallInst(initialize, trargs, "", fr->begin());
@ -198,8 +198,8 @@ bool ProfilePaths::runOnFunction(Function &F){
if(numPaths<=1 || numPaths >5000) return false;
#ifdef DEBUG_PATH_PROFILES
#ifdef DEBUG_PATH_PROFILES
printGraph(g);
#endif
@ -210,12 +210,12 @@ bool ProfilePaths::runOnFunction(Function &F){
//count is an array: count[x] would store
//the number of executions of path numbered x
Instruction *rVar=new
AllocaInst(Type::IntTy,
Instruction *rVar=new
AllocaInst(Type::IntTy,
ConstantUInt::get(Type::UIntTy,1),"R");
//Instruction *countVar=new
//AllocaInst(Type::IntTy,
//Instruction *countVar=new
//AllocaInst(Type::IntTy,
// ConstantUInt::get(Type::UIntTy, numPaths), "Count");
//initialize counter array!
@ -230,21 +230,21 @@ bool ProfilePaths::runOnFunction(Function &F){
CountCounter++;
std::string countStr = tempChar;
GlobalVariable *countVar = new GlobalVariable(ATy, false,
GlobalValue::InternalLinkage,
GlobalValue::InternalLinkage,
initializer, countStr,
F.getParent());
// insert initialization code in first (entry) BB
// this includes initializing r and count
insertInTopBB(&F.getEntryBlock(), numPaths, rVar, threshold);
//now process the graph: get path numbers,
//get increments along different paths,
//and assign "increments" and "updates" (to r and count)
//"optimally". Finally, insert llvm code along various edges
processGraph(g, rVar, countVar, be, stDummy, exDummy, numPaths, mn,
threshold);
processGraph(g, rVar, countVar, be, stDummy, exDummy, numPaths, mn,
threshold);
return true; // Always modifies function
}

View File

@ -1,10 +1,10 @@
//===- RetracePath.cpp ----------------------------------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Retraces a path of BasicBlock, given a path number and a graph!
@ -25,12 +25,12 @@ namespace llvm {
//Routines to get the path trace!
void getPathFrmNode(Node *n, vector<BasicBlock*> &vBB, int pathNo, Graph &g,
vector<Edge> &stDummy, vector<Edge> &exDummy,
void getPathFrmNode(Node *n, vector<BasicBlock*> &vBB, int pathNo, Graph &g,
vector<Edge> &stDummy, vector<Edge> &exDummy,
vector<Edge> &be,
double strand){
Graph::nodeList &nlist = g.getNodeList(n);
//printGraph(g);
//std::cerr<<"Path No: "<<pathNo<<"\n";
int maxCount=-9999999;
@ -53,7 +53,7 @@ void getPathFrmNode(Node *n, vector<BasicBlock*> &vBB, int pathNo, Graph &g,
}
if(!isStart)
assert(strand!=-1 && "strand not assigned!");
assert(strand!=-1 && "strand not assigned!");
assert(!(*nextRoot==*n && pathNo>0) && "No more BBs to go");
assert(!(*nextRoot==*g.getExit() && pathNo-maxCount!=0) && "Reached exit");
@ -65,7 +65,7 @@ void getPathFrmNode(Node *n, vector<BasicBlock*> &vBB, int pathNo, Graph &g,
//look for strnd and edgeRnd now:
bool has1=false, has2=false;
//check if exit has it
for(vector<Edge>::iterator VI=exDummy.begin(), VE=exDummy.end(); VI!=VE;
for(vector<Edge>::iterator VI=exDummy.begin(), VE=exDummy.end(); VI!=VE;
++VI){
if(VI->getRandId()==edgeRnd){
has2=true;
@ -74,7 +74,7 @@ void getPathFrmNode(Node *n, vector<BasicBlock*> &vBB, int pathNo, Graph &g,
}
//check if start has it
for(vector<Edge>::iterator VI=stDummy.begin(), VE=stDummy.end(); VI!=VE;
for(vector<Edge>::iterator VI=stDummy.begin(), VE=stDummy.end(); VI!=VE;
++VI){
if(VI->getRandId()==strand){
has1=true;
@ -98,22 +98,22 @@ void getPathFrmNode(Node *n, vector<BasicBlock*> &vBB, int pathNo, Graph &g,
//find backedge with startpoint vBB[vBB.size()-1]
for(vector<Edge>::iterator VI=be.begin(), VE=be.end(); VI!=VE; ++VI){
assert(vBB.size()>0 && "vector too small");
if( VI->getFirst()->getElement() == vBB[vBB.size()-1] &&
if( VI->getFirst()->getElement() == vBB[vBB.size()-1] &&
VI->getSecond()->getElement() == vBB[0]){
//vBB.push_back(VI->getSecond()->getElement());
break;
}
}
}
else
else
vBB.push_back(nextRoot->getElement());
return;
}
assert(pathNo-maxCount>=0);
return getPathFrmNode(nextRoot, vBB, pathNo-maxCount, g, stDummy,
return getPathFrmNode(nextRoot, vBB, pathNo-maxCount, g, stDummy,
exDummy, be, strand);
}
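getPathFrmNode inverts the numbering: starting at the root with the recorded path number, it repeatedly follows the outgoing edge with the largest value that still fits into what is left of the number, subtracting as it goes, until the exit is reached. A toy standalone version (hand-coded edge values on a four-node DAG, ignoring the dummy-edge and back-edge handling of the real routine):

#include <cstdio>
#include <vector>

int main() {
  // Same toy DAG as used for numbering: 0 is the root, 3 is the exit.
  // Edge values are chosen so that path sums are the unique numbers 0, 1 and 2.
  struct Succ { int node; long val; };
  std::vector<std::vector<Succ>> succ = {
      {{1, 0}, {2, 2}},      // edges out of 0
      {{3, 0}, {2, 1}},      // edges out of 1
      {{3, 0}},              // edges out of 2
      {}                     // 3 is the exit
  };

  long pathNo = 1;                      // path number to retrace
  int v = 0;                            // start at the root
  std::printf("%d", v);
  while (!succ[v].empty()) {
    // pick the successor edge with the largest value that still fits;
    // the values above are constructed so that one always does
    const Succ *best = nullptr;
    for (const Succ &s : succ[v])
      if (s.val <= pathNo && (!best || s.val > best->val))
        best = &s;
    pathNo -= best->val;
    v = best->node;
    std::printf(" -> %d", v);
  }
  std::printf("\n");
  return 0;
}

With pathNo = 1 this prints 0 -> 1 -> 2 -> 3, the path whose edge values sum to 1.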
@ -131,16 +131,16 @@ void getBBtrace(vector<BasicBlock *> &vBB, int pathNo, Function *M){//,
// vector<Instruction *> &instToErase){
//step 1: create graph
//Transform the cfg s.t. we have just one exit node
std::vector<Node *> nodes;
std::vector<Edge> edges;
Node *exitNode=0, *startNode=0;
//Create the cfg just once for each function!
static std::map<Function *, Graph *> graphMap;
static std::map<Function *, Graph *> graphMap;
//get backedges, exit and start edges for the graphs and store them
static std::map<Function *, vector<Edge> > stMap, exMap, beMap;
static std::map<Function *, vector<Edge> > stMap, exMap, beMap;
static std::map<Function *, Value *> pathReg; //path register
@ -152,19 +152,19 @@ void getBBtrace(vector<BasicBlock *> &vBB, int pathNo, Function *M){//,
break;
}
}
assert(ExitNode!=0 && "exitnode not found");
//iterating over BBs and making graph
//iterating over BBs and making graph
//The nodes must be uniquely identified:
//That is, no two nodes must have the same BB*
//keep a map for trigger basicblocks!
std::map<BasicBlock *, unsigned char> triggerBBs;
//First enter just nodes: later enter edges
for(Function::iterator BB = M->begin(), BE=M->end(); BB != BE; ++BB){
bool cont = false;
if(BB->size()==3 || BB->size() ==2){
for(BasicBlock::iterator II = BB->begin(), IE = BB->end();
II != IE; ++II){
@ -180,10 +180,10 @@ void getBBtrace(vector<BasicBlock *> &vBB, int pathNo, Function *M){//,
}
}
}
if(cont)
continue;
// const Instruction *inst = BB->getInstList().begin();
// if(isa<CallInst>(inst)){
// Instruction *ii1 = BB->getInstList().begin();
@ -191,9 +191,9 @@ void getBBtrace(vector<BasicBlock *> &vBB, int pathNo, Function *M){//,
// if(callInst->getCalledFunction()->getName()=="trigger")
// continue;
// }
Node *nd=new Node(BB);
nodes.push_back(nd);
nodes.push_back(nd);
if(&*BB==ExitNode)
exitNode=nd;
if(&*BB==&M->front())
@ -201,16 +201,16 @@ void getBBtrace(vector<BasicBlock *> &vBB, int pathNo, Function *M){//,
}
assert(exitNode!=0 && startNode!=0 && "Start or exit not found!");
for (Function::iterator BB = M->begin(), BE=M->end(); BB != BE; ++BB){
if(triggerBBs[BB] == 9)
if(triggerBBs[BB] == 9)
continue;
//if(BB->size()==3)
//if(CallInst *callInst = dyn_cast<CallInst>(BB->getInstList().begin()))
//if(callInst->getCalledFunction()->getName() == "trigger")
//continue;
// if(BB->size()==2){
// const Instruction *inst = BB->getInstList().begin();
// if(isa<CallInst>(inst)){
@ -220,12 +220,12 @@ void getBBtrace(vector<BasicBlock *> &vBB, int pathNo, Function *M){//,
// continue;
// }
// }
Node *nd=findBB(nodes, BB);
assert(nd && "No node for this edge!");
for(succ_iterator s=succ_begin(BB), se=succ_end(BB); s!=se; ++s){
if(triggerBBs[*s] == 9){
//if(!pathReg[M]){ //Get the path register for this!
//if(BB->size()>8)
@ -235,11 +235,11 @@ void getBBtrace(vector<BasicBlock *> &vBB, int pathNo, Function *M){//,
continue;
}
//if((*s)->size()==3)
//if(CallInst *callInst =
//if(CallInst *callInst =
// dyn_cast<CallInst>((*s)->getInstList().begin()))
// if(callInst->getCalledFunction()->getName() == "trigger")
// continue;
// if((*s)->size()==2){
// const Instruction *inst = (*s)->getInstList().begin();
// if(isa<CallInst>(inst)){
@ -249,40 +249,40 @@ void getBBtrace(vector<BasicBlock *> &vBB, int pathNo, Function *M){//,
// continue;
// }
// }
Node *nd2 = findBB(nodes,*s);
assert(nd2 && "No node for this edge!");
Edge ed(nd,nd2,0);
edges.push_back(ed);
}
}
graphMap[M]= new Graph(nodes,edges, startNode, exitNode);
Graph *g = graphMap[M];
if (M->size() <= 1) return; //uninstrumented
if (M->size() <= 1) return; //uninstrumented
//step 2: getBackEdges
//vector<Edge> be;
std::map<Node *, int> nodePriority;
g->getBackEdges(beMap[M], nodePriority);
//step 3: add dummy edges
//vector<Edge> stDummy;
//vector<Edge> exDummy;
addDummyEdges(stMap[M], exMap[M], *g, beMap[M]);
//step 4: value assgn to edges
int numPaths = valueAssignmentToEdges(*g, nodePriority, beMap[M]);
}
//step 5: now travel from root, select max(edge) < pathNo,
//step 5: now travel from root, select max(edge) < pathNo,
//and go on until reach the exit
getPathFrmNode(graphMap[M]->getRoot(), vBB, pathNo, *graphMap[M],
getPathFrmNode(graphMap[M]->getRoot(), vBB, pathNo, *graphMap[M],
stMap[M], exMap[M], beMap[M], -1);
//post process vBB to locate instructions to be erased
/*

View File

@ -1,10 +1,10 @@
//===- ProfilingUtils.cpp - Helper functions shared by profilers ----------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements a few helper functions which are used by profile
@ -51,7 +51,7 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
Args[2] = ConstantPointerNull::get(UIntPtr);
}
Args[3] = ConstantUInt::get(Type::UIntTy, NumElements);
Instruction *InitCall = new CallInst(InitFn, Args, "newargc", InsertPos);
// If argc or argv are not available in main, just pass null values in.
@ -80,7 +80,7 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
AI->replaceAllUsesWith(InitCall);
InitCall->setOperand(1, AI);
}
case 0: break;
}
}

View File

@ -1,10 +1,10 @@
//===- ProfilingUtils.h - Helper functions shared by profilers --*- C++ -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines a few helper functions which are used by profile

View File

@ -1,10 +1,10 @@
//===- TraceBasicBlocks.cpp - Insert basic-block trace instrumentation ----===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass instruments the specified program with calls into a runtime

View File

@ -1,10 +1,10 @@
//===- TraceValues.cpp - Value Tracing for debugging ----------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Support for inserting LLVM code to print values at basic block and function
@ -41,7 +41,7 @@ static void TraceValuesAtBBExit(BasicBlock *BB,
// We trace a particular function if no functions to trace were specified
// or if the function is in the specified list.
//
//
inline static bool
TraceThisFunction(Function &F)
{
@ -58,19 +58,19 @@ namespace {
Function *RecordPtrFunc, *PushOnEntryFunc, *ReleaseOnReturnFunc;
void doInitialization(Module &M); // Add prototypes for external functions
};
class InsertTraceCode : public FunctionPass {
protected:
ExternalFuncs externalFuncs;
public:
// Add a prototype for runtime functions not already in the program.
//
bool doInitialization(Module &M);
//--------------------------------------------------------------------------
// Function InsertCodeToTraceValues
//
//
// Inserts tracing code for all live values at basic block and/or function
// exits as specified by `traceBasicBlockExits' and `traceFunctionExits'.
//
@ -131,13 +131,13 @@ void ExternalFuncs::doInitialization(Module &M) {
// uint (sbyte*)
HashPtrFunc = M.getOrInsertFunction("HashPointerToSeqNum", Type::UIntTy, SBP,
0);
// void (sbyte*)
ReleasePtrFunc = M.getOrInsertFunction("ReleasePointerSeqNum",
ReleasePtrFunc = M.getOrInsertFunction("ReleasePointerSeqNum",
Type::VoidTy, SBP, 0);
RecordPtrFunc = M.getOrInsertFunction("RecordPointer",
Type::VoidTy, SBP, 0);
PushOnEntryFunc = M.getOrInsertFunction("PushPointerSet", Type::VoidTy, 0);
ReleaseOnReturnFunc = M.getOrInsertFunction("ReleasePointersPopSet",
Type::VoidTy, 0);
@ -158,7 +158,7 @@ static inline GlobalVariable *getStringRef(Module *M, const std::string &str) {
// Create the global variable and record it in the module
// The GV will be renamed to a unique name if needed.
GlobalVariable *GV = new GlobalVariable(Init->getType(), true,
GlobalVariable *GV = new GlobalVariable(Init->getType(), true,
GlobalValue::InternalLinkage, Init,
"trstr");
M->getGlobalList().push_back(GV);
@ -166,12 +166,12 @@ static inline GlobalVariable *getStringRef(Module *M, const std::string &str) {
}
//
//
// Check if this instruction has any uses outside its basic block,
// or if it is used by either a Call or Return instruction (ditto).
// (Values stored to memory within this BB are live at end of BB but are
// traced at the store instruction, not where they are computed.)
//
//
static inline bool LiveAtBBExit(const Instruction* I) {
const BasicBlock *BB = I->getParent();
for (Value::use_const_iterator U = I->use_begin(); U != I->use_end(); ++U)
@ -186,7 +186,7 @@ static inline bool LiveAtBBExit(const Instruction* I) {
static inline bool TraceThisOpCode(unsigned opCode) {
// Explicitly test for opCodes *not* to trace so that any new opcodes will
// be traced by default (VoidTy's are already excluded)
//
//
return (opCode < Instruction::OtherOpsBegin &&
opCode != Instruction::Alloca &&
opCode != Instruction::PHI &&
@ -198,7 +198,7 @@ static inline bool TraceThisOpCode(unsigned opCode) {
// by a real computation, not just a copy (see TraceThisOpCode), and
// -- it is a load instruction: we want to check values read from memory
// -- or it is live at exit from the basic block (i.e., ignore local temps)
//
//
static bool ShouldTraceValue(const Instruction *I) {
return
I->getType() != Type::VoidTy &&
@ -216,7 +216,7 @@ static std::string getPrintfCodeFor(const Value *V) {
return DisablePtrHashing ? "0x%p" : "%d";
else if (V->getType()->isIntegral())
return "%d";
assert(0 && "Illegal value to print out...");
return "";
}
@ -245,7 +245,7 @@ static void InsertPrintInst(Value *V, BasicBlock *BB, Instruction *InsertBefore,
// Turn the format string into an sbyte *
Constant *GEP=ConstantExpr::getGetElementPtr(fmtVal,
std::vector<Constant*>(2,Constant::getNullValue(Type::LongTy)));
// Insert a call to the hash function if this is a pointer value
if (V && isa<PointerType>(V->getType()) && !DisablePtrHashing) {
const Type *SBP = PointerType::get(Type::SByteTy);
@ -255,14 +255,14 @@ static void InsertPrintInst(Value *V, BasicBlock *BB, Instruction *InsertBefore,
std::vector<Value*> HashArgs(1, V);
V = new CallInst(HashPtrToSeqNum, HashArgs, "ptrSeqNum", InsertBefore);
}
// Insert the first print instruction to print the string flag:
std::vector<Value*> PrintArgs;
PrintArgs.push_back(GEP);
if (V) PrintArgs.push_back(V);
new CallInst(Printf, PrintArgs, "trace", InsertBefore);
}
static void InsertVerbosePrintInst(Value *V, BasicBlock *BB,
Instruction *InsertBefore,
@ -274,11 +274,11 @@ static void InsertVerbosePrintInst(Value *V, BasicBlock *BB,
Printf, HashPtrToSeqNum);
}
static void
static void
InsertReleaseInst(Value *V, BasicBlock *BB,
Instruction *InsertBefore,
Function* ReleasePtrFunc) {
const Type *SBP = PointerType::get(Type::SByteTy);
if (V->getType() != SBP) // Cast pointer to be sbyte*
V = new CastInst(V, SBP, "RPSN_cast", InsertBefore);
@ -287,7 +287,7 @@ InsertReleaseInst(Value *V, BasicBlock *BB,
new CallInst(ReleasePtrFunc, releaseArgs, "", InsertBefore);
}
static void
static void
InsertRecordInst(Value *V, BasicBlock *BB,
Instruction *InsertBefore,
Function* RecordPtrFunc) {
@ -302,17 +302,17 @@ InsertRecordInst(Value *V, BasicBlock *BB,
// Look for alloca and free instructions. These are the ptrs to release.
// Release the free'd pointers immediately. Record the alloca'd pointers
// to be released on return from the current function.
//
//
static void
ReleasePtrSeqNumbers(BasicBlock *BB,
ExternalFuncs& externalFuncs) {
for (BasicBlock::iterator II=BB->begin(), IE = BB->end(); II != IE; ++II)
if (FreeInst *FI = dyn_cast<FreeInst>(II))
InsertReleaseInst(FI->getOperand(0), BB, FI,externalFuncs.ReleasePtrFunc);
else if (AllocaInst *AI = dyn_cast<AllocaInst>(II))
InsertRecordInst(AI, BB, AI->getNext(), externalFuncs.RecordPtrFunc);
}
}
// Insert print instructions at the end of basic block BB for each value
@ -323,15 +323,15 @@ ReleasePtrSeqNumbers(BasicBlock *BB,
// for printing at the exit from the function. (Note that in each invocation
// of the function, this will only get the last value stored for each static
// store instruction).
//
//
static void TraceValuesAtBBExit(BasicBlock *BB,
Function *Printf, Function* HashPtrToSeqNum,
std::vector<Instruction*> *valuesStoredInFunction) {
// Get an iterator to point to the insertion location, which is
// just before the terminator instruction.
//
//
TerminatorInst *InsertPos = BB->getTerminator();
std::ostringstream OutStr;
WriteAsOperand(OutStr, BB, false);
InsertPrintInst(0, BB, InsertPos, "LEAVING BB:" + OutStr.str(),
@ -340,7 +340,7 @@ static void TraceValuesAtBBExit(BasicBlock *BB,
// Insert a print instruction for each instruction preceding InsertPos.
// The print instructions must go before InsertPos, so we use the
// instruction *preceding* InsertPos to check when to terminate the loop.
//
//
for (BasicBlock::iterator II = BB->begin(); &*II != InsertPos; ++II) {
if (StoreInst *SI = dyn_cast<StoreInst>(II)) {
// Trace the stored value and address
@ -380,12 +380,12 @@ static inline void InsertCodeToShowFunctionExit(BasicBlock *BB,
Function* HashPtrToSeqNum) {
// Get an iterator to point to the insertion location
ReturnInst *Ret = cast<ReturnInst>(BB->getTerminator());
std::ostringstream OutStr;
WriteAsOperand(OutStr, BB->getParent(), true);
InsertPrintInst(0, BB, Ret, "LEAVING FUNCTION: " + OutStr.str(),
Printf, HashPtrToSeqNum);
// print the return value, if any
if (BB->getParent()->getReturnType() != Type::VoidTy)
InsertPrintInst(Ret->getReturnValue(), BB, Ret, " Returning: ",
@ -396,14 +396,14 @@ static inline void InsertCodeToShowFunctionExit(BasicBlock *BB,
bool InsertTraceCode::runOnFunction(Function &F) {
if (!TraceThisFunction(F))
return false;
std::vector<Instruction*> valuesStoredInFunction;
std::vector<BasicBlock*> exitBlocks;
// Insert code to trace values at function entry
InsertCodeToShowFunctionEntry(F, externalFuncs.PrintfFunc,
externalFuncs.HashPtrFunc);
// Push a pointer set for recording alloca'd pointers at entry.
if (!DisablePtrHashing)
new CallInst(externalFuncs.PushOnEntryFunc, std::vector<Value*>(), "",
@ -419,18 +419,18 @@ bool InsertTraceCode::runOnFunction(Function &F) {
if (!DisablePtrHashing) // release seq. numbers on free/ret
ReleasePtrSeqNumbers(BB, externalFuncs);
}
for (unsigned i=0; i != exitBlocks.size(); ++i)
{
// Insert code to trace values at function exit
InsertCodeToShowFunctionExit(exitBlocks[i], externalFuncs.PrintfFunc,
externalFuncs.HashPtrFunc);
// Release all recorded pointers before RETURN. Do this LAST!
if (!DisablePtrHashing)
new CallInst(externalFuncs.ReleaseOnReturnFunc, std::vector<Value*>(),
"", exitBlocks[i]->getTerminator());
}
return true;
}


@ -1,10 +1,10 @@
//===- LevelRaise.cpp - Code to change LLVM to higher level ---------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the 'raising' part of the LevelChange API. This is
@ -37,7 +37,7 @@ StartInst("raise-start-inst", cl::Hidden, cl::value_desc("inst name"),
static Statistic<>
NumLoadStorePeepholes("raise", "Number of load/store peepholes");
static Statistic<>
static Statistic<>
NumGEPInstFormed("raise", "Number of other getelementptr's formed");
static Statistic<>
@ -138,14 +138,14 @@ static bool HandleCastToPointer(BasicBlock::iterator BI,
PRINT_PEEPHOLE2("cast-add-to-gep:in", *Src, CI);
// If we have a getelementptr capability... transform all of the
// If we have a getelementptr capability... transform all of the
// add instruction uses into getelementptr's.
while (!CI.use_empty()) {
BinaryOperator *I = cast<BinaryOperator>(*CI.use_begin());
assert((I->getOpcode() == Instruction::Add ||
I->getOpcode() == Instruction::Sub) &&
I->getOpcode() == Instruction::Sub) &&
"Use is not a valid add instruction!");
// Get the value added to the cast result pointer...
Value *OtherPtr = I->getOperand((I->getOperand(0) == &CI) ? 1 : 0);
@ -156,7 +156,7 @@ static bool HandleCastToPointer(BasicBlock::iterator BI,
// one index (from code above), so we just need to negate the pointer index
// long value.
if (I->getOpcode() == Instruction::Sub) {
Instruction *Neg = BinaryOperator::createNeg(GEP->getOperand(1),
Instruction *Neg = BinaryOperator::createNeg(GEP->getOperand(1),
GEP->getOperand(1)->getName()+".neg", I);
GEP->setOperand(1, Neg);
}
@ -276,7 +276,7 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
ConvertedTypes[CI] = CI->getType(); // Make sure the cast doesn't change
if (ExpressionConvertibleToType(Src, DestTy, ConvertedTypes, TD)) {
PRINT_PEEPHOLE3("CAST-SRC-EXPR-CONV:in ", *Src, *CI, *BB->getParent());
DEBUG(std::cerr << "\nCONVERTING SRC EXPR TYPE:\n");
{ // ValueMap must be destroyed before function verified!
ValueMapCache ValueMap;
@ -284,7 +284,7 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
if (Constant *CPV = dyn_cast<Constant>(E))
CI->replaceAllUsesWith(CPV);
PRINT_PEEPHOLE1("CAST-SRC-EXPR-CONV:out", *E);
DEBUG(std::cerr << "DONE CONVERTING SRC EXPR TYPE: \n"
<< *BB->getParent());
@ -376,7 +376,7 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
if (const StructType *CurSTy = dyn_cast<StructType>(CurCTy)) {
// Check for a zero element struct type... if we have one, bail.
if (CurSTy->getNumElements() == 0) break;
// Grab the first element of the struct type, which must lie at
// offset zero in the struct.
//
@ -390,13 +390,13 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
// Did we find what we're looking for?
if (ElTy->isLosslesslyConvertibleTo(DestPointedTy)) break;
// Nope, go a level deeper.
++Depth;
CurCTy = dyn_cast<CompositeType>(ElTy);
ElTy = 0;
}
// Did we find what we were looking for? If so, do the transformation
if (ElTy) {
PRINT_PEEPHOLE1("cast-for-first:in", *CI);
@ -411,7 +411,7 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
// the old src value.
//
CI->setOperand(0, GEP);
PRINT_PEEPHOLE2("cast-for-first:out", *GEP, *CI);
++NumGEPInstFormed;
return true;
@ -422,12 +422,12 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Val = SI->getOperand(0);
Value *Pointer = SI->getPointerOperand();
// Peephole optimize the following instructions:
// %t = cast <T1>* %P to <T2> * ;; If T1 is losslessly convertible to T2
// store <T2> %V, <T2>* %t
//
// Into:
// Into:
// %t = cast <T2> %V to <T1>
// store <T1> %t2, <T1>* %P
//
@ -460,12 +460,12 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
Value *Pointer = LI->getOperand(0);
const Type *PtrElType =
cast<PointerType>(Pointer->getType())->getElementType();
// Peephole optimize the following instructions:
// %Val = cast <T1>* to <T2>* ;; If T1 is losslessly convertible to T2
// %t = load <T2>* %P
//
// Into:
// Into:
// %t = load <T1>* %P
// %Val = cast <T1> to <T2>
//
@ -483,7 +483,7 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
// Create the new load instruction... loading the pre-casted value
LoadInst *NewLI = new LoadInst(CastSrc, LI->getName(), BI);
// Insert the new T cast instruction... stealing old T's name
CastInst *NCI = new CastInst(NewLI, LI->getType(), CI->getName());
@ -540,7 +540,7 @@ bool RPR::PeepholeOptimize(BasicBlock *BB, BasicBlock::iterator &BI) {
std::vector<Value*>(CI->op_begin()+1, CI->op_end()));
++BI;
ReplaceInstWithInst(CI, NewCall);
++NumVarargCallChanges;
return true;
}
@ -559,7 +559,7 @@ bool RPR::DoRaisePass(Function &F) {
for (BasicBlock::iterator BI = BB->begin(); BI != BB->end();) {
DEBUG(std::cerr << "LevelRaising: " << *BI);
if (dceInstruction(BI) || doConstantPropagation(BI)) {
Changed = true;
Changed = true;
++NumDCEorCP;
DEBUG(std::cerr << "***\t\t^^-- Dead code eliminated!\n");
} else if (PeepholeOptimize(BB, BI)) {


@ -1,14 +1,14 @@
//===- ADCE.cpp - Code to perform aggressive dead code elimination --------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements "aggressive" dead code elimination.  ADCE is DCE where
// values are assumed to be dead until proven otherwise. This is similar to
// values are assumed to be dead until proven otherwise. This is similar to
// SCCP, except applied to the liveness of values.
//
//===----------------------------------------------------------------------===//
@ -116,11 +116,11 @@ void ADCE::markBlockAlive(BasicBlock *BB) {
if (It != CDG.end()) {
// Get the blocks that this node is control dependent on...
const PostDominanceFrontier::DomSetType &CDB = It->second;
for (PostDominanceFrontier::DomSetType::const_iterator I =
for (PostDominanceFrontier::DomSetType::const_iterator I =
CDB.begin(), E = CDB.end(); I != E; ++I)
markTerminatorLive(*I); // Mark all their terminators as live
}
// If this basic block is live, and it ends in an unconditional branch, then
// the branch is alive as well...
if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
@ -162,7 +162,7 @@ TerminatorInst *ADCE::convertToUnconditionalBranch(TerminatorInst *TI) {
// Remove entries from PHI nodes to avoid confusing ourself later...
for (unsigned i = 1, e = TI->getNumSuccessors(); i != e; ++i)
TI->getSuccessor(i)->removePredecessor(BB);
// Delete the old branch itself...
BB->getInstList().erase(TI);
return NB;
@ -203,7 +203,7 @@ bool ADCE::doADCE() {
}
// Iterate over all of the instructions in the function, eliminating trivially
// dead instructions, and marking instructions live that are known to be
// dead instructions, and marking instructions live that are known to be
// needed. Perform the walk in depth first order so that we avoid marking any
// instructions live in basic blocks that are unreachable. These blocks will
// be eliminated later, along with the instructions inside.
@ -338,7 +338,7 @@ bool ADCE::doADCE() {
return MadeChanges;
}
// If the entry node is dead, insert a new entry node to eliminate the entry
// node as a special case.
@ -350,7 +350,7 @@ bool ADCE::doADCE() {
AliveBlocks.insert(NewEntry); // This block is always alive!
LiveSet.insert(NewEntry->getTerminator()); // The branch is live
}
// Loop over all of the alive blocks in the function. If any successor
// blocks are not alive, we adjust the outgoing branches to branch to the
// first live postdominator of the live block, adjusting any PHI nodes in
@ -360,7 +360,7 @@ bool ADCE::doADCE() {
if (AliveBlocks.count(I)) {
BasicBlock *BB = I;
TerminatorInst *TI = BB->getTerminator();
// If the terminator instruction is not alive, but the block it is contained
// in IS alive, this means that this terminator is a conditional branch on
// a condition that doesn't matter. Make it an unconditional branch to
@ -392,7 +392,7 @@ bool ADCE::doADCE() {
break;
}
}
}
}
// There is a special case here... if there IS no post-dominator for
// the block we have nowhere to point our branch to. Instead, convert
@ -411,12 +411,12 @@ bool ADCE::doADCE() {
// branch into an infinite loop into a return instruction!
//
RemoveSuccessor(TI, i);
// RemoveSuccessor may replace TI... make sure we have a fresh
// pointer.
//
TI = BB->getTerminator();
// Rescan this successor...
--i;
} else {
@ -443,7 +443,7 @@ bool ADCE::doADCE() {
int OldIdx = PN->getBasicBlockIndex(LastDead);
assert(OldIdx != -1 &&"LastDead is not a pred of NextAlive!");
Value *InVal = PN->getIncomingValue(OldIdx);
// Add an incoming value for BB now...
PN->addIncoming(InVal, BB);
}


@ -1,10 +1,10 @@
//===-- BasicBlockPlacement.cpp - Basic Block Code Layout optimization ----===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements a very simple profile guided basic block placement
@ -37,7 +37,7 @@ using namespace llvm;
namespace {
Statistic<> NumMoved("block-placement", "Number of basic blocks moved");
struct BlockPlacement : public FunctionPass {
virtual bool runOnFunction(Function &F);
@ -78,11 +78,11 @@ bool BlockPlacement::runOnFunction(Function &F) {
PI = &getAnalysis<ProfileInfo>();
NumMovedBlocks = 0;
InsertPos = F.begin();
InsertPos = F.begin();
// Recursively place all blocks.
PlaceBlocks(F.begin());
PlacedBlocks.clear();
NumMoved += NumMovedBlocks;
return NumMovedBlocks != 0;
@ -115,12 +115,12 @@ void BlockPlacement::PlaceBlocks(BasicBlock *BB) {
while (1) {
// Okay, now place any unplaced successors.
succ_iterator SI = succ_begin(BB), E = succ_end(BB);
// Scan for the first unplaced successor.
for (; SI != E && PlacedBlocks.count(*SI); ++SI)
/*empty*/;
if (SI == E) return; // No more successors to place.
unsigned MaxExecutionCount = PI->getExecutionCount(*SI);
BasicBlock *MaxSuccessor = *SI;


@ -1,10 +1,10 @@
//===-- CondPropagate.cpp - Propagate Conditional Expressions -------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass propagates information about conditional expressions through the
@ -73,7 +73,7 @@ void CondProp::SimplifyBlock(BasicBlock *BB) {
if (BI->isConditional() && isa<PHINode>(BI->getCondition()) &&
cast<PHINode>(BI->getCondition())->getParent() == BB)
SimplifyPredecessors(BI);
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
if (isa<PHINode>(SI->getCondition()) &&
cast<PHINode>(SI->getCondition())->getParent() == BB)


@ -1,10 +1,10 @@
//===- ConstantProp.cpp - Code to perform Simple Constant Propagation -----===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements constant propagation and merging:
@ -66,7 +66,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI)
WorkList.insert(cast<Instruction>(*UI));
// Replace all of the uses of a variable with uses of the constant.
I->replaceAllUsesWith(C);


@ -1,10 +1,10 @@
//===- CorrelatedExprs.cpp - Pass to detect and eliminated c.e.'s ---------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Correlated Expression Elimination propagates information from conditional
@ -178,7 +178,7 @@ namespace {
// empty - return true if this region has no information known about it.
bool empty() const { return ValueMap.empty(); }
const RegionInfo &operator=(const RegionInfo &RI) {
ValueMap = RI.ValueMap;
return *this;
@ -204,7 +204,7 @@ namespace {
if (I != ValueMap.end()) return &I->second;
return 0;
}
/// removeValueInfo - Remove anything known about V from our records. This
/// works whether or not we know anything about V.
///
@ -281,7 +281,7 @@ namespace {
bool SimplifyBasicBlock(BasicBlock &BB, const RegionInfo &RI);
bool SimplifyInstruction(Instruction *Inst, const RegionInfo &RI);
};
};
RegisterOpt<CEE> X("cee", "Correlated Expression Elimination");
}
@ -299,7 +299,7 @@ bool CEE::runOnFunction(Function &F) {
// blocks.
DS = &getAnalysis<DominatorSet>();
DT = &getAnalysis<DominatorTree>();
std::set<BasicBlock*> VisitedBlocks;
bool Changed = TransformRegion(&F.getEntryBlock(), VisitedBlocks);
@ -458,13 +458,13 @@ bool CEE::ForwardCorrelatedEdgeDestination(TerminatorInst *TI, unsigned SuccNo,
for (BasicBlock::iterator I = OldSucc->begin(), E = OldSucc->end(); I!=E; ++I)
if (I->getType() != Type::VoidTy)
NewRI.removeValueInfo(I);
// Put the newly discovered information into the RegionInfo...
for (BasicBlock::iterator I = OldSucc->begin(), E = OldSucc->end(); I!=E; ++I)
if (PHINode *PN = dyn_cast<PHINode>(I)) {
int OpNum = PN->getBasicBlockIndex(BB);
assert(OpNum != -1 && "PHI doesn't have incoming edge for predecessor!?");
PropagateEquality(PN, PN->getIncomingValue(OpNum), NewRI);
PropagateEquality(PN, PN->getIncomingValue(OpNum), NewRI);
} else if (SetCondInst *SCI = dyn_cast<SetCondInst>(I)) {
Relation::KnownResult Res = getSetCCResult(SCI, NewRI);
if (Res == Relation::Unknown) return false;
@ -472,7 +472,7 @@ bool CEE::ForwardCorrelatedEdgeDestination(TerminatorInst *TI, unsigned SuccNo,
} else {
assert(isa<BranchInst>(*I) && "Unexpected instruction type!");
}
// Compute the facts implied by what we have discovered...
ComputeReplacements(NewRI);
@ -486,7 +486,7 @@ bool CEE::ForwardCorrelatedEdgeDestination(TerminatorInst *TI, unsigned SuccNo,
ForwardSuccessorTo(TI, SuccNo, BI->getSuccessor(!CB->getValue()), NewRI);
return true;
}
return false;
}
@ -582,7 +582,7 @@ void CEE::ForwardSuccessorTo(TerminatorInst *TI, unsigned SuccNo,
// node yet though if this is the last edge into it.
Value *EdgeValue = PN->removeIncomingValue(BB, false);
// Make sure that anything that used to use PN now refers to EdgeValue
// Make sure that anything that used to use PN now refers to EdgeValue
ReplaceUsesOfValueInRegion(PN, EdgeValue, Dest);
// If there is only one value left coming into the PHI node, replace the PHI
@ -603,7 +603,7 @@ void CEE::ForwardSuccessorTo(TerminatorInst *TI, unsigned SuccNo,
++I; // Otherwise, move on to the next PHI node
}
}
// Actually revector the branch now...
TI->setSuccessor(SuccNo, Dest);
@ -689,7 +689,7 @@ static void CalcRegionExitBlocks(BasicBlock *Header, BasicBlock *BB,
} else {
// Header does not dominate this block, but we have a predecessor that does
// dominate us. Add ourself to the list.
RegionExitBlocks.push_back(BB);
RegionExitBlocks.push_back(BB);
}
}
@ -703,7 +703,7 @@ void CEE::CalculateRegionExitBlocks(BasicBlock *BB, BasicBlock *OldSucc,
// Recursively calculate blocks we are interested in...
CalcRegionExitBlocks(BB, BB, Visited, *DS, RegionExitBlocks);
// Filter out blocks that are not dominated by OldSucc...
for (unsigned i = 0; i != RegionExitBlocks.size(); ) {
if (DS->dominates(OldSucc, RegionExitBlocks[i]))
@ -738,7 +738,7 @@ void CEE::InsertRegionExitMerges(PHINode *BBVal, Instruction *OldVal,
// otherwise use OldVal.
NewPN->addIncoming(DS->dominates(BB, *PI) ? BBVal : OldVal, *PI);
}
// Now make everyone dominated by this block use this new value!
ReplaceUsesOfValueInRegion(OldVal, NewPN, FBlock);
}
@ -783,7 +783,7 @@ void CEE::PropagateBranchInfo(BranchInst *BI) {
//
PropagateEquality(BI->getCondition(), ConstantBool::True,
getRegionInfo(BI->getSuccessor(0)));
// Propagate information into the false block...
//
PropagateEquality(BI->getCondition(), ConstantBool::False,
@ -825,7 +825,7 @@ void CEE::PropagateEquality(Value *Op0, Value *Op1, RegionInfo &RI) {
PropagateEquality(Inst->getOperand(0), CB, RI);
PropagateEquality(Inst->getOperand(1), CB, RI);
}
// If we know that this instruction is an OR instruction, and the result
// is false, this means that both operands to the OR are known to be false
// as well.
@ -834,7 +834,7 @@ void CEE::PropagateEquality(Value *Op0, Value *Op1, RegionInfo &RI) {
PropagateEquality(Inst->getOperand(0), CB, RI);
PropagateEquality(Inst->getOperand(1), CB, RI);
}
// If we know that this instruction is a NOT instruction, we know that the
// operand is known to be the inverse of whatever the current value is.
//
@ -857,7 +857,7 @@ void CEE::PropagateEquality(Value *Op0, Value *Op1, RegionInfo &RI) {
} else { // If we know the condition is false...
// We know the opposite of the condition is true...
Instruction::BinaryOps C = SCI->getInverseCondition();
PropagateRelation(C, SCI->getOperand(0), SCI->getOperand(1), RI);
PropagateRelation(SetCondInst::getSwappedCondition(C),
SCI->getOperand(1), SCI->getOperand(0), RI);
@ -1065,7 +1065,7 @@ Relation::KnownResult CEE::getSetCCResult(SetCondInst *SCI,
const RegionInfo &RI) {
Value *Op0 = SCI->getOperand(0), *Op1 = SCI->getOperand(1);
Instruction::BinaryOps Opcode = SCI->getOpcode();
if (isa<Constant>(Op0)) {
if (isa<Constant>(Op1)) {
if (Constant *Result = ConstantFoldInstruction(SCI)) {
@ -1098,7 +1098,7 @@ Relation::KnownResult CEE::getSetCCResult(SetCondInst *SCI,
// If the intersection of the two ranges is empty, then the condition
// could never be true!
//
//
if (Int.isEmptySet()) {
Result = Relation::KnownFalse;
@ -1254,7 +1254,7 @@ Relation::getImpliedResult(Instruction::BinaryOps Op) const {
// print - Implement the standard print form to print out analysis information.
void CEE::print(std::ostream &O, const Module *M) const {
O << "\nPrinting Correlated Expression Info:\n";
for (std::map<BasicBlock*, RegionInfo>::const_iterator I =
for (std::map<BasicBlock*, RegionInfo>::const_iterator I =
RegionInfoMap.begin(), E = RegionInfoMap.end(); I != E; ++I)
I->second.print(O);
}


@ -1,10 +1,10 @@
//===- DCE.cpp - Code to perform dead code elimination --------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements dead inst elimination and dead code elimination.
@ -49,7 +49,7 @@ namespace {
AU.setPreservesCFG();
}
};
RegisterOpt<DeadInstElimination> X("die", "Dead Instruction Elimination");
}
@ -81,7 +81,7 @@ bool DCE::runOnFunction(Function &F) {
WorkList.push_back(&*i);
}
std::set<Instruction*> DeadInsts;
// Loop over the worklist finding instructions that are dead. If they are
// dead make them drop all of their uses, making other instructions
// potentially dead, and work until the worklist is empty.
@ -89,7 +89,7 @@ bool DCE::runOnFunction(Function &F) {
while (!WorkList.empty()) {
Instruction *I = WorkList.back();
WorkList.pop_back();
if (isInstructionTriviallyDead(I)) { // If the instruction is dead...
// Loop over all of the values that the instruction uses, if there are
// instructions being used, add them to the worklist, because they might


@ -1,10 +1,10 @@
//===- DeadStoreElimination.cpp - Dead Store Elimination ------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
@ -39,9 +39,9 @@ namespace {
Changed |= runOnBasicBlock(*I);
return Changed;
}
bool runOnBasicBlock(BasicBlock &BB);
void DeleteDeadInstructionChains(Instruction *I,
SetVector<Instruction*> &DeadInsts);
@ -87,7 +87,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
bool MadeChange = false;
for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
Instruction *I = --BBI; // Keep moving iterator backwards
// If this is a free instruction, it makes the free'd location dead!
if (FreeInst *FI = dyn_cast<FreeInst>(I)) {
// Free instructions make any stores to the free'd location dead.
@ -161,7 +161,7 @@ void DSE::DeleteDeadInstructionChains(Instruction *I,
DeadInsts.insert(Op); // Attempt to nuke it later.
I->setOperand(i, 0); // Drop from the operand list.
}
I->eraseFromParent();
++NumOther;
}


@ -1,10 +1,10 @@
//===-- GCSE.cpp - SSA-based Global Common Subexpression Elimination ------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass is designed to be a very quick global transformation that
@ -116,7 +116,7 @@ bool GCSE::runOnFunction(Function &F) {
else {
I = Inst; --I;
}
// First check to see if we were able to value number this instruction
// to a non-instruction value. If so, prefer that value over other
// instructions which may compute the same thing.
@ -186,14 +186,14 @@ void GCSE::ReplaceInstructionWith(Instruction *I, Value *V) {
getAnalysis<ValueNumbering>().deleteValue(I);
I->replaceAllUsesWith(V);
if (InvokeInst *II = dyn_cast<InvokeInst>(I)) {
// Removing an invoke instruction requires adding a branch to the normal
// destination and removing PHI node entries in the exception destination.
new BranchInst(II->getNormalDest(), II);
II->getUnwindDest()->removePredecessor(II->getParent());
}
// Erase the instruction from the program.
I->getParent()->getInstList().erase(I);
}


@ -1,10 +1,10 @@
//===- IndVarSimplify.cpp - Induction Variable Elimination ----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
@ -76,7 +76,7 @@ namespace {
bool isInsertedInstruction(Instruction *I) const {
return InsertedInstructions.count(I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
@ -128,7 +128,7 @@ namespace {
return ConstantExpr::getCast(C, Ty);
else if (Instruction *I = dyn_cast<Instruction>(V)) {
// Check to see if there is already a cast. If there is, use it.
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
UI != E; ++UI) {
if ((*UI)->getType() == Ty)
if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI))) {
@ -206,10 +206,10 @@ Value *SCEVExpander::visitMulExpr(SCEVMulExpr *S) {
if (SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(0)))
if (SC->getValue()->isAllOnesValue())
FirstOp = 1;
int i = S->getNumOperands()-2;
Value *V = expandInTy(S->getOperand(i+1), Ty);
// Emit a bunch of multiply instructions
for (; i >= FirstOp; --i)
V = BinaryOperator::createMul(V, expandInTy(S->getOperand(i), Ty),
@ -358,7 +358,7 @@ DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts) {
/// EliminatePointerRecurrence - Check to see if this is a trivial GEP pointer
/// recurrence. If so, change it into an integer recurrence, permitting
/// analysis by the SCEV routines.
void IndVarSimplify::EliminatePointerRecurrence(PHINode *PN,
void IndVarSimplify::EliminatePointerRecurrence(PHINode *PN,
BasicBlock *Preheader,
std::set<Instruction*> &DeadInsts) {
assert(PN->getNumIncomingValues() == 2 && "Noncanonicalized loop!");
@ -368,7 +368,7 @@ void IndVarSimplify::EliminatePointerRecurrence(PHINode *PN,
dyn_cast<GetElementPtrInst>(PN->getIncomingValue(BackedgeIdx)))
if (GEPI->getOperand(0) == PN) {
assert(GEPI->getNumOperands() == 2 && "GEP types must mismatch!");
// Okay, we found a pointer recurrence. Transform this pointer
// recurrence into an integer recurrence. Compute the value that gets
// added to the pointer at every iteration.
@ -383,10 +383,10 @@ void IndVarSimplify::EliminatePointerRecurrence(PHINode *PN,
Value *NewAdd = BinaryOperator::createAdd(NewPhi, AddedVal,
GEPI->getName()+".rec", GEPI);
NewPhi->addIncoming(NewAdd, PN->getIncomingBlock(BackedgeIdx));
// Update the existing GEP to use the recurrence.
GEPI->setOperand(0, PN->getIncomingValue(PreheaderIdx));
// Update the GEP to use the new recurrence we just inserted.
GEPI->setOperand(1, NewAdd);
@ -547,7 +547,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L) {
bool HasConstantItCount = isa<SCEVConstant>(SE->getIterationCount(L));
std::set<Instruction*> InstructionsToDelete;
for (unsigned i = 0, e = L->getBlocks().size(); i != e; ++i)
if (LI->getLoopFor(L->getBlocks()[i]) == L) { // Not in a subloop...
BasicBlock *BB = L->getBlocks()[i];
@ -599,7 +599,7 @@ void IndVarSimplify::runOnLoop(Loop *L) {
//
BasicBlock *Header = L->getHeader();
BasicBlock *Preheader = L->getLoopPreheader();
std::set<Instruction*> DeadInsts;
for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
PHINode *PN = cast<PHINode>(I);
@ -748,7 +748,7 @@ void IndVarSimplify::runOnLoop(Loop *L) {
DeadInsts.insert(I);
++NumRemoved;
Changed = true;
}
}
}
}
#endif


@ -1,10 +1,10 @@
//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
@ -103,7 +103,7 @@ namespace {
// null - No change was made
// I - Change was made, I is still valid, I may be dead though
// otherwise - Change was made, replace I with returned instruction
//
//
Instruction *visitAdd(BinaryOperator &I);
Instruction *visitSub(BinaryOperator &I);
Instruction *visitMul(BinaryOperator &I);
@ -159,7 +159,7 @@ namespace {
/// cast.
Value *InsertCastBefore(Value *V, const Type *Ty, Instruction &Pos) {
if (V->getType() == Ty) return V;
Instruction *C = new CastInst(V, Ty, V->getName(), &Pos);
WorkList.push_back(C);
return C;
@ -275,7 +275,7 @@ bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
bool Changed = false;
if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
Changed = !I.swapOperands();
if (!I.isAssociative()) return Changed;
Instruction::BinaryOps Opcode = I.getOpcode();
if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
@ -302,7 +302,7 @@ bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
I.setOperand(0, New);
I.setOperand(1, Folded);
return true;
}
}
}
return Changed;
}
@ -427,7 +427,7 @@ Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) {
// reassociate the expression from ((? op A) op B) to (? op (A op B))
if (ShouldApply) {
BasicBlock *BB = Root.getParent();
// Now all of the instructions are in the current basic block, go ahead
// and perform the reassociation.
Instruction *TmpLHSI = cast<Instruction>(Root.getOperand(0));
@ -463,12 +463,12 @@ Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) {
TmpLHSI = NextLHSI;
ExtraOperand = NextOp;
}
// Now that the instructions are reassociated, have the functor perform
// the transformation...
return F.apply(Root);
}
LHSI = dyn_cast<Instruction>(LHSI->getOperand(0));
}
return 0;
@ -493,7 +493,7 @@ struct AddMaskingAnd {
AddMaskingAnd(Constant *c) : C2(c) {}
bool shouldApply(Value *LHS) const {
ConstantInt *C1;
return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) &&
return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) &&
ConstantExpr::getAnd(C1, C2)->isNullValue();
}
Instruction *apply(BinaryOperator &Add) const {
@ -506,7 +506,7 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
if (isa<CastInst>(I)) {
if (Constant *SOC = dyn_cast<Constant>(SO))
return ConstantExpr::getCast(SOC, I.getType());
return IC->InsertNewInstBefore(new CastInst(SO, I.getType(),
SO->getName() + ".cast"), I);
}
@ -615,7 +615,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (!I.getType()->isFloatingPoint() && // -0 + +0 = +0, so it's not a noop
RHSC->isNullValue())
return ReplaceInstUsesWith(I, LHS);
// X + (signbit) --> X ^ signbit
if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
unsigned NumBits = CI->getType()->getPrimitiveSize()*8;
@ -654,7 +654,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Value *V = dyn_castNegVal(RHS))
return BinaryOperator::createSub(LHS, V);
ConstantInt *C2;
if (Value *X = dyn_castFoldableMul(LHS, C2)) {
if (X == RHS) // X*C + X --> X * (C+1)
@ -696,7 +696,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// See if the and mask includes all of these bits.
uint64_t AddRHSHighBitsAnd = AddRHSHighBits & C2->getRawValue();
if (AddRHSHighBits == AddRHSHighBitsAnd) {
// Okay, the xform is safe. Insert the new add pronto.
Value *NewAdd = InsertNewInstBefore(BinaryOperator::createAdd(X, CRHS,
@ -832,7 +832,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
Op1I->setOperand(0, IIOp1);
Op1I->setOperand(1, IIOp0);
// Create the new top level add instruction...
return BinaryOperator::createAdd(Op0, Op1);
}
@ -853,13 +853,13 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(Op0))
if (CSI->isNullValue())
if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
return BinaryOperator::createDiv(Op1I->getOperand(0),
return BinaryOperator::createDiv(Op1I->getOperand(0),
ConstantExpr::getNeg(DivRHS));
// X - X*C --> X * (1-C)
ConstantInt *C2;
if (dyn_castFoldableMul(Op1I, C2) == Op0) {
Constant *CP1 =
Constant *CP1 =
ConstantExpr::getSub(ConstantInt::get(I.getType(), 1), C2);
return BinaryOperator::createMul(Op0, CP1);
}
@ -877,7 +877,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y
return BinaryOperator::createNeg(Op0I->getOperand(1), I.getName());
}
ConstantInt *C1;
if (Value *X = dyn_castFoldableMul(Op0, C1)) {
if (X == Op1) { // X*C - X --> X * (C-1)
@ -929,7 +929,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
return BinaryOperator::createMul(SI->getOperand(0),
ConstantExpr::getShl(CI, ShOp));
if (CI->isNullValue())
return ReplaceInstUsesWith(I, Op1); // X * 0 == 0
if (CI->equalsInt(1)) // X * 1 == X
@ -1004,7 +1004,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
// or truncate to the multiply type.
if (I.getType() != V->getType())
V = InsertNewInstBefore(new CastInst(V, I.getType(), V->getName()),I);
Value *OtherOp = Op0 == BoolCast ? I.getOperand(1) : Op0;
return BinaryOperator::createAnd(V, OtherOp);
}
@ -1069,10 +1069,10 @@ Instruction *InstCombiner::visitDiv(BinaryOperator &I) {
if (ConstantUInt *SFO = dyn_cast<ConstantUInt>(SI->getOperand(2))) {
if (STO->getValue() == 0) { // Couldn't be this argument.
I.setOperand(1, SFO);
return &I;
return &I;
} else if (SFO->getValue() == 0) {
I.setOperand(2, STO);
return &I;
return &I;
}
uint64_t TVA = STO->getValue(), FVA = SFO->getValue();
@ -1083,7 +1083,7 @@ Instruction *InstCombiner::visitDiv(BinaryOperator &I) {
Instruction *TSI = new ShiftInst(Instruction::Shr, Op0,
TC, SI->getName()+".t");
TSI = InsertNewInstBefore(TSI, I);
Constant *FC = ConstantUInt::get(Type::UByteTy, FSA);
Instruction *FSI = new ShiftInst(Instruction::Shr, Op0,
FC, SI->getName()+".f");
@ -1091,7 +1091,7 @@ Instruction *InstCombiner::visitDiv(BinaryOperator &I) {
return new SelectInst(SI->getOperand(0), TSI, FSI);
}
}
// 0 / X == 0, we don't need to preserve faults!
if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
if (LHS->equalsInt(0))
@ -1147,10 +1147,10 @@ Instruction *InstCombiner::visitRem(BinaryOperator &I) {
if (ConstantUInt *SFO = dyn_cast<ConstantUInt>(SI->getOperand(2))) {
if (STO->getValue() == 0) { // Couldn't be this argument.
I.setOperand(1, SFO);
return &I;
return &I;
} else if (SFO->getValue() == 0) {
I.setOperand(1, STO);
return &I;
return &I;
}
if (!(STO->getValue() & (STO->getValue()-1)) &&
@ -1162,7 +1162,7 @@ Instruction *InstCombiner::visitRem(BinaryOperator &I) {
return new SelectInst(SI->getOperand(0), TrueAnd, FalseAnd);
}
}
// 0 % X == 0, we don't need to preserve faults!
if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
if (LHS->equalsInt(0))
@ -1182,7 +1182,7 @@ static bool isMaxValueMinusOne(const ConstantInt *C) {
}
const ConstantSInt *CS = cast<ConstantSInt>(C);
// Calculate 0111111111..11111
unsigned TypeBits = C->getType()->getPrimitiveSize()*8;
int64_t Val = INT64_MAX; // All ones
@ -1196,8 +1196,8 @@ static bool isMinValuePlusOne(const ConstantInt *C) {
return CU->getValue() == 1;
const ConstantSInt *CS = cast<ConstantSInt>(C);
// Calculate 1111111111000000000000
// Calculate 1111111111000000000000
unsigned TypeBits = C->getType()->getPrimitiveSize()*8;
int64_t Val = -1; // All ones
Val <<= TypeBits-1; // Shift over to the right spot
@ -1325,7 +1325,7 @@ static bool MaskedValueIsZero(Value *V, ConstantIntegral *Mask) {
return true;
if (ConstantIntegral *CI = dyn_cast<ConstantIntegral>(V))
return ConstantExpr::getAnd(CI, Mask)->isNullValue();
if (Instruction *I = dyn_cast<Instruction>(V)) {
switch (I->getOpcode()) {
case Instruction::And:
@ -1336,11 +1336,11 @@ static bool MaskedValueIsZero(Value *V, ConstantIntegral *Mask) {
break;
case Instruction::Or:
// If the LHS and the RHS are MaskedValueIsZero, the result is also zero.
return MaskedValueIsZero(I->getOperand(1), Mask) &&
return MaskedValueIsZero(I->getOperand(1), Mask) &&
MaskedValueIsZero(I->getOperand(0), Mask);
case Instruction::Select:
// If the T and F values are MaskedValueIsZero, the result is also zero.
return MaskedValueIsZero(I->getOperand(2), Mask) &&
return MaskedValueIsZero(I->getOperand(2), Mask) &&
MaskedValueIsZero(I->getOperand(1), Mask);
case Instruction::Cast: {
const Type *SrcTy = I->getOperand(0)->getType();
@ -1414,7 +1414,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
case Instruction::Or:
if (Together == AndRHS) // (X | C) & C --> C
return ReplaceInstUsesWith(TheAnd, AndRHS);
if (Op->hasOneUse() && Together != OpRHS) {
// (X | C1) & C2 --> (X | (C1&C2)) & C2
std::string Op0Name = Op->getName(); Op->setName("");
@ -1439,7 +1439,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
// ADD down to exactly one bit. If the constant we are adding has
// no bits set below this bit, then we can eliminate the ADD.
uint64_t AddRHS = cast<ConstantInt>(OpRHS)->getRawValue();
// Check to see if any bits below the one bit set in AndRHSV are set.
if ((AddRHS & (AndRHSV-1)) == 0) {
// If not, the only thing that can affect the output of the AND is
@ -1468,7 +1468,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
Constant *AllOne = ConstantIntegral::getAllOnesValue(AndRHS->getType());
Constant *ShlMask = ConstantExpr::getShl(AllOne, OpRHS);
Constant *CI = ConstantExpr::getAnd(AndRHS, ShlMask);
if (CI == ShlMask) { // Masking out bits that the shift already masks
return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
} else if (CI != AndRHS) { // Reducing bits set in and.
@ -1476,7 +1476,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
return &TheAnd;
}
break;
}
}
case Instruction::Shr:
// We know that the AND will not produce any of the bits shifted in, so if
// the anded constant includes them, clear them now! This only applies to
@ -1536,7 +1536,7 @@ Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
return new SetCondInst(Instruction::SetNE, V, V);
if (cast<ConstantIntegral>(Lo)->isMinValue())
return new SetCondInst(Instruction::SetLT, V, Hi);
Constant *AddCST = ConstantExpr::getNeg(Lo);
Instruction *Add = BinaryOperator::createAdd(V, AddCST,V->getName()+".off");
InsertNewInstBefore(Add, IB);
@ -1589,9 +1589,9 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// If the mask is not masking out any bits, there is no reason to do the
// and in the first place.
ConstantIntegral *NotAndRHS =
ConstantIntegral *NotAndRHS =
cast<ConstantIntegral>(ConstantExpr::getNot(AndRHS));
if (MaskedValueIsZero(Op0, NotAndRHS))
if (MaskedValueIsZero(Op0, NotAndRHS))
return ReplaceInstUsesWith(I, Op0);
// Optimize a variety of ((val OP C1) & C2) combinations...
@ -1605,9 +1605,9 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// (X ^ V) & C2 --> (X & C2) iff (V & C2) == 0
// (X | V) & C2 --> (X & C2) iff (V & C2) == 0
if (MaskedValueIsZero(Op0LHS, AndRHS))
return BinaryOperator::createAnd(Op0RHS, AndRHS);
return BinaryOperator::createAnd(Op0RHS, AndRHS);
if (MaskedValueIsZero(Op0RHS, AndRHS))
return BinaryOperator::createAnd(Op0LHS, AndRHS);
return BinaryOperator::createAnd(Op0LHS, AndRHS);
// If the mask is only needed on one incoming arm, push it up.
if (Op0I->hasOneUse()) {
@ -1618,7 +1618,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
InsertNewInstBefore(NewRHS, I);
return BinaryOperator::create(
cast<BinaryOperator>(Op0I)->getOpcode(), Op0LHS, NewRHS);
}
}
if (!isa<Constant>(NotAndRHS) &&
MaskedValueIsZero(Op0RHS, NotAndRHS)) {
// Not masking anything out for the RHS, move to LHS.
@ -1727,7 +1727,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (match(RHS, m_SetCond(RHSCC, m_Value(RHSVal), m_ConstantInt(RHSCst))))
if (LHSVal == RHSVal && // Found (X setcc C1) & (X setcc C2)
// Set[GL]E X, CST is folded to Set[GL]T elsewhere.
LHSCC != Instruction::SetGE && LHSCC != Instruction::SetLE &&
LHSCC != Instruction::SetGE && LHSCC != Instruction::SetLE &&
RHSCC != Instruction::SetGE && RHSCC != Instruction::SetLE) {
// Ensure that the larger constant is on the RHS.
Constant *Cmp = ConstantExpr::getSetGT(LHSCst, RHSCst);
@ -1869,7 +1869,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (match(Op0, m_Not(m_Value(A)))) { // ~A | Op1
if (A == Op1) // ~A | A == -1
return ReplaceInstUsesWith(I,
return ReplaceInstUsesWith(I,
ConstantIntegral::getAllOnesValue(I.getType()));
} else {
A = 0;
@ -1877,7 +1877,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (match(Op1, m_Not(m_Value(B)))) { // Op0 | ~B
if (Op0 == B)
return ReplaceInstUsesWith(I,
return ReplaceInstUsesWith(I,
ConstantIntegral::getAllOnesValue(I.getType()));
// (~A | ~B) == (~(A & B)) - De Morgan's Law
@ -1900,7 +1900,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (match(RHS, m_SetCond(RHSCC, m_Value(RHSVal), m_ConstantInt(RHSCst))))
if (LHSVal == RHSVal && // Found (X setcc C1) | (X setcc C2)
// Set[GL]E X, CST is folded to Set[GL]T elsewhere.
LHSCC != Instruction::SetGE && LHSCC != Instruction::SetLE &&
LHSCC != Instruction::SetGE && LHSCC != Instruction::SetLE &&
RHSCC != Instruction::SetGE && RHSCC != Instruction::SetLE) {
// Ensure that the larger constant is on the RHS.
Constant *Cmp = ConstantExpr::getSetGT(LHSCst, RHSCst);
@ -2035,13 +2035,13 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (dyn_castNotVal(Op0I->getOperand(1))) Op0I->swapOperands();
if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
Instruction *NotY =
BinaryOperator::createNot(Op0I->getOperand(1),
BinaryOperator::createNot(Op0I->getOperand(1),
Op0I->getOperand(1)->getName()+".not");
InsertNewInstBefore(NotY, I);
return BinaryOperator::createOr(Op0NotVal, NotY);
}
}
if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
switch (Op0I->getOpcode()) {
case Instruction::Add:
@ -2096,7 +2096,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
} else if (Op1I->getOperand(1) == Op0) { // B^(A|B) == (A|B)^B
I.swapOperands();
std::swap(Op0, Op1);
}
}
} else if (Op1I->getOpcode() == Instruction::Xor) {
if (Op0 == Op1I->getOperand(0)) // A^(A^B) == B
return ReplaceInstUsesWith(I, Op1I->getOperand(1));
@ -2242,13 +2242,13 @@ Instruction *InstCombiner::FoldGEPSetCC(User *GEPLHS, Value *RHS,
EmitIt = false;
else if (TD->getTypeSize(GTI.getIndexedType()) == 0) {
EmitIt = false; // This is indexing into a zero sized array?
} else if (isa<ConstantInt>(C))
} else if (isa<ConstantInt>(C))
return ReplaceInstUsesWith(I, // No comparison is needed here.
ConstantBool::get(Cond == Instruction::SetNE));
}
if (EmitIt) {
Instruction *Comp =
Instruction *Comp =
new SetCondInst(Cond, GEPLHS->getOperand(i),
Constant::getNullValue(GEPLHS->getOperand(i)->getType()));
if (InVal == 0)
@ -2312,7 +2312,7 @@ Instruction *InstCombiner::FoldGEPSetCC(User *GEPLHS, Value *RHS,
unsigned DiffOperand = 0; // The operand that differs.
for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
if (GEPLHS->getOperand(i)->getType()->getPrimitiveSize() !=
if (GEPLHS->getOperand(i)->getType()->getPrimitiveSize() !=
GEPRHS->getOperand(i)->getType()->getPrimitiveSize()) {
// Irreconcilable differences.
NumDifferences = 2;
@ -2364,9 +2364,9 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
// setcc <global/alloca*/null>, <global/alloca*/null> - Global/Stack value
// addresses never equal each other! We already know that Op0 != Op1.
if ((isa<GlobalValue>(Op0) || isa<AllocaInst>(Op0) ||
isa<ConstantPointerNull>(Op0)) &&
(isa<GlobalValue>(Op1) || isa<AllocaInst>(Op1) ||
if ((isa<GlobalValue>(Op0) || isa<AllocaInst>(Op0) ||
isa<ConstantPointerNull>(Op0)) &&
(isa<GlobalValue>(Op1) || isa<AllocaInst>(Op1) ||
isa<ConstantPointerNull>(Op1)))
return ReplaceInstUsesWith(I, ConstantBool::get(!isTrueWhenEqual(I)));
@ -2466,7 +2466,7 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
ShAmt = Shift ? dyn_cast<ConstantUInt>(Shift->getOperand(1)) : 0;
ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
const Type *Ty = LHSI->getType();
// We can fold this as long as we can't shift unknown bits
// into the mask. This can only happen with signed shift
// rights, as they sign-extend.
@ -2476,14 +2476,14 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
if (!CanFold) {
// To test for the bad case of the signed shr, see if any
// of the bits shifted in could be tested after the mask.
Constant *OShAmt = ConstantUInt::get(Type::UByteTy,
Constant *OShAmt = ConstantUInt::get(Type::UByteTy,
Ty->getPrimitiveSize()*8-ShAmt->getValue());
Constant *ShVal =
Constant *ShVal =
ConstantExpr::getShl(ConstantInt::getAllOnesValue(Ty), OShAmt);
if (ConstantExpr::getAnd(ShVal, AndCST)->isNullValue())
CanFold = true;
}
if (CanFold) {
Constant *NewCst;
if (Shift->getOpcode() == Instruction::Shl)
@ -2521,7 +2521,7 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
// (setcc (cast X to larger), CI)
case Instruction::Cast:
if (Instruction *R =
if (Instruction *R =
visitSetCondInstWithCastAndConstant(I,cast<CastInst>(LHSI),CI))
return R;
break;
@ -2534,7 +2534,7 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
case Instruction::SetNE: {
// If we are comparing against bits always shifted out, the
// comparison cannot succeed.
Constant *Comp =
Constant *Comp =
ConstantExpr::getShl(ConstantExpr::getShr(CI, ShAmt), ShAmt);
if (Comp != CI) {// Comparing against a bit that we know is zero.
bool IsSetNE = I.getOpcode() == Instruction::SetNE;
@ -2556,7 +2556,7 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
} else {
Mask = ConstantInt::getAllOnesValue(CI->getType());
}
Instruction *AndI =
BinaryOperator::createAnd(LHSI->getOperand(0),
Mask, LHSI->getName()+".mask");
@ -2577,15 +2577,15 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
case Instruction::SetNE: {
// If we are comparing against bits always shifted out, the
// comparison cannot succeed.
Constant *Comp =
Constant *Comp =
ConstantExpr::getShr(ConstantExpr::getShl(CI, ShAmt), ShAmt);
if (Comp != CI) {// Comparing against a bit that we know is zero.
bool IsSetNE = I.getOpcode() == Instruction::SetNE;
Constant *Cst = ConstantBool::get(IsSetNE);
return ReplaceInstUsesWith(I, Cst);
}
if (LHSI->hasOneUse() || CI->isNullValue()) {
unsigned ShAmtVal = (unsigned)ShAmt->getValue();
@ -2602,7 +2602,7 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
} else {
Mask = ConstantSInt::get(CI->getType(), Val);
}
Instruction *AndI =
BinaryOperator::createAnd(LHSI->getOperand(0),
Mask, LHSI->getName()+".mask");
@ -2727,12 +2727,12 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
I.getName()), I);
}
}
if (Op1)
return new SelectInst(LHSI->getOperand(0), Op1, Op2);
break;
}
// Simplify seteq and setne instructions...
if (I.getOpcode() == Instruction::SetEQ ||
I.getOpcode() == Instruction::SetNE) {
@ -2758,7 +2758,7 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
return BinaryOperator::create(I.getOpcode(), NewRem,
Constant::getNullValue(UTy));
}
break;
break;
case Instruction::Add:
// Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
@ -2770,7 +2770,7 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
// Replace ((add A, B) != 0) with (A != -B) if A or B is
// efficiently invertible, or if the add has just this one use.
Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
if (Value *NegVal = dyn_castNegVal(BOp1))
return new SetCondInst(I.getOpcode(), BOp0, NegVal);
else if (Value *NegVal = dyn_castNegVal(BOp0))
@ -2835,7 +2835,7 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
Instruction::SetGE, X,
Constant::getNullValue(X->getType()));
}
// ((X & ~7) == 0) --> X < 8
if (CI->isNullValue() && isHighOnes(BOC)) {
Value *X = BO->getOperand(0);
@ -2857,14 +2857,14 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
}
}
} else { // Not a SetEQ/SetNE
// If the LHS is a cast from an integral value of the same size,
// If the LHS is a cast from an integral value of the same size,
if (CastInst *Cast = dyn_cast<CastInst>(Op0)) {
Value *CastOp = Cast->getOperand(0);
const Type *SrcTy = CastOp->getType();
unsigned SrcTySize = SrcTy->getPrimitiveSize();
if (SrcTy != Cast->getType() && SrcTy->isInteger() &&
SrcTySize == Cast->getType()->getPrimitiveSize()) {
assert((SrcTy->isSigned() ^ Cast->getType()->isSigned()) &&
assert((SrcTy->isSigned() ^ Cast->getType()->isSigned()) &&
"Source and destination signednesses should differ!");
if (Cast->getType()->isSigned()) {
// If this is a signed comparison, check for comparisons in the
@ -2916,14 +2916,14 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
// We keep moving the cast from the left operand over to the right
// operand, where it can often be eliminated completely.
Op0 = CastOp0;
// If operand #1 is a cast instruction, see if we can eliminate it as
// well.
if (CastInst *CI2 = dyn_cast<CastInst>(Op1))
if (CI2->getOperand(0)->getType()->isLosslesslyConvertibleTo(
Op0->getType()))
Op1 = CI2->getOperand(0);
// If Op1 is a constant, we can fold the cast into the constant.
if (Op1->getType() != Op0->getType())
if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
@ -2978,7 +2978,7 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
return ReplaceInstUsesWith(I, ConstantBool::False);
}
}
// Otherwise, we can replace the setcc with a setcc of the smaller
// operand value.
Op1 = ConstantExpr::getCast(cast<Constant>(Op1), SrcTy);
@ -2989,10 +2989,10 @@ Instruction *InstCombiner::visitSetCondInst(BinaryOperator &I) {
return Changed ? &I : 0;
}
// visitSetCondInstWithCastAndConstant - this method is part of the
// visitSetCondInstWithCastAndConstant - this method is part of the
// visitSetCondInst method. It handles the situation where we have:
// (setcc (cast X to larger), CI)
// It tries to remove the cast and even the setcc if the CI value
// It tries to remove the cast and even the setcc if the CI value
// and range of the cast allow it.
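// Illustrative sketch (hypothetical values): given
//   %x = cast ubyte %A to uint
//   %c = setlt uint %x, 300
// the constant 300 does not fit in a ubyte, so the compare folds to a
// constant; if the constant did fit (say 100), the compare could instead be
// performed directly on %A as setlt ubyte %A, 100.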
Instruction *
InstCombiner::visitSetCondInstWithCastAndConstant(BinaryOperator&I,
@ -3005,9 +3005,9 @@ InstCombiner::visitSetCondInstWithCastAndConstant(BinaryOperator&I,
unsigned SrcBits = SrcTy->getPrimitiveSize()*8;
unsigned DestBits = DestTy->getPrimitiveSize()*8;
if (SrcTy == Type::BoolTy)
if (SrcTy == Type::BoolTy)
SrcBits = 1;
if (DestTy == Type::BoolTy)
if (DestTy == Type::BoolTy)
DestBits = 1;
if (SrcBits < DestBits) {
// There are fewer bits in the source of the cast than in the result
@ -3015,25 +3015,25 @@ InstCombiner::visitSetCondInstWithCastAndConstant(BinaryOperator&I,
// value won't have changed due to sign extension.
Constant *NewCst = ConstantExpr::getCast(CI, SrcTy);
if (ConstantExpr::getCast(NewCst, DestTy) == CI) {
// The constant value operand of the setCC before and after a
// cast to the source type of the cast instruction is the same
// value, so we just replace with the same setcc opcode, but
// using the source value compared to the constant casted to the
// source type.
// The constant value operand of the setCC before and after a
// cast to the source type of the cast instruction is the same
// value, so we just replace with the same setcc opcode, but
// using the source value compared to the constant casted to the
// source type.
if (SrcTy->isSigned() && DestTy->isUnsigned()) {
CastInst* Cst = new CastInst(LHSI->getOperand(0),
SrcTy->getUnsignedVersion(),
LHSI->getName());
InsertNewInstBefore(Cst,I);
return new SetCondInst(I.getOpcode(), Cst,
return new SetCondInst(I.getOpcode(), Cst,
ConstantExpr::getCast(CI,
SrcTy->getUnsignedVersion()));
}
return new SetCondInst(I.getOpcode(), LHSI->getOperand(0),NewCst);
}
// The constant value before and after a cast to the source type
// is different, so various cases are possible depending on the
// The constant value before and after a cast to the source type
// is different, so various cases are possible depending on the
// opcode and the signs of the types involved in the cast.
switch (I.getOpcode()) {
case Instruction::SetLT: {
@ -3052,14 +3052,14 @@ InstCombiner::visitSetCondInstWithCastAndConstant(BinaryOperator&I,
// We're looking for equality, and we know the values are not
// equal so replace with constant False.
return ReplaceInstUsesWith(I, ConstantBool::False);
case Instruction::SetNE:
case Instruction::SetNE:
// We're testing for inequality, and we know the values are not
// equal so replace with constant True.
return ReplaceInstUsesWith(I, ConstantBool::True);
case Instruction::SetLE:
case Instruction::SetGE:
case Instruction::SetLE:
case Instruction::SetGE:
assert(0 && "SetLE and SetGE should be handled elsewhere");
default:
default:
assert(0 && "unknown integer comparison");
}
}
@ -3123,7 +3123,7 @@ Instruction *InstCombiner::visitShiftInst(ShiftInst &I) {
if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
return BinaryOperator::createMul(BO->getOperand(0),
ConstantExpr::getShl(BOOp, CUI));
// Try to fold constant and into select arguments.
if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
if (Instruction *R = FoldOpIntoSelect(I, SI, this))
@ -3219,7 +3219,7 @@ Instruction *InstCombiner::visitShiftInst(ShiftInst &I) {
dyn_cast<ConstantUInt>(Op0SI->getOperand(1))) {
unsigned ShiftAmt1 = (unsigned)ShiftAmt1C->getValue();
unsigned ShiftAmt2 = (unsigned)CUI->getValue();
// Check for (A << c1) << c2 and (A >> c1) >> c2
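// e.g. (hypothetical constants): (%A << 2) << 3 folds to %A << 5, and
// (%A >> 2) >> 3 folds to %A >> 5, provided the combined amount still fits
// in the value's bit width.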
if (I.getOpcode() == Op0SI->getOpcode()) {
unsigned Amt = ShiftAmt1+ShiftAmt2; // Fold into one big shift...
@ -3228,7 +3228,7 @@ Instruction *InstCombiner::visitShiftInst(ShiftInst &I) {
return new ShiftInst(I.getOpcode(), Op0SI->getOperand(0),
ConstantUInt::get(Type::UByteTy, Amt));
}
// Check for (A << c1) >> c2 or vice versa. If we are dealing with
// signed types, we can only support the (A >> c1) << c2 configuration,
// because it cannot turn an arbitrary bit of A into a sign bit.
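// Illustrative sketch for an unsigned value (hypothetical constants):
//   (%A << 4) >> 4   becomes   %A & 268435455   ; 0x0FFFFFFF for a 32-bit %A
// i.e. when the two shift amounts match, the pair reduces to a single mask.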
@ -3239,12 +3239,12 @@ Instruction *InstCombiner::visitShiftInst(ShiftInst &I) {
C = ConstantExpr::getShl(C, ShiftAmt1C);
else
C = ConstantExpr::getShr(C, ShiftAmt1C);
Instruction *Mask =
BinaryOperator::createAnd(Op0SI->getOperand(0), C,
Op0SI->getOperand(0)->getName()+".mask");
InsertNewInstBefore(Mask, I);
// Figure out what flavor of shift we should use...
if (ShiftAmt1 == ShiftAmt2)
return ReplaceInstUsesWith(I, Mask); // (A << c) >> c === A & c2
@ -3293,7 +3293,7 @@ static inline bool isEliminableCastOfCast(const Type *SrcTy, const Type *MidTy,
const Type *DstTy, TargetData *TD) {
// It is legal to eliminate the instruction if casting A->B->A if the sizes
// are identical and the bits don't get reinterpreted (for example
// are identical and the bits don't get reinterpreted (for example
// int->float->int would not be allowed).
if (SrcTy == DstTy && SrcTy->isLosslesslyConvertibleTo(MidTy))
return true;
@ -3341,7 +3341,7 @@ static inline bool isEliminableCastOfCast(const Type *SrcTy, const Type *MidTy,
CastType ResultCast = getCastType(SrcTy, DstTy);
if (ResultCast == Noop || ResultCast == Truncate)
return true;
// Otherwise we are still growing the value, we are only safe if the
// Otherwise we are still growing the value, we are only safe if the
// result will match the sign/zeroextendness of the result.
return ResultCast == FirstCast;
}
@ -3402,7 +3402,7 @@ Instruction *InstCombiner::visitCastInst(CastInst &CI) {
// If this is an A->B->A cast, and we are dealing with integral types, try
// to convert this into a logical 'and' instruction.
//
if (A->getType()->isInteger() &&
if (A->getType()->isInteger() &&
CI.getType()->isInteger() && CSrc->getType()->isInteger() &&
CSrc->getType()->isUnsigned() && // B->A cast must zero extend
CSrc->getType()->getPrimitiveSize() < CI.getType()->getPrimitiveSize()&&
@ -3422,7 +3422,7 @@ Instruction *InstCombiner::visitCastInst(CastInst &CI) {
return And;
}
}
// If this is a cast to bool, turn it into the appropriate setne instruction.
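// e.g. (hypothetical operand names): %b = cast int %X to bool is rewritten
// as %b = setne int %X, 0, which is what the bool cast means in this IR.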
if (CI.getType() == Type::BoolTy)
return BinaryOperator::createSetNE(CI.getOperand(0),
@ -3460,7 +3460,7 @@ Instruction *InstCombiner::visitCastInst(CastInst &CI) {
// If the allocation is for an even multiple of the cast type size
if (CastElTySize && (AllocElTySize % CastElTySize == 0)) {
Value *Amt = ConstantUInt::get(Type::UIntTy,
Value *Amt = ConstantUInt::get(Type::UIntTy,
AllocElTySize/CastElTySize);
std::string Name = AI->getName(); AI->setName("");
AllocationInst *New;
@ -3527,7 +3527,7 @@ Instruction *InstCombiner::visitCastInst(CastInst &CI) {
break;
}
}
return 0;
}
@ -3553,7 +3553,7 @@ static unsigned GetSelectFoldableOperands(Instruction *I) {
case Instruction::Sub: // Can only fold on the amount subtracted.
case Instruction::Shl: // Can only fold on the shift amount.
case Instruction::Shr:
return 1;
return 1;
default:
return 0; // Cannot fold
}
@ -3592,7 +3592,7 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
} else {
return 0; // unknown unary op.
}
// Fold this by inserting a select from the input values.
SelectInst *NewSI = new SelectInst(SI.getCondition(), TI->getOperand(0),
FI->getOperand(0), SI.getName()+".v");
@ -3732,9 +3732,9 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
cast<Constant>(IC->getOperand(1))->isNullValue())
if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
if (ICA->getOpcode() == Instruction::And &&
isa<ConstantInt>(ICA->getOperand(1)) &&
(ICA->getOperand(1) == TrueValC ||
ICA->getOperand(1) == FalseValC) &&
isa<ConstantInt>(ICA->getOperand(1)) &&
(ICA->getOperand(1) == TrueValC ||
ICA->getOperand(1) == FalseValC) &&
isOneBitSet(cast<ConstantInt>(ICA->getOperand(1)))) {
// Okay, now we know that everything is set up, we just don't
// know whether we have a setne or seteq and whether the true or
@ -3770,7 +3770,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// NOTE: if we wanted to, this is where to detect MIN/MAX/ABS/etc.
}
}
if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
if (TI->hasOneUse() && FI->hasOneUse()) {
@ -3821,14 +3821,14 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
std::swap(NewTrueOp, NewFalseOp);
Instruction *NewSel =
new SelectInst(CondVal, NewTrueOp,NewFalseOp,SI.getName()+".p");
NewSel = InsertNewInstBefore(NewSel, SI);
return BinaryOperator::createAdd(SubOp->getOperand(0), NewSel);
}
}
}
}
// See if we can fold the select into one of our operands.
if (SI.getType()->isInteger()) {
// See the comment above GetSelectFoldableOperands for a description of the
@ -3906,7 +3906,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);
// FIXME: Increase alignment here.
if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
if (CI->getRawValue() == 1) {
// Replace the instruction with just byte operations. We would
@ -3998,7 +3998,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
}
}
}
return Changed ? CS.getInstruction() : 0;
}
@ -4043,12 +4043,12 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
CallSite::arg_iterator AI = CS.arg_begin();
for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
const Type *ParamTy = FT->getParamType(i);
bool isConvertible = (*AI)->getType()->isLosslesslyConvertibleTo(ParamTy);
if (Callee->isExternal() && !isConvertible) return false;
if (Callee->isExternal() && !isConvertible) return false;
}
if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
@ -4200,7 +4200,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
InsertNewInstBefore(NewPN, PN);
PhiVal = NewPN;
}
// Insert and return the new operation.
if (isa<CastInst>(FirstInst))
return new CastInst(PhiVal, PN.getType());
@ -4223,7 +4223,7 @@ static bool DeadPHICycle(PHINode *PN, std::set<PHINode*> &PotentiallyDeadPHIs) {
if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
return DeadPHICycle(PU, PotentiallyDeadPHIs);
return false;
}
@ -4295,7 +4295,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
if (DeadPHICycle(PU, PotentiallyDeadPHIs))
return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
}
return 0;
}
@ -4353,7 +4353,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// We can always eliminate a cast from int to [u]long. We can
// eliminate a cast from uint to [u]long iff the target is a 32-bit
// pointer target.
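// Illustrative sketch (assuming a hypothetical 32-bit pointer target):
//   %i2 = cast uint %i to long
//   %p  = getelementptr int* %P, long %i2
// can drop the cast and index on %i directly, since the extended value
// cannot exceed the pointer width anyway.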
if (SrcTy->isSigned() ||
if (SrcTy->isSigned() ||
SrcTy->getPrimitiveSize() >= TD->getPointerSize()) {
MadeChange = true;
GEP.setOperand(i, Src);
@ -4412,7 +4412,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
for (gep_type_iterator I = gep_type_begin(*cast<User>(PtrOp)),
E = gep_type_end(*cast<User>(PtrOp)); I != E; ++I)
EndsWithSequential = !isa<StructType>(*I);
// Can we combine the two pointer arithmetics offsets?
if (EndsWithSequential) {
// Replace: gep (gep %P, long B), long A, ...
@ -4466,9 +4466,9 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Indices.push_back(Sum);
Indices.insert(Indices.end(), GEP.op_begin()+2, GEP.op_end());
}
} else if (isa<Constant>(*GEP.idx_begin()) &&
} else if (isa<Constant>(*GEP.idx_begin()) &&
cast<Constant>(*GEP.idx_begin())->isNullValue() &&
SrcGEPOperands.size() != 1) {
SrcGEPOperands.size() != 1) {
// Otherwise we can do the fold if the first index of the GEP is a zero
Indices.insert(Indices.end(), SrcGEPOperands.begin()+1,
SrcGEPOperands.end());
@ -4526,7 +4526,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
const Type *ResElTy =cast<PointerType>(CE->getType())->getElementType();
if (isa<ArrayType>(SrcElTy) &&
TD->getTypeSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
TD->getTypeSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
TD->getTypeSize(ResElTy)) {
Value *V = InsertNewInstBefore(
new GetElementPtrInst(X, Constant::getNullValue(Type::IntTy),
@ -4556,7 +4556,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
}
InsertNewInstBefore(New, AI);
// Scan to the end of the allocation instructions, to skip over a block of
// allocas if possible...
//
@ -4579,7 +4579,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
// If alloca'ing a zero byte object, replace the alloca with a null pointer.
// Note that we only do this for alloca's, because malloc should allocate and
// return a unique pointer, even for a zero byte allocation.
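// e.g. %A = alloca {}  (a hypothetical zero-sized type) can be replaced with
// a null pointer of the same type, because no storage is ever required.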
if (isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized() &&
if (isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized() &&
TD->getTypeSize(AI.getAllocatedType()) == 0)
return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
@ -4682,9 +4682,9 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI) {
// Do not allow turning this into a load of an integer, which is then
// casted to a pointer, this pessimizes pointer analysis a lot.
(isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
IC.getTargetData().getTypeSize(SrcPTy) ==
IC.getTargetData().getTypeSize(SrcPTy) ==
IC.getTargetData().getTypeSize(DestPTy)) {
// Okay, we are casting from one integer or pointer type to another of
// the same size. Instead of casting the pointer before the load, cast
// the result of the loaded value.
@ -4721,7 +4721,7 @@ static bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom) {
if (LI->getOperand(0) == V) return true;
} else if (StoreInst *SI = dyn_cast<StoreInst>(BBI))
if (SI->getOperand(1) == V) return true;
}
return false;
}
@ -4743,7 +4743,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op))
if (GV->isConstant() && !GV->isExternal())
return ReplaceInstUsesWith(LI, GV->getInitializer());
// Instcombine load (constantexpr_GEP global, 0, ...) into the value loaded.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
if (CE->getOpcode() == Instruction::GetElementPtr) {
@ -4867,7 +4867,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
}
if ((SrcPTy->isInteger() || isa<PointerType>(SrcPTy)) &&
IC.getTargetData().getTypeSize(SrcPTy) ==
IC.getTargetData().getTypeSize(SrcPTy) ==
IC.getTargetData().getTypeSize(DestPTy)) {
// Okay, we are casting from one integer or pointer type to another of
@ -4967,7 +4967,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
WorkList.push_back(cast<Instruction>(NewSCC));
return &BI;
}
return 0;
}
@ -5004,7 +5004,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
// Cannot move control-flow-involving instructions.
if (isa<PHINode>(I) || isa<InvokeInst>(I) || isa<CallInst>(I)) return false;
// Do not sink alloca instructions out of the entry block.
if (isa<AllocaInst>(I) && I->getParent() == &DestBlock->getParent()->front())
return false;
@ -5024,7 +5024,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
while (isa<PHINode>(InsertPos)) ++InsertPos;
BasicBlock *SrcBlock = I->getParent();
DestBlock->getInstList().splice(InsertPos, SrcBlock->getInstList(), I);
DestBlock->getInstList().splice(InsertPos, SrcBlock->getInstList(), I);
++NumSunkInst;
return true;
}
@ -5165,7 +5165,7 @@ bool InstCombiner::runOnFunction(Function &F) {
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (Instruction *OpI = dyn_cast<Instruction>(I->getOperand(i)))
WorkList.push_back(OpI);
// Instructions may end up in the worklist more than once. Erase all
// occurrences of this instruction.
removeFromWorkList(I);

View File

@ -1,10 +1,10 @@
//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
@ -89,7 +89,7 @@ namespace {
Loop *CurLoop; // The current loop we are working on...
AliasSetTracker *CurAST; // AliasSet information for the current loop...
/// visitLoop - Hoist expressions out of the specified loop...
/// visitLoop - Hoist expressions out of the specified loop...
///
void visitLoop(Loop *L, AliasSetTracker &AST);
@ -131,21 +131,21 @@ namespace {
BasicBlock *LoopHeader = CurLoop->getHeader();
if (BlockInLoop == LoopHeader)
return true;
DominatorTree::Node *BlockInLoopNode = DT->getNode(BlockInLoop);
DominatorTree::Node *IDom = DT->getNode(ExitBlock);
// Because the exit block is not in the loop, we know we have to get _at
// least_ its immediate dominator.
do {
// Get next Immediate Dominator.
IDom = IDom->getIDom();
// If we have got to the header of the loop, then the instruction's block
// did not dominate the exit node, so we can't hoist it.
if (IDom->getBlock() == LoopHeader)
return false;
} while (IDom != BlockInLoopNode);
return true;
@ -170,7 +170,7 @@ namespace {
/// pointerInvalidatedByLoop - Return true if the body of this loop may
/// store into the memory location pointed to by V.
///
///
bool pointerInvalidatedByLoop(Value *V, unsigned Size) {
// Check to see if any of the basic blocks in CurLoop invalidate *V.
return CurAST->getAliasSetForPointer(V, Size).isMod();
@ -222,7 +222,7 @@ bool LICM::runOnFunction(Function &) {
}
/// visitLoop - Hoist expressions out of the specified loop...
/// visitLoop - Hoist expressions out of the specified loop...
///
void LICM::visitLoop(Loop *L, AliasSetTracker &AST) {
// Recurse through all subloops before we process this loop...
@ -296,7 +296,7 @@ void LICM::SinkRegion(DominatorTree::Node *N) {
for (BasicBlock::iterator II = BB->end(); II != BB->begin(); ) {
Instruction &I = *--II;
// Check to see if we can sink this instruction to the exit blocks
// of the loop. We can do this if all users of the instruction are
// outside of the loop. In this case, it doesn't even matter if the
@ -327,12 +327,12 @@ void LICM::HoistRegion(DominatorTree::Node *N) {
if (!inSubLoop(BB))
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ) {
Instruction &I = *II++;
// Try hoisting the instruction out to the preheader. We can only do this
// if all of the operands of the instruction are loop invariant and if it
// is safe to hoist the instruction.
//
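// Illustrative sketch (hypothetical operands): an address computation like
//   %p = getelementptr int* %base, long %off  ; %base, %off defined outside the loop
// satisfies both conditions and can be computed once in the preheader.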
if (isLoopInvariantInst(I) && canSinkOrHoistInst(I) &&
if (isLoopInvariantInst(I) && canSinkOrHoistInst(I) &&
isSafeToExecuteUnconditionally(I))
hoist(I);
}
@ -380,11 +380,11 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
// FIXME: This should use mod/ref information to see if we can hoist or sink
// the call.
return false;
}
return isa<BinaryOperator>(I) || isa<ShiftInst>(I) || isa<CastInst>(I) ||
return isa<BinaryOperator>(I) || isa<ShiftInst>(I) || isa<CastInst>(I) ||
isa<SelectInst>(I) ||
isa<GetElementPtrInst>(I) || isa<VANextInst>(I) || isa<VAArgInst>(I);
}
@ -452,7 +452,7 @@ void LICM::sink(Instruction &I) {
// Move the instruction to the start of the exit block, after any PHI
// nodes in it.
I.getParent()->getInstList().remove(&I);
BasicBlock::iterator InsertPt = ExitBlocks[0]->begin();
while (isa<PHINode>(InsertPt)) ++InsertPt;
ExitBlocks[0]->getInstList().insert(InsertPt, &I);
@ -472,7 +472,7 @@ void LICM::sink(Instruction &I) {
if (I.getType() != Type::VoidTy)
AI = new AllocaInst(I.getType(), 0, I.getName(),
I.getParent()->getParent()->front().begin());
// Secondly, insert load instructions for each use of the instruction
// outside of the loop.
while (!I.use_empty()) {
@ -519,7 +519,7 @@ void LICM::sink(Instruction &I) {
// Insert the code after the last PHI node...
BasicBlock::iterator InsertPt = ExitBlock->begin();
while (isa<PHINode>(InsertPt)) ++InsertPt;
// If this is the first exit block processed, just move the original
// instruction, otherwise clone the original instruction and insert
// the copy.
@ -535,7 +535,7 @@ void LICM::sink(Instruction &I) {
New->setName(I.getName()+".le");
ExitBlock->getInstList().insert(InsertPt, New);
}
// Now that we have inserted the instruction, store it into the alloca
if (AI) new StoreInst(New, AI, InsertPt);
}
@ -547,7 +547,7 @@ void LICM::sink(Instruction &I) {
CurAST->deleteValue(&I);
I.getParent()->getInstList().erase(&I);
}
// Finally, promote the value to SSA form.
if (AI) {
std::vector<AllocaInst*> Allocas;
@ -561,7 +561,7 @@ void LICM::sink(Instruction &I) {
/// that is safe to hoist, this instruction is called to do the dirty work.
///
void LICM::hoist(Instruction &I) {
DEBUG(std::cerr << "LICM hoisting to " << Preheader->getName()
DEBUG(std::cerr << "LICM hoisting to " << Preheader->getName()
<< ": " << I);
// Remove the instruction from its current basic block... but don't delete the
@ -570,7 +570,7 @@ void LICM::hoist(Instruction &I) {
// Insert the new node in Preheader, before the terminator.
Preheader->getInstList().insert(Preheader->getTerminator(), &I);
if (isa<LoadInst>(I)) ++NumMovedLoads;
else if (isa<CallInst>(I)) ++NumMovedCalls;
++NumHoisted;
@ -584,7 +584,7 @@ void LICM::hoist(Instruction &I) {
bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
// If it is not a trapping instruction, it is always safe to hoist.
if (!Inst.isTrapping()) return true;
// Otherwise we have to check to make sure that the instruction dominates all
// of the exit blocks. If it doesn't, then there is a path out of the loop
// which does not execute this instruction, so we can't hoist it.
@ -610,7 +610,7 @@ bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
if (!isExitBlockDominatedByBlockInLoop(ExitBlocks[i], Inst.getParent()))
return false;
return true;
}
@ -672,7 +672,7 @@ void LICM::PromoteValuesInLoop() {
// Store into the temporary alloca.
new StoreInst(LI, PromotedValues[i].first, LoopPredInst);
}
// Scan the basic blocks in the loop, replacing uses of our pointers with
// uses of the allocas in question.
//
@ -777,10 +777,10 @@ void LICM::FindPromotableValuesInLoop(
// Update the AST and alias analysis.
CurAST->copyValue(V, AI);
for (AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I)
ValueToAllocaMap.insert(std::make_pair(I->first, AI));
DEBUG(std::cerr << "LICM: Promoting value: " << *V << "\n");
}
}

View File

@ -1,10 +1,10 @@
//===- LoopSimplify.cpp - Loop Canonicalization Pass ----------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass performs several transformations to transform natural loops into a
@ -60,7 +60,7 @@ namespace {
AliasAnalysis *AA;
virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
// We need loop information to identify the loops...
AU.addRequired<LoopInfo>();
@ -204,13 +204,13 @@ bool LoopSimplify::ProcessLoop(Loop *L) {
BasicBlock *LoopSimplify::SplitBlockPredecessors(BasicBlock *BB,
const char *Suffix,
const std::vector<BasicBlock*> &Preds) {
// Create new basic block, insert right before the original block...
BasicBlock *NewBB = new BasicBlock(BB->getName()+Suffix, BB->getParent(), BB);
// The preheader first gets an unconditional branch to the loop header...
BranchInst *BI = new BranchInst(BB, NewBB);
// For every PHI node in the block, insert a PHI node into NewBB where the
// incoming values from the out of loop edges are moved to NewBB. We have two
// possible cases here. If the loop is dead, we just insert dummy entries
@ -232,13 +232,13 @@ BasicBlock *LoopSimplify::SplitBlockPredecessors(BasicBlock *BB,
InVal = 0;
break;
}
// If the values coming into the block are not the same, we need a PHI.
if (InVal == 0) {
// Create the new PHI node, insert it into NewBB at the end of the block
PHINode *NewPHI = new PHINode(PN->getType(), PN->getName()+".ph", BI);
if (AA) AA->copyValue(PN, NewPHI);
// Move all of the edges from blocks outside the loop to the new PHI
for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
Value *V = PN->removeIncomingValue(Preds[i], false);
@ -266,7 +266,7 @@ BasicBlock *LoopSimplify::SplitBlockPredecessors(BasicBlock *BB,
}
}
}
// Now that the PHI nodes are updated, actually move the edges from
// Preds to point to NewBB instead of BB.
//
@ -276,14 +276,14 @@ BasicBlock *LoopSimplify::SplitBlockPredecessors(BasicBlock *BB,
if (TI->getSuccessor(s) == BB)
TI->setSuccessor(s, NewBB);
}
} else { // Otherwise the loop is dead...
for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I) {
PHINode *PN = cast<PHINode>(I);
// Insert dummy values as the incoming value...
PN->addIncoming(Constant::getNullValue(PN->getType()), NewBB);
}
}
}
return NewBB;
}
@ -300,15 +300,15 @@ void LoopSimplify::InsertPreheaderForLoop(Loop *L) {
PI != PE; ++PI)
if (!L->contains(*PI)) // Coming in from outside the loop?
OutsideBlocks.push_back(*PI); // Keep track of it...
// Split out the loop pre-header
BasicBlock *NewBB =
SplitBlockPredecessors(Header, ".preheader", OutsideBlocks);
//===--------------------------------------------------------------------===//
// Update analysis results now that we have performed the transformation
//
// We know that we have loop information to update... update it now.
if (Loop *Parent = L->getParentLoop())
Parent->addBasicBlockToLoop(NewBB, getAnalysis<LoopInfo>());
@ -330,7 +330,7 @@ void LoopSimplify::InsertPreheaderForLoop(Loop *L) {
DominatorSet &DS = getAnalysis<DominatorSet>(); // Update dominator info
DominatorTree &DT = getAnalysis<DominatorTree>();
// Update the dominator tree information.
// The immediate dominator of the preheader is the immediate dominator of
@ -353,16 +353,16 @@ void LoopSimplify::InsertPreheaderForLoop(Loop *L) {
E = df_end(PHDomTreeNode); DFI != E; ++DFI)
DS.addDominator((*DFI)->getBlock(), NewBB);
}
// Update immediate dominator information if we have it...
if (ImmediateDominators *ID = getAnalysisToUpdate<ImmediateDominators>()) {
// Whatever i-dominated the header node now immediately dominates NewBB
ID->addNewBlock(NewBB, ID->get(Header));
// The preheader now is the immediate dominator for the header node...
ID->setImmediateDominator(Header, NewBB);
}
// Update dominance frontier information...
if (DominanceFrontier *DF = getAnalysisToUpdate<DominanceFrontier>()) {
// The DF(NewBB) is just (DF(Header)-Header), because NewBB dominates
@ -405,7 +405,7 @@ void LoopSimplify::InsertPreheaderForLoop(Loop *L) {
/// outside of the loop.
BasicBlock *LoopSimplify::RewriteLoopExitBlock(Loop *L, BasicBlock *Exit) {
DominatorSet &DS = getAnalysis<DominatorSet>();
std::vector<BasicBlock*> LoopBlocks;
for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I)
if (L->contains(*I))
@ -579,7 +579,7 @@ void LoopSimplify::InsertUniqueBackedgeBlock(Loop *L) {
// Move the new backedge block to right after the last backedge block.
Function::iterator InsertPos = BackedgeBlocks.back(); ++InsertPos;
F->getBasicBlockList().splice(InsertPos, F->getBasicBlockList(), BEBlock);
// Now that the block has been inserted into the function, create PHI nodes in
// the backedge block which correspond to any PHI nodes in the header block.
for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
@ -609,7 +609,7 @@ void LoopSimplify::InsertUniqueBackedgeBlock(Loop *L) {
}
}
}
// Delete all of the incoming values from the old PN except the preheader's
assert(PreheaderIdx != ~0U && "PHI has no preheader entry??");
if (PreheaderIdx != 0) {
@ -825,7 +825,7 @@ void LoopSimplify::UpdateDomInfoForRevectoredPreds(BasicBlock *NewBB,
for (DominatorSet::DomSetType::const_iterator PDI = PredDoms.begin(),
PDE = PredDoms.end(); PDI != PDE; ++PDI) {
BasicBlock *PredDom = *PDI;
// If the NewBBSucc node is in DF(PredDom), then PredDom didn't
// dominate NewBBSucc but did dominate a predecessor of it. Now we
// change this entry to include NewBB in the DF instead of NewBBSucc.
@ -846,7 +846,7 @@ void LoopSimplify::UpdateDomInfoForRevectoredPreds(BasicBlock *NewBB,
break;
}
}
if (ShouldRemove)
DF->removeFromFrontier(DFI, NewBBSucc);
DF->addToFrontier(DFI, NewBB);

View File

@ -1,10 +1,10 @@
//===- LoopStrengthReduce.cpp - Strength Reduce GEPs in Loops -------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Nate Begeman and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass performs a strength reduction on array references inside loops that
@ -85,7 +85,7 @@ namespace {
std::set<Instruction*> &DeadInsts);
void DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts);
};
RegisterOpt<LoopStrengthReduce> X("loop-reduce",
RegisterOpt<LoopStrengthReduce> X("loop-reduce",
"Strength Reduce GEP Uses of Ind. Vars");
}
@ -170,7 +170,7 @@ void LoopStrengthReduce::strengthReduceGEP(GetElementPtrInst *GEPI, Loop *L,
Cache = Cache->get(operand);
}
assert(indvar > 0 && "Indvar used by GEP not found in operand list");
// Ensure the pointer base is loop invariant. While strength reduction
// makes sense even if the pointer changed on every iteration, there is no
// realistic way of handling it unless GEPs were completely decomposed into
@ -185,9 +185,9 @@ void LoopStrengthReduce::strengthReduceGEP(GetElementPtrInst *GEPI, Loop *L,
if (sz && (sz & (sz-1)) == 0) // Power of two?
if (sz <= (1ULL << (MaxTargetAMSize-1)))
return;
// If all operands of the GEP we are going to insert into the preheader
// are constants, generate a GEP ConstantExpr instead.
// are constants, generate a GEP ConstantExpr instead.
//
// If there is only one operand after the initial non-constant one, we know
// that it was the induction variable, and has been replaced by a constant
@ -202,7 +202,7 @@ void LoopStrengthReduce::strengthReduceGEP(GetElementPtrInst *GEPI, Loop *L,
PreGEP = GEPI->getOperand(0);
} else {
PreGEP = new GetElementPtrInst(GEPI->getOperand(0),
pre_op_vector, GEPI->getName()+".pre",
pre_op_vector, GEPI->getName()+".pre",
Preheader->getTerminator());
}
@ -210,15 +210,15 @@ void LoopStrengthReduce::strengthReduceGEP(GetElementPtrInst *GEPI, Loop *L,
// choose between the initial GEP we created and inserted into the
// preheader, and the incremented GEP that we will create below and insert
// into the loop body.
NewPHI = new PHINode(PreGEP->getType(),
NewPHI = new PHINode(PreGEP->getType(),
GEPI->getName()+".str", InsertBefore);
NewPHI->addIncoming(PreGEP, Preheader);
// Now, create the GEP instruction to increment by one the value selected
// by the PHI instruction we just created above, and add it as the second
// incoming Value/BasicBlock pair to the PHINode. It is inserted before
// the increment of the canonical induction variable.
Instruction *IncrInst =
Instruction *IncrInst =
const_cast<Instruction*>(L->getCanonicalInductionVariableIncrement());
GetElementPtrInst *StrGEP = new GetElementPtrInst(NewPHI, inc_op_vector,
GEPI->getName()+".inc",
@ -233,7 +233,7 @@ void LoopStrengthReduce::strengthReduceGEP(GetElementPtrInst *GEPI, Loop *L,
// about to create.
NewPHI = Cache->CachedPHINode;
}
if (GEPI->getNumOperands() - 1 == indvar) {
// If there were no operands following the induction variable, replace all
// uses of the old GEP instruction with the new PHI.
@ -252,7 +252,7 @@ void LoopStrengthReduce::strengthReduceGEP(GetElementPtrInst *GEPI, Loop *L,
GEPI);
GEPI->replaceAllUsesWith(newGEP);
}
// The old GEP is now dead.
DeadInsts.insert(GEPI);
++NumReduced;
@ -273,7 +273,7 @@ void LoopStrengthReduce::runOnLoop(Loop *L) {
// pass creates code like this, which we can't currently detect:
// %tmp.1 = sub uint 2000, %indvar
// %tmp.8 = getelementptr int* %y, uint %tmp.1
// Strength reduce all GEPs in the Loop. Insert secondary PHI nodes for the
// strength reduced pointers we'll be creating after the canonical induction
// variable's PHI.

View File

@ -1,10 +1,10 @@
//===-- LoopUnroll.cpp - Loop unroller pass -------------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass implements a simple loop unroller. It works best when loops have
@ -88,7 +88,7 @@ static unsigned ApproximateLoopSize(const Loop *L) {
} else if (I->hasOneUse() && I->use_back() == Term) {
// Ignore instructions only used by the loop terminator.
} else if (DbgInfoIntrinsic *DbgI = dyn_cast<DbgInfoIntrinsic>(I)) {
// Ignore debug instructions
// Ignore debug instructions
} else {
++Size;
}
@ -102,10 +102,10 @@ static unsigned ApproximateLoopSize(const Loop *L) {
return Size;
}
// RemapInstruction - Convert the instruction operands from referencing the
// RemapInstruction - Convert the instruction operands from referencing the
// current values into those specified by ValueMap.
//
static inline void RemapInstruction(Instruction *I,
static inline void RemapInstruction(Instruction *I,
std::map<const Value *, Value*> &ValueMap) {
for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
Value *Op = I->getOperand(op);
@ -150,7 +150,7 @@ bool LoopUnroll::visitLoop(Loop *L) {
return Changed;
}
DEBUG(std::cerr << "UNROLLING!\n");
unsigned TripCount = (unsigned)TripCountFull;
BasicBlock *LoopExit = BI->getSuccessor(L->contains(BI->getSuccessor(0)));
@ -235,7 +235,7 @@ bool LoopUnroll::visitLoop(Loop *L) {
PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
BB->getInstList().erase(PN);
}
// Finally, add an unconditional branch to the block to continue into the exit
// block.
new BranchInst(LoopExit, BB);
@ -245,7 +245,7 @@ bool LoopUnroll::visitLoop(Loop *L) {
// go.
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
Instruction *Inst = I++;
if (isInstructionTriviallyDead(Inst))
BB->getInstList().erase(Inst);
else if (Constant *C = ConstantFoldInstruction(Inst)) {

View File

@ -1,10 +1,10 @@
//===-- LoopUnswitch.cpp - Hoist loop-invariant conditionals in loop ------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass transforms loops that contain branches on loop-invariant conditions
@ -116,15 +116,15 @@ bool LoopUnswitch::visitLoop(Loop *L) {
} else {
// FIXME: check for profitability.
//std::cerr << "BEFORE:\n"; LI->dump();
VersionLoop(BI->getCondition(), L);
//std::cerr << "AFTER:\n"; LI->dump();
return true;
}
}
}
return Changed;
}
@ -152,10 +152,10 @@ BasicBlock *LoopUnswitch::SplitBlock(BasicBlock *BB, bool SplitAtTop) {
}
// RemapInstruction - Convert the instruction operands from referencing the
// RemapInstruction - Convert the instruction operands from referencing the
// current values into those specified by ValueMap.
//
static inline void RemapInstruction(Instruction *I,
static inline void RemapInstruction(Instruction *I,
std::map<const Value *, Value*> &ValueMap) {
for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
Value *Op = I->getOperand(op);
@ -185,7 +185,7 @@ static Loop *CloneLoop(Loop *L, Loop *PL, std::map<const Value*, Value*> &VM,
// Add all of the subloops to the new loop.
for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
CloneLoop(*I, New, VM, LI);
return New;
}
@ -210,7 +210,7 @@ InsertPHINodesForUsesOutsideLoop(Instruction *OI, Instruction *NI,
!NL->contains(cast<Instruction>(*UI)->getParent()))
goto UsedOutsideOfLoop;
return 0;
UsedOutsideOfLoop:
// Okay, this instruction is used outside of the current loop. Insert PHI
// nodes for the instruction, merging the values together.

View File

@ -1,10 +1,10 @@
//===- LowerAllocations.cpp - Reduce malloc & free insts to calls ---------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// The LowerAllocations transformation is a target-dependent transformation
@ -49,7 +49,7 @@ namespace {
virtual bool doInitialization(Function &F) {
return BasicBlockPass::doInitialization(F);
}
/// runOnBasicBlock - This method does the actual work of converting
/// instructions over, assuming that the pass has already been initialized.
///
@ -104,7 +104,7 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
if (MallocInst *MI = dyn_cast<MallocInst>(I)) {
const Type *AllocTy = MI->getType()->getElementType();
// malloc(type) becomes sbyte *malloc(size)
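// Illustrative sketch (hypothetical operands): %m = malloc int, uint %n
// is lowered to roughly
//   %sz  = mul uint %n, 4              ; 4 == sizeof(int) here, by assumption
//   %raw = call sbyte* %malloc(uint %sz)
//   %m   = cast sbyte* %raw to int*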
Value *MallocArg;
if (LowerMallocArgToInteger)
@ -133,7 +133,7 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
const FunctionType *MallocFTy = MallocFunc->getFunctionType();
std::vector<Value*> MallocArgs;
if (MallocFTy->getNumParams() > 0 || MallocFTy->isVarArg()) {
if (MallocFTy->isVarArg()) {
if (MallocArg->getType() != IntPtrTy)
@ -150,14 +150,14 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
// Create the call to Malloc...
CallInst *MCall = new CallInst(MallocFunc, MallocArgs, "", I);
// Create a cast instruction to convert to the right type...
Value *MCast;
if (MCall->getType() != Type::VoidTy)
MCast = new CastInst(MCall, MI->getType(), "", I);
else
MCast = Constant::getNullValue(MI->getType());
// Replace all uses of the old malloc inst with the cast inst
MI->replaceAllUsesWith(MCast);
I = --BBIL.erase(I); // remove and delete the malloc instr...
@ -166,7 +166,7 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
} else if (FreeInst *FI = dyn_cast<FreeInst>(I)) {
const FunctionType *FreeFTy = FreeFunc->getFunctionType();
std::vector<Value*> FreeArgs;
if (FreeFTy->getNumParams() > 0 || FreeFTy->isVarArg()) {
Value *MCast = FI->getOperand(0);
if (FreeFTy->getNumParams() > 0 &&
@ -178,10 +178,10 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
// If free is prototyped to take extra arguments, pass nulls.
for (unsigned i = 1; i < FreeFTy->getNumParams(); ++i)
FreeArgs.push_back(Constant::getNullValue(FreeFTy->getParamType(i)));
// Insert a call to the free function...
new CallInst(FreeFunc, FreeArgs, "", I);
// Delete the old free instruction
I = --BBIL.erase(I);
Changed = true;

View File

@ -1,14 +1,14 @@
//===-- lib/Transforms/Scalar/LowerConstantExprs.cpp ------------*- C++ -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was written by Vladimir Prus and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines the LowerConstantExpression pass, which converts all
// constant expressions into instructions. This is primarily useful for
// constant expressions into instructions. This is primarily useful for
// code generators which don't yet want or don't have a need to handle
// constant expressions themselves.
//
@ -29,18 +29,18 @@ namespace {
class ConstantExpressionsLower : public FunctionPass {
private: // FunctionPass overrides
bool runOnFunction(Function& f);
private: // internal methods
/// For all operands of 'insn' which are constant expressions, generates
/// an appropriate instruction and replaces the use of constant
/// an appropriate instruction and replaces the use of constant
/// expression with the use of the generated instruction.
bool runOnInstruction(Instruction& insn);
/// Given a constant expression 'c' which occurs in 'instruction',
/// at position 'pos',
/// at position 'pos',
/// generates instruction to compute 'c' and replaces the use of 'c'
/// with the use of that instruction. This handles only top-level
/// expression in 'c', any subexpressions are not handled.
@ -48,7 +48,7 @@ namespace {
};
RegisterOpt<ConstantExpressionsLower> X(
"lowerconstantexprs", "Lower constant expressions");
"lowerconstantexprs", "Lower constant expressions");
}
bool ConstantExpressionsLower::runOnFunction(Function& f)
@ -66,14 +66,14 @@ bool ConstantExpressionsLower::runOnInstruction(Instruction& instruction)
bool modified = false;
for (unsigned pos = 0; pos < instruction.getNumOperands(); ++pos)
{
if (ConstantExpr* ce
if (ConstantExpr* ce
= dyn_cast<ConstantExpr>(instruction.getOperand(pos))) {
// Decide where to insert the new instruction
Instruction* where = &instruction;
// For PHI nodes we can't insert new instruction before phi,
// since phi should always come at the beginning of the
// For PHI nodes we can't insert new instruction before phi,
// since phi should always come at the beginning of the
// basic block.
// So, we need to insert it in the predecessor, right before
// the terminating instruction.
@ -92,12 +92,12 @@ bool ConstantExpressionsLower::runOnInstruction(Instruction& instruction)
// Note: we can't call replaceAllUsesWith, since
// that might replace uses in other functions,
// where the instruction(s) we've generated are not
// available.
// Moreover, we can't replace all the users in the same
// function, because we can't be sure the definition
// available.
// Moreover, we can't replace all the users in the same
// function, because we can't be sure the definition
// made in this block will be available in other
// places where the constant is used.
// places where the constant is used.
instruction.setOperand(pos, n);
// The new instruction might have constant expressions in
@ -105,11 +105,11 @@ bool ConstantExpressionsLower::runOnInstruction(Instruction& instruction)
runOnInstruction(*n);
modified = true;
}
}
}
return modified;
}
Instruction*
Instruction*
ConstantExpressionsLower::convert(const ConstantExpr& c, Instruction* where)
{
Instruction* result = 0;
@ -118,13 +118,13 @@ ConstantExpressionsLower::convert(const ConstantExpr& c, Instruction* where)
c.getOpcode() < Instruction::BinaryOpsEnd)
{
result = BinaryOperator::create(
static_cast<Instruction::BinaryOps>(c.getOpcode()),
static_cast<Instruction::BinaryOps>(c.getOpcode()),
c.getOperand(0), c.getOperand(1), "", where);
}
else
{
switch(c.getOpcode()) {
case Instruction::GetElementPtr:
case Instruction::GetElementPtr:
{
vector<Value*> idx;
for (unsigned i = 1; i < c.getNumOperands(); ++i)
@ -135,7 +135,7 @@ ConstantExpressionsLower::convert(const ConstantExpr& c, Instruction* where)
}
case Instruction::Cast:
result = new CastInst(c.getOperand(0), c.getType(), "",
result = new CastInst(c.getOperand(0), c.getType(), "",
where);
break;
@ -143,15 +143,15 @@ ConstantExpressionsLower::convert(const ConstantExpr& c, Instruction* where)
case Instruction::Shl:
case Instruction::Shr:
result = new ShiftInst(
static_cast<Instruction::OtherOps>(c.getOpcode()),
static_cast<Instruction::OtherOps>(c.getOpcode()),
c.getOperand(0), c.getOperand(1), "", where);
break;
case Instruction::Select:
result = new SelectInst(c.getOperand(0), c.getOperand(1),
c.getOperand(2), "", where);
break;
default:
std::cerr << "Offending expr: " << c << "\n";
assert(0 && "Constant expression not yet handled!\n");

View File

@ -47,7 +47,7 @@ namespace {
/// had zero roots.
const Type *MainRootRecordType;
public:
LowerGC() : GCRootInt(0), GCReadInt(0), GCWriteInt(0),
LowerGC() : GCRootInt(0), GCReadInt(0), GCWriteInt(0),
GCRead(0), GCWrite(0), RootChain(0), MainRootRecordType(0) {}
virtual bool doInitialization(Module &M);
virtual bool runOnFunction(Function &F);
@ -125,7 +125,7 @@ bool LowerGC::doInitialization(Module &M) {
if (RootChain == 0) {
// If the root chain does not exist, insert a new one with linkonce
// linkage!
RootChain = new GlobalVariable(PRLTy, false,
RootChain = new GlobalVariable(PRLTy, false,
GlobalValue::LinkOnceLinkage,
Constant::getNullValue(PRLTy),
"llvm_gc_root_chain", &M);
@ -141,7 +141,7 @@ bool LowerGC::doInitialization(Module &M) {
/// not have the specified type, insert a cast.
static void Coerce(Instruction *I, unsigned OpNum, Type *Ty) {
if (I->getOperand(OpNum)->getType() != Ty) {
if (Constant *C = dyn_cast<Constant>(I->getOperand(OpNum)))
if (Constant *C = dyn_cast<Constant>(I->getOperand(OpNum)))
I->setOperand(OpNum, ConstantExpr::getCast(C, Ty));
else {
CastInst *CI = new CastInst(I->getOperand(OpNum), Ty, "", I);
@ -152,7 +152,7 @@ static void Coerce(Instruction *I, unsigned OpNum, Type *Ty) {
/// runOnFunction - If the program is using GC intrinsics, replace any
/// read/write intrinsics with the appropriate read/write barrier calls, then
/// inline them. Finally, build the data structures for
/// inline them. Finally, build the data structures for
bool LowerGC::runOnFunction(Function &F) {
// Quick exit for programs that are not using GC mechanisms.
if (!GCRootInt && !GCReadInt && !GCWriteInt) return false;
@ -192,7 +192,7 @@ bool LowerGC::runOnFunction(Function &F) {
CI->setOperand(0, GCRead);
} else {
// Create a whole new call to replace the old one.
CallInst *NC = new CallInst(GCRead, CI->getOperand(1),
CallInst *NC = new CallInst(GCRead, CI->getOperand(1),
CI->getOperand(2),
CI->getName(), CI);
Value *NV = new CastInst(NC, CI->getType(), "", CI);
@ -208,7 +208,7 @@ bool LowerGC::runOnFunction(Function &F) {
MadeChange = true;
}
}
// If there are no GC roots in this function, then there is no need to create
// a GC list record for it.
if (GCRoots.empty()) return MadeChange;
@ -263,7 +263,7 @@ bool LowerGC::runOnFunction(Function &F) {
Par[3] = Zero;
Value *RootPtrPtr = new GetElementPtrInst(AI, Par, "RootEntPtr", IP);
new StoreInst(Null, RootPtrPtr, IP);
// Each occurrence of the llvm.gcroot intrinsic now turns into an
// initialization of the slot with the address and a zeroing out of the
// address specified.
@ -301,7 +301,7 @@ bool LowerGC::runOnFunction(Function &F) {
UnwindInst *UI = new UnwindInst(Cleanup);
PrevPtr = new LoadInst(PrevPtrPtr, "prevptr", UI);
new StoreInst(PrevPtr, RootChain, UI);
// Loop over all of the function calls, turning them into invokes.
while (!NormalCalls.empty()) {
CallInst *CI = NormalCalls.back();
@ -314,7 +314,7 @@ bool LowerGC::runOnFunction(Function &F) {
// Remove the unconditional branch inserted at the end of the CBB.
CBB->getInstList().pop_back();
NewBB->getInstList().remove(CI);
// Create a new invoke instruction.
Value *II = new InvokeInst(CI->getCalledValue(), NewBB, Cleanup,
std::vector<Value*>(CI->op_begin()+1,

View File

@ -1,10 +1,10 @@
//===- LowerInvoke.cpp - Eliminate Invoke & Unwind instructions -----------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This transformation is designed for use by code generators which do not yet
@ -153,7 +153,7 @@ void LowerInvoke::createAbortMessage() {
Constant *Msg =
ConstantArray::get("ERROR: Exception thrown, but not caught!\n");
AbortMessageLength = Msg->getNumOperands()-1; // don't include \0
GlobalVariable *MsgGV = new GlobalVariable(Msg->getType(), true,
GlobalValue::InternalLinkage,
Msg, "abortmsg", &M);
@ -192,7 +192,7 @@ void LowerInvoke::writeAbortMessage(Instruction *IB) {
unsigned NumArgs = FT->getNumParams();
for (unsigned i = 0; i != 3; ++i)
if (i < NumArgs && FT->getParamType(i) != Args[i]->getType())
Args[i] = ConstantExpr::getCast(cast<Constant>(Args[i]),
Args[i] = ConstantExpr::getCast(cast<Constant>(Args[i]),
FT->getParamType(i));
new CallInst(WriteFn, Args, "", IB);
@ -209,7 +209,7 @@ bool LowerInvoke::insertCheapEHSupport(Function &F) {
std::vector<Value*>(II->op_begin()+3,
II->op_end()), Name,II);
II->replaceAllUsesWith(NewCall);
// Insert an unconditional branch to the normal destination.
new BranchInst(II->getNormalDest(), II);
@ -269,7 +269,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
Value *NextFieldPtr = new GetElementPtrInst(JmpBuf, Idx, "NextField", II);
new StoreInst(OldEntry, NextFieldPtr, II);
new StoreInst(JmpBuf, JBListHead, II);
// Call setjmp, passing in the address of the jmpbuffer.
Idx[1] = ConstantUInt::get(Type::UIntTy, 1);
Value *JmpBufPtr = new GetElementPtrInst(JmpBuf, Idx, "TheJmpBuf", II);
@ -283,7 +283,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
// destination.
SplitCriticalEdge(II, 0, this);
Instruction *InsertLoc = II->getNormalDest()->begin();
// Insert a normal call instruction on the normal execution path.
std::string Name = II->getName(); II->setName("");
Value *NewCall = new CallInst(II->getCalledValue(),
@ -291,7 +291,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
II->op_end()), Name,
InsertLoc);
II->replaceAllUsesWith(NewCall);
// If we got this far, then no exception was thrown and we can pop our
// jmpbuf entry off.
new StoreInst(OldEntry, JBListHead, InsertLoc);
@ -301,8 +301,8 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
// Remove the InvokeInst now.
BB->getInstList().erase(II);
++NumLowered; Changed = true;
++NumLowered; Changed = true;
} else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
if (UnwindBlock == 0) {
// Create two new blocks, the unwind block and the terminate block. Add
@ -330,7 +330,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
// Remove the UnwindInst now.
BB->getInstList().erase(UI);
++NumLowered; Changed = true;
++NumLowered; Changed = true;
}
// If an unwind instruction was inserted, we need to set up the Unwind and
@ -370,7 +370,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
// Now we set up the terminate block.
RI = TermBlock->getTerminator();
// Insert a new call to write(2, AbortMessage, AbortMessageLength);
writeAbortMessage(RI);

View File

@ -1,10 +1,10 @@
//===- LowerPacked.cpp - Implementation of LowerPacked Transform ---------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Brad Jones and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering Packed datatypes into more primitive
@ -38,7 +38,7 @@ namespace {
///
class LowerPacked : public FunctionPass, public InstVisitor<LowerPacked> {
public:
/// @brief Lowers packed operations to scalar operations.
/// @brief Lowers packed operations to scalar operations.
/// @param F The function to process
virtual bool runOnFunction(Function &F);
@ -60,13 +60,13 @@ public:
/// This function asserts if the instruction is a PackedType but
/// is handled by another function.
///
///
/// @brief Asserts if PackedType instruction is not handled elsewhere.
/// @param I the unhandled instruction
void visitInstruction(Instruction &I)
{
if(isa<PackedType>(I.getType())) {
std::cerr << "Unhandled Instruction with Packed ReturnType: " <<
std::cerr << "Unhandled Instruction with Packed ReturnType: " <<
I << '\n';
}
}
@ -82,7 +82,7 @@ private:
void setValues(Value* val,const std::vector<Value*>& values);
// Data Members
/// @brief whether we changed the function or not
/// @brief whether we changed the function or not
bool Changed;
/// @brief a map from old packed values to new smaller packed values
@ -91,27 +91,27 @@ private:
/// Instructions in the source program to get rid of
/// after we do a pass (the old packed instructions)
std::vector<Instruction*> instrsToRemove;
};
};
RegisterOpt<LowerPacked>
X("lower-packed",
RegisterOpt<LowerPacked>
X("lower-packed",
"lowers packed operations to operations on smaller packed datatypes");
} // end namespace
} // end namespace
FunctionPass *llvm::createLowerPackedPass() { return new LowerPacked(); }
// This function sets lowered values for a corresponding
// packed value. Note, in the case of a forward reference
// getValues(Value*) will have already been called for
// the packed parameter. This function will then replace
// all references in the function to the "dummy"
// value the previous getValues(Value*) call
// getValues(Value*) will have already been called for
// the packed parameter. This function will then replace
// all references in the function to the "dummy"
// value the previous getValues(Value*) call
// returned with actual references.
void LowerPacked::setValues(Value* value,const std::vector<Value*>& values)
{
std::map<Value*,std::vector<Value*> >::iterator it =
std::map<Value*,std::vector<Value*> >::iterator it =
packedToScalarMap.lower_bound(value);
if (it == packedToScalarMap.end() || it->first != value) {
// there was not a forward reference to this element
@ -119,7 +119,7 @@ void LowerPacked::setValues(Value* value,const std::vector<Value*>& values)
}
else {
// replace forward declarations with actual definitions
assert(it->second.size() == values.size() &&
assert(it->second.size() == values.size() &&
"Error forward refences and actual definition differ in size");
for (unsigned i = 0, e = values.size(); i != e; ++i) {
// replace and get rid of old forward references
@ -133,8 +133,8 @@ void LowerPacked::setValues(Value* value,const std::vector<Value*>& values)
// This function will examine the packed value parameter
// and if it is a packed constant or a forward reference
// properly create the lowered values needed. Otherwise
// it will simply retrieve values from a
// setValues(Value*,const std::vector<Value*>&)
// it will simply retrieve values from a
// setValues(Value*,const std::vector<Value*>&)
// call. Failing both of these cases, it will abort
// the program.
std::vector<Value*>& LowerPacked::getValues(Value* value)
@ -144,7 +144,7 @@ std::vector<Value*>& LowerPacked::getValues(Value* value)
// reject further processing if this one has
// already been handled
std::map<Value*,std::vector<Value*> >::iterator it =
std::map<Value*,std::vector<Value*> >::iterator it =
packedToScalarMap.lower_bound(value);
if (it != packedToScalarMap.end() && it->first == value) {
return it->second;
@ -162,11 +162,11 @@ std::vector<Value*>& LowerPacked::getValues(Value* value)
}
else if (ConstantAggregateZero* CAZ =
dyn_cast<ConstantAggregateZero>(value)) {
// zero constant
// zero constant
const PackedType* PKT = cast<PackedType>(CAZ->getType());
std::vector<Value*> results;
results.reserve(PKT->getNumElements());
Constant* C = Constant::getNullValue(PKT->getElementType());
for (unsigned i = 0, e = PKT->getNumElements(); i != e; ++i) {
results.push_back(C);
@ -179,7 +179,7 @@ std::vector<Value*>& LowerPacked::getValues(Value* value)
const PackedType* PKT = cast<PackedType>(value->getType());
std::vector<Value*> results;
results.reserve(PKT->getNumElements());
for (unsigned i = 0, e = PKT->getNumElements(); i != e; ++i) {
results.push_back(new Argument(PKT->getElementType()));
}
@ -221,19 +221,19 @@ void LowerPacked::visitLoadInst(LoadInst& LI)
Idx[1] = ConstantUInt::get(Type::UIntTy,i);
// Get the pointer
Value* val = new GetElementPtrInst(array,
Value* val = new GetElementPtrInst(array,
Idx,
LI.getName() +
LI.getName() +
".ge." + utostr(i),
&LI);
// generate the new load and save the result in packedToScalar map
values.push_back(new LoadInst(val,
values.push_back(new LoadInst(val,
LI.getName()+"."+utostr(i),
LI.isVolatile(),
&LI));
}
setValues(&LI,values);
Changed = true;
instrsToRemove.push_back(&LI);
@ -251,13 +251,13 @@ void LowerPacked::visitBinaryOperator(BinaryOperator& BO)
"The two packed operand to scalar maps must be equal in size.");
result.reserve(op0Vals.size());
// generate the new binary op and save the result
for (unsigned i = 0; i != op0Vals.size(); ++i) {
result.push_back(BinaryOperator::create(BO.getOpcode(),
op0Vals[i],
result.push_back(BinaryOperator::create(BO.getOpcode(),
op0Vals[i],
op1Vals[i],
BO.getName() +
BO.getName() +
"." + utostr(i),
&BO));
}
@ -270,12 +270,12 @@ void LowerPacked::visitBinaryOperator(BinaryOperator& BO)
void LowerPacked::visitStoreInst(StoreInst& SI)
{
if (const PackedType* PKT =
if (const PackedType* PKT =
dyn_cast<PackedType>(SI.getOperand(0)->getType())) {
// We will need this for getelementptr
std::vector<Value*> Idx(2);
Idx[0] = ConstantUInt::get(Type::UIntTy,0);
ArrayType* AT = ArrayType::get(PKT->getContainedType(0),
PKT->getNumElements());
PointerType* APT = PointerType::get(AT);
@ -286,21 +286,21 @@ void LowerPacked::visitStoreInst(StoreInst& SI)
"store.ge.a.",
&SI);
std::vector<Value*>& values = getValues(SI.getOperand(0));
assert((values.size() == PKT->getNumElements()) &&
"Scalar must have the same number of elements as Packed Type");
for (unsigned i = 0, e = PKT->getNumElements(); i != e; ++i) {
// Generate the indices for getelementptr
Idx[1] = ConstantUInt::get(Type::UIntTy,i);
Value* val = new GetElementPtrInst(array,
Value* val = new GetElementPtrInst(array,
Idx,
"store.ge." +
utostr(i) + ".",
&SI);
new StoreInst(values[i], val, SI.isVolatile(),&SI);
}
Changed = true;
instrsToRemove.push_back(&SI);
}
@ -319,12 +319,12 @@ void LowerPacked::visitSelectInst(SelectInst& SELI)
for (unsigned i = 0; i != op0Vals.size(); ++i) {
result.push_back(new SelectInst(SELI.getCondition(),
op0Vals[i],
op0Vals[i],
op1Vals[i],
SELI.getName()+ "." + utostr(i),
&SELI));
}
setValues(&SELI,result);
Changed = true;
instrsToRemove.push_back(&SELI);
@ -334,24 +334,24 @@ void LowerPacked::visitSelectInst(SelectInst& SELI)
bool LowerPacked::runOnFunction(Function& F)
{
// initialize
Changed = false;
Changed = false;
// Does three passes:
// Pass 1) Converts Packed Operations to
// Pass 1) Converts Packed Operations to
// new Packed Operations on smaller
// datatypes
visit(F);
// Pass 2) Drop all references
std::for_each(instrsToRemove.begin(),
instrsToRemove.end(),
std::mem_fun(&Instruction::dropAllReferences));
// Pass 3) Delete the Instructions to remove aka packed instructions
for (std::vector<Instruction*>::iterator i = instrsToRemove.begin(),
e = instrsToRemove.end();
for (std::vector<Instruction*>::iterator i = instrsToRemove.begin(),
e = instrsToRemove.end();
i != e; ++i) {
(*i)->getParent()->getInstList().erase(*i);
(*i)->getParent()->getInstList().erase(*i);
}
// clean-up

@ -1,10 +1,10 @@
//===- LowerSelect.cpp - Transform select insts to branches ---------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass lowers select instructions into conditional branches for targets
@ -86,7 +86,7 @@ bool LowerSelect::runOnFunction(Function &F) {
// Use the PHI instead of the select.
SI->replaceAllUsesWith(PN);
NewCont->getInstList().erase(SI);
Changed = true;
break; // This block is done with.
}

@ -1,10 +1,10 @@
//===- LowerSwitch.cpp - Eliminate Switch instructions --------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// The LowerSwitch transformation rewrites switch statements with a sequence of

@ -1,10 +1,10 @@
//===- Mem2Reg.cpp - The -mem2reg pass, a wrapper around the Utils lib ----===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass is a simple pass wrapper around the PromoteMemToReg function call
@ -53,7 +53,7 @@ bool PromotePass::runOnFunction(Function &F) {
DominatorTree &DT = getAnalysis<DominatorTree>();
DominanceFrontier &DF = getAnalysis<DominanceFrontier>();
while (1) {
Allocas.clear();

@ -1,10 +1,10 @@
//===- PRE.cpp - Partial Redundancy Elimination ---------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the well-known Partial Redundancy Elimination
@ -80,7 +80,7 @@ namespace {
AvailableBlocksTy AvailableBlocks;
bool ProcessBlock(BasicBlock *BB);
// Anticipatibility calculation...
void MarkPostDominatingBlocksAnticipatible(PostDominatorTree::Node *N,
std::vector<char> &AntBlocks,
@ -262,7 +262,7 @@ void PRE::ReplaceDominatedAvailableOccurrencesWith(Instruction *NewOcc,
// active definition...
if (ExistingAvailableVal == 0) {
ExistingAvailableVal = NewOcc;
for (DominatorTree::Node::iterator I = N->begin(), E = N->end(); I != E;++I)
ReplaceDominatedAvailableOccurrencesWith(NewOcc, *I);
} else {
@ -283,7 +283,7 @@ void PRE::ReplaceDominatedAvailableOccurrencesWith(Instruction *NewOcc,
for (df_iterator<DominatorTree::Node*> DI = df_begin(N), E = df_end(N);
DI != E; ++DI)
AvailableBlocks[(*DI)->getBlock()] = NewOcc;
}
}
}
@ -400,7 +400,7 @@ bool PRE::ProcessExpression(Instruction *Expr) {
if (AnticipatibleBlocks[i])
std::cerr << BlockMapping[i]->getName() <<" ";
std::cerr << "\n";);
// AvailabilityFrontier - Calculates the availability frontier for the current
@ -463,7 +463,7 @@ bool PRE::ProcessExpression(Instruction *Expr) {
AnyNotAvailable = true;
break;
}
// If any predecessor blocks are not available, add the node to
// the current expression dominance frontier.
if (AnyNotAvailable) {
@ -597,12 +597,12 @@ bool PRE::ProcessExpression(Instruction *Expr) {
++NumRedundant;
DEBUG(std::cerr << " PHI replaces available value: %"
<< OldVal->getName() << "\n");
// Loop over all of the blocks dominated by this PHI node, and change
// the AvailableBlocks entries to be the PHI node instead of the old
// instruction.
MarkOccurrenceAvailableInAllDominatedBlocks(PN, AFBlock);
AFBlock->getInstList().erase(OldVal); // Delete old instruction!
// The resultant PHI node is a new definition of the value!
@ -613,7 +613,7 @@ bool PRE::ProcessExpression(Instruction *Expr) {
// region (occurs when hoisting loop invariants, f.e.). In this case,
// the PHI node should actually just be removed.
assert(PN->use_empty() && "No uses should exist for dead PHI node!");
PN->getParent()->getInstList().erase(PN);
PN->getParent()->getInstList().erase(PN);
}
} else {
// The resultant PHI node is a new definition of the value!

@ -1,10 +1,10 @@
//===- Reassociate.cpp - Reassociate binary expressions -------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates commutative expressions in an order that is designed
@ -115,7 +115,7 @@ bool Reassociate::ReassociateExpr(BinaryOperator *I) {
Value *RHS = I->getOperand(1);
unsigned LHSRank = getRank(LHS);
unsigned RHSRank = getRank(RHS);
bool Changed = false;
// Make sure the LHS of the operand always has the greater rank...
@ -130,7 +130,7 @@ bool Reassociate::ReassociateExpr(BinaryOperator *I) {
DEBUG(std::cerr << "Transposed: " << *I
/* << " Result BB: " << I->getParent()*/);
}
// If the LHS is the same operator as the current one is, and if we are the
// only expression using it...
//
@ -233,7 +233,7 @@ bool Reassociate::ReassociateBB(BasicBlock *BB) {
BI = New;
New->setOperand(1, NegateValue(New->getOperand(1), BI));
Changed = true;
DEBUG(std::cerr << "Negated: " << *New /*<< " Result BB: " << BB*/);
}

@ -1,10 +1,10 @@
//===- SCCP.cpp - Sparse Conditional Constant Propagation -----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements sparse conditional constant propagation and merging:
@ -45,7 +45,7 @@ using namespace llvm;
namespace {
class LatticeVal {
enum {
enum {
undefined, // This instruction has no known value
constant, // This instruction has a constant value
overdefined // This instruction has an unknown value
@ -198,7 +198,7 @@ public:
private:
// markConstant - Make a value be marked as "constant". If the value
// is not already a constant, add it to the instruction work list so that
// is not already a constant, add it to the instruction work list so that
// the users of the instruction are updated later.
//
inline void markConstant(LatticeVal &IV, Value *V, Constant *C) {
@ -212,9 +212,9 @@ private:
}
// markOverdefined - Make a value be marked as "overdefined". If the
// value is not already overdefined, add it to the overdefined instruction
// value is not already overdefined, add it to the overdefined instruction
// work list so that the users of the instruction are updated later.
inline void markOverdefined(LatticeVal &IV, Value *V) {
if (IV.markOverdefined()) {
DEBUG(std::cerr << "markOverdefined: ";
@ -262,9 +262,9 @@ private:
return ValueState[V];
}
// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
// work list if it is not already executable...
//
//
void markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
return; // This edge is already known to be executable!
@ -308,7 +308,7 @@ private:
private:
friend class InstVisitor<SCCPSolver>;
// visit implementations - Something changed in this instruction... Either an
// visit implementations - Something changed in this instruction... Either an
// operand made a transition, or the instruction is newly executable. Change
// the value type of I to reflect these changes if appropriate.
//
@ -406,7 +406,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
// Make sure the source basic block is executable!!
if (!BBExecutable.count(From)) return false;
// Check to make sure this edge itself is actually feasible now...
TerminatorInst *TI = From->getTerminator();
if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
@ -422,7 +422,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
if (!isa<ConstantBool>(BCValue.getConstant())) return true;
// Constant condition variables mean the branch can only go a single way
return BI->getSuccessor(BCValue.getConstant() ==
return BI->getSuccessor(BCValue.getConstant() ==
ConstantBool::False) == To;
}
return false;
@ -511,7 +511,7 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
LatticeVal &IV = getValueState(PN.getIncomingValue(i));
if (IV.isUndefined()) continue; // Doesn't influence PHI node.
if (isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent())) {
if (IV.isOverdefined()) { // PHI node becomes overdefined!
markOverdefined(PNIV, &PN);
@ -524,7 +524,7 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
// There is already a reachable operand. If we conflict with it,
// then the PHI node becomes overdefined. If we agree with it, we
// can continue on.
// Check to see if there are two different constants merging...
if (IV.getConstant() != OperandVal) {
// Yes there is. This means the PHI node is not constant.
@ -753,7 +753,7 @@ void SCCPSolver::visitGetElementPtrInst(GetElementPtrInst &I) {
Constant *Ptr = Operands[0];
Operands.erase(Operands.begin()); // Erase the pointer from idx list...
markConstant(IV, &I, ConstantExpr::getGetElementPtr(Ptr, Operands));
markConstant(IV, &I, ConstantExpr::getGetElementPtr(Ptr, Operands));
}
/// GetGEPGlobalInitializer - Given a constant and a getelementptr constantexpr,
@ -813,7 +813,7 @@ void SCCPSolver::visitLoadInst(LoadInst &I) {
markConstant(IV, &I, Constant::getNullValue(I.getType()));
return;
}
// Transform load (constant global) into the value loaded.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
if (GV->isConstant()) {
@ -837,7 +837,7 @@ void SCCPSolver::visitLoadInst(LoadInst &I) {
if (CE->getOpcode() == Instruction::GetElementPtr)
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
if (GV->isConstant() && !GV->isExternal())
if (Constant *V =
if (Constant *V =
GetGEPGlobalInitializer(GV->getInitializer(), CE)) {
markConstant(IV, &I, V);
return;
@ -857,7 +857,7 @@ void SCCPSolver::visitCallSite(CallSite CS) {
hash_map<Function*, LatticeVal>::iterator TFRVI =TrackedFunctionRetVals.end();
if (F && F->hasInternalLinkage())
TFRVI = TrackedFunctionRetVals.find(F);
if (TFRVI != TrackedFunctionRetVals.end()) {
// If this is the first call to the function hit, mark its entry block
// executable.
@ -883,7 +883,7 @@ void SCCPSolver::visitCallSite(CallSite CS) {
mergeInValue(IV, I, TFRVI->second);
return;
}
if (F == 0 || !F->isExternal() || !canConstantFoldCallTo(F)) {
markOverdefined(IV, I);
return;
@ -914,7 +914,7 @@ void SCCPSolver::visitCallSite(CallSite CS) {
void SCCPSolver::Solve() {
// Process the work lists until they are empty!
while (!BBWorkList.empty() || !InstWorkList.empty() ||
while (!BBWorkList.empty() || !InstWorkList.empty() ||
!OverdefinedInstWorkList.empty()) {
// Process the instruction work list...
while (!OverdefinedInstWorkList.empty()) {
@ -922,7 +922,7 @@ void SCCPSolver::Solve() {
OverdefinedInstWorkList.pop_back();
DEBUG(std::cerr << "\nPopped off OI-WL: " << *I);
// "I" got into the work list because it either made the transition from
// bottom to constant
//
@ -940,7 +940,7 @@ void SCCPSolver::Solve() {
InstWorkList.pop_back();
DEBUG(std::cerr << "\nPopped off I-WL: " << *I);
// "I" got into the work list because it either made the transition from
// bottom to constant
//
@ -953,14 +953,14 @@ void SCCPSolver::Solve() {
UI != E; ++UI)
OperandChangedState(*UI);
}
// Process the basic block work list...
while (!BBWorkList.empty()) {
BasicBlock *BB = BBWorkList.back();
BBWorkList.pop_back();
DEBUG(std::cerr << "\nPopped off BBWL: " << *BB);
// Notify all instructions in this basic block that they are newly
// executable.
visit(BB);
@ -1016,7 +1016,7 @@ namespace {
// algorithm, and return true if the function was modified.
//
bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
}
@ -1095,13 +1095,13 @@ bool SCCP::runOnFunction(Function &F) {
Constant *Const = IV.isConstant()
? IV.getConstant() : UndefValue::get(Inst->getType());
DEBUG(std::cerr << " Constant: " << *Const << " = " << *Inst);
// Replaces all of the uses of a variable with uses of the constant.
Inst->replaceAllUsesWith(Const);
// Delete the instruction.
BB->getInstList().erase(Inst);
// Hey, we just changed something!
MadeChanges = true;
++NumInstRemoved;
@ -1217,7 +1217,7 @@ bool IPSCCP::runOnModule(Module &M) {
Constant *CST = IV.isConstant() ?
IV.getConstant() : UndefValue::get(AI->getType());
DEBUG(std::cerr << "*** Arg " << *AI << " = " << *CST <<"\n");
// Replaces all of the uses of a variable with uses of the
// constant.
AI->replaceAllUsesWith(CST);
@ -1247,7 +1247,7 @@ bool IPSCCP::runOnModule(Module &M) {
MadeChanges = true;
++IPNumInstRemoved;
}
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
BasicBlock *Succ = TI->getSuccessor(i);
if (Succ->begin() != Succ->end() && isa<PHINode>(Succ->begin()))
@ -1272,11 +1272,11 @@ bool IPSCCP::runOnModule(Module &M) {
Constant *Const = IV.isConstant()
? IV.getConstant() : UndefValue::get(Inst->getType());
DEBUG(std::cerr << " Constant: " << *Const << " = " << *Inst);
// Replaces all of the uses of a variable with uses of the
// constant.
Inst->replaceAllUsesWith(Const);
// Delete the instruction.
if (!isa<TerminatorInst>(Inst) && !isa<CallInst>(Inst))
BB->getInstList().erase(Inst);
@ -1300,7 +1300,7 @@ bool IPSCCP::runOnModule(Module &M) {
bool Folded = ConstantFoldTerminator(I->getParent());
assert(Folded && "Didn't fold away reference to block!");
}
// Finally, delete the basic block.
F->getBasicBlockList().erase(DeadBB);
}
@ -1338,6 +1338,6 @@ bool IPSCCP::runOnModule(Module &M) {
M.getGlobalList().erase(GV);
++IPNumGlobalConst;
}
return MadeChanges;
}

@ -1,10 +1,10 @@
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
@ -91,7 +91,7 @@ bool SROA::performPromotion(Function &F) {
BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function
bool Changed = false;
while (1) {
Allocas.clear();
@ -154,7 +154,7 @@ bool SROA::performScalarRepl(Function &F) {
DEBUG(std::cerr << "Found inst to xform: " << *AI);
Changed = true;
std::vector<AllocaInst*> ElementAllocas;
if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
ElementAllocas.reserve(ST->getNumContainedTypes());
@ -175,7 +175,7 @@ bool SROA::performScalarRepl(Function &F) {
WorkList.push_back(NA); // Add to worklist for recursive processing
}
}
// Now that we have created the alloca instructions that we want to use,
// expand the getelementptr instructions to use them.
//
@ -183,12 +183,12 @@ bool SROA::performScalarRepl(Function &F) {
Instruction *User = cast<Instruction>(AI->use_back());
GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
// We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
unsigned Idx =
unsigned Idx =
(unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getRawValue();
assert(Idx < ElementAllocas.size() && "Index out of range?");
AllocaInst *AllocaToUse = ElementAllocas[Idx];
Value *RepValue;
if (GEPI->getNumOperands() == 3) {
// Do not insert a new getelementptr instruction with zero indices, only
@ -206,7 +206,7 @@ bool SROA::performScalarRepl(Function &F) {
GEPI->setName("");
RepValue = new GetElementPtrInst(AllocaToUse, NewArgs, OldName, GEPI);
}
// Move all of the users over to the new GEP.
GEPI->replaceAllUsesWith(RepValue);
// Delete the old GEP
@ -259,7 +259,7 @@ static bool AllUsersAreLoads(Value *Ptr) {
I != E; ++I)
if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
return false;
return true;
return true;
}
/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
@ -289,7 +289,7 @@ int SROA::isSafeUseOfAllocation(Instruction *User) {
//
if (cast<ConstantInt>(GEPI->getOperand(2))->getRawValue() >= NumElements)
return 0;
} else {
// If this is an array index and the index is not constant, we cannot
// promote... that is unless the array has exactly one or two elements in
@ -342,7 +342,7 @@ void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
uint64_t NumElements = AT->getNumElements();
if (!isa<ConstantInt>(I.getOperand())) {
if (NumElements == 1) {
GEPI->setOperand(2, Constant::getNullValue(Type::IntTy));

@ -1,10 +1,10 @@
//===- SimplifyCFG.cpp - CFG Simplification Pass --------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements dead code elimination and basic block merging.
@ -100,7 +100,7 @@ bool CFGSimplifyPass::runOnFunction(Function &F) {
(*SI)->removePredecessor(BB);
BB->dropAllReferences();
}
for (Function::iterator I = ++F.begin(); I != F.end();)
if (!Reachable.count(I))
I = F.getBasicBlockList().erase(I);

@ -1,10 +1,10 @@
//===- TailDuplication.cpp - Simplify CFG through tail duplication --------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass performs a limited form of tail duplication, intended to simplify
@ -127,7 +127,7 @@ bool TailDup::shouldEliminateUnconditionalBranch(TerminatorInst *TI) {
if (TooMany-- == 0) return false;
}
return true;
return true;
}
/// FindObviousSharedDomOf - We know there is a branch from SrcBlock to
@ -141,7 +141,7 @@ static BasicBlock *FindObviousSharedDomOf(BasicBlock *SrcBlock,
if (PI == PE || ++PI != PE) return 0;
BasicBlock *SrcPred = *pred_begin(SrcBlock);
// Look at the predecessors of DstBlock. One of them will be SrcBlock. If
// there is only one other pred, get it, otherwise we can't handle it.
PI = pred_begin(DstBlock); PE = pred_end(DstBlock);
@ -199,7 +199,7 @@ void TailDup::eliminateUnconditionalBranch(BranchInst *Branch) {
while (isa<PHINode>(BBI)) ++BBI;
while (!isa<TerminatorInst>(BBI)) {
Instruction *I = BBI++;
bool CanHoist = !I->isTrapping() && !I->mayWriteToMemory();
if (CanHoist) {
for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
@ -303,7 +303,7 @@ void TailDup::eliminateUnconditionalBranch(BranchInst *Branch) {
// Ok, we have a PHI node. Figure out what the incoming value was for the
// DestBlock.
Value *IV = PN->getIncomingValueForBlock(DestBlock);
// Remap the value if necessary...
if (Value *MappedIV = ValueMapping[IV])
IV = MappedIV;

@ -1,10 +1,10 @@
//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file transforms calls of the current function (self recursion) followed
@ -89,7 +89,7 @@ bool TailCallElim::runOnFunction(Function &F) {
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator()))
MadeChange |= ProcessReturningBlock(Ret, OldEntry, ArgumentPHIs);
// If we eliminated any tail recursions, it's possible that we inserted some
// silly PHI nodes which just merge an initial value (the incoming operand)
// with themselves. Check to see if we did and clean up our mess if so. This
@ -163,7 +163,7 @@ static bool isDynamicConstant(Value *V, CallInst *CI) {
Function *F = CI->getParent()->getParent();
for (Function::arg_iterator AI = F->arg_begin(); &*AI != Arg; ++AI)
++ArgNo;
// If we are passing this argument into call as the corresponding
// argument operand, then the argument is dynamically constant.
// Otherwise, we cannot transform this function safely.
@ -292,7 +292,7 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
std::string OldName = OldEntry->getName(); OldEntry->setName("tailrecurse");
BasicBlock *NewEntry = new BasicBlock(OldName, F, OldEntry);
new BranchInst(OldEntry, NewEntry);
// Now that we have created a new block, which jumps to the entry
// block, insert a PHI node for each argument of the function.
// For now, we initialize each PHI to only have the real arguments
@ -305,13 +305,13 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
ArgumentPHIs.push_back(PN);
}
}
// Ok, now that we know we have a pseudo-entry block WITH all of the
// required PHI nodes, add entries into the PHI node for the actual
// parameters passed into the tail-recursive call.
for (unsigned i = 0, e = CI->getNumOperands()-1; i != e; ++i)
ArgumentPHIs[i]->addIncoming(CI->getOperand(i+1), BB);
// If we are introducing an accumulator variable to eliminate the recursion,
// do so now. Note that we _know_ that no subsequent tail recursion
// eliminations will happen on this function because of the way the

@ -1,10 +1,10 @@
//===- TransformInternals.cpp - Implement shared functions for transforms -===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines shared functions used by the different components of the
@ -30,10 +30,10 @@ static const Type *getStructOffsetStep(const StructType *STy, uint64_t &Offset,
for (i = 0; i < SL->MemberOffsets.size()-1; ++i)
if (Offset >= SL->MemberOffsets[i] && Offset < SL->MemberOffsets[i+1])
break;
assert(Offset >= SL->MemberOffsets[i] &&
(i == SL->MemberOffsets.size()-1 || Offset < SL->MemberOffsets[i+1]));
// Make sure to save the current index...
Indices.push_back(ConstantUInt::get(Type::UIntTy, i));
Offset = SL->MemberOffsets[i];

@ -1,10 +1,10 @@
//===-- TransformInternals.h - Shared functions for Transforms --*- C++ -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This header file declares shared functions used by the different components
@ -56,7 +56,7 @@ const Type *ConvertibleToGEP(const Type *Ty, Value *V,
// ValueHandle Class - Smart pointer that occupies a slot on the users USE list
// that prevents it from being destroyed. This "looks" like an Instruction
// with Opcode UserOp1.
//
//
class ValueMapCache;
class ValueHandle : public Instruction {
Use Op;

@ -1,10 +1,10 @@
//===-- BasicBlockUtils.cpp - BasicBlock Utilities -------------------------==//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform manipulations on basic blocks, and
@ -30,7 +30,7 @@ void llvm::ReplaceInstWithValue(BasicBlock::InstListType &BIL,
I.replaceAllUsesWith(V);
std::string OldName = I.getName();
// Delete the unnecessary instruction now...
BI = BIL.erase(BI);
@ -92,7 +92,7 @@ void llvm::RemoveSuccessor(TerminatorInst *TI, unsigned SuccNum) {
cast<BranchInst>(TI)->setUnconditionalDest(TI->getSuccessor(1-SuccNum));
} else { // Otherwise convert to a return instruction...
Value *RetVal = 0;
// Create a value to return... if the function doesn't return null...
if (BB->getParent()->getReturnType() != Type::VoidTy)
RetVal = Constant::getNullValue(BB->getParent()->getReturnType());
@ -100,7 +100,7 @@ void llvm::RemoveSuccessor(TerminatorInst *TI, unsigned SuccNum) {
// Create the return...
NewTI = new ReturnInst(RetVal);
}
break;
break;
case Instruction::Invoke: // Should convert to call
case Instruction::Switch: // Should remove entry

@ -1,10 +1,10 @@
//===- BreakCriticalEdges.cpp - Critical Edge Elimination Pass ------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// BreakCriticalEdges pass - Break all of the critical edges in the CFG by
@ -31,7 +31,7 @@ namespace {
struct BreakCriticalEdges : public FunctionPass {
virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<DominatorSet>();
AU.addPreserved<ImmediateDominators>();
@ -108,7 +108,7 @@ bool llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
DestBB->getName() + "_crit_edge");
// Create our unconditional branch...
new BranchInst(DestBB, NewBB);
// Branch to the new block, breaking the edge...
TI->setSuccessor(SuccNum, NewBB);
@ -150,11 +150,11 @@ bool llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
// anything.
ID->addNewBlock(NewBB, TIBB);
}
// Should we update DominatorTree information?
if (DominatorTree *DT = P->getAnalysisToUpdate<DominatorTree>()) {
DominatorTree::Node *TINode = DT->getNode(TIBB);
// The new block is not the immediate dominator for any other nodes, but
// TINode is the immediate dominator for the new node.
//

@ -1,10 +1,10 @@
//===- CloneFunction.cpp - Clone a function into another function ---------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneFunctionInto interface, which is used as the
@ -47,7 +47,7 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
std::vector<ReturnInst*> &Returns,
const char *NameSuffix) {
assert(NameSuffix && "NameSuffix cannot be null!");
#ifndef NDEBUG
for (Function::const_arg_iterator I = OldFunc->arg_begin(), E = OldFunc->arg_end();
I != E; ++I)
@ -61,7 +61,7 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
for (Function::const_iterator BI = OldFunc->begin(), BE = OldFunc->end();
BI != BE; ++BI) {
const BasicBlock &BB = *BI;
// Create a new basic block and copy instructions into it!
BasicBlock *CBB = CloneBasicBlock(&BB, ValueMap, NameSuffix, NewFunc);
ValueMap[&BB] = CBB; // Add basic block mapping.
@ -70,7 +70,7 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
Returns.push_back(RI);
}
// Loop over all of the instructions in the function, fixing up operand
// Loop over all of the instructions in the function, fixing up operand
// references as we go. This uses ValueMap to do all the hard work.
//
for (Function::iterator BB = cast<BasicBlock>(ValueMap[OldFunc->begin()]),
@ -105,7 +105,7 @@ Function *llvm::CloneFunction(const Function *F,
// Create the new function...
Function *NewF = new Function(FTy, F->getLinkage(), F->getName());
// Loop over the arguments, copying the names of the mapped arguments over...
Function::arg_iterator DestI = NewF->arg_begin();
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
@ -116,6 +116,6 @@ Function *llvm::CloneFunction(const Function *F,
std::vector<ReturnInst*> Returns; // Ignore returns cloned...
CloneFunctionInto(NewF, F, ValueMap, Returns);
return NewF;
return NewF;
}

@ -1,10 +1,10 @@
//===- CloneModule.cpp - Clone an entire module ---------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneModule interface which makes a copy of an

@ -1,10 +1,10 @@
//===- CloneTrace.cpp - Clone a trace -------------------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneTrace interface, which is used when writing
@ -27,7 +27,7 @@ std::vector<BasicBlock *>
llvm::CloneTrace(const std::vector<BasicBlock*> &origTrace) {
std::vector<BasicBlock *> clonedTrace;
std::map<const Value*, Value*> ValueMap;
//First, loop over all the Basic Blocks in the trace and copy
//them using CloneBasicBlock. Also fix the phi nodes during
//this loop. To fix the phi nodes, we delete incoming branches
@ -38,7 +38,7 @@ llvm::CloneTrace(const std::vector<BasicBlock*> &origTrace) {
//Clone Basic Block
BasicBlock *clonedBlock =
CloneBasicBlock(*T, ValueMap, ".tr", (*T)->getParent());
//Add it to our new trace
clonedTrace.push_back(clonedBlock);
@ -55,10 +55,10 @@ llvm::CloneTrace(const std::vector<BasicBlock*> &origTrace) {
//get incoming value for the previous BB
Value *V = PN->getIncomingValueForBlock(*(T-1));
assert(V && "No incoming value from a BasicBlock in our trace!");
//remap our phi node to point to incoming value
ValueMap[*&I] = V;
//remove phi node
clonedBlock->getInstList().erase(PN);
}
@ -69,7 +69,7 @@ llvm::CloneTrace(const std::vector<BasicBlock*> &origTrace) {
for(std::vector<BasicBlock *>::const_iterator BB = clonedTrace.begin(),
BE = clonedTrace.end(); BB != BE; ++BB) {
for(BasicBlock::iterator I = (*BB)->begin(); I != (*BB)->end(); ++I) {
//Loop over all the operands of the instruction
for(unsigned op=0, E = I->getNumOperands(); op != E; ++op) {
const Value *Op = I->getOperand(op);
@ -83,7 +83,7 @@ llvm::CloneTrace(const std::vector<BasicBlock*> &origTrace) {
}
}
}
//return new vector of basic blocks
return clonedTrace;
}

@ -1,10 +1,10 @@
//===- CodeExtractor.cpp - Pull code region into a new function -----------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the interface to tear out a code region, such as an
@ -64,7 +64,7 @@ namespace {
return true;
return false;
}
/// definedInCaller - Return true if the specified value is defined in the
/// function being code extracted, but not in the region being extracted.
/// These values must be passed in as live-ins to the function.
@ -198,7 +198,7 @@ void CodeExtractor::splitReturnBlocks() {
//
void CodeExtractor::findInputsOutputs(Values &inputs, Values &outputs) {
std::set<BasicBlock*> ExitBlocks;
for (std::set<BasicBlock*>::const_iterator ci = BlocksToExtract.begin(),
for (std::set<BasicBlock*>::const_iterator ci = BlocksToExtract.begin(),
ce = BlocksToExtract.end(); ci != ce; ++ci) {
BasicBlock *BB = *ci;
@ -208,7 +208,7 @@ void CodeExtractor::findInputsOutputs(Values &inputs, Values &outputs) {
for (User::op_iterator O = I->op_begin(), E = I->op_end(); O != E; ++O)
if (definedInCaller(*O))
inputs.push_back(*O);
// Consider uses of this instruction (outputs).
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
UI != E; ++UI)
@ -326,7 +326,7 @@ Function *CodeExtractor::constructFunction(const Values &inputs,
for (unsigned i = 0, e = inputs.size(); i != e; ++i, ++AI)
AI->setName(inputs[i]->getName());
for (unsigned i = 0, e = outputs.size(); i != e; ++i, ++AI)
AI->setName(outputs[i]->getName()+".out");
AI->setName(outputs[i]->getName()+".out");
}
// Rewrite branches to basic blocks outside of the loop to new dummy blocks
@ -383,8 +383,8 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
// Allocate a struct at the beginning of this function
Type *StructArgTy = StructType::get(ArgTypes);
Struct =
new AllocaInst(StructArgTy, 0, "structArg",
Struct =
new AllocaInst(StructArgTy, 0, "structArg",
codeReplacer->getParent()->begin()->begin());
params.push_back(Struct);
@ -399,7 +399,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
StoreInst *SI = new StoreInst(StructValues[i], GEP);
codeReplacer->getInstList().push_back(SI);
}
}
}
// Emit the call to the function
CallInst *call = new CallInst(newFunction, params,
@ -418,7 +418,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
std::vector<Value*> Indices;
Indices.push_back(Constant::getNullValue(Type::UIntTy));
Indices.push_back(ConstantUInt::get(Type::UIntTy, FirstOut + i));
GetElementPtrInst *GEP
GetElementPtrInst *GEP
= new GetElementPtrInst(Struct, Indices,
"gep_reload_" + outputs[i]->getName());
codeReplacer->getInstList().push_back(GEP);
@ -521,7 +521,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
Indices.push_back(ConstantUInt::get(Type::UIntTy,FirstOut+out));
GetElementPtrInst *GEP =
new GetElementPtrInst(OAI, Indices,
"gep_" + outputs[out]->getName(),
"gep_" + outputs[out]->getName(),
NTRet);
new StoreInst(outputs[out], GEP, NTRet);
} else {
@ -545,7 +545,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
// There are no successors (the block containing the switch itself), which
// means that previously this was the last part of the function, and hence
// this should be rewritten as a `ret'
// Check if the function should return a value
if (OldFnRetTy == Type::VoidTy) {
new ReturnInst(0, TheSwitch); // Return void
@ -603,13 +603,13 @@ void CodeExtractor::moveCodeToFunction(Function *newFunction) {
///
/// find inputs and outputs for the region
///
/// for inputs: add to function as args, map input instr* to arg#
/// for outputs: add allocas for scalars,
/// for inputs: add to function as args, map input instr* to arg#
/// for outputs: add allocas for scalars,
/// add to func as args, map output instr* to arg#
///
/// rewrite func to use argument #s instead of instr*
///
/// for each scalar output in the function: at every exit, store intermediate
/// for each scalar output in the function: at every exit, store intermediate
/// computed result back into memory.
///
Function *CodeExtractor::
@ -637,7 +637,7 @@ ExtractCodeRegion(const std::vector<BasicBlock*> &code) {
assert(BlocksToExtract.count(*PI) &&
"No blocks in this region may have entries from outside the region"
" except for the first block!");
// If we have to split PHI nodes or the entry block, do so now.
severSplitPHINodes(header);
@ -660,7 +660,7 @@ ExtractCodeRegion(const std::vector<BasicBlock*> &code) {
// Construct new function based on inputs/outputs & add allocas for all defs.
Function *newFunction = constructFunction(inputs, outputs, header,
newFuncRoot,
newFuncRoot,
codeReplacer, oldFunction,
oldFunction->getParent());
@ -676,7 +676,7 @@ ExtractCodeRegion(const std::vector<BasicBlock*> &code) {
if (!BlocksToExtract.count(PN->getIncomingBlock(i)))
PN->setIncomingBlock(i, newFuncRoot);
}
// Look at all successors of the codeReplacer block. If any of these blocks
// had PHI nodes in them, we need to update the "from" block to be the code
// replacer, not the original block in the extracted region.
@ -697,7 +697,7 @@ ExtractCodeRegion(const std::vector<BasicBlock*> &code) {
--i; --e;
}
}
//std::cerr << "NEW FUNCTION: " << *newFunction;
// verifyFunction(*newFunction);
@ -744,5 +744,5 @@ Function* llvm::ExtractLoop(DominatorSet &DS, Loop *L, bool AggregateArgs) {
Function* llvm::ExtractBasicBlock(BasicBlock *BB, bool AggregateArgs) {
std::vector<BasicBlock*> Blocks;
Blocks.push_back(BB);
return CodeExtractor(0, AggregateArgs).ExtractCodeRegion(Blocks);
return CodeExtractor(0, AggregateArgs).ExtractCodeRegion(Blocks);
}

@ -1,12 +1,12 @@
//===- DemoteRegToStack.cpp - Move a virtual register to the stack --------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
//
// This file provide the function DemoteRegToStack(). This function takes a
// virtual register computed by an Instruction and replaces it with a slot in
// the stack frame, allocated via alloca. It returns the pointer to the

@ -1,10 +1,10 @@
//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
@ -31,8 +31,8 @@ bool llvm::InlineFunction(InvokeInst *II) {return InlineFunction(CallSite(II));}
// block of the caller. This returns false if it is not possible to inline this
// call. The program is still in a well defined state if this occurs though.
//
// Note that this only does one level of inlining. For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// Note that this only does one level of inlining. For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream. Similiarly this will inline a recursive
// function by one level.
//
@ -60,18 +60,18 @@ bool llvm::InlineFunction(CallSite CS) {
{ // Scope to destroy ValueMap after cloning.
// Calculate the vector of arguments to pass into the function cloner...
std::map<const Value*, Value*> ValueMap;
assert(std::distance(CalledFunc->arg_begin(), CalledFunc->arg_end()) ==
assert(std::distance(CalledFunc->arg_begin(), CalledFunc->arg_end()) ==
std::distance(CS.arg_begin(), CS.arg_end()) &&
"No varargs calls can be inlined!");
CallSite::arg_iterator AI = CS.arg_begin();
for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
E = CalledFunc->arg_end(); I != E; ++I, ++AI)
ValueMap[I] = *AI;
// Clone the entire body of the callee into the caller.
// Clone the entire body of the callee into the caller.
CloneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i");
}
}
// Remember the first block that is newly cloned over.
Function::iterator FirstNewBlock = LastBlock; ++FirstNewBlock;
@ -131,21 +131,21 @@ bool llvm::InlineFunction(CallSite CS) {
} else {
// First, split the basic block...
BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");
// Next, create the new invoke instruction, inserting it at the end
// of the old basic block.
InvokeInst *II =
new InvokeInst(CI->getCalledValue(), Split, InvokeDest,
new InvokeInst(CI->getCalledValue(), Split, InvokeDest,
std::vector<Value*>(CI->op_begin()+1, CI->op_end()),
CI->getName(), BB->getTerminator());
// Make sure that anything using the call now uses the invoke!
CI->replaceAllUsesWith(II);
// Delete the unconditional branch inserted by splitBasicBlock
BB->getInstList().pop_back();
Split->getInstList().pop_front(); // Delete the original call
// Update any PHI nodes in the exceptional block to indicate that
// there is now a new entry in them.
unsigned i = 0;
@ -154,7 +154,7 @@ bool llvm::InlineFunction(CallSite CS) {
PHINode *PN = cast<PHINode>(I);
PN->addIncoming(InvokeDestPHIValues[i], BB);
}
// This basic block is now complete, start scanning the next one.
break;
}
@ -200,7 +200,7 @@ bool llvm::InlineFunction(CallSite CS) {
FirstNewBlock->begin(), FirstNewBlock->end());
// Remove the cloned basic block.
Caller->getBasicBlockList().pop_back();
// If the call site was an invoke instruction, add a branch to the normal
// destination.
if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
@ -229,16 +229,16 @@ bool llvm::InlineFunction(CallSite CS) {
// this is an invoke instruction or a call instruction.
BasicBlock *AfterCallBB;
if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
// Add an unconditional branch to make this look like the CallInst case...
BranchInst *NewBr = new BranchInst(II->getNormalDest(), TheCall);
// Split the basic block. This guarantees that no PHI nodes will have to be
// updated due to new incoming edges, and make the invoke case more
// symmetric to the call case.
AfterCallBB = OrigBB->splitBasicBlock(NewBr,
CalledFunc->getName()+".exit");
} else { // It's a call
// If this is a call instruction, we need to split the basic block that
// the call lives in.
@ -251,7 +251,7 @@ bool llvm::InlineFunction(CallSite CS) {
// basic block of the inlined function.
//
TerminatorInst *Br = OrigBB->getTerminator();
assert(Br && Br->getOpcode() == Instruction::Br &&
assert(Br && Br->getOpcode() == Instruction::Br &&
"splitBasicBlock broken!");
Br->setOperand(0, FirstNewBlock);
@ -273,39 +273,39 @@ bool llvm::InlineFunction(CallSite CS) {
if (!TheCall->use_empty()) {
PHI = new PHINode(CalledFunc->getReturnType(),
TheCall->getName(), AfterCallBB->begin());
// Anything that used the result of the function call should now use the
// PHI node as their operand.
//
TheCall->replaceAllUsesWith(PHI);
}
// Loop over all of the return instructions, turning them into unconditional
// branches to the merge point now, and adding entries to the PHI node as
// appropriate.
for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
ReturnInst *RI = Returns[i];
if (PHI) {
assert(RI->getReturnValue() && "Ret should have value!");
assert(RI->getReturnValue()->getType() == PHI->getType() &&
assert(RI->getReturnValue()->getType() == PHI->getType() &&
"Ret value not consistent in function!");
PHI->addIncoming(RI->getReturnValue(), RI->getParent());
}
// Add a branch to the merge point where the PHI node lives if it exists.
new BranchInst(AfterCallBB, RI);
// Delete the return instruction now
RI->getParent()->getInstList().erase(RI);
}
} else if (!Returns.empty()) {
// Otherwise, if there is exactly one return value, just replace anything
// using the return value of the call with the computed value.
if (!TheCall->use_empty())
TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
// Splice the code from the return block into the block that it will return
// to, which contains the code that was after the call.
BasicBlock *ReturnBB = Returns[0]->getParent();
@ -314,7 +314,7 @@ bool llvm::InlineFunction(CallSite CS) {
// Update PHI nodes that use the ReturnBB to use the AfterCallBB.
ReturnBB->replaceAllUsesWith(AfterCallBB);
// Delete the return instruction now and empty ReturnBB now.
Returns[0]->eraseFromParent();
ReturnBB->eraseFromParent();
@ -323,7 +323,7 @@ bool llvm::InlineFunction(CallSite CS) {
// nuke the result.
TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
}
// Since we are now done with the Call/Invoke, we can delete it.
TheCall->eraseFromParent();

@ -1,10 +1,10 @@
//===-- Local.cpp - Functions to perform local transformations ------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
@ -32,7 +32,7 @@ bool llvm::doConstantPropagation(BasicBlock::iterator &II) {
if (Constant *C = ConstantFoldInstruction(II)) {
// Replaces all of the uses of a variable with uses of the constant.
II->replaceAllUsesWith(C);
// Remove the instruction from the basic block...
II = II->getParent()->getInstList().erase(II);
return true;
@ -50,7 +50,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I) {
if (PHINode *PN = dyn_cast<PHINode>(I)) {
if (PN->getNumIncomingValues() == 0)
return Constant::getNullValue(PN->getType());
Constant *Result = dyn_cast<Constant>(PN->getIncomingValue(0));
if (Result == 0) return 0;
@ -58,7 +58,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I) {
for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
if (PN->getIncomingValue(i) != Result && PN->getIncomingValue(i) != PN)
return 0; // Not all the same incoming constants...
// If we reach here, all incoming values are the same constant.
return Result;
} else if (CallInst *CI = dyn_cast<CallInst>(I)) {
@ -89,7 +89,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I) {
}
if (isa<BinaryOperator>(I) || isa<ShiftInst>(I))
return ConstantExpr::get(I->getOpcode(), Op0, Op1);
return ConstantExpr::get(I->getOpcode(), Op0, Op1);
switch (I->getOpcode()) {
default: return 0;
@ -118,7 +118,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I) {
//
bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
TerminatorInst *T = BB->getTerminator();
// Branch - See if we are conditional jumping on constant
if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
if (BI->isUnconditional()) return false; // Can't optimize uncond branch
@ -131,8 +131,8 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
BasicBlock *Destination = Cond->getValue() ? Dest1 : Dest2;
BasicBlock *OldDest = Cond->getValue() ? Dest2 : Dest1;
//cerr << "Function: " << T->getParent()->getParent()
// << "\nRemoving branch from " << T->getParent()
//cerr << "Function: " << T->getParent()->getParent()
// << "\nRemoving branch from " << T->getParent()
// << "\n\nTo: " << OldDest << endl;
// Let the basic block know that we are letting go of it. Based on this,
@ -145,7 +145,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
BI->setUnconditionalDest(Destination);
return true;
} else if (Dest2 == Dest1) { // Conditional branch to same location?
// This branch matches something like this:
// This branch matches something like this:
// br bool %cond, label %Dest, label %Dest
// and changes it into: br label %Dest
@ -294,7 +294,7 @@ Constant *llvm::ConstantFoldCall(Function *F,
if (Name == "llvm.isunordered")
return ConstantBool::get(IsNAN(Op1V) || IsNAN(Op2V));
else
else
if (Name == "pow") {
errno = 0;
double V = pow(Op1V, Op2V);

@ -1,10 +1,10 @@
//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file promote memory references to be register references. It promotes
@ -48,7 +48,7 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI, const TargetData &TD) {
} else {
return false; // Not a load or store.
}
return true;
}
@ -107,7 +107,7 @@ namespace {
void MarkDominatingPHILive(BasicBlock *BB, unsigned AllocaNum,
std::set<PHINode*> &DeadPHINodes);
void PromoteLocallyUsedAlloca(BasicBlock *BB, AllocaInst *AI);
void PromoteLocallyUsedAllocas(BasicBlock *BB,
void PromoteLocallyUsedAllocas(BasicBlock *BB,
const std::vector<AllocaInst*> &AIs);
void RenamePass(BasicBlock *BB, BasicBlock *Pred,
@ -267,13 +267,13 @@ void PromoteMem2Reg::run() {
if (AST && isa<PointerType>(PN->getType()))
AST->deleteValue(PN);
PN->getParent()->getInstList().erase(PN);
PN->getParent()->getInstList().erase(PN);
}
// Keep the reverse mapping of the 'Allocas' array.
// Keep the reverse mapping of the 'Allocas' array.
AllocaLookup[Allocas[AllocaNum]] = AllocaNum;
}
// Process all allocas which are only used in a single basic block.
for (std::map<BasicBlock*, std::vector<AllocaInst*> >::iterator I =
LocallyUsedAllocas.begin(), E = LocallyUsedAllocas.end(); I != E; ++I){
@ -327,7 +327,7 @@ void PromoteMem2Reg::run() {
// have incoming values for all predecessors. Loop over all PHI nodes we have
// created, inserting undef values if they are missing any incoming values.
//
for (std::map<BasicBlock*, std::vector<PHINode *> >::iterator I =
for (std::map<BasicBlock*, std::vector<PHINode *> >::iterator I =
NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E; ++I) {
std::vector<BasicBlock*> Preds(pred_begin(I->first), pred_end(I->first));
@ -449,7 +449,7 @@ void PromoteMem2Reg::PromoteLocallyUsedAlloca(BasicBlock *BB, AllocaInst *AI) {
} else {
// Uses of the uninitialized memory location shall get undef.
Value *CurVal = UndefValue::get(AI->getAllocatedType());
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
Instruction *Inst = I++;
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
@ -572,7 +572,7 @@ void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
// don't revisit nodes
if (Visited.count(BB)) return;
// mark as visited
Visited.insert(BB);

@ -1,10 +1,10 @@
//===- SimplifyCFG.cpp - Code to perform CFG simplification ---------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Peephole optimize the CFG.
@ -81,7 +81,7 @@ static bool PropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
PN->addIncoming(OldValPN->getIncomingValue(i),
OldValPN->getIncomingBlock(i));
} else {
for (std::vector<BasicBlock*>::const_iterator PredI = BBPreds.begin(),
for (std::vector<BasicBlock*>::const_iterator PredI = BBPreds.begin(),
End = BBPreds.end(); PredI != End; ++PredI) {
// Add an incoming value for each of the new incoming values...
PN->addIncoming(OldVal, *PredI);
@ -97,7 +97,7 @@ static bool PropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
/// which entry into BB will be taken. Also, return by references the block
/// that will be entered from if the condition is true, and the block that will
/// be entered if the condition is false.
///
///
///
static Value *GetIfCondition(BasicBlock *BB,
BasicBlock *&IfTrue, BasicBlock *&IfFalse) {
@ -240,7 +240,7 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
case Instruction::SetGE:
break; // These are all cheap and non-trapping instructions.
}
// Okay, we can only really hoist these out if their operands are not
// defined in the conditional region.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
@ -317,7 +317,7 @@ static bool GatherValueComparisons(Instruction *Cond, Value *&CompVal,
return true;
} else if (Cond->getOpcode() == Instruction::And) {
CompVal = GatherConstantSetNEs(Cond, Values);
// Return false to indicate that the condition is false if the CompVal is
// equal to one of the constants.
return false;
@ -360,7 +360,7 @@ static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) {
PN->getIncomingValueForBlock(SI2BB))
return false;
}
return true;
}
@ -397,7 +397,7 @@ static Value *isValueEqualityComparison(TerminatorInst *TI) {
if (BI->isConditional() && BI->getCondition()->hasOneUse())
if (SetCondInst *SCI = dyn_cast<SetCondInst>(BI->getCondition()))
if ((SCI->getOpcode() == Instruction::SetEQ ||
SCI->getOpcode() == Instruction::SetNE) &&
SCI->getOpcode() == Instruction::SetNE) &&
isa<ConstantInt>(SCI->getOperand(1)))
return SCI->getOperand(0);
return 0;
@ -406,7 +406,7 @@ static Value *isValueEqualityComparison(TerminatorInst *TI) {
// Given a value comparison instruction, decode all of the 'cases' that it
// represents and return the 'default' block.
static BasicBlock *
GetValueEqualityComparisonCases(TerminatorInst *TI,
GetValueEqualityComparisonCases(TerminatorInst *TI,
std::vector<std::pair<ConstantInt*,
BasicBlock*> > &Cases) {
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
@ -427,7 +427,7 @@ GetValueEqualityComparisonCases(TerminatorInst *TI,
// EliminateBlockCases - Given an vector of bb/value pairs, remove any entries
// in the list that match the specified block.
static void EliminateBlockCases(BasicBlock *BB,
static void EliminateBlockCases(BasicBlock *BB,
std::vector<std::pair<ConstantInt*, BasicBlock*> > &Cases) {
for (unsigned i = 0, e = Cases.size(); i != e; ++i)
if (Cases[i].second == BB) {
@ -491,7 +491,7 @@ static bool SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
BasicBlock *PredDef = GetValueEqualityComparisonCases(Pred->getTerminator(),
PredCases);
EliminateBlockCases(PredDef, PredCases); // Remove default from cases.
// Find information about how control leaves this block.
std::vector<std::pair<ConstantInt*, BasicBlock*> > ThisCases;
BasicBlock *ThisDef = GetValueEqualityComparisonCases(TI, ThisCases);
@ -608,7 +608,7 @@ static bool FoldValueComparisonIntoPredecessors(TerminatorInst *TI) {
while (!Preds.empty()) {
BasicBlock *Pred = Preds.back();
Preds.pop_back();
// See if the predecessor is a comparison with the same value.
TerminatorInst *PTI = Pred->getTerminator();
Value *PCV = isValueEqualityComparison(PTI); // PredCondVal
@ -719,7 +719,7 @@ static bool FoldValueComparisonIntoPredecessors(TerminatorInst *TI) {
}
NewSI->setSuccessor(i, InfLoopBlock);
}
Changed = true;
}
}
@ -750,7 +750,7 @@ static bool HoistThenElseCodeToIf(BranchInst *BI) {
// broken BB), instead clone it, and remove BI.
if (isa<TerminatorInst>(I1))
goto HoistTerminator;
// For a normal instruction, we just move one to right before the branch,
// then replace all uses of the other with the first. Finally, we remove
// the now redundant second instruction.
@ -758,7 +758,7 @@ static bool HoistThenElseCodeToIf(BranchInst *BI) {
if (!I2->use_empty())
I2->replaceAllUsesWith(I1);
BB2->getInstList().erase(I2);
I1 = BB1->begin();
I2 = BB2->begin();
} while (I1->getOpcode() == I2->getOpcode() && I1->isIdenticalTo(I2));
@ -804,7 +804,7 @@ HoistTerminator:
// Update any PHI nodes in our new successors.
for (succ_iterator SI = succ_begin(BB1), E = succ_end(BB1); SI != E; ++SI)
AddPredecessorToBlock(*SI, BIParent, BB1);
BI->eraseFromParent();
return true;
}
@ -850,13 +850,13 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
Instruction &I = BB->back();
// If this instruction is used, replace uses with an arbitrary
// constant value. Because control flow can't get here, we don't care
// what we replace the value with. Note that since this block is
// what we replace the value with. Note that since this block is
// unreachable, and all values contained within it must dominate their
// uses, that all uses will eventually be removed.
if (!I.use_empty())
if (!I.use_empty())
// Make all users of this instruction reference the constant instead
I.replaceAllUsesWith(Constant::getNullValue(I.getType()));
// Remove the instruction from the basic block
BB->getInstList().pop_back();
}
@ -886,11 +886,11 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
//
if (!PropagatePredecessorsForPHIs(BB, Succ)) {
DEBUG(std::cerr << "Killing Trivial BB: \n" << *BB);
if (isa<PHINode>(&BB->front())) {
std::vector<BasicBlock*>
OldSuccPreds(pred_begin(Succ), pred_end(Succ));
// Move all PHI nodes in BB to Succ if they are alive, otherwise
// delete them.
while (PHINode *PN = dyn_cast<PHINode>(&BB->front()))
@ -903,7 +903,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
// strictly dominated Succ.
BB->getInstList().remove(BB->begin());
Succ->getInstList().push_front(PN);
// We need to add new entries for the PHI node to account for
// predecessors of Succ that the PHI node does not take into
// account. At this point, since we know that BB dominated succ,
@ -915,7 +915,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
PN->addIncoming(PN, OldSuccPreds[i]);
}
}
// Everything that jumped to BB now goes to Succ.
std::string OldName = BB->getName();
BB->replaceAllUsesWith(Succ);
@ -948,7 +948,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
else
CondBranchPreds.push_back(BI);
}
// If we found some, do the transformation!
if (!UncondBranchPreds.empty()) {
while (!UncondBranchPreds.empty()) {
@ -1061,7 +1061,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
// is now a fall through...
BranchInst *BI = new BranchInst(II->getNormalDest(), II);
Pred->getInstList().remove(II); // Take out of symbol table
// Insert the call now...
std::vector<Value*> Args(II->op_begin()+3, II->op_end());
CallInst *CI = new CallInst(II->getCalledValue(), Args,
@ -1071,7 +1071,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
delete II;
Changed = true;
}
Preds.pop_back();
}
@ -1153,7 +1153,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
Instruction::BinaryOps Opcode =
PBI->getSuccessor(0) == TrueDest ?
Instruction::Or : Instruction::And;
Value *NewCond =
Value *NewCond =
BinaryOperator::create(Opcode, PBI->getCondition(),
New, "bothcond", PBI);
PBI->setCondition(NewCond);
@ -1179,7 +1179,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
OnlyPred = 0; // There are multiple different predecessors...
break;
}
if (OnlyPred)
if (BranchInst *PBI = dyn_cast<BranchInst>(OnlyPred->getTerminator()))
if (PBI->isConditional() &&
@ -1275,7 +1275,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
// place to note that the call does not throw though.
BranchInst *BI = new BranchInst(II->getNormalDest(), II);
II->removeFromParent(); // Take out of symbol table
// Insert the call now...
std::vector<Value*> Args(II->op_begin()+3, II->op_end());
CallInst *CI = new CallInst(II->getCalledValue(), Args,
@ -1339,23 +1339,23 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
// Delete the unconditional branch from the predecessor...
OnlyPred->getInstList().pop_back();
// Move all definitions in the successor to the predecessor...
OnlyPred->getInstList().splice(OnlyPred->end(), BB->getInstList());
// Make all PHI nodes that referred to BB now refer to Pred as their
// source...
BB->replaceAllUsesWith(OnlyPred);
std::string OldName = BB->getName();
// Erase basic block from the function...
// Erase basic block from the function...
M->getBasicBlockList().erase(BB);
// Inherit predecessors name if it exists...
if (!OldName.empty() && !OnlyPred->hasName())
OnlyPred->setName(OldName);
return true;
}
@ -1393,19 +1393,19 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
// instruction can't handle, remove them now.
std::sort(Values.begin(), Values.end(), ConstantIntOrdering());
Values.erase(std::unique(Values.begin(), Values.end()), Values.end());
// Figure out which block is which destination.
BasicBlock *DefaultBB = BI->getSuccessor(1);
BasicBlock *EdgeBB = BI->getSuccessor(0);
if (!TrueWhenEqual) std::swap(DefaultBB, EdgeBB);
// Create the new switch instruction now.
SwitchInst *New = new SwitchInst(CompVal, DefaultBB,Values.size(),BI);
// Add all of the 'cases' to the switch instruction.
for (unsigned i = 0, e = Values.size(); i != e; ++i)
New->addCase(Values[i], EdgeBB);
// We added edges from PI to the EdgeBB. As such, if there were any
// PHI nodes in EdgeBB, they need entries to be added corresponding to
// the number of edges added.
@ -1489,7 +1489,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
}
Pred = PN->getIncomingBlock(1);
if (CanPromote &&
if (CanPromote &&
cast<BranchInst>(Pred->getTerminator())->isUnconditional()) {
IfBlock2 = Pred;
DomBlock = *pred_begin(Pred);
@ -1539,6 +1539,6 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
}
}
}
return Changed;
}
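The SimplifyCFG hunks above include the rewrite that collapses a chain of equality comparisons against constants into one switch (the SwitchInst built from CompVal, the sorted Values, and the default/edge blocks). The following is only an illustrative sketch of that rewrite in plain C++, not LLVM IR and not part of this patch; the function names are invented for the example.

#include <iostream>

// Before: a cascade of seteq-style tests that all reach the same destination.
static const char *chained(int X) {
  if (X == 1 || X == 4 || X == 9)
    return "edge";
  return "default";
}

// After: a single switch with one case per collected constant, same behavior.
static const char *switched(int X) {
  switch (X) {
  case 1: case 4: case 9:
    return "edge";
  default:
    return "default";
  }
}

int main() {
  int Tests[] = {1, 2, 4, 9, 10};
  for (int X : Tests)
    std::cout << X << " -> " << chained(X) << " / " << switched(X) << "\n";
  return 0;
}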

View File

@ -1,10 +1,10 @@
//===- UnifyFunctionExitNodes.cpp - Make all functions have a single exit -===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This pass is used to ensure that functions have at most one return
@ -64,7 +64,7 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
UnwindBlock = new BasicBlock("UnifiedUnwindBlock", &F);
new UnwindInst(UnwindBlock);
for (std::vector<BasicBlock*>::iterator I = UnwindingBlocks.begin(),
for (std::vector<BasicBlock*>::iterator I = UnwindingBlocks.begin(),
E = UnwindingBlocks.end(); I != E; ++I) {
BasicBlock *BB = *I;
BB->getInstList().pop_back(); // Remove the unwind insn
@ -81,7 +81,7 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
UnreachableBlock = new BasicBlock("UnifiedUnreachableBlock", &F);
new UnreachableInst(UnreachableBlock);
for (std::vector<BasicBlock*>::iterator I = UnreachableBlocks.begin(),
for (std::vector<BasicBlock*>::iterator I = UnreachableBlocks.begin(),
E = UnreachableBlocks.end(); I != E; ++I) {
BasicBlock *BB = *I;
BB->getInstList().pop_back(); // Remove the unreachable inst.
@ -99,7 +99,7 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
}
// Otherwise, we need to insert a new basic block into the function, add a PHI
// node (if the function returns a value), and convert all of the return
// node (if the function returns a value), and convert all of the return
// instructions into unconditional branches.
//
BasicBlock *NewRetBlock = new BasicBlock("UnifiedReturnBlock", &F);
@ -115,7 +115,7 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
// Loop over all of the blocks, replacing the return instruction with an
// unconditional branch.
//
for (std::vector<BasicBlock*>::iterator I = ReturningBlocks.begin(),
for (std::vector<BasicBlock*>::iterator I = ReturningBlocks.begin(),
E = ReturningBlocks.end(); I != E; ++I) {
BasicBlock *BB = *I;

View File

@ -1,10 +1,10 @@
//===- ValueMapper.cpp - Interface shared by lib/Transforms/Utils ---------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines the MapValue function, which is shared by various parts of
@ -23,7 +23,7 @@ using namespace llvm;
Value *llvm::MapValue(const Value *V, std::map<const Value*, Value*> &VM) {
Value *&VMSlot = VM[V];
if (VMSlot) return VMSlot; // Does it exist in the map yet?
// Global values do not need to be seeded into the ValueMap if they are using
// the identity mapping.
if (isa<GlobalValue>(V))

View File

@ -1,10 +1,10 @@
//===- ValueMapper.h - Interface shared by lib/Transforms/Utils -*- C++ -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines the MapValue interface which is used by various parts of

View File

@ -1,10 +1,10 @@
//===-- AsmWriter.cpp - Printing LLVM as an assembly file -----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This library implements the functionality defined in llvm/Assembly/Writer.h
@ -46,7 +46,7 @@ public:
typedef std::map<const Type*, unsigned> TypeMap;
/// @brief A plane with next slot number and ValueMap
struct ValuePlane {
struct ValuePlane {
unsigned next_slot; ///< The next slot number to use
ValueMap map; ///< The map of Value* -> unsigned
ValuePlane() { next_slot = 0; } ///< Make sure we start at 0
@ -90,15 +90,15 @@ public:
/// @name Mutators
/// @{
public:
/// If you'd like to deal with a function instead of just a module, use
/// If you'd like to deal with a function instead of just a module, use
/// this method to get its data into the SlotMachine.
void incorporateFunction(const Function *F) {
TheFunction = F;
void incorporateFunction(const Function *F) {
TheFunction = F;
FunctionProcessed = false;
}
/// After calling incorporateFunction, use this method to remove the
/// most recently incorporated function from the SlotMachine. This
/// After calling incorporateFunction, use this method to remove the
/// most recently incorporated function from the SlotMachine. This
/// will reset the state of the machine back to just the module contents.
void purgeFunction();
@ -109,7 +109,7 @@ private:
/// This function does the actual initialization.
inline void initialize();
/// Values can be crammed into here at will. If they haven't
/// Values can be crammed into here at will. If they haven't
/// been inserted already, they get inserted, otherwise they are ignored.
/// Either way, the slot number for the Value* is returned.
unsigned createSlot(const Value *V);
@ -117,7 +117,7 @@ private:
/// Insert a value into the value table. Return the slot number
/// that it now occupies. BadThings(TM) will happen if you insert a
/// Value that's already been inserted.
/// Value that's already been inserted.
unsigned insertValue( const Value *V );
unsigned insertValue( const Type* Ty);
@ -162,12 +162,12 @@ X("printm", "Print module to stderr",PassInfo::Analysis|PassInfo::Optimization);
static RegisterPass<PrintFunctionPass>
Y("print","Print function to stderr",PassInfo::Analysis|PassInfo::Optimization);
static void WriteAsOperandInternal(std::ostream &Out, const Value *V,
static void WriteAsOperandInternal(std::ostream &Out, const Value *V,
bool PrintName,
std::map<const Type *, std::string> &TypeTable,
SlotMachine *Machine);
static void WriteAsOperandInternal(std::ostream &Out, const Type *T,
static void WriteAsOperandInternal(std::ostream &Out, const Type *T,
bool PrintName,
std::map<const Type *, std::string> &TypeTable,
SlotMachine *Machine);
@ -219,7 +219,7 @@ static std::string getLLVMName(const std::string &Name,
C != '-' && C != '.' && C != '_')
return "\"" + Name + "\"";
}
// If we get here, then the identifier is legal to use as a "VarID".
if (prefixName)
return "%"+Name;
@ -250,7 +250,7 @@ static void fillTypeNameTable(const Module *M,
static void calcTypeName(const Type *Ty,
static void calcTypeName(const Type *Ty,
std::vector<const Type *> &TypeStack,
std::map<const Type *, std::string> &TypeNames,
std::string & Result){
@ -275,7 +275,7 @@ static void calcTypeName(const Type *Ty,
unsigned Slot = 0, CurSize = TypeStack.size();
while (Slot < CurSize && TypeStack[Slot] != Ty) ++Slot; // Scan for type
// This is another base case for the recursion. In this case, we know
// This is another base case for the recursion. In this case, we know
// that we have looped back to a type that we have previously visited.
// Generate the appropriate upreference to handle this.
if (Slot < CurSize) {
@ -284,7 +284,7 @@ static void calcTypeName(const Type *Ty,
}
TypeStack.push_back(Ty); // Recursive case: Add us to the stack..
switch (Ty->getTypeID()) {
case Type::FunctionTyID: {
const FunctionType *FTy = cast<FunctionType>(Ty);
@ -316,7 +316,7 @@ static void calcTypeName(const Type *Ty,
break;
}
case Type::PointerTyID:
calcTypeName(cast<PointerType>(Ty)->getElementType(),
calcTypeName(cast<PointerType>(Ty)->getElementType(),
TypeStack, TypeNames, Result);
Result += "*";
break;
@ -379,22 +379,22 @@ static std::ostream &printTypeInt(std::ostream &Out, const Type *Ty,
///
std::ostream &llvm::WriteTypeSymbolic(std::ostream &Out, const Type *Ty,
const Module *M) {
Out << ' ';
Out << ' ';
// If they want us to print out a type, attempt to make it symbolic if there
// is a symbol table in the module...
if (M) {
std::map<const Type *, std::string> TypeNames;
fillTypeNameTable(M, TypeNames);
return printTypeInt(Out, Ty, TypeNames);
} else {
return Out << Ty->getDescription();
}
}
/// @brief Internal constant writer.
static void WriteConstantInt(std::ostream &Out, const Constant *CV,
/// @brief Internal constant writer.
static void WriteConstantInt(std::ostream &Out, const Constant *CV,
bool PrintName,
std::map<const Type *, std::string> &TypeTable,
SlotMachine *Machine) {
@ -424,7 +424,7 @@ static void WriteConstantInt(std::ostream &Out, const Constant *CV,
Out << StrVal;
return;
}
// Otherwise we could not reparse it to exactly the same value, so we must
// output the string in hexadecimal format!
//
@ -445,7 +445,7 @@ static void WriteConstantInt(std::ostream &Out, const Constant *CV,
} else if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
// As a special case, print the array as a string if it is an array of
// ubytes or an array of sbytes with positive values.
//
//
const Type *ETy = CA->getType()->getElementType();
bool isString = (ETy == Type::SByteTy || ETy == Type::UByteTy);
@ -459,9 +459,9 @@ static void WriteConstantInt(std::ostream &Out, const Constant *CV,
if (isString) {
Out << "c\"";
for (unsigned i = 0; i < CA->getNumOperands(); ++i) {
unsigned char C =
unsigned char C =
(unsigned char)cast<ConstantInt>(CA->getOperand(i))->getRawValue();
if (isprint(C) && C != '"' && C != '\\') {
Out << C;
} else {
@ -509,7 +509,7 @@ static void WriteConstantInt(std::ostream &Out, const Constant *CV,
Out << " }";
} else if (const ConstantPacked *CP = dyn_cast<ConstantPacked>(CV)) {
const Type *ETy = CP->getType()->getElementType();
assert(CP->getNumOperands() > 0 &&
assert(CP->getNumOperands() > 0 &&
"Number of operands for a PackedConst must be > 0");
Out << '<';
Out << ' ';
@ -531,14 +531,14 @@ static void WriteConstantInt(std::ostream &Out, const Constant *CV,
} else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
Out << CE->getOpcodeName() << " (";
for (User::const_op_iterator OI=CE->op_begin(); OI != CE->op_end(); ++OI) {
printTypeInt(Out, (*OI)->getType(), TypeTable);
WriteAsOperandInternal(Out, *OI, PrintName, TypeTable, Machine);
if (OI+1 != CE->op_end())
Out << ", ";
}
if (CE->getOpcode() == Instruction::Cast) {
Out << " to ";
printTypeInt(Out, CE->getType(), TypeTable);
@ -555,7 +555,7 @@ static void WriteConstantInt(std::ostream &Out, const Constant *CV,
/// ostream. This can be useful when you just want to print int %reg126, not
/// the whole instruction that generated it.
///
static void WriteAsOperandInternal(std::ostream &Out, const Value *V,
static void WriteAsOperandInternal(std::ostream &Out, const Value *V,
bool PrintName,
std::map<const Type*, std::string> &TypeTable,
SlotMachine *Machine) {
@ -572,7 +572,7 @@ static void WriteAsOperandInternal(std::ostream &Out, const Value *V,
Slot = Machine->getSlot(V);
} else {
Machine = createSlotMachine(V);
if (Machine == 0)
if (Machine == 0)
Slot = Machine->getSlot(V);
else
Slot = -1;
@ -591,7 +591,7 @@ static void WriteAsOperandInternal(std::ostream &Out, const Value *V,
/// the whole instruction that generated it.
///
std::ostream &llvm::WriteAsOperand(std::ostream &Out, const Value *V,
bool PrintType, bool PrintName,
bool PrintType, bool PrintName,
const Module *Context) {
std::map<const Type *, std::string> TypeNames;
if (Context == 0) Context = getModuleFromVal(V);
@ -601,16 +601,16 @@ std::ostream &llvm::WriteAsOperand(std::ostream &Out, const Value *V,
if (PrintType)
printTypeInt(Out, V->getType(), TypeNames);
WriteAsOperandInternal(Out, V, PrintName, TypeNames, 0);
return Out;
}
/// WriteAsOperandInternal - Write the name of the specified value out to
/// the specified ostream. This can be useful when you just want to print
/// WriteAsOperandInternal - Write the name of the specified value out to
/// the specified ostream. This can be useful when you just want to print
/// int %reg126, not the whole instruction that generated it.
///
static void WriteAsOperandInternal(std::ostream &Out, const Type *T,
static void WriteAsOperandInternal(std::ostream &Out, const Type *T,
bool PrintName,
std::map<const Type*, std::string> &TypeTable,
SlotMachine *Machine) {
@ -632,7 +632,7 @@ static void WriteAsOperandInternal(std::ostream &Out, const Type *T,
/// the whole instruction that generated it.
///
std::ostream &llvm::WriteAsOperand(std::ostream &Out, const Type *Ty,
bool PrintType, bool PrintName,
bool PrintType, bool PrintName,
const Module *Context) {
std::map<const Type *, std::string> TypeNames;
assert(Context != 0 && "Can't write types as operand without module context");
@ -641,7 +641,7 @@ std::ostream &llvm::WriteAsOperand(std::ostream &Out, const Type *Ty,
// if (PrintType)
// printTypeInt(Out, V->getType(), TypeNames);
printTypeInt(Out, Ty, TypeNames);
WriteAsOperandInternal(Out, Ty, PrintName, TypeNames, 0);
@ -753,7 +753,7 @@ std::ostream &AssemblyWriter::printTypeAtLeastOneLevel(const Type *Ty) {
}
void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType,
void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType,
bool PrintName) {
if (Operand != 0) {
if (PrintType) { Out << ' '; printType(Operand->getType()); }
@ -766,7 +766,7 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType,
void AssemblyWriter::printModule(const Module *M) {
if (!M->getModuleIdentifier().empty() &&
// Don't print the ID if it will start a new line (which would
// Don't print the ID if it will start a new line (which would
// require a comment char before it).
M->getModuleIdentifier().find('\n') == std::string::npos)
Out << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
@ -783,7 +783,7 @@ void AssemblyWriter::printModule(const Module *M) {
}
if (!M->getTargetTriple().empty())
Out << "target triple = \"" << M->getTargetTriple() << "\"\n";
// Loop over the dependent libraries and emit them.
Module::lib_iterator LI = M->lib_begin();
Module::lib_iterator LE = M->lib_end();
@ -800,12 +800,12 @@ void AssemblyWriter::printModule(const Module *M) {
// Loop over the symbol table, emitting all named constants.
printSymbolTable(M->getSymbolTable());
for (Module::const_global_iterator I = M->global_begin(), E = M->global_end(); I != E; ++I)
printGlobal(I);
Out << "\nimplementation ; Functions:\n";
// Output all of the functions.
for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I)
printFunction(I);
@ -814,7 +814,7 @@ void AssemblyWriter::printModule(const Module *M) {
void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
if (GV->hasName()) Out << getLLVMName(GV->getName()) << " = ";
if (!GV->hasInitializer())
if (!GV->hasInitializer())
Out << "external ";
else
switch (GV->getLinkage()) {
@ -856,7 +856,7 @@ void AssemblyWriter::printSymbolTable(const SymbolTable &ST) {
//
printTypeAtLeastOneLevel(TI->second) << "\n";
}
// Print the constants, in type plane order.
for (SymbolTable::plane_const_iterator PI = ST.plane_begin();
PI != ST.plane_end(); ++PI ) {
@ -940,7 +940,7 @@ void AssemblyWriter::printFunction(const Function *F) {
Out << "\n";
} else {
Out << " {";
// Output all of its basic blocks... for the function
for (Function::const_iterator I = F->begin(), E = F->end(); I != E; ++I)
printBasicBlock(I);
@ -960,7 +960,7 @@ void AssemblyWriter::printArgument(const Argument *Arg) {
// Output type...
printType(Arg->getType());
// Output name, if available...
if (Arg->hasName())
Out << ' ' << getLLVMName(Arg->getName());
@ -987,7 +987,7 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) {
// Output predecessors for the block...
Out << "\t\t;";
pred_const_iterator PI = pred_begin(BB), PE = pred_end(BB);
if (PI == PE) {
Out << " No predecessors!";
} else {
@ -1000,7 +1000,7 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) {
}
}
}
Out << "\n";
if (AnnotationWriter) AnnotationWriter->emitBasicBlockStartAnnot(BB, Out);
@ -1080,7 +1080,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
for (unsigned op = 0, Eop = I.getNumOperands(); op < Eop; op += 2) {
if (op) Out << ", ";
Out << '[';
Out << '[';
writeOperand(I.getOperand(op ), false); Out << ',';
writeOperand(I.getOperand(op+1), false); Out << " ]";
}
@ -1096,7 +1096,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// and if the return type is not a pointer to a function.
//
if (!FTy->isVarArg() &&
(!isa<PointerType>(RetTy) ||
(!isa<PointerType>(RetTy) ||
!isa<FunctionType>(cast<PointerType>(RetTy)->getElementType()))) {
Out << ' '; printType(RetTy);
writeOperand(Operand, false);
@ -1121,7 +1121,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// and if the return type is not a pointer to a function.
//
if (!FTy->isVarArg() &&
(!isa<PointerType>(RetTy) ||
(!isa<PointerType>(RetTy) ||
!isa<FunctionType>(cast<PointerType>(RetTy)->getElementType()))) {
Out << ' '; printType(RetTy);
writeOperand(Operand, false);
@ -1162,7 +1162,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
printType(VAN->getArgType());
} else if (Operand) { // Print the normal way...
// PrintAllTypes - Instructions who have operands of all the same type
// PrintAllTypes - Instructions who have operands of all the same type
// omit the type from all but the first operand. If the instruction has
// different type operands (for example br), then they are all printed.
bool PrintAllTypes = false;
@ -1181,7 +1181,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
}
}
if (!PrintAllTypes) {
Out << ' ';
printType(TheType);
@ -1223,7 +1223,7 @@ void Function::print(std::ostream &o, AssemblyAnnotationWriter *AAW) const {
void BasicBlock::print(std::ostream &o, AssemblyAnnotationWriter *AAW) const {
SlotMachine SlotTable(getParent());
AssemblyWriter W(o, SlotTable,
AssemblyWriter W(o, SlotTable,
getParent() ? getParent()->getParent() : 0, AAW);
W.write(this);
}
@ -1245,7 +1245,7 @@ void Constant::print(std::ostream &o) const {
WriteConstantInt(o, this, false, TypeTable, 0);
}
void Type::print(std::ostream &o) const {
void Type::print(std::ostream &o) const {
if (this == 0)
o << "<null Type>";
else
@ -1294,7 +1294,7 @@ CachedWriter &CachedWriter::operator<<(const Value &V) {
AW->write(F);
else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(&V))
AW->write(GV);
else
else
AW->writeOperand(&V, true, true);
return *this;
}
@ -1321,7 +1321,7 @@ CachedWriter& CachedWriter::operator<<(const Type &Ty) {
// Module level constructor. Causes the contents of the Module (sans functions)
// to be added to the slot table.
SlotMachine::SlotMachine(const Module *M)
SlotMachine::SlotMachine(const Module *M)
: TheModule(M) ///< Saved for lazy initialization.
, TheFunction(0)
, FunctionProcessed(false)
@ -1334,7 +1334,7 @@ SlotMachine::SlotMachine(const Module *M)
// Function level constructor. Causes the contents of the Module and the one
// function provided to be added to the slot table.
SlotMachine::SlotMachine(const Function *F )
SlotMachine::SlotMachine(const Function *F )
: TheModule( F ? F->getParent() : 0 ) ///< Saved for lazy initialization
, TheFunction(F) ///< Saved for lazy initialization
, FunctionProcessed(false)
@ -1346,17 +1346,17 @@ SlotMachine::SlotMachine(const Function *F )
}
inline void SlotMachine::initialize(void) {
if ( TheModule) {
processModule();
if ( TheModule) {
processModule();
TheModule = 0; ///< Prevent re-processing next time we're called.
}
if ( TheFunction && ! FunctionProcessed) {
processFunction();
if ( TheFunction && ! FunctionProcessed) {
processFunction();
}
}
// Iterate through all the global variables, functions, and global
// variable initializers and create slots for them.
// variable initializers and create slots for them.
void SlotMachine::processModule() {
SC_DEBUG("begin processModule!\n");
@ -1379,14 +1379,14 @@ void SlotMachine::processFunction() {
SC_DEBUG("begin processFunction!\n");
// Add all the function arguments
for(Function::const_arg_iterator AI = TheFunction->arg_begin(),
for(Function::const_arg_iterator AI = TheFunction->arg_begin(),
AE = TheFunction->arg_end(); AI != AE; ++AI)
createSlot(AI);
SC_DEBUG("Inserting Instructions:\n");
// Add all of the basic blocks and instructions
for (Function::const_iterator BB = TheFunction->begin(),
for (Function::const_iterator BB = TheFunction->begin(),
E = TheFunction->end(); BB != E; ++BB) {
createSlot(BB);
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I) {
@ -1417,8 +1417,8 @@ void SlotMachine::purgeFunction() {
/// Types are forbidden because Type does not inherit from Value (any more).
int SlotMachine::getSlot(const Value *V) {
assert( V && "Can't get slot for null Value" );
assert(!isa<Constant>(V) || isa<GlobalValue>(V) &&
"Can't insert a non-GlobalValue Constant into SlotMachine");
assert(!isa<Constant>(V) || isa<GlobalValue>(V) &&
"Can't insert a non-GlobalValue Constant into SlotMachine");
// Check for uninitialized state and do lazy initialization
this->initialize();
@ -1445,7 +1445,7 @@ int SlotMachine::getSlot(const Value *V) {
if (MVI == MI->second.map.end()) return -1;
assert( MVI != MI->second.map.end() && "Value not found");
// We found it only at the module level
return MVI->second;
return MVI->second;
// else the value exists in the function map
} else {
@ -1489,10 +1489,10 @@ int SlotMachine::getSlot(const Type *Ty) {
if ( FTI == fTypes.map.end() ) {
TypeMap::const_iterator MTI = mTypes.map.find(Ty);
// If we didn't find it, it wasn't inserted
if (MTI == mTypes.map.end())
if (MTI == mTypes.map.end())
return -1;
// We found it only at the module level
return MTI->second;
return MTI->second;
// else the value exists in the function map
} else {
@ -1518,8 +1518,8 @@ int SlotMachine::getSlot(const Type *Ty) {
// of asserting when the Value* isn't found, it inserts the value.
unsigned SlotMachine::createSlot(const Value *V) {
assert( V && "Can't insert a null Value to SlotMachine");
assert(!isa<Constant>(V) || isa<GlobalValue>(V) &&
"Can't insert a non-GlobalValue Constant into SlotMachine");
assert(!isa<Constant>(V) || isa<GlobalValue>(V) &&
"Can't insert a non-GlobalValue Constant into SlotMachine");
const Type* VTy = V->getType();
@ -1587,7 +1587,7 @@ unsigned SlotMachine::createSlot(const Value *V) {
if ( MI != mMap.end() ) {
// Lookup the value in the module's map
ValueMap::const_iterator MVI = MI->second.map.find(V);
if ( MVI != MI->second.map.end() )
if ( MVI != MI->second.map.end() )
return MVI->second;
}
@ -1627,7 +1627,7 @@ unsigned SlotMachine::createSlot(const Type *Ty) {
// Lookup the type in the module's map
TypeMap::const_iterator MTI = mTypes.map.find(Ty);
if ( MTI != mTypes.map.end() )
if ( MTI != mTypes.map.end() )
return MTI->second;
return insertValue(Ty);
@ -1637,11 +1637,11 @@ unsigned SlotMachine::createSlot(const Type *Ty) {
// function is just for the convenience of createSlot (above).
unsigned SlotMachine::insertValue(const Value *V ) {
assert(V && "Can't insert a null Value into SlotMachine!");
assert(!isa<Constant>(V) || isa<GlobalValue>(V) &&
"Can't insert a non-GlobalValue Constant into SlotMachine");
assert(!isa<Constant>(V) || isa<GlobalValue>(V) &&
"Can't insert a non-GlobalValue Constant into SlotMachine");
// If this value does not contribute to a plane (is void)
// or if the value already has a name then ignore it.
// or if the value already has a name then ignore it.
if (V->getType() == Type::VoidTy || V->hasName() ) {
SC_DEBUG("ignored value " << *V << "\n");
return 0; // FIXME: Wrong return value
@ -1652,7 +1652,7 @@ unsigned SlotMachine::insertValue(const Value *V ) {
if ( TheFunction ) {
TypedPlanes::iterator I = fMap.find( VTy );
if ( I == fMap.end() )
if ( I == fMap.end() )
I = fMap.insert(std::make_pair(VTy,ValuePlane())).first;
DestSlot = I->second.map[V] = I->second.next_slot++;
} else {
@ -1662,10 +1662,10 @@ unsigned SlotMachine::insertValue(const Value *V ) {
DestSlot = I->second.map[V] = I->second.next_slot++;
}
SC_DEBUG(" Inserting value [" << VTy << "] = " << V << " slot=" <<
SC_DEBUG(" Inserting value [" << VTy << "] = " << V << " slot=" <<
DestSlot << " [");
// G = Global, C = Constant, T = Type, F = Function, o = other
SC_DEBUG((isa<GlobalVariable>(V) ? 'G' : (isa<Function>(V) ? 'F' :
SC_DEBUG((isa<GlobalVariable>(V) ? 'G' : (isa<Function>(V) ? 'F' :
(isa<Constant>(V) ? 'C' : 'o'))));
SC_DEBUG("]\n");
return DestSlot;
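The SlotMachine code above hands out per-type slot numbers through a plane holding a map and a next_slot counter, initialized lazily and discarded again by purgeFunction. A minimal standalone sketch of that numbering scheme (plain C++; strings stand in for Type* and Value*, and nothing here is the real class):

#include <iostream>
#include <map>
#include <string>

struct Plane {
  unsigned next_slot;                    // next number to hand out in this plane
  std::map<std::string, unsigned> map;   // value -> slot
  Plane() : next_slot(0) {}
};

// Assign (or look up) the slot for Val in the plane belonging to its type.
static unsigned createSlot(std::map<std::string, Plane> &Planes,
                           const std::string &Type, const std::string &Val) {
  Plane &P = Planes[Type];
  std::map<std::string, unsigned>::iterator It = P.map.find(Val);
  if (It != P.map.end())
    return It->second;                   // already numbered
  return P.map[Val] = P.next_slot++;     // lazily assign the next slot
}

int main() {
  std::map<std::string, Plane> FunctionPlanes;                    // per-function state
  std::cout << createSlot(FunctionPlanes, "int", "%a") << "\n";   // 0
  std::cout << createSlot(FunctionPlanes, "int", "%b") << "\n";   // 1
  std::cout << createSlot(FunctionPlanes, "bool", "%c") << "\n";  // 0, new plane
  FunctionPlanes.clear();   // rough analogue of purgeFunction()
  return 0;
}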

View File

@ -1,10 +1,10 @@
//===-- BasicBlock.cpp - Implement BasicBlock related methods -------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the BasicBlock class for the VMCore library.
@ -130,7 +130,7 @@ BasicBlock *BasicBlock::getSinglePredecessor() {
/// removePredecessor - This method is used to notify a BasicBlock that the
/// specified Predecessor of the block is no longer able to reach it. This is
/// actually not used to update the Predecessor list, but is actually used to
/// actually not used to update the Predecessor list, but is actually used to
/// update the PHI nodes that reside in the block. Note that this should be
/// called while the predecessor still refers to this block.
///
@ -153,9 +153,9 @@ void BasicBlock::removePredecessor(BasicBlock *Pred,
// br Loop ;; %x2 does not dominate all uses
//
// This is because the PHI node input is actually taken from the predecessor
// basic block. The only case this can happen is with a self loop, so we
// basic block. The only case this can happen is with a self loop, so we
// check for this case explicitly now.
//
//
unsigned max_idx = APN->getNumIncomingValues();
assert(max_idx != 0 && "PHI Node in block with 0 predecessors!?!?!");
if (max_idx == 2) {
@ -197,18 +197,18 @@ void BasicBlock::removePredecessor(BasicBlock *Pred,
/// splitBasicBlock - This splits a basic block into two at the specified
/// instruction. Note that all instructions BEFORE the specified iterator stay
/// as part of the original basic block, an unconditional branch is added to
/// as part of the original basic block, an unconditional branch is added to
/// the new BB, and the rest of the instructions in the BB are moved to the new
/// BB, including the old terminator. This invalidates the iterator.
///
/// Note that this only works on well formed basic blocks (must have a
/// Note that this only works on well formed basic blocks (must have a
/// terminator), and 'I' must not be the end of instruction list (which would
/// cause a degenerate basic block to be formed, having a terminator inside of
/// the basic block).
/// the basic block).
///
BasicBlock *BasicBlock::splitBasicBlock(iterator I, const std::string &BBName) {
assert(getTerminator() && "Can't use splitBasicBlock on degenerate BB!");
assert(I != InstList.end() &&
assert(I != InstList.end() &&
"Trying to get me to create degenerate basic block!");
BasicBlock *New = new BasicBlock(BBName, getParent(), getNext());

View File

@ -1,10 +1,10 @@
//===- ConstantFolding.cpp - LLVM constant folder -------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements folding of constants for LLVM. This implements the
@ -30,7 +30,7 @@ using namespace llvm;
namespace {
struct ConstRules {
ConstRules() {}
// Binary Operators...
virtual Constant *add(const Constant *V1, const Constant *V2) const = 0;
virtual Constant *sub(const Constant *V1, const Constant *V2) const = 0;
@ -59,7 +59,7 @@ namespace {
virtual Constant *castToDouble(const Constant *V) const = 0;
virtual Constant *castToPointer(const Constant *V,
const PointerType *Ty) const = 0;
// ConstRules::get - Return an instance of ConstRules for the specified
// constant operands.
//
@ -75,11 +75,11 @@ namespace {
// TemplateRules Class
//===----------------------------------------------------------------------===//
//
// TemplateRules - Implement a subclass of ConstRules that provides all
// operations as noops. All other rules classes inherit from this class so
// that if functionality is needed in the future, it can simply be added here
// TemplateRules - Implement a subclass of ConstRules that provides all
// operations as noops. All other rules classes inherit from this class so
// that if functionality is needed in the future, it can simply be added here
// and to ConstRules without changing anything else...
//
//
// This class also provides subclasses with typesafe implementations of methods
// so that they don't have to do type casting.
//
@ -90,41 +90,41 @@ class TemplateRules : public ConstRules {
// Redirecting functions that cast to the appropriate types
//===--------------------------------------------------------------------===//
virtual Constant *add(const Constant *V1, const Constant *V2) const {
return SubClassName::Add((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *add(const Constant *V1, const Constant *V2) const {
return SubClassName::Add((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *sub(const Constant *V1, const Constant *V2) const {
return SubClassName::Sub((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *sub(const Constant *V1, const Constant *V2) const {
return SubClassName::Sub((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *mul(const Constant *V1, const Constant *V2) const {
return SubClassName::Mul((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *mul(const Constant *V1, const Constant *V2) const {
return SubClassName::Mul((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *div(const Constant *V1, const Constant *V2) const {
return SubClassName::Div((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *div(const Constant *V1, const Constant *V2) const {
return SubClassName::Div((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *rem(const Constant *V1, const Constant *V2) const {
return SubClassName::Rem((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *rem(const Constant *V1, const Constant *V2) const {
return SubClassName::Rem((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *op_and(const Constant *V1, const Constant *V2) const {
return SubClassName::And((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *op_and(const Constant *V1, const Constant *V2) const {
return SubClassName::And((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *op_or(const Constant *V1, const Constant *V2) const {
return SubClassName::Or((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *op_or(const Constant *V1, const Constant *V2) const {
return SubClassName::Or((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *op_xor(const Constant *V1, const Constant *V2) const {
return SubClassName::Xor((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *op_xor(const Constant *V1, const Constant *V2) const {
return SubClassName::Xor((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *shl(const Constant *V1, const Constant *V2) const {
return SubClassName::Shl((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *shl(const Constant *V1, const Constant *V2) const {
return SubClassName::Shl((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *shr(const Constant *V1, const Constant *V2) const {
return SubClassName::Shr((const ArgType *)V1, (const ArgType *)V2);
virtual Constant *shr(const Constant *V1, const Constant *V2) const {
return SubClassName::Shr((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *lessthan(const Constant *V1, const Constant *V2) const {
virtual Constant *lessthan(const Constant *V1, const Constant *V2) const {
return SubClassName::LessThan((const ArgType *)V1, (const ArgType *)V2);
}
virtual Constant *equalto(const Constant *V1, const Constant *V2) const {
virtual Constant *equalto(const Constant *V1, const Constant *V2) const {
return SubClassName::EqualTo((const ArgType *)V1, (const ArgType *)V2);
}
@ -162,7 +162,7 @@ class TemplateRules : public ConstRules {
virtual Constant *castToDouble(const Constant *V) const {
return SubClassName::CastToDouble((const ArgType*)V);
}
virtual Constant *castToPointer(const Constant *V,
virtual Constant *castToPointer(const Constant *V,
const PointerType *Ty) const {
return SubClassName::CastToPointer((const ArgType*)V, Ty);
}
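Each redirecting method above casts the generic Constant operands once and forwards to a typed static method of the subclass, so the concrete rule classes never cast anything themselves. A self-contained toy version of that pattern (ints instead of Constants; all names invented for the illustration):

#include <iostream>

struct Rules {
  virtual ~Rules() {}
  virtual int add(const void *A, const void *B) const = 0;
};

// The redirecting layer: cast here, once, and dispatch to the subclass.
template <class ArgType, class SubClassName>
struct TemplateRulesDemo : public Rules {
  virtual int add(const void *A, const void *B) const {
    return SubClassName::Add((const ArgType *)A, (const ArgType *)B);
  }
};

// A concrete rules class only ever sees fully typed operands.
struct IntRules : public TemplateRulesDemo<int, IntRules> {
  static int Add(const int *A, const int *B) { return *A + *B; }
};

int main() {
  int X = 2, Y = 3;
  IntRules R;
  std::cout << R.add(&X, &Y) << "\n";   // prints 5
  return 0;
}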
@ -357,7 +357,7 @@ struct DirectRules : public TemplateRules<ConstantClass, SuperClass> {
static Constant *LessThan(const ConstantClass *V1, const ConstantClass *V2) {
bool R = (BuiltinType)V1->getValue() < (BuiltinType)V2->getValue();
return ConstantBool::get(R);
}
}
static Constant *EqualTo(const ConstantClass *V1, const ConstantClass *V2) {
bool R = (BuiltinType)V1->getValue() == (BuiltinType)V2->getValue();
@ -654,7 +654,7 @@ static int IdxCompare(Constant *C1, Constant *C2, const Type *ElTy) {
// ConstantExprs? If so, we can't do anything with them.
if (!isa<ConstantInt>(C1) || !isa<ConstantInt>(C2))
return -2; // don't know!
// Ok, we have two differing integer indices. Sign extend them to be the same
// type. Long is always big enough, so we use it.
C1 = ConstantExpr::getSignExtend(C1, Type::LongTy);
@ -798,7 +798,7 @@ static Instruction::BinaryOps evaluateRelation(const Constant *V1,
// same global. From this, we can precisely determine the relative
// ordering of the resultant pointers.
unsigned i = 1;
// Compare all of the operands the GEP's have in common.
gep_type_iterator GTI = gep_type_begin(CE1);
for (;i != CE1->getNumOperands() && i != CE2->getNumOperands();
@ -818,7 +818,7 @@ static Instruction::BinaryOps evaluateRelation(const Constant *V1,
return Instruction::SetGT;
else
return Instruction::BinaryOpsEnd; // Might be equal.
for (; i < CE2->getNumOperands(); ++i)
if (!CE2->getOperand(i)->isNullValue())
if (isa<ConstantIntegral>(CE2->getOperand(i)))
@ -829,7 +829,7 @@ static Instruction::BinaryOps evaluateRelation(const Constant *V1,
}
}
}
default:
break;
}
@ -910,7 +910,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
if (Opcode == Instruction::SetLT) return ConstantBool::False;
if (Opcode == Instruction::SetGT) return ConstantBool::True;
break;
case Instruction::SetNE:
// If we know that V1 != V2, we can only partially decide this relation.
if (Opcode == Instruction::SetEQ) return ConstantBool::False;
@ -1115,12 +1115,12 @@ Constant *llvm::ConstantFoldGetElementPtr(const Constant *C,
if (!Idx0->isNullValue()) {
const Type *IdxTy = Combined->getType();
if (IdxTy != Idx0->getType()) IdxTy = Type::LongTy;
Combined =
Combined =
ConstantExpr::get(Instruction::Add,
ConstantExpr::getCast(Idx0, IdxTy),
ConstantExpr::getCast(Combined, IdxTy));
}
NewIndices.push_back(Combined);
NewIndices.insert(NewIndices.end(), IdxList.begin()+1, IdxList.end());
return ConstantExpr::getGetElementPtr(CE->getOperand(0), NewIndices);
@ -1134,7 +1134,7 @@ Constant *llvm::ConstantFoldGetElementPtr(const Constant *C,
//
if (CE->getOpcode() == Instruction::Cast && IdxList.size() > 1 &&
Idx0->isNullValue())
if (const PointerType *SPT =
if (const PointerType *SPT =
dyn_cast<PointerType>(CE->getOperand(0)->getType()))
if (const ArrayType *SAT = dyn_cast<ArrayType>(SPT->getElementType()))
if (const ArrayType *CAT =

View File

@ -1,10 +1,10 @@
//===-- ConstantFolding.h - Internal Constant Folding Interface -*- C++ -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines the (internal) constant folding interfaces for LLVM. These
@ -25,7 +25,7 @@ namespace llvm {
class Value;
class Constant;
class Type;
// Constant fold various types of instruction...
Constant *ConstantFoldCastInstruction(const Constant *V, const Type *DestTy);
Constant *ConstantFoldSelectInstruction(const Constant *Cond,

View File

@ -1,10 +1,10 @@
//===-- ConstantRange.cpp - ConstantRange implementation ------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Represent a range of possible values that may occur when the program is run
@ -32,7 +32,7 @@ using namespace llvm;
static ConstantIntegral *Next(ConstantIntegral *CI) {
if (CI->getType() == Type::BoolTy)
return CI == ConstantBool::True ? ConstantBool::False : ConstantBool::True;
Constant *Result = ConstantExpr::getAdd(CI,
ConstantInt::get(CI->getType(), 1));
return cast<ConstantIntegral>(Result);
@ -84,7 +84,7 @@ ConstantRange::ConstantRange(Constant *L, Constant *U)
: Lower(cast<ConstantIntegral>(L)), Upper(cast<ConstantIntegral>(U)) {
assert(Lower->getType() == Upper->getType() &&
"Incompatible types for ConstantRange!");
// Make sure that if L & U are equal, they are either Min or Max...
assert((L != U || (L == ConstantIntegral::getMaxValue(L->getType()) ||
L == ConstantIntegral::getMinValue(L->getType()))) &&
@ -126,7 +126,7 @@ const Type *ConstantRange::getType() const { return Lower->getType(); }
bool ConstantRange::isFullSet() const {
return Lower == Upper && Lower == ConstantIntegral::getMaxValue(getType());
}
/// isEmptySet - Return true if this set contains no members.
///
bool ConstantRange::isEmptySet() const {
@ -140,7 +140,7 @@ bool ConstantRange::isWrappedSet() const {
return GT(Lower, Upper);
}
/// getSingleElement - If this set contains a single element, return it,
/// otherwise return null.
ConstantIntegral *ConstantRange::getSingleElement() const {
@ -158,7 +158,7 @@ uint64_t ConstantRange::getSetSize() const {
return 1;
return 2; // Must be full set...
}
// Simply subtract the bounds...
Constant *Result = ConstantExpr::getSub(Upper, Lower);
return cast<ConstantInt>(Result)->getRawValue();
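ConstantRange above is a half-open [Lower, Upper) set: equal bounds mean empty or full, Lower greater than Upper means the set wraps around, and getSetSize is just the modular difference of the bounds. A tiny standalone illustration with unsigned 8-bit values (plain C++, not the LLVM class, and ignoring the empty/full special cases):

#include <iostream>

typedef unsigned char uint8;

// Size of [Lower, Upper) in 8-bit modular arithmetic, mirroring
// getSetSize()'s "simply subtract the bounds".
static unsigned setSize(uint8 Lower, uint8 Upper) {
  return (uint8)(Upper - Lower);
}

// A range is "wrapped" when Lower > Upper, as in isWrappedSet().
static bool isWrapped(uint8 Lower, uint8 Upper) {
  return Lower > Upper;
}

int main() {
  std::cout << setSize(10, 20) << "\n";    // 10 elements: {10, ..., 19}
  std::cout << setSize(250, 5) << "\n";    // 11 elements: {250..255} and {0..4}
  std::cout << isWrapped(250, 5) << "\n";  // 1 (wrapped)
  return 0;
}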

View File

@ -1,10 +1,10 @@
//===-- Constants.cpp - Implement Constant nodes --------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the Constant* classes...
@ -108,7 +108,7 @@ Constant *Constant::getNullValue(const Type *Ty) {
return NullDouble;
}
case Type::PointerTyID:
case Type::PointerTyID:
return ConstantPointerNull::get(cast<PointerType>(Ty));
case Type::StructTyID:
@ -130,7 +130,7 @@ ConstantIntegral *ConstantIntegral::getMaxValue(const Type *Ty) {
case Type::ShortTyID:
case Type::IntTyID:
case Type::LongTyID: {
// Calculate 011111111111111...
// Calculate 011111111111111...
unsigned TypeBits = Ty->getPrimitiveSize()*8;
int64_t Val = INT64_MAX; // All ones
Val >>= 64-TypeBits; // Shift out unwanted 1 bits...
@ -154,7 +154,7 @@ ConstantIntegral *ConstantIntegral::getMinValue(const Type *Ty) {
case Type::ShortTyID:
case Type::IntTyID:
case Type::LongTyID: {
// Calculate 1111111111000000000000
// Calculate 1111111111000000000000
unsigned TypeBits = Ty->getPrimitiveSize()*8;
int64_t Val = -1; // All ones
Val <<= TypeBits-1; // Shift over to the right spot
@ -347,7 +347,7 @@ struct GetElementPtrConstantExpr : public ConstantExpr {
OperandList[i+1].init(IdxList[i], this);
}
~GetElementPtrConstantExpr() {
delete [] OperandList;
delete [] OperandList;
}
};
@ -489,7 +489,7 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
if (Val == From) Val = cast<Constant>(To);
Values.push_back(Val);
}
Constant *Replacement = ConstantArray::get(getType(), Values);
assert(Replacement != this && "I didn't contain From!");
@ -498,9 +498,9 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
uncheckedReplaceAllUsesWith(Replacement);
else
replaceAllUsesWith(Replacement);
// Delete the old constant!
destroyConstant();
destroyConstant();
}
void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
@ -514,7 +514,7 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
if (Val == From) Val = cast<Constant>(To);
Values.push_back(Val);
}
Constant *Replacement = ConstantStruct::get(getType(), Values);
assert(Replacement != this && "I didn't contain From!");
@ -523,7 +523,7 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
uncheckedReplaceAllUsesWith(Replacement);
else
replaceAllUsesWith(Replacement);
// Delete the old constant!
destroyConstant();
}
@ -539,7 +539,7 @@ void ConstantPacked::replaceUsesOfWithOnConstant(Value *From, Value *To,
if (Val == From) Val = cast<Constant>(To);
Values.push_back(Val);
}
Constant *Replacement = ConstantPacked::get(getType(), Values);
assert(Replacement != this && "I didn't contain From!");
@ -548,9 +548,9 @@ void ConstantPacked::replaceUsesOfWithOnConstant(Value *From, Value *To,
uncheckedReplaceAllUsesWith(Replacement);
else
replaceAllUsesWith(Replacement);
// Delete the old constant!
destroyConstant();
destroyConstant();
}
void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
@ -564,7 +564,7 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
Constant *Pointer = getOperand(0);
Indices.reserve(getNumOperands()-1);
if (Pointer == From) Pointer = To;
for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
Constant *Val = getOperand(i);
if (Val == From) Val = To;
@ -592,7 +592,7 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
assert(0 && "Unknown ConstantExpr type!");
return;
}
assert(Replacement != this && "I didn't contain From!");
// Everyone using this now uses the replacement...
@ -600,7 +600,7 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
uncheckedReplaceAllUsesWith(Replacement);
else
replaceAllUsesWith(Replacement);
// Delete the old constant!
destroyConstant();
}
@ -620,7 +620,7 @@ namespace llvm {
return new ConstantClass(Ty, V);
}
};
template<class ConstantClass, class TypeClass>
struct ConvertConstantType {
static void convert(ConstantClass *OldC, const TypeClass *NewTy) {
@ -683,7 +683,7 @@ namespace {
}
return Result;
}
void remove(ConstantClass *CP) {
MapIterator I = Map.find(MapKey((TypeClass*)CP->getRawType(),
getValType(CP)));
@ -708,14 +708,14 @@ namespace {
// Yes, we are removing the representative entry for this type.
// See if there are any other entries of the same type.
MapIterator TmpIt = ATMEntryIt;
// First check the entry before this one...
if (TmpIt != Map.begin()) {
--TmpIt;
if (TmpIt->first.first != Ty) // Not the same type, move back...
++TmpIt;
}
// If we didn't find the same type, try to move forward...
if (TmpIt == ATMEntryIt) {
++TmpIt;
@ -735,12 +735,12 @@ namespace {
}
}
}
Map.erase(I);
}
void refineAbstractType(const DerivedType *OldTy, const Type *NewTy) {
typename AbstractTypeMapTy::iterator I =
typename AbstractTypeMapTy::iterator I =
AbstractTypeMap.find(cast<TypeClass>(OldTy));
assert(I != AbstractTypeMap.end() &&
@ -995,14 +995,14 @@ namespace llvm {
C.push_back(cast<Constant>(OldC->getOperand(i)));
Constant *New = ConstantStruct::get(NewTy, C);
assert(New != OldC && "Didn't replace constant??");
OldC->uncheckedReplaceAllUsesWith(New);
OldC->destroyConstant(); // This constant is now dead, destroy it.
}
};
}
static ValueMap<std::vector<Constant*>, StructType,
static ValueMap<std::vector<Constant*>, StructType,
ConstantStruct> StructConstants;
static std::vector<Constant*> getValType(ConstantStruct *CS) {
@ -1197,9 +1197,9 @@ namespace llvm {
return new BinaryConstantExpr(V.first, V.second[0], V.second[1]);
if (V.first == Instruction::Select)
return new SelectConstantExpr(V.second[0], V.second[1], V.second[2]);
assert(V.first == Instruction::GetElementPtr && "Invalid ConstantExpr!");
std::vector<Constant*> IdxList(V.second.begin()+1, V.second.end());
return new GetElementPtrConstantExpr(V.second[0], IdxList, Ty);
}
@ -1230,12 +1230,12 @@ namespace llvm {
OldC->getOperand(1));
break;
case Instruction::GetElementPtr:
// Make everyone now use a constant of the new type...
// Make everyone now use a constant of the new type...
std::vector<Value*> Idx(OldC->op_begin()+1, OldC->op_end());
New = ConstantExpr::getGetElementPtrTy(NewTy, OldC->getOperand(0), Idx);
break;
}
assert(New != OldC && "Didn't replace constant??");
OldC->uncheckedReplaceAllUsesWith(New);
OldC->destroyConstant(); // This constant is now dead, destroy it.
@ -1333,7 +1333,7 @@ Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2) {
case Instruction::Mul: case Instruction::Div:
case Instruction::Rem:
assert(C1->getType() == C2->getType() && "Op types should be identical!");
assert((C1->getType()->isInteger() || C1->getType()->isFloatingPoint()) &&
assert((C1->getType()->isInteger() || C1->getType()->isFloatingPoint()) &&
"Tried to create an arithmetic operation on a non-arithmetic type!");
break;
case Instruction::And:
@ -1471,7 +1471,7 @@ void Constant::clearAllValueMaps() {
UndefValueConstants.clear(Constants);
ExprConstants.clear(Constants);
for (std::vector<Constant *>::iterator I = Constants.begin(),
for (std::vector<Constant *>::iterator I = Constants.begin(),
E = Constants.end(); I != E; ++I)
(*I)->dropAllReferences();
for (std::vector<Constant *>::iterator I = Constants.begin(),

View File

@ -1,10 +1,10 @@
//===- Dominators.cpp - Dominator Calculation -----------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements simple dominator construction algorithms for finding
@ -74,7 +74,7 @@ void ImmediateDominators::Compress(BasicBlock *V, InfoRec &VInfo) {
Compress(VAncestor, VAInfo);
BasicBlock *VAncestorLabel = VAInfo.Label;
BasicBlock *VAncestorLabel = VAInfo.Label;
BasicBlock *VLabel = VInfo.Label;
if (Info[VAncestorLabel].Semi < Info[VLabel].Semi)
VInfo.Label = VAncestorLabel;
@ -115,10 +115,10 @@ void ImmediateDominators::Link(BasicBlock *V, BasicBlock *W, InfoRec &WInfo){
unsigned WLabelSemi = Info[WLabel].Semi;
BasicBlock *S = W;
InfoRec *SInfo = &Info[S];
BasicBlock *SChild = SInfo->Child;
InfoRec *SChildInfo = &Info[SChild];
while (WLabelSemi < Info[SChildInfo->Label].Semi) {
BasicBlock *SChildChild = SChildInfo->Child;
if (SInfo->Size+Info[SChildChild].Size >= 2*SChildInfo->Size) {
@ -133,17 +133,17 @@ void ImmediateDominators::Link(BasicBlock *V, BasicBlock *W, InfoRec &WInfo){
SChildInfo = &Info[SChild];
}
}
InfoRec &VInfo = Info[V];
SInfo->Label = WLabel;
assert(V != W && "The optimization here will not work in this case!");
unsigned WSize = WInfo.Size;
unsigned VSize = (VInfo.Size += WSize);
if (VSize < 2*WSize)
std::swap(S, VInfo.Child);
while (S) {
SInfo = &Info[S];
SInfo->Ancestor = V;
@ -161,7 +161,7 @@ bool ImmediateDominators::runOnFunction(Function &F) {
Roots.push_back(Root);
Vertex.push_back(0);
// Step #1: Number blocks in depth-first order and initialize variables used
// in later stages of the algorithm.
unsigned N = 0;
@ -179,7 +179,7 @@ bool ImmediateDominators::runOnFunction(Function &F) {
if (SemiU < WInfo.Semi)
WInfo.Semi = SemiU;
}
Info[Vertex[WInfo.Semi]].Bucket.push_back(W);
BasicBlock *WParent = WInfo.Parent;
@ -240,11 +240,11 @@ B("domset", "Dominator Set Construction", true);
bool DominatorSetBase::dominates(Instruction *A, Instruction *B) const {
BasicBlock *BBA = A->getParent(), *BBB = B->getParent();
if (BBA != BBB) return dominates(BBA, BBB);
// Loop through the basic block until we find A or B.
BasicBlock::iterator I = BBA->begin();
for (; &*I != A && &*I != B; ++I) /*empty*/;
// A dominates B if it is found first in the basic block...
return &*I == A;
}
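For two instructions in the same block, the check above reduces to one linear scan: whichever of A or B is found first dominates the other. A standalone sketch of that first-found-wins scan (plain C++, with a vector of ints standing in for the instruction list):

#include <cassert>
#include <vector>

// Walk the block in order; A dominates B exactly when A is encountered first.
static bool dominatesInBlock(const std::vector<int> &Block, int A, int B) {
  for (unsigned i = 0, e = Block.size(); i != e; ++i) {
    if (Block[i] == A) return true;
    if (Block[i] == B) return false;
  }
  assert(0 && "Neither value found in the block!");
  return false;
}

int main() {
  std::vector<int> Block;
  Block.push_back(1); Block.push_back(2); Block.push_back(3); Block.push_back(4);
  assert(dominatesInBlock(Block, 2, 4));    // 2 appears before 4
  assert(!dominatesInBlock(Block, 4, 2));   // 4 does not dominate 2
  return 0;
}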
@ -275,8 +275,8 @@ bool DominatorSet::runOnFunction(Function &F) {
DomSetType &DS = Doms[I];
assert(DS.empty() && "Domset already filled in for this block?");
DS.insert(I); // Blocks always dominate themselves
// Insert all dominators into the set...
// Insert all dominators into the set...
while (IDom) {
// If we have already computed the dominator sets for our immediate
// dominator, just use it instead of walking all the way up to the root.
@ -333,7 +333,7 @@ E("domtree", "Dominator Tree Construction", true);
// DominatorTreeBase::reset - Free all of the tree node memory.
//
void DominatorTreeBase::reset() {
void DominatorTreeBase::reset() {
for (NodeMapType::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I)
delete I->second;
Nodes.clear();
@ -364,7 +364,7 @@ DominatorTreeBase::Node *DominatorTree::getNodeForBlock(BasicBlock *BB) {
// immediate dominator.
BasicBlock *IDom = getAnalysis<ImmediateDominators>()[BB];
Node *IDomNode = getNodeForBlock(IDom);
// Add a new tree node for this BasicBlock, and link it as a child of
// IDomNode
return BBNode = IDomNode->addChild(new Node(BB, IDomNode));
@ -403,7 +403,7 @@ static std::ostream &operator<<(std::ostream &o,
static void PrintDomTree(const DominatorTreeBase::Node *N, std::ostream &o,
unsigned Lev) {
o << std::string(2*Lev, ' ') << "[" << Lev << "] " << N;
for (DominatorTreeBase::Node::const_iterator I = N->begin(), E = N->end();
for (DominatorTreeBase::Node::const_iterator I = N->begin(), E = N->end();
I != E; ++I)
PrintDomTree(*I, o, Lev+1);
}
@ -423,7 +423,7 @@ static RegisterAnalysis<DominanceFrontier>
G("domfrontier", "Dominance Frontier Construction", true);
const DominanceFrontier::DomSetType &
DominanceFrontier::calculate(const DominatorTree &DT,
DominanceFrontier::calculate(const DominatorTree &DT,
const DominatorTree::Node *Node) {
// Loop over CFG successors to calculate DFlocal[Node]
BasicBlock *BB = Node->getBlock();

View File

@ -1,10 +1,10 @@
//===-- Function.cpp - Implement the Global object classes ----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the Function & GlobalVariable classes for the VMCore
@ -51,7 +51,7 @@ template class SymbolTableListTraits<BasicBlock, Function, Function>;
// Argument Implementation
//===----------------------------------------------------------------------===//
Argument::Argument(const Type *Ty, const std::string &Name, Function *Par)
Argument::Argument(const Type *Ty, const std::string &Name, Function *Par)
: Value(Ty, Value::ArgumentVal, Name) {
Parent = 0;
@ -125,7 +125,7 @@ bool Function::isVarArg() const {
return getFunctionType()->isVarArg();
}
const Type *Function::getReturnType() const {
const Type *Function::getReturnType() const {
return getFunctionType()->getReturnType();
}
@ -182,7 +182,7 @@ void Function::renameLocalSymbols() {
// 'delete' a whole class at a time, even though there may be circular
// references... first all references are dropped, and all use counts go to
// zero. Then everything is deleted for real. Note that no operations are
// valid on an object that has "dropped all references", except operator
// valid on an object that has "dropped all references", except operator
// delete.
//
void Function::dropAllReferences() {
@ -204,7 +204,7 @@ unsigned Function::getIntrinsicID() const {
return 0; // All intrinsics start with 'llvm.'
assert(getName().size() != 5 && "'llvm.' is an invalid intrinsic name!");
switch (getName()[5]) {
case 'd':
if (getName() == "llvm.dbg.stoppoint") return Intrinsic::dbg_stoppoint;
@ -233,8 +233,8 @@ unsigned Function::getIntrinsicID() const {
if (getName() == "llvm.memset") return Intrinsic::memset;
break;
case 'p':
if (getName() == "llvm.prefetch") return Intrinsic::prefetch;
if (getName() == "llvm.pcmarker") return Intrinsic::pcmarker;
if (getName() == "llvm.prefetch") return Intrinsic::prefetch;
if (getName() == "llvm.pcmarker") return Intrinsic::pcmarker;
break;
case 'r':
if (getName() == "llvm.returnaddress") return Intrinsic::returnaddress;
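Function::getIntrinsicID, shown in the hunks above, rejects any name that does not begin with "llvm." and then switches on the first character after the prefix so only a handful of full string comparisons are needed. A self-contained sketch of that dispatch pattern, with a made-up IntrinsicID enum and only the names visible in this hunk:

#include <string>

enum IntrinsicID { NotIntrinsic = 0, DbgStopPoint, Memset, Prefetch, PCMarker, ReturnAddress };

// Map an external name to an intrinsic ID.  Names that do not begin with
// "llvm." are never intrinsics; otherwise switch on the first character
// after the prefix to keep the number of string comparisons small.
IntrinsicID getIntrinsicID(const std::string &Name) {
  if (Name.size() <= 5 || Name.compare(0, 5, "llvm.") != 0)
    return NotIntrinsic;
  switch (Name[5]) {
  case 'd': if (Name == "llvm.dbg.stoppoint") return DbgStopPoint;   break;
  case 'm': if (Name == "llvm.memset")        return Memset;         break;
  case 'p': if (Name == "llvm.prefetch")      return Prefetch;
            if (Name == "llvm.pcmarker")      return PCMarker;       break;
  case 'r': if (Name == "llvm.returnaddress") return ReturnAddress;  break;
  }
  return NotIntrinsic;
}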

View File

@ -1,10 +1,10 @@
//===-- Globals.cpp - Implement the Global object classes -----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the GlobalValue & GlobalVariable classes for the VMCore
@ -23,7 +23,7 @@ using namespace llvm;
// GlobalValue Class
//===----------------------------------------------------------------------===//
/// This could be named "SafeToDestroyGlobalValue". It just makes sure that
/// This could be named "SafeToDestroyGlobalValue". It just makes sure that
/// there are no non-constant uses of this GlobalValue. If there aren't then
/// this and the transitive closure of the constants can be deleted. See the
/// destructor for details.
@ -32,7 +32,7 @@ static bool removeDeadConstantUsers(Constant* C) {
while (!C->use_empty())
if (Constant *User = dyn_cast<Constant>(C->use_back())) {
if (!removeDeadConstantUsers(User))
if (!removeDeadConstantUsers(User))
return false; // Constant wasn't dead
} else {
return false; // Non-constant usage;
@ -47,7 +47,7 @@ static bool removeDeadConstantUsers(Constant* C) {
/// that want to check to see if a global is unused, but don't want to deal
/// with potentially dead constants hanging off of the globals.
///
/// This function returns true if the global value is now dead. If all
/// This function returns true if the global value is now dead. If all
/// users of this global are not dead, this method may return false and
/// leave some of them around.
void GlobalValue::removeDeadConstantUsers() {
@ -61,7 +61,7 @@ void GlobalValue::removeDeadConstantUsers() {
}
}
/// Override destroyConstant to make sure it doesn't get called on
/// Override destroyConstant to make sure it doesn't get called on
/// GlobalValue's because they shouldn't be treated like other constants.
void GlobalValue::destroyConstant() {
assert(0 && "You can't GV->destroyConstant()!");
@ -111,7 +111,7 @@ void GlobalVariable::replaceUsesOfWithOnConstant(Value *From, Value *To,
bool DisableChecking) {
// If you call this, then you better know this GVar has a constant
// initializer worth replacing. Enforce that here.
assert(getNumOperands() == 1 &&
assert(getNumOperands() == 1 &&
"Attempt to replace uses of Constants on a GVar with no initializer");
// And, since you know it has an initializer, the From value better be
@ -122,7 +122,7 @@ void GlobalVariable::replaceUsesOfWithOnConstant(Value *From, Value *To,
// And, you better have a constant for the replacement value
assert(isa<Constant>(To) &&
"Attempt to replace GVar initializer with non-constant");
// Okay, preconditions out of the way, replace the constant initializer.
this->setOperand(0, cast<Constant>(To));
}
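The removeDeadConstantUsers helper near the top of this file's diff walks the users of a constant recursively: the global can only be considered dead if every transitive user is itself a constant with no live users. A simplified, standalone version of that check over a toy value graph; unlike the original it only answers the question and does not delete anything.

#include <vector>

// Toy value graph: each node knows its users and whether it is a "constant".
struct Node {
  bool IsConstant;
  std::vector<Node*> Users;
};

// Returns true if every transitive user of C is a constant that could itself
// be removed, mirroring the recursive walk in removeDeadConstantUsers above.
bool onlyDeadConstantUsers(const Node *C) {
  for (const Node *U : C->Users) {
    if (!U->IsConstant)
      return false;                   // non-constant usage keeps C alive
    if (!onlyDeadConstantUsers(U))
      return false;                   // some transitive user is live
  }
  return true;
}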

View File

@ -1,10 +1,10 @@
//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the VMCore library.
@ -74,7 +74,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case Invoke: return "invoke";
case Unwind: return "unwind";
case Unreachable: return "unreachable";
// Standard binary operators...
case Add: return "add";
case Sub: return "sub";
@ -94,7 +94,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case SetGT: return "setgt";
case SetEQ: return "seteq";
case SetNE: return "setne";
// Memory instructions...
case Malloc: return "malloc";
case Free: return "free";
@ -102,7 +102,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case Load: return "load";
case Store: return "store";
case GetElementPtr: return "getelementptr";
// Other instructions...
case PHI: return "phi";
case Cast: return "cast";
@ -115,7 +115,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
default: return "<Invalid operator> ";
}
return 0;
}
@ -172,7 +172,7 @@ bool Instruction::isCommutative(unsigned op) {
switch (op) {
case Add:
case Mul:
case And:
case And:
case Or:
case Xor:
case SetEQ:

View File

@ -1,10 +1,10 @@
//===-- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
@ -25,7 +25,7 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
TerminatorInst::TerminatorInst(Instruction::TermOps iType,
Use *Ops, unsigned NumOps, Instruction *IB)
Use *Ops, unsigned NumOps, Instruction *IB)
: Instruction(Type::VoidTy, iType, Ops, NumOps, "", IB) {
}
@ -104,7 +104,7 @@ void PHINode::resizeOperands(unsigned NumOps) {
} else if (NumOps == NumOperands) {
if (ReservedSpace == NumOps) return;
} else {
return;
return;
}
ReservedSpace = NumOps;
@ -132,10 +132,10 @@ void CallInst::init(Value *Func, const std::vector<Value*> &Params) {
Use *OL = OperandList = new Use[Params.size()+1];
OL[0].init(Func, this);
const FunctionType *FTy =
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
assert((Params.size() == FTy->getNumParams() ||
assert((Params.size() == FTy->getNumParams() ||
(FTy->isVarArg() && Params.size() > FTy->getNumParams())) &&
"Calling a function with bad signature");
for (unsigned i = 0, e = Params.size(); i != e; ++i)
@ -148,8 +148,8 @@ void CallInst::init(Value *Func, Value *Actual1, Value *Actual2) {
OL[0].init(Func, this);
OL[1].init(Actual1, this);
OL[2].init(Actual2, this);
const FunctionType *FTy =
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
assert((FTy->getNumParams() == 2 ||
@ -162,8 +162,8 @@ void CallInst::init(Value *Func, Value *Actual) {
Use *OL = OperandList = new Use[2];
OL[0].init(Func, this);
OL[1].init(Actual, this);
const FunctionType *FTy =
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
assert((FTy->getNumParams() == 1 ||
@ -175,23 +175,23 @@ void CallInst::init(Value *Func) {
NumOperands = 1;
Use *OL = OperandList = new Use[1];
OL[0].init(Func, this);
const FunctionType *MTy =
const FunctionType *MTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
assert(MTy->getNumParams() == 0 && "Calling a function with bad signature");
}
CallInst::CallInst(Value *Func, const std::vector<Value*> &Params,
const std::string &Name, Instruction *InsertBefore)
CallInst::CallInst(Value *Func, const std::vector<Value*> &Params,
const std::string &Name, Instruction *InsertBefore)
: Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
->getElementType())->getReturnType(),
Instruction::Call, 0, 0, Name, InsertBefore) {
init(Func, Params);
}
CallInst::CallInst(Value *Func, const std::vector<Value*> &Params,
const std::string &Name, BasicBlock *InsertAtEnd)
CallInst::CallInst(Value *Func, const std::vector<Value*> &Params,
const std::string &Name, BasicBlock *InsertAtEnd)
: Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
->getElementType())->getReturnType(),
Instruction::Call, 0, 0, Name, InsertAtEnd) {
@ -246,7 +246,7 @@ CallInst::CallInst(Value *Func, const std::string &Name,
init(Func);
}
CallInst::CallInst(const CallInst &CI)
CallInst::CallInst(const CallInst &CI)
: Instruction(CI.getType(), Instruction::Call, new Use[CI.getNumOperands()],
CI.getNumOperands()) {
Use *OL = OperandList;
@ -271,13 +271,13 @@ void InvokeInst::init(Value *Fn, BasicBlock *IfNormal, BasicBlock *IfException,
OL[0].init(Fn, this);
OL[1].init(IfNormal, this);
OL[2].init(IfException, this);
const FunctionType *FTy =
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType());
assert((Params.size() == FTy->getNumParams()) ||
assert((Params.size() == FTy->getNumParams()) ||
(FTy->isVarArg() && Params.size() > FTy->getNumParams()) &&
"Calling a function with bad signature");
for (unsigned i = 0, e = Params.size(); i != e; i++)
OL[i+3].init(Params[i], this);
}
@ -302,7 +302,7 @@ InvokeInst::InvokeInst(Value *Fn, BasicBlock *IfNormal,
init(Fn, IfNormal, IfException, Params);
}
InvokeInst::InvokeInst(const InvokeInst &II)
InvokeInst::InvokeInst(const InvokeInst &II)
: TerminatorInst(II.getType(), Instruction::Invoke,
new Use[II.getNumOperands()], II.getNumOperands()) {
Use *OL = OperandList, *InOL = II.OperandList;
@ -327,7 +327,7 @@ void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) {
void ReturnInst::init(Value *retVal) {
if (retVal && retVal->getType() != Type::VoidTy) {
assert(!isa<BasicBlock>(retVal) &&
assert(!isa<BasicBlock>(retVal) &&
"Cannot return basic block. Probably using the incorrect ctor");
NumOperands = 1;
RetVal.init(retVal, this);
@ -393,7 +393,7 @@ BasicBlock *UnreachableInst::getSuccessorV(unsigned idx) const {
void BranchInst::AssertOK() {
if (isConditional())
assert(getCondition()->getType() == Type::BoolTy &&
assert(getCondition()->getType() == Type::BoolTy &&
"May only branch on boolean predicates!");
}
@ -428,10 +428,10 @@ static Value *getAISize(Value *Amt) {
else
assert(Amt->getType() == Type::UIntTy &&
"Malloc/Allocation array size != UIntTy!");
return Amt;
return Amt;
}
AllocationInst::AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy,
AllocationInst::AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy,
const std::string &Name,
Instruction *InsertBefore)
: UnaryInstruction(PointerType::get(Ty), iTy, getAISize(ArraySize),
@ -439,7 +439,7 @@ AllocationInst::AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy,
assert(Ty != Type::VoidTy && "Cannot allocate void!");
}
AllocationInst::AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy,
AllocationInst::AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy,
const std::string &Name,
BasicBlock *InsertAtEnd)
: UnaryInstruction(PointerType::get(Ty), iTy, getAISize(ArraySize),
@ -492,7 +492,7 @@ FreeInst::FreeInst(Value *Ptr, BasicBlock *InsertAtEnd)
//===----------------------------------------------------------------------===//
void LoadInst::AssertOK() {
assert(isa<PointerType>(getOperand(0)->getType()) &&
assert(isa<PointerType>(getOperand(0)->getType()) &&
"Ptr must have pointer type.");
}
@ -556,7 +556,7 @@ StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Instruction *InsertBefore)
: Instruction(Type::VoidTy, Store, Ops, 2, "", InsertBefore) {
Ops[0].init(val, this);
@ -565,7 +565,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
BasicBlock *InsertAtEnd)
: Instruction(Type::VoidTy, Store, Ops, 2, "", InsertAtEnd) {
Ops[0].init(val, this);
@ -642,10 +642,10 @@ GetElementPtrInst::~GetElementPtrInst() {
// getIndexedType - Returns the type of the element that would be loaded with
// a load instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// A null type is returned if the indices are invalid for the specified
// pointer type.
//
const Type* GetElementPtrInst::getIndexedType(const Type *Ptr,
const Type* GetElementPtrInst::getIndexedType(const Type *Ptr,
const std::vector<Value*> &Idx,
bool AllowCompositeLeaf) {
if (!isa<PointerType>(Ptr)) return 0; // Type isn't a pointer type!
@ -657,7 +657,7 @@ const Type* GetElementPtrInst::getIndexedType(const Type *Ptr,
return cast<PointerType>(Ptr)->getElementType();
else
return 0;
unsigned CurIdx = 0;
while (const CompositeType *CT = dyn_cast<CompositeType>(Ptr)) {
if (Idx.size() == CurIdx) {
@ -682,7 +682,7 @@ const Type* GetElementPtrInst::getIndexedType(const Type *Ptr,
return CurIdx == Idx.size() ? Ptr : 0;
}
const Type* GetElementPtrInst::getIndexedType(const Type *Ptr,
const Type* GetElementPtrInst::getIndexedType(const Type *Ptr,
Value *Idx0, Value *Idx1,
bool AllowCompositeLeaf) {
const PointerType *PTy = dyn_cast<PointerType>(Ptr);
@ -716,9 +716,9 @@ void BinaryOperator::init(BinaryOps iType)
case Rem:
assert(getType() == LHS->getType() &&
"Arithmetic operation should return same type as operands!");
assert((getType()->isInteger() ||
getType()->isFloatingPoint() ||
isa<PackedType>(getType()) ) &&
assert((getType()->isInteger() ||
getType()->isFloatingPoint() ||
isa<PackedType>(getType()) ) &&
"Tried to create an arithmetic operation on a non-arithmetic type!");
break;
case And: case Or:
@ -870,7 +870,7 @@ bool BinaryOperator::swapOperands() {
// SetCondInst Class
//===----------------------------------------------------------------------===//
SetCondInst::SetCondInst(BinaryOps Opcode, Value *S1, Value *S2,
SetCondInst::SetCondInst(BinaryOps Opcode, Value *S1, Value *S2,
const std::string &Name, Instruction *InsertBefore)
: BinaryOperator(Opcode, S1, S2, Type::BoolTy, Name, InsertBefore) {
@ -878,7 +878,7 @@ SetCondInst::SetCondInst(BinaryOps Opcode, Value *S1, Value *S2,
assert(getInverseCondition(Opcode));
}
SetCondInst::SetCondInst(BinaryOps Opcode, Value *S1, Value *S2,
SetCondInst::SetCondInst(BinaryOps Opcode, Value *S1, Value *S2,
const std::string &Name, BasicBlock *InsertAtEnd)
: BinaryOperator(Opcode, S1, S2, Type::BoolTy, Name, InsertAtEnd) {
@ -931,7 +931,7 @@ void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumCases) {
OperandList[1].init(Default, this);
}
SwitchInst::SwitchInst(const SwitchInst &SI)
SwitchInst::SwitchInst(const SwitchInst &SI)
: TerminatorInst(Instruction::Switch, new Use[SI.getNumOperands()],
SI.getNumOperands()) {
Use *OL = OperandList, *InOL = SI.OperandList;
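GetElementPtrInst::getIndexedType, in the hunks above, answers which type a sequence of indices reaches when applied to a pointer, returning null when an index is invalid. A rough standalone sketch of that walk over a toy type model; the real code also handles sized and abstract types and composite leaves, and the empty-index case is simplified here.

#include <cstddef>
#include <vector>

// Toy type model: just enough structure to show how successive indices peel
// one level of a composite type at a time.
struct Type {
  enum Kind { Int, Pointer, Array, Struct } K;
  std::vector<const Type*> Contained;   // pointee, element, or fields
};

// Returns the type reached by applying Idx to a pointer-typed value, or
// nullptr if some index does not name a contained type.
const Type *getIndexedType(const Type *Ptr, const std::vector<size_t> &Idx) {
  if (Ptr->K != Type::Pointer || Ptr->Contained.empty())
    return nullptr;                         // must start from a well-formed pointer
  if (Idx.empty())
    return nullptr;                         // simplified: require at least one index
  const Type *Cur = Ptr->Contained[0];      // the first index only steps over the pointer
  for (size_t i = 1; i < Idx.size(); ++i) {
    if (Cur->K == Type::Struct) {
      if (Idx[i] >= Cur->Contained.size()) return nullptr;   // no such field
      Cur = Cur->Contained[Idx[i]];
    } else if (Cur->K == Type::Array) {
      if (Cur->Contained.empty()) return nullptr;
      Cur = Cur->Contained[0];              // any index selects the element type
    } else {
      return nullptr;                       // ran out of structure to index into
    }
  }
  return Cur;
}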

View File

@ -1,10 +1,10 @@
//===-- LeakDetector.cpp - Implement LeakDetector interface ---------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the LeakDetector class.

View File

@ -1,10 +1,10 @@
//===-- Mangler.cpp - Self-contained c/asm llvm name mangler --------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Unified name mangler for CWriter and assembly backends.
@ -27,10 +27,10 @@ static std::string MangleLetter(unsigned char C) {
/// makeNameProper - We don't want identifier names non-C-identifier characters
/// in them, so mangle them as appropriate.
///
///
std::string Mangler::makeNameProper(const std::string &X) {
std::string Result;
// Mangle the first letter specially, don't allow numbers...
if ((X[0] < 'a' || X[0] > 'z') && (X[0] < 'A' || X[0] > 'Z') && X[0] != '_')
Result += MangleLetter(X[0]);
@ -85,7 +85,7 @@ std::string Mangler::getValueName(const Value *V) {
} else {
name = "ltmp_" + utostr(Count++) + "_" + utostr(getTypeID(V->getType()));
}
Memo[V] = name;
return name;
}
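Mangler::makeNameProper, above, keeps names safe for the C and assembly backends by escaping the first character when it is not a letter or underscore and escaping any later character outside the C identifier set. A standalone sketch of that strategy; the "_<hex>_" escape format is an assumption for illustration, not necessarily what MangleLetter actually emits.

#include <cstdio>
#include <string>

// Hypothetical escape: a character becomes "_<hex>_".
static std::string mangleChar(unsigned char C) {
  char Buf[8];
  std::snprintf(Buf, sizeof(Buf), "_%02x_", (unsigned)C);
  return Buf;
}

static bool isCIdentChar(unsigned char C, bool First) {
  if ((C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') || C == '_') return true;
  return !First && C >= '0' && C <= '9';    // digits are fine except in position 0
}

// Escape the first character if it is not a letter or '_', and escape every
// later character outside [A-Za-z0-9_].
std::string makeNameProper(const std::string &X) {
  std::string Result;
  for (size_t i = 0; i < X.size(); ++i) {
    unsigned char C = static_cast<unsigned char>(X[i]);
    Result += isCIdentChar(C, i == 0) ? std::string(1, X[i]) : mangleChar(C);
  }
  return Result;
}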

View File

@ -1,10 +1,10 @@
//===-- Module.cpp - Implement the Module class ---------------------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the Module class for the VMCore library.
@ -174,7 +174,7 @@ Function *Module::getMainFunction() {
if (Function *F = getFunction("main", FunctionType::get(Type::IntTy,
Params, false)))
return F;
// void main(int argc, char **argv)...
if (Function *F = getFunction("main", FunctionType::get(Type::VoidTy,
Params, false)))
@ -212,7 +212,7 @@ Function *Module::getNamedFunction(const std::string &Name) {
/// have the top-level PointerType, which represents the address of the
/// global.
///
GlobalVariable *Module::getGlobalVariable(const std::string &Name,
GlobalVariable *Module::getGlobalVariable(const std::string &Name,
const Type *Ty) {
if (Value *V = getSymbolTable().lookup(PointerType::get(Ty), Name)) {
GlobalVariable *Result = cast<GlobalVariable>(V);
@ -237,7 +237,7 @@ bool Module::addTypeName(const std::string &Name, const Type *Ty) {
SymbolTable &ST = getSymbolTable();
if (ST.lookupType(Name)) return true; // Already in symtab...
// Not in symbol table? Set the name with the Symtab as an argument so the
// type knows what to update...
ST.insert(Name, Ty);
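Module::addTypeName, in the hunk above, only binds a type name if the symbol table does not already have an entry for it, and reports whether the name was taken. The same insert-if-absent pattern in a self-contained form over a plain std::map:

#include <map>
#include <string>

struct Type;  // opaque stand-in for llvm::Type

// Refuse to rebind a name that is already in the table, otherwise record the
// mapping.  Returns true if the name was already taken, as the original does.
bool addTypeName(std::map<std::string, const Type*> &Table,
                 const std::string &Name, const Type *Ty) {
  if (Table.count(Name))
    return true;                 // already in the "symtab", leave it alone
  Table[Name] = Ty;
  return false;
}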

View File

@ -1,10 +1,10 @@
//===-- ModuleProvider.cpp - Base implementation for module providers -----===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// Minimal implementation of the abstract interface for providing a module.

View File

@ -1,10 +1,10 @@
//===- Pass.cpp - LLVM Pass Infrastructure Implementation -----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the LLVM Pass infrastructure. It is primarily
@ -90,12 +90,12 @@ bool PassManager::run(Module &M) { return PM->runOnModule(M); }
// is a simple Pimpl class that wraps the PassManagerT template. It
// is like PassManager, but only deals in FunctionPasses.
//
FunctionPassManager::FunctionPassManager(ModuleProvider *P) :
FunctionPassManager::FunctionPassManager(ModuleProvider *P) :
PM(new PassManagerT<Function>()), MP(P) {}
FunctionPassManager::~FunctionPassManager() { delete PM; }
void FunctionPassManager::add(FunctionPass *P) { PM->add(P); }
void FunctionPassManager::add(ImmutablePass *IP) { PM->add(IP); }
bool FunctionPassManager::run(Function &F) {
bool FunctionPassManager::run(Function &F) {
try {
MP->materializeFunction(&F);
} catch (std::string& errstr) {
@ -149,7 +149,7 @@ void PMDebug::PrintArgumentInformation(const Pass *P) {
void PMDebug::PrintPassInformation(unsigned Depth, const char *Action,
Pass *P, Module *M) {
if (PassDebugging >= Executions) {
std::cerr << (void*)P << std::string(Depth*2+1, ' ') << Action << " '"
std::cerr << (void*)P << std::string(Depth*2+1, ' ') << Action << " '"
<< P->getPassName();
if (M) std::cerr << "' on Module '" << M->getModuleIdentifier() << "'\n";
std::cerr << "'...\n";
@ -159,7 +159,7 @@ void PMDebug::PrintPassInformation(unsigned Depth, const char *Action,
void PMDebug::PrintPassInformation(unsigned Depth, const char *Action,
Pass *P, Function *F) {
if (PassDebugging >= Executions) {
std::cerr << (void*)P << std::string(Depth*2+1, ' ') << Action << " '"
std::cerr << (void*)P << std::string(Depth*2+1, ' ') << Action << " '"
<< P->getPassName();
if (F) std::cerr << "' on Function '" << F->getName();
std::cerr << "'...\n";
@ -169,7 +169,7 @@ void PMDebug::PrintPassInformation(unsigned Depth, const char *Action,
void PMDebug::PrintPassInformation(unsigned Depth, const char *Action,
Pass *P, BasicBlock *BB) {
if (PassDebugging >= Executions) {
std::cerr << (void*)P << std::string(Depth*2+1, ' ') << Action << " '"
std::cerr << (void*)P << std::string(Depth*2+1, ' ') << Action << " '"
<< P->getPassName();
if (BB) std::cerr << "' on BasicBlock '" << BB->getName();
std::cerr << "'...\n";
@ -244,11 +244,11 @@ void ImmutablePass::addToPassManager(PassManagerT<Module> *PM,
//
bool FunctionPass::runOnModule(Module &M) {
bool Changed = doInitialization(M);
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
if (!I->isExternal()) // Passes are not run on external functions!
Changed |= runOnFunction(*I);
return Changed | doFinalization(M);
}
@ -441,7 +441,7 @@ RegisterAGBase::~RegisterAGBase() {
if (AGI.DefaultImpl == ImplementationInfo)
AGI.DefaultImpl = 0;
AGI.Implementations.erase(ImplementationInfo);
// Last member of this analysis group? Unregister PassInfo, delete map entry
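Earlier in this file's diff, FunctionPass::runOnModule shows the standard per-function driver: run doInitialization once, visit every function that is not a bare external declaration, then OR in the result of doFinalization. A standalone skeleton of that loop with toy Module and Function types:

#include <string>
#include <vector>

struct Function { std::string Name; bool IsExternal; };
struct Module { std::vector<Function> Funcs; };

// Initialize once, run on every function that has a body, then finalize,
// OR-ing all of the "changed" bits together.
struct ToyFunctionPass {
  virtual ~ToyFunctionPass() {}
  virtual bool doInitialization(Module &) { return false; }
  virtual bool runOnFunction(Function &F) = 0;
  virtual bool doFinalization(Module &) { return false; }

  bool runOnModule(Module &M) {
    bool Changed = doInitialization(M);
    for (Function &F : M.Funcs)
      if (!F.IsExternal)                    // external declarations have no body to transform
        Changed |= runOnFunction(F);
    return Changed | doFinalization(M);     // finalization always runs, hence '|' not '||'
  }
};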

View File

@ -1,10 +1,10 @@
//===- PassManagerT.h - Container for Passes --------------------*- C++ -*-===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file defines the PassManagerT class. This class is used to hold,
@ -131,7 +131,7 @@ template<class UnitType> class PassManagerTraits; // Do not define.
//===----------------------------------------------------------------------===//
// PassManagerT - Container object for passes. The PassManagerT destructor
// deletes all passes contained inside of the PassManagerT, so you shouldn't
// deletes all passes contained inside of the PassManagerT, so you shouldn't
// delete passes manually, and all passes should be dynamically allocated.
//
template<typename UnitType>
@ -147,7 +147,7 @@ class PassManagerT : public PassManagerTraits<UnitType>,public AnalysisResolver{
friend SubPassClass;
#else
friend class PassManagerTraits<UnitType>::PassClass;
friend class PassManagerTraits<UnitType>::SubPassClass;
friend class PassManagerTraits<UnitType>::SubPassClass;
#endif
friend class PassManagerTraits<UnitType>;
friend class ImmutablePass;
@ -219,7 +219,7 @@ public:
// Run all of the passes
for (unsigned i = 0, e = Passes.size(); i < e; ++i) {
PassClass *P = Passes[i];
PMDebug::PrintPassInformation(getDepth(), "Executing Pass", P, M);
// Get information about what analyses the pass uses...
@ -236,7 +236,7 @@ public:
P->AnalysisImpls.clear();
P->AnalysisImpls.reserve(AnUsage.getRequiredSet().size());
for (std::vector<const PassInfo *>::const_iterator
I = AnUsage.getRequiredSet().begin(),
I = AnUsage.getRequiredSet().begin(),
E = AnUsage.getRequiredSet().end(); I != E; ++I) {
Pass *Impl = getAnalysisOrNullUp(*I);
if (Impl == 0) {
@ -353,7 +353,7 @@ public:
const PassInfo *IPID = ImmutablePasses[i]->getPassInfo();
if (IPID == ID)
return ImmutablePasses[i];
// This pass is the current implementation of all of the interfaces it
// implements as well.
//
@ -420,14 +420,14 @@ public:
if (Parent) {
Parent->markPassUsed(P, this);
} else {
assert(getAnalysisOrNullUp(P) &&
assert(getAnalysisOrNullUp(P) &&
dynamic_cast<ImmutablePass*>(getAnalysisOrNullUp(P)) &&
"Pass available but not found! "
"Perhaps this is a module pass requiring a function pass?");
}
}
}
// Return the number of parent PassManagers that exist
virtual unsigned getDepth() const {
if (Parent == 0) return 0;
@ -514,8 +514,8 @@ private:
//const std::vector<AnalysisID> &ProvidedSet = AnUsage.getProvidedSet();
if (Batcher /*&& ProvidedSet.empty()*/)
closeBatcher(); // This pass cannot be batched!
// Set the Resolver instance variable in the Pass so that it knows where to
// Set the Resolver instance variable in the Pass so that it knows where to
// find this object...
//
setAnalysisResolver(P, this);
@ -559,7 +559,7 @@ private:
// For now assume that our results are never used...
LastUseOf[P] = P;
}
// For FunctionPass subclasses, we must be sure to batch the FunctionPass's
// together in a BatcherClass object so that all of the analyses are run
// together a function at a time.
@ -588,7 +588,7 @@ public:
return;
}
// Set the Resolver instance variable in the Pass so that it knows where to
// Set the Resolver instance variable in the Pass so that it knows where to
// find this object...
//
setAnalysisResolver(IP, this);
@ -601,7 +601,7 @@ public:
//
IP->AnalysisImpls.clear();
IP->AnalysisImpls.reserve(AU.getRequiredSet().size());
for (std::vector<const PassInfo *>::const_iterator
for (std::vector<const PassInfo *>::const_iterator
I = AU.getRequiredSet().begin(),
E = AU.getRequiredSet().end(); I != E; ++I) {
Pass *Impl = getAnalysisOrNullUp(*I);
@ -616,7 +616,7 @@ public:
}
IP->AnalysisImpls.push_back(std::make_pair(*I, Impl));
}
// Initialize the immutable pass...
IP->initializePass();
}
@ -642,7 +642,7 @@ public:
typedef PassManagerT<Module> SubPassClass;
// BatcherClass - The type to use for collation of subtypes... This class is
// never instantiated for the PassManager<BasicBlock>, but it must be an
// never instantiated for the PassManager<BasicBlock>, but it must be an
// instance of PassClass to typecheck.
//
typedef PassClass BatcherClass;
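The PassManagerT comment above documents the ownership rule: passes are heap-allocated, handed to the manager, and deleted only by the manager's destructor. A minimal container that models exactly that rule, using raw owning pointers as the 2005-era code does, so copying is disabled:

#include <vector>

struct Pass { virtual ~Pass() {} virtual void run() = 0; };

// The container takes heap-allocated passes and is the only thing that ever
// deletes them, so callers must not delete passes manually.
class PassContainer {
  std::vector<Pass*> Passes;
public:
  PassContainer() = default;
  PassContainer(const PassContainer&) = delete;            // raw owning pointers: no copies
  PassContainer &operator=(const PassContainer&) = delete;

  void add(Pass *P) { Passes.push_back(P); }               // takes ownership
  void runAll() { for (Pass *P : Passes) P->run(); }
  ~PassContainer() {
    for (Pass *P : Passes) delete P;
  }
};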

View File

@ -1,11 +1,11 @@
//===-- SymbolTable.cpp - Implement the SymbolTable class -----------------===//
//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and revised by Reid
// Spencer. It is distributed under the University of Illinois Open Source
// Spencer. It is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//
//===----------------------------------------------------------------------===//
//
// This file implements the SymbolTable class for the VMCore library.
@ -31,7 +31,7 @@ SymbolTable::~SymbolTable() {
cast<DerivedType>(TI->second)->removeAbstractTypeUser(this);
}
// TODO: FIXME: BIG ONE: This doesn't unreference abstract types for the
// TODO: FIXME: BIG ONE: This doesn't unreference abstract types for the
// planes that could still have entries!
#ifndef NDEBUG // Only do this in -g mode...
@ -45,7 +45,7 @@ SymbolTable::~SymbolTable() {
LeftoverValues = false;
}
}
assert(LeftoverValues && "Values remain in symbol table!");
#endif
}
@ -194,7 +194,7 @@ void SymbolTable::insertEntry(const std::string &Name, const Type *VTy,
#if DEBUG_SYMBOL_TABLE
dump();
std::cerr << " Inserting definition: " << Name << ": "
std::cerr << " Inserting definition: " << Name << ": "
<< VTy->getDescription() << "\n";
#endif
@ -243,7 +243,7 @@ void SymbolTable::insert(const std::string& Name, const Type* T) {
#if DEBUG_SYMBOL_TABLE
dump();
std::cerr << " Inserting type: " << UniqueName << ": "
std::cerr << " Inserting type: " << UniqueName << ": "
<< T->getDescription() << "\n";
#endif
@ -282,7 +282,7 @@ bool SymbolTable::strip() {
remove(TI++);
RemovedSymbol = true;
}
return RemovedSymbol;
}
@ -299,7 +299,7 @@ void SymbolTable::refineAbstractType(const DerivedType *OldType,
plane_iterator NewTypeIt = pmap.find(NewType);
if (NewTypeIt == pmap.end()) { // If no plane exists, add one
NewTypeIt = pmap.insert(make_pair(NewType, ValueMap())).first;
if (NewType->isAbstract()) {
cast<DerivedType>(NewType)->addAbstractTypeUser(this);
#if DEBUG_ABSTYPE
@ -338,7 +338,7 @@ void SymbolTable::refineAbstractType(const DerivedType *OldType,
// Ok we have two external global values. Make all uses of the new
// one use the old one...
NewGV->uncheckedReplaceAllUsesWith(ExistGV);
// Update NewGV's name, we're about the remove it from the symbol
// table.
NewGV->Name = "";
@ -387,7 +387,7 @@ void SymbolTable::refineAbstractType(const DerivedType *OldType,
std::cerr << "Removing type " << OldType->getDescription() << "\n";
#endif
OldType->removeAbstractTypeUser(this);
I->second = (Type*)NewType; // TODO FIXME when types aren't const
if (NewType->isAbstract()) {
#if DEBUG_ABSTYPE

Some files were not shown because too many files have changed in this diff.