
Tidy up. Whitespace and 80 column.

llvm-svn: 127721
Jim Grosbach 2011-03-16 01:21:55 +00:00
parent f0148aca2e
commit c68c99f640
2 changed files with 20 additions and 17 deletions

View File

@@ -42,7 +42,7 @@ public:
FunctionPassManager &getPM(const MutexGuard &L) {
return PM;
}
Module *getModule() const { return M; }
std::vector<AssertingVH<Function> > &getPendingFunctions(const MutexGuard &L){
return PendingFunctions;
@@ -86,7 +86,7 @@ public:
static void Register() {
JITCtor = createJIT;
}
/// getJITInfo - Return the target JIT information structure.
///
TargetJITInfo &getJITInfo() const { return TJI; }
@@ -106,7 +106,7 @@ public:
}
virtual void addModule(Module *M);
/// removeModule - Remove a Module from the list of modules. Returns true if
/// M is found.
virtual bool removeModule(Module *M);
@@ -146,7 +146,7 @@ public:
/// getPointerToBasicBlock - This returns the address of the specified basic
/// block, assuming function is compiled.
void *getPointerToBasicBlock(BasicBlock *BB);
/// getOrEmitGlobalVariable - Return the address of the specified global
/// variable, possibly emitting it to memory if needed. This is used by the
/// Emitter.
@@ -172,7 +172,7 @@ public:
void freeMachineCodeForFunction(Function *F);
/// addPendingFunction - while jitting non-lazily, a called but non-codegen'd
- /// function was encountered. Add it to a pending list to be processed after
+ /// function was encountered. Add it to a pending list to be processed after
/// the current function.
///
void addPendingFunction(Function *F);
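
The addPendingFunction comment above describes a deferred-compilation pattern: while emitting one function non-lazily, a callee that has no code yet is queued instead of being compiled recursively, and the queue is drained once the current function is done. Below is a minimal standalone sketch of that pattern; the Compiler class and its members are illustrative stand-ins, not the JIT's actual interface (which keeps a mutex-guarded std::vector<AssertingVH<Function> >).

#include <iostream>
#include <set>
#include <string>
#include <vector>

// Toy stand-in for the emitter; all names here are made up for illustration.
class Compiler {
  std::set<std::string> Compiled;    // functions that already have code
  std::vector<std::string> Pending;  // called but not yet codegen'd

public:
  // Record a call to a function that may not have been emitted yet.
  void addPendingFunction(const std::string &Callee) {
    if (!Compiled.count(Callee))
      Pending.push_back(Callee);     // defer; don't recurse mid-emission
  }

  // Emit one function, then process whatever it queued up.
  void compile(const std::string &Name) {
    emitBody(Name);
    drainPending();
  }

private:
  void emitBody(const std::string &Name) {
    std::cout << "emitting " << Name << "\n";
    Compiled.insert(Name);
    if (Name == "main")              // emitting main discovers a call
      addPendingFunction("helper");
  }

  void drainPending() {
    while (!Pending.empty()) {       // draining may queue more work
      std::string Next = Pending.back();
      Pending.pop_back();
      if (!Compiled.count(Next))
        emitBody(Next);
    }
  }
};

int main() {
  Compiler C;
  C.compile("main");                 // emits main, then the queued helper
}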

View File

@@ -123,8 +123,8 @@ namespace {
return FunctionToLazyStubMap;
}
- GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap(const MutexGuard& locked) {
-   assert(locked.holds(TheJIT->lock));
+ GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap(const MutexGuard& lck) {
+   assert(lck.holds(TheJIT->lock));
return GlobalToIndirectSymMap;
}
@@ -132,8 +132,9 @@
const MutexGuard &locked, void *CallSite) const {
assert(locked.holds(TheJIT->lock));
- // The address given to us for the stub may not be exactly right, it might be
- // a little bit after the stub. As such, use upper_bound to find it.
+ // The address given to us for the stub may not be exactly right, it
+ // might be a little bit after the stub. As such, use upper_bound to
+ // find it.
CallSiteToFunctionMapTy::const_iterator I =
CallSiteToFunctionMap.upper_bound(CallSite);
assert(I != CallSiteToFunctionMap.begin() &&
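
The rewrapped comment above relies on a standard ordered-map trick: when the query address may point a little past the start of the entry it belongs to, upper_bound returns the first entry that starts strictly after the query, so the element immediately before it is the one that covers the address. Here is a self-contained sketch of the idiom on a plain std::map; the addresses and names are made up.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main() {
  // Start address of each stub, mapped to the function it was made for.
  std::map<std::uintptr_t, std::string> CallSiteToFunction = {
      {0x1000, "foo"}, {0x1040, "bar"}, {0x1080, "baz"}};

  // The address handed to the resolver may be slightly past the start of
  // the stub (e.g. the return address of the call inside it).
  std::uintptr_t Query = 0x1048;

  // upper_bound finds the first stub starting strictly after Query; the
  // entry just before it is the stub that actually contains Query.
  auto I = CallSiteToFunction.upper_bound(Query);
  assert(I != CallSiteToFunction.begin() && "address before the first stub");
  --I;

  std::cout << std::hex << "0x" << Query << " belongs to the stub for "
            << I->second << " at 0x" << I->first << "\n";  // -> bar at 0x1040
}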
@@ -659,7 +660,8 @@ void *JITResolver::JITCompilerFn(void *Stub) {
// If lazy compilation is disabled, emit a useful error message and abort.
if (!JR->TheJIT->isCompilingLazily()) {
report_fatal_error("LLVM JIT requested to do lazy compilation of function '"
report_fatal_error("LLVM JIT requested to do lazy compilation of"
" function '"
+ F->getName() + "' when lazy compiles are disabled!");
}
@@ -745,7 +747,7 @@ void *JITEmitter::getPointerToGVIndirectSym(GlobalValue *V, void *Reference) {
void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {
if (DL.isUnknown()) return;
if (!BeforePrintingInsn) return;
const LLVMContext &Context = EmissionDetails.MF->getFunction()->getContext();
if (DL.getScope(Context) != 0 && PrevDL != DL) {
@@ -781,7 +783,7 @@ void JITEmitter::startFunction(MachineFunction &F) {
uintptr_t ActualSize = 0;
// Set the memory writable, if it's not already
MemMgr->setMemoryWritable();
if (SizeEstimate > 0) {
// SizeEstimate will be non-zero on reallocation attempts.
ActualSize = SizeEstimate;
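
The SizeEstimate comment above points at a retry-on-overflow pattern: the first emission attempt takes a default-sized block of memory, and only if the code does not fit is the function emitted again with an explicit size estimate driving the allocation. A rough sketch of that control flow follows; the buffer and emit helpers are made up and do not reflect the real JITMemoryManager interface.

#include <cstddef>
#include <iostream>
#include <vector>

// Pretend emission: fails if the function needs more room than Buf provides.
static bool emitFunction(std::size_t NeededBytes,
                         std::vector<unsigned char> &Buf) {
  return NeededBytes <= Buf.size();
}

int main() {
  const std::size_t Needed = 4096;  // how much the emitted code really needs
  std::size_t SizeEstimate = 0;     // zero on the first attempt

  std::vector<unsigned char> Buf;
  for (int Attempt = 0; Attempt < 2; ++Attempt) {
    // SizeEstimate is non-zero only on a reallocation attempt; the first
    // try just takes a default-sized block.
    std::size_t Alloc = SizeEstimate ? SizeEstimate : 512;
    Buf.assign(Alloc, 0);

    if (emitFunction(Needed, Buf)) {
      std::cout << "emitted into a " << Alloc << "-byte block\n";
      break;
    }
    SizeEstimate = Needed;          // remember the size for the retry
    std::cout << "did not fit; retrying with estimate " << SizeEstimate << "\n";
  }
}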
@@ -859,7 +861,8 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
} else if (MR.isBasicBlock()) {
ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
} else if (MR.isConstantPoolIndex()) {
- ResultPtr = (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
+ ResultPtr =
+   (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
} else {
assert(MR.isJumpTableIndex());
ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
@@ -1130,7 +1133,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
if (JT.empty() || JumpTableBase == 0) return;
switch (MJTI->getEntryKind()) {
case MachineJumpTableInfo::EK_Inline:
return;
@@ -1139,11 +1142,11 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
// .word LBB123
assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == sizeof(void*) &&
"Cross JIT'ing?");
// For each jump table, map each target in the jump table to the address of
// an emitted MachineBasicBlock.
intptr_t *SlotPtr = (intptr_t*)JumpTableBase;
for (unsigned i = 0, e = JT.size(); i != e; ++i) {
const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
// Store the address of the basic block for this jump table slot in the
@@ -1153,7 +1156,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
}
break;
}
case MachineJumpTableInfo::EK_Custom32:
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
case MachineJumpTableInfo::EK_LabelDifference32: {