teach the new heuristic how to handle inline asm.

llvm-svn: 60088
commit 1bfd6e8709 (parent c17a1f06f3)
Author: Chris Lattner
Date:   2008-11-26 04:59:11 +00:00


@@ -899,12 +899,56 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
   return false;
 }
 
+/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
+/// inline asm call are due to memory operands.  If so, return true, otherwise
+/// return false.
+static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
+                                    const TargetLowering &TLI) {
+  std::vector<InlineAsm::ConstraintInfo>
+    Constraints = IA->ParseConstraints();
+
+  unsigned ArgNo = 1;   // ArgNo - The operand of the CallInst.
+  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
+    TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);
+
+    // Compute the value type for each operand.
+    switch (OpInfo.Type) {
+    case InlineAsm::isOutput:
+      if (OpInfo.isIndirect)
+        OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
+      break;
+    case InlineAsm::isInput:
+      OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
+      break;
+    case InlineAsm::isClobber:
+      // Nothing to do.
+      break;
+    }
+
+    // Compute the constraint code and ConstraintType to use.
+    TLI.ComputeConstraintToUse(OpInfo, SDValue(),
+                             OpInfo.ConstraintType == TargetLowering::C_Memory);
+
+    // If this asm operand is our Value*, and if it isn't an indirect memory
+    // operand, we can't fold it!
+    if (OpInfo.CallOperandVal == OpVal &&
+        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
+         !OpInfo.isIndirect))
+      return false;
+  }
+
+  return true;
+}
+
 /// FindAllMemoryUses - Recursively walk all the uses of I until we find a
 /// memory use.  If we find an obviously non-foldable instruction, return true.
 /// Add the ultimately found memory instructions to MemoryUses.
 static bool FindAllMemoryUses(Instruction *I,
                 SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
-                              SmallPtrSet<Instruction*, 16> &ConsideredInsts) {
+                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
+                              const TargetLowering &TLI) {
   // If we already considered this instruction, we're done.
   if (!ConsideredInsts.insert(I))
     return false;

@@ -930,14 +974,15 @@ static bool FindAllMemoryUses(Instruction *I,
     if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
       InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
       if (IA == 0) return true;
-      // FIXME: HANDLE MEM OPS
-      //MemoryUses.push_back(std::make_pair(CI, UI.getOperandNo()));
-      return true;
+      // If this is a memory operand, we're cool, otherwise bail out.
+      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
+        return true;
+      continue;
     }
 
-    if (FindAllMemoryUses(cast<Instruction>(*UI), MemoryUses, ConsideredInsts))
+    if (FindAllMemoryUses(cast<Instruction>(*UI), MemoryUses, ConsideredInsts,
+                          TLI))
       return true;
   }

@@ -1039,7 +1084,7 @@ IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
   // uses.
   SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
   SmallPtrSet<Instruction*, 16> ConsideredInsts;
-  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts))
+  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
     return false;  // Has a non-memory, non-foldable use!
 
   // Now that we know that all uses of this instruction are part of a chain of
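
For context (not part of the commit itself): the heuristic being taught here is IsProfitableToFoldIntoAddressingMode, which declines to fold an address computation into a memory instruction's addressing mode if the address also has uses that are not memory accesses. Previously, any use by an inline asm call made FindAllMemoryUses bail out; with the new IsOperandAMemoryOperand helper, an indirect memory-constrained asm operand now counts as an ordinary memory use. A minimal sketch of the kind of source pattern affected, using GCC-style inline asm; the function and names below are illustrative only, not a test case from the commit:

// Illustrative only: the address of a[i] is used both by an ordinary load and
// by an "m"-constrained (indirect memory) inline asm operand.  With this
// change, the asm use is classified as a memory use rather than an unfoldable
// one, so the address computation can still be considered for folding into
// the load's addressing mode.
int touch_and_read(int *a, long i) {
  int v = a[i];                               // ordinary memory use of &a[i]
  __asm__ volatile("incl %0" : "+m"(a[i]));   // indirect memory operand
  return v;
}

One detail worth noting in the new helper: ArgNo starts at 1 because, in the CallInst API of this era, operand 0 of the call is the inline asm value being called, so the asm's actual arguments begin at operand 1.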