
Propagate the AlignStack bit in InlineAsm's to the PrologEpilog code, and
use it to determine whether the asm forces stack alignment or not.  gcc
consistently does not do this for GCC-style asms; Apple gcc inconsistently
sometimes does it for asm blocks.  There is no convenient place to put a bit
in either the SDNode or the MachineInstr form, so I've added an extra operand
to each; unlovely, but it does allow for expansion for more bits, should we
need it.  PR 5125.  Some existing testcases are affected.

The operand lists of the SDNode and MachineInstr forms are indexed with
awesome mnemonics, like "2"; I may fix this someday, but not now.  I'm not
making it any worse.  If anyone is inspired I think you can find all the
right places from this patch.

llvm-svn: 107506
Dale Johannesen 2010-07-02 20:16:09 +00:00
parent 6d64b42774
commit df6db93a4d
12 changed files with 60 additions and 17 deletions
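
After this patch, the INLINEASM MachineInstr carries the AlignStack bit as
operand 1, right after the asm string, and PrologEpilogInserter (below) reads
it with getOperand(1).getImm(). A minimal sketch of that consumer-side check;
the helper name is hypothetical and not part of the patch:

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// Hypothetical helper illustrating the new MachineInstr operand layout for
// inline asm: operand 0 is the asm string, operand 1 is the AlignStack
// immediate added by this patch, and the flag/value groups follow.
static bool inlineAsmWantsAlignedStack(const MachineInstr *MI) {
  if (!MI->isInlineAsm())
    return false;
  return MI->getOperand(1).getImm() != 0;
}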

@@ -154,7 +154,8 @@ public:
     Op_InputChain = 0,
     Op_AsmString = 1,
     Op_MDNode = 2,
-    Op_FirstOperand = 3,
+    Op_IsAlignStack = 3,
+    Op_FirstOperand = 4,

     Kind_RegUse = 1,
     Kind_RegDef = 2,
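
With the new Op_IsAlignStack slot, the fixed leading operands of an
ISD::INLINEASM node are the input chain (0), the asm string (1), the !srcloc
metadata (2), and the AlignStack bit (3), with the flag/value groups starting
at Op_FirstOperand. A minimal sketch of reading the bit by name, mirroring the
InstrEmitter change further down; the helper itself is hypothetical:

#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Hypothetical helper: pull the AlignStack bit off an ISD::INLINEASM node
// using the named constant instead of a bare index.
static bool nodeIsAlignStack(const SDNode *N) {
  return cast<ConstantSDNode>(
             N->getOperand(InlineAsm::Op_IsAlignStack))->getZExtValue() != 0;
}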

@@ -279,7 +279,7 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
       // Okay, we finally have a value number.  Ask the target to print this
       // operand!
       if (CurVariant == -1 || CurVariant == AsmPrinterVariant) {
-        unsigned OpNo = 1;
+        unsigned OpNo = 2;
         bool Error = false;

@@ -881,14 +881,14 @@ int MachineInstr::findFirstPredOperandIdx() const {
 bool MachineInstr::
 isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
   if (isInlineAsm()) {
-    assert(DefOpIdx >= 2);
+    assert(DefOpIdx >= 3);
     const MachineOperand &MO = getOperand(DefOpIdx);
     if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0)
       return false;
     // Determine the actual operand index that corresponds to this index.
     unsigned DefNo = 0;
     unsigned DefPart = 0;
-    for (unsigned i = 1, e = getNumOperands(); i < e; ) {
+    for (unsigned i = 2, e = getNumOperands(); i < e; ) {
       const MachineOperand &FMO = getOperand(i);
       // After the normal asm operands there may be additional imp-def regs.
       if (!FMO.isImm())
@@ -903,7 +903,7 @@ isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
       }
      ++DefNo;
    }
-    for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
+    for (unsigned i = 2, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &FMO = getOperand(i);
      if (!FMO.isImm())
        continue;
@@ -946,7 +946,7 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
    // Find the flag operand corresponding to UseOpIdx
    unsigned FlagIdx, NumOps=0;
-    for (FlagIdx = 1; FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
+    for (FlagIdx = 2; FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
      const MachineOperand &UFMO = getOperand(FlagIdx);
      // After the normal asm operands there may be additional imp-def regs.
      if (!UFMO.isImm())
@@ -964,9 +964,9 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
    if (!DefOpIdx)
      return true;
-    unsigned DefIdx = 1;
-    // Remember to adjust the index. First operand is asm string, then there
-    // is a flag for each.
+    unsigned DefIdx = 2;
+    // Remember to adjust the index. First operand is asm string, second is
+    // the AlignStack bit, then there is a flag for each.
    while (DefNo) {
      const MachineOperand &FMO = getOperand(DefIdx);
      assert(FMO.isImm());

@@ -158,9 +158,9 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
-        // An InlineAsm might be a call; assume it is to get the stack frame
-        // aligned correctly for calls.
-        AdjustsStack = true;
+        // Some inline asm's need a stack frame, as indicated by operand 1.
+        if (I->getOperand(1).getImm())
+          AdjustsStack = true;
      }

  MFI->setAdjustsStack(AdjustsStack);

@@ -834,6 +834,12 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
     const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
     MI->addOperand(MachineOperand::CreateES(AsmStr));

+    // Add the isAlignStack bit.
+    int64_t isAlignStack =
+      cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_IsAlignStack))->
+                          getZExtValue();
+    MI->addOperand(MachineOperand::CreateImm(isAlignStack));
+
     // Add all of the operand registers to the instruction.
     for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
       unsigned Flags =

@@ -5454,6 +5454,10 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
   const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));

+  // Remember the AlignStack bit as operand 3.
+  AsmNodeOperands.push_back(DAG.getTargetConstant(IA->isAlignStack() ? 1 : 0,
+                                                  MVT::i1));
+
   // Loop over all of the inputs, copying the operand values into the
   // appropriate registers and processing the output regs.
   RegsForValue RetValRegs;
@@ -5642,7 +5646,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
   }

   // Finish up input operands.  Set the input chain and add the flag last.
-  AsmNodeOperands[0] = Chain;
+  AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
   Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
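
The IA->isAlignStack() query above reads the alignstack flag that is set when
the IR-level InlineAsm value is created, which is what the `alignstack`
keyword in the tests below turns into. A rough sketch of that construction,
assuming the contemporaneous (2010-era) header layout and C++ API; this
snippet is illustrative and not part of the patch:

#include "llvm/InlineAsm.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include <vector>
using namespace llvm;

// Build a "void ()" inline asm value with the alignstack flag set; this is
// the bit that SelectionDAGBuilder forwards as INLINEASM operand 3.
static InlineAsm *makeAlignStackAsm(LLVMContext &Ctx) {
  std::vector<const Type *> NoArgs;
  const FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Ctx), NoArgs, /*isVarArg=*/false);
  return InlineAsm::get(FTy, "int $$3", "~{dirflag},~{fpsr},~{flags}",
                        /*hasSideEffects=*/true, /*isAlignStack=*/true);
}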

@@ -1089,6 +1089,7 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
   Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
   Ops.push_back(InOps[InlineAsm::Op_AsmString]);  // 1
   Ops.push_back(InOps[InlineAsm::Op_MDNode]);     // 2, !srcloc
+  Ops.push_back(InOps[InlineAsm::Op_IsAlignStack]);  // 3

   unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
   if (InOps[e-1].getValueType() == MVT::Flag)

@@ -5,6 +5,6 @@ define void @t() nounwind {
 ; CHECK: t:
 ; CHECK: push {r7}
 entry:
-  call void asm sideeffect ".long 0xe7ffdefe", ""() nounwind
+  call void asm sideeffect alignstack ".long 0xe7ffdefe", ""() nounwind
   ret void
 }

@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -disable-fp-elim -stats |& grep asm-printer | grep 83
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -disable-fp-elim -stats |& grep asm-printer | grep 82
 ; rdar://6802189
 ; Test if linearscan is unfavoring registers for allocation to allow more reuse

@@ -0,0 +1,31 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
+
+define void @foo() nounwind ssp {
+entry:
+; CHECK: foo
+; CHECK: subq $8, %rsp
+; CHECK: int $3
+  call void asm sideeffect alignstack "# top of block", "~{dirflag},~{fpsr},~{flags},~{edi},~{esi},~{edx},~{ecx},~{eax}"() nounwind
+  call void asm sideeffect alignstack ".file \22small.c\22", "~{dirflag},~{fpsr},~{flags}"() nounwind
+  call void asm sideeffect alignstack ".line 3", "~{dirflag},~{fpsr},~{flags}"() nounwind
+  call void asm sideeffect alignstack "int $$3", "~{dirflag},~{fpsr},~{flags},~{memory}"() nounwind
+  br label %return
+
+return:                                       ; preds = %entry
+  ret void
+}
+
+define void @bar() nounwind ssp {
+entry:
+; CHECK: bar
+; CHECK-NOT: subq $8, %rsp
+; CHECK: int $3
+  call void asm sideeffect "# top of block", "~{dirflag},~{fpsr},~{flags},~{edi},~{esi},~{edx},~{ecx},~{eax}"() nounwind
+  call void asm sideeffect ".file \22small.c\22", "~{dirflag},~{fpsr},~{flags}"() nounwind
+  call void asm sideeffect ".line 3", "~{dirflag},~{fpsr},~{flags}"() nounwind
+  call void asm sideeffect "int $$3", "~{dirflag},~{fpsr},~{flags},~{memory}"() nounwind
+  br label %return
+
+return:                                       ; preds = %entry
+  ret void
+}

@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 -O0 -regalloc=linearscan | grep {movl %edx, 12(%esp)} | count 2
+; RUN: llc < %s -mtriple=i386-apple-darwin9 -O0 -regalloc=linearscan | grep {movl %edx, 4(%esp)} | count 2
 ; rdar://6992609

 target triple = "i386-apple-darwin9.0"

@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=x86-64 -o %t
 ; RUN: not grep inc %t
 ; RUN: grep dec %t | count 2
-; RUN: grep addq %t | count 13
+; RUN: grep addq %t | count 12
 ; RUN: not grep addb %t
 ; RUN: not grep leaq %t
 ; RUN: not grep leal %t