
Make X86 call and return instructions non-variadic.

Function argument and return value registers aren't part of the
encoding, so they should be implicit operands.

llvm-svn: 159728
Jakob Stoklund Olesen, 2012-07-04 23:53:27 +00:00
commit 6edf66ffe8, parent e8399d2b3a
4 changed files with 38 additions and 38 deletions
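The change follows one pattern across all four files: drop variable_ops from the instruction definitions, and instead attach the registers a call uses for arguments (and defines for return values) to the MachineInstr as implicit operands when the call is built. A minimal sketch of that idiom follows, assuming a MachineInstrBuilder for the call; the helper name and register-list parameters are illustrative, not from this commit:

// A minimal sketch (not from this commit): how argument and return-value
// registers end up on a call as implicit operands once the instruction
// definition itself is no longer variadic.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

static void addCallRegOperands(MachineInstrBuilder &MIB,
                               const uint32_t *CallPreservedMask,
                               ArrayRef<unsigned> ArgRegs,   // hypothetical
                               ArrayRef<unsigned> RetRegs) { // hypothetical
  // Call-preserved registers are described by a register mask operand.
  MIB.addRegMask(CallPreservedMask);
  // Argument registers are not part of the call's encoding, so they are
  // implicit uses rather than explicit (variable_ops) operands.
  for (unsigned Reg : ArgRegs)
    MIB.addReg(Reg, RegState::Implicit);
  // Registers carrying return values are implicit defs of the call.
  for (unsigned Reg : RetRegs)
    MIB.addReg(Reg, RegState::ImplicitDefine);
}

In the X86FastISel hunk below, RegArgs plays the role of the argument-register list, and the defs for return values are added later by setPhysRegsDeadExcept(), as the in-code comment notes.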


@@ -1838,21 +1838,21 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
     MIB.addGlobalAddress(GV, 0, OpFlags);
   }
-  // Add an implicit use GOT pointer in EBX.
-  if (Subtarget->isPICStyleGOT())
-    MIB.addReg(X86::EBX);
-  if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64())
-    MIB.addReg(X86::AL);
-  // Add implicit physical register uses to the call.
-  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
-    MIB.addReg(RegArgs[i]);
   // Add a register mask with the call-preserved registers.
   // Proper defs for return values will be added by setPhysRegsDeadExcept().
   MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
+  // Add an implicit use GOT pointer in EBX.
+  if (Subtarget->isPICStyleGOT())
+    MIB.addReg(X86::EBX, RegState::Implicit);
+  if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64())
+    MIB.addReg(X86::AL, RegState::Implicit);
+  // Add implicit physical register uses to the call.
+  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
+    MIB.addReg(RegArgs[i], RegState::Implicit);
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
   unsigned NumBytesCallee = 0;


@@ -1124,8 +1124,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
     }
     MachineInstr *NewMI = prior(MBBI);
-    for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
-      NewMI->addOperand(MBBI->getOperand(i));
+    NewMI->copyImplicitOps(MBBI);
     // Delete the pseudo instruction TCRETURN.
     MBB.erase(MBBI);
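The removed loop copied every operand of the TCRETURN pseudo past its two fixed operands (the target and the stack-adjustment offset) onto the rebuilt return or jump instruction; copyImplicitOps does that job by name. A rough sketch of the intended effect, assuming the helper carries over exactly the implicit register operands (an approximation for illustration, not the actual LLVM implementation):

#include "llvm/CodeGen/MachineInstr.h"

// Approximate effect of NewMI->copyImplicitOps(MBBI): copy the implicit
// register operands that follow the instruction's fixed (explicit) operands.
static void copyImplicitOpsSketch(llvm::MachineInstr &NewMI,
                                  const llvm::MachineInstr &OldMI) {
  for (unsigned i = OldMI.getDesc().getNumOperands(),
                e = OldMI.getNumOperands(); i != e; ++i) {
    const llvm::MachineOperand &MO = OldMI.getOperand(i);
    if (MO.isReg() && MO.isImplicit())
      NewMI.addOperand(MO);
  }
}

Since the TCRETURN pseudos now declare only $dst and $offset as explicit ins, everything beyond them is exactly the implicit register operands added when the tail call was formed.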


@@ -12344,8 +12344,9 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI, MachineBasicBlock *BB,
     BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
       .addReg(sizeVReg);
     BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
-      .addExternalSymbol("__morestack_allocate_stack_space").addReg(X86::RDI)
+      .addExternalSymbol("__morestack_allocate_stack_space")
       .addRegMask(RegMask)
+      .addReg(X86::RDI, RegState::Implicit)
       .addReg(X86::RAX, RegState::ImplicitDefine);
   } else {
     BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)


@@ -18,16 +18,16 @@
 // Return instructions.
 let isTerminator = 1, isReturn = 1, isBarrier = 1,
     hasCtrlDep = 1, FPForm = SpecialFP in {
-  def RET   : I   <0xC3, RawFrm, (outs), (ins variable_ops),
+  def RET   : I   <0xC3, RawFrm, (outs), (ins),
                    "ret",
                    [(X86retflag 0)], IIC_RET>;
-  def RETW  : I   <0xC3, RawFrm, (outs), (ins variable_ops),
+  def RETW  : I   <0xC3, RawFrm, (outs), (ins),
                    "ret{w}",
                    [], IIC_RET>, OpSize;
-  def RETI  : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+  def RETI  : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
                    "ret\t$amt",
                    [(X86retflag timm:$amt)], IIC_RET_IMM>;
-  def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops),
+  def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt),
                    "ret{w}\t$amt",
                    [], IIC_RET_IMM>, OpSize;
   def LRETL : I   <0xCB, RawFrm, (outs), (ins),
@@ -148,12 +148,12 @@ let isCall = 1 in
   // registers are added manually.
   let Uses = [ESP] in {
     def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
-                                (outs), (ins i32imm_pcrel:$dst,variable_ops),
+                                (outs), (ins i32imm_pcrel:$dst),
                                 "call{l}\t$dst", [], IIC_CALL_RI>, Requires<[In32BitMode]>;
-    def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
+    def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst),
                     "call{l}\t{*}$dst", [(X86call GR32:$dst)], IIC_CALL_RI>,
                     Requires<[In32BitMode]>;
-    def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
+    def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst),
                     "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))], IIC_CALL_MEM>,
                     Requires<[In32BitMode]>;
@@ -174,7 +174,7 @@ let isCall = 1 in
   // callw for 16 bit code for the assembler.
   let isAsmParserOnly = 1 in
     def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
-                                (outs), (ins i16imm_pcrel:$dst, variable_ops),
+                                (outs), (ins i16imm_pcrel:$dst),
                                 "callw\t$dst", []>, OpSize;
 }
@@ -185,23 +185,23 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
     isCodeGenOnly = 1 in
   let Uses = [ESP] in {
   def TCRETURNdi : PseudoI<(outs),
-                           (ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops), []>;
+                           (ins i32imm_pcrel:$dst, i32imm:$offset), []>;
   def TCRETURNri : PseudoI<(outs),
-                           (ins ptr_rc_tailcall:$dst, i32imm:$offset, variable_ops), []>;
+                           (ins ptr_rc_tailcall:$dst, i32imm:$offset), []>;
   let mayLoad = 1 in
   def TCRETURNmi : PseudoI<(outs),
-                           (ins i32mem_TC:$dst, i32imm:$offset, variable_ops), []>;
+                           (ins i32mem_TC:$dst, i32imm:$offset), []>;
   // FIXME: The should be pseudo instructions that are lowered when going to
   // mcinst.
   def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs),
-                           (ins i32imm_pcrel:$dst, variable_ops),
+                           (ins i32imm_pcrel:$dst),
                            "jmp\t$dst # TAILCALL",
                            [], IIC_JMP_REL>;
-  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst, variable_ops),
+  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
                    "", [], IIC_JMP_REG>; // FIXME: Remove encoding when JIT is dead.
   let mayLoad = 1 in
-  def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
+  def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst),
                    "jmp{l}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
 }
@@ -218,14 +218,14 @@ let isCall = 1, Uses = [RSP] in {
   // that the offset between an arbitrary immediate and the call will fit in
   // the 32-bit pcrel field that we have.
   def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
-                      (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
+                      (outs), (ins i64i32imm_pcrel:$dst),
                       "call{q}\t$dst", [], IIC_CALL_RI>,
                       Requires<[In64BitMode]>;
-  def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
+  def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst),
                   "call{q}\t{*}$dst", [(X86call GR64:$dst)],
                   IIC_CALL_RI>,
                   Requires<[In64BitMode]>;
-  def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
+  def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst),
                   "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))],
                   IIC_CALL_MEM>,
                   Requires<[In64BitMode]>;
@@ -240,7 +240,7 @@ let isCall = 1, isCodeGenOnly = 1 in
   let Defs = [RAX, R10, R11, RSP, EFLAGS],
       Uses = [RSP] in {
   def W64ALLOCA : Ii32PCRel<0xE8, RawFrm,
-                    (outs), (ins i64i32imm_pcrel:$dst, variable_ops),
+                    (outs), (ins i64i32imm_pcrel:$dst),
                     "call{q}\t$dst", [], IIC_CALL_RI>,
                     Requires<[IsWin64]>;
 }
@@ -250,21 +250,21 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
   let Uses = [RSP],
       usesCustomInserter = 1 in {
   def TCRETURNdi64 : PseudoI<(outs),
-                             (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
+                             (ins i64i32imm_pcrel:$dst, i32imm:$offset),
                              []>;
   def TCRETURNri64 : PseudoI<(outs),
-                             (ins ptr_rc_tailcall:$dst, i32imm:$offset, variable_ops), []>;
+                             (ins ptr_rc_tailcall:$dst, i32imm:$offset), []>;
   let mayLoad = 1 in
   def TCRETURNmi64 : PseudoI<(outs),
-                             (ins i64mem_TC:$dst, i32imm:$offset, variable_ops), []>;
+                             (ins i64mem_TC:$dst, i32imm:$offset), []>;
   def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
-                             (ins i64i32imm_pcrel:$dst, variable_ops),
+                             (ins i64i32imm_pcrel:$dst),
                              "jmp\t$dst # TAILCALL", [], IIC_JMP_REL>;
-  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst, variable_ops),
+  def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins ptr_rc_tailcall:$dst),
                      "jmp{q}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
   let mayLoad = 1 in
-  def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
+  def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst),
                      "jmp{q}\t{*}$dst # TAILCALL", [], IIC_JMP_MEM>;
 }