mirror of https://github.com/RPCS3/llvm-mirror.git
[X86] Fix variadic argument handling for x32
The X86-64 ABI defines va_list as

  typedef struct {
    unsigned int gp_offset;
    unsigned int fp_offset;
    void *overflow_arg_area;
    void *reg_save_area;
  } va_list[1];

This means the size, alignment, and reg_save_area offset will depend on whether we are in LP64 or in ILP32 mode, so this commit adds the checks.

Additionally, the VAARG_64 pseudo-instruction assumed 64-bit pointers, so this commit adds a VAARG_X32 pseudo-instruction that behaves just like VAARG_64, except for assuming 32-bit pointers.

Some of these changes were originally done by Michael Liao <michael.hliao@gmail.com>.

Fixes https://bugs.llvm.org/show_bug.cgi?id=48428.

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D93160
This commit is contained in:
parent 05dd4fb241
commit 9906752fd3
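
For context, the sketch below (not part of the commit) spells out the two va_list layouts the change has to distinguish. The struct names are made up for illustration, and the ILP32 variant models its 4-byte pointers as uint32_t purely so the asserts can be checked on an ordinary LP64 host; the offsets and sizes are the constants the lowering in the diff encodes.

#include <cstddef>
#include <cstdint>

// Hypothetical mock-up of the System V x86-64 va_list record under LP64:
// two 4-byte offsets followed by two 8-byte pointers.
struct VAListLP64 {
  unsigned gp_offset;       // byte offset 0
  unsigned fp_offset;       // byte offset 4
  void *overflow_arg_area;  // byte offset 8
  void *reg_save_area;      // byte offset 16, hence .addDisp(Disp, 16)
};
static_assert(sizeof(VAListLP64) == 24, "LP64 va_copy copies 24 bytes");
static_assert(offsetof(VAListLP64, reg_save_area) == 16, "LP64 displacement");

// The same record under ILP32 (x32); the pointers shrink to 4 bytes,
// modeled here as uint32_t so this compiles regardless of host pointer size.
struct VAListILP32 {
  unsigned gp_offset;          // byte offset 0
  unsigned fp_offset;          // byte offset 4
  uint32_t overflow_arg_area;  // byte offset 8
  uint32_t reg_save_area;      // byte offset 12, hence .addDisp(Disp, 12)
};
static_assert(sizeof(VAListILP32) == 16, "x32 va_copy copies 16 bytes");
static_assert(offsetof(VAListILP32, reg_save_area) == 12, "x32 displacement");

These are exactly the 24-vs-16 byte, 8-vs-4 alignment, and 16-vs-12 displacement choices selected through Subtarget.isTarget64BitLP64() in the diff below.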
X86ISelLowering.cpp
@@ -24360,15 +24360,16 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
                        Subtarget.hasSSE1());
   }
 
-  // Insert VAARG_64 node into the DAG
-  // VAARG_64 returns two values: Variable Argument Address, Chain
+  // Insert VAARG node into the DAG
+  // VAARG returns two values: Variable Argument Address, Chain
   SDValue InstOps[] = {Chain, SrcPtr,
                        DAG.getTargetConstant(ArgSize, dl, MVT::i32),
                        DAG.getTargetConstant(ArgMode, dl, MVT::i8),
                        DAG.getTargetConstant(Align, dl, MVT::i32)};
   SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
   SDValue VAARG = DAG.getMemIntrinsicNode(
-      X86ISD::VAARG_64, dl, VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
+      Subtarget.isTarget64BitLP64() ? X86ISD::VAARG_64 : X86ISD::VAARG_X32, dl,
+      VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
       /*Alignment=*/None,
       MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
   Chain = VAARG.getValue(1);
@@ -24394,9 +24395,11 @@ static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
   SDLoc DL(Op);
 
-  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(24, DL),
-                       Align(8), /*isVolatile*/ false, false, false,
-                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
+  return DAG.getMemcpy(
+      Chain, DL, DstPtr, SrcPtr,
+      DAG.getIntPtrConstant(Subtarget.isTarget64BitLP64() ? 24 : 16, DL),
+      Align(Subtarget.isTarget64BitLP64() ? 8 : 4), /*isVolatile*/ false, false,
+      false, MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
 }
 
 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
@@ -30959,6 +30962,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(DBPSADBW)
   NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
   NODE_NAME_CASE(VAARG_64)
+  NODE_NAME_CASE(VAARG_X32)
   NODE_NAME_CASE(WIN_ALLOCA)
   NODE_NAME_CASE(MEMBARRIER)
   NODE_NAME_CASE(MFENCE)
@@ -31548,11 +31552,9 @@ static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
   return sinkMBB;
 }
 
-
-
 MachineBasicBlock *
-X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
-                                                 MachineBasicBlock *MBB) const {
+X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
+                                               MachineBasicBlock *MBB) const {
   // Emit va_arg instruction on X86-64.
 
   // Operands to this pseudo-instruction:
@@ -31563,9 +31565,8 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
   // 8 ) Align : Alignment of type
   // 9 ) EFLAGS (implicit-def)
 
-  assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
-  static_assert(X86::AddrNumOperands == 5,
-                "VAARG_64 assumes 5 address operands");
+  assert(MI.getNumOperands() == 10 && "VAARG should have 10 operands!");
+  static_assert(X86::AddrNumOperands == 5, "VAARG assumes 5 address operands");
 
   Register DestReg = MI.getOperand(0).getReg();
   MachineOperand &Base = MI.getOperand(1);
@@ -31580,7 +31581,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
   MachineFunction *MF = MBB->getParent();
 
   // Memory Reference
-  assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
+  assert(MI.hasOneMemOperand() && "Expected VAARG to have one memoperand");
 
   MachineMemOperand *OldMMO = MI.memoperands().front();
 
@@ -31593,7 +31594,8 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
   // Machine Information
   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
-  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
+  const TargetRegisterClass *AddrRegClass =
+      getRegClassFor(getPointerTy(MBB->getParent()->getDataLayout()));
   const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
   const DebugLoc &DL = MI.getDebugLoc();
 
@@ -31704,25 +31706,35 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
 
   // Read the reg_save_area address.
   Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
-  BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
+  BuildMI(
+      offsetMBB, DL,
+      TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
+      RegSaveReg)
       .add(Base)
       .add(Scale)
       .add(Index)
-      .addDisp(Disp, 16)
+      .addDisp(Disp, Subtarget.isTarget64BitLP64() ? 16 : 12)
       .add(Segment)
       .setMemRefs(LoadOnlyMMO);
 
-  // Zero-extend the offset
-  Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
-  BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
-      .addImm(0)
-      .addReg(OffsetReg)
-      .addImm(X86::sub_32bit);
+  if (Subtarget.isTarget64BitLP64()) {
+    // Zero-extend the offset
+    Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
+    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
+        .addImm(0)
+        .addReg(OffsetReg)
+        .addImm(X86::sub_32bit);
 
-  // Add the offset to the reg_save_area to get the final address.
-  BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
-      .addReg(OffsetReg64)
-      .addReg(RegSaveReg);
+    // Add the offset to the reg_save_area to get the final address.
+    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
+        .addReg(OffsetReg64)
+        .addReg(RegSaveReg);
+  } else {
+    // Add the offset to the reg_save_area to get the final address.
+    BuildMI(offsetMBB, DL, TII->get(X86::ADD32rr), OffsetDestReg)
+        .addReg(OffsetReg)
+        .addReg(RegSaveReg);
+  }
 
   // Compute the offset for the next argument
   Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
@@ -31751,7 +31763,9 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
 
   // Load the overflow_area address into a register.
   Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
-  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
+  BuildMI(overflowMBB, DL,
+          TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
+          OverflowAddrReg)
       .add(Base)
       .add(Scale)
       .add(Index)
@@ -31766,11 +31780,17 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
     Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
 
     // aligned_addr = (addr + (align-1)) & ~(align-1)
-    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
+    BuildMI(
+        overflowMBB, DL,
+        TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
+        TmpReg)
        .addReg(OverflowAddrReg)
        .addImm(Alignment.value() - 1);
 
-    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
+    BuildMI(
+        overflowMBB, DL,
+        TII->get(Subtarget.isTarget64BitLP64() ? X86::AND64ri32 : X86::AND32ri),
+        OverflowDestReg)
        .addReg(TmpReg)
        .addImm(~(uint64_t)(Alignment.value() - 1));
   } else {
@@ -31781,12 +31801,16 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
   // Compute the next overflow address after this argument.
   // (the overflow address should be kept 8-byte aligned)
   Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
-  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
-      .addReg(OverflowDestReg)
-      .addImm(ArgSizeA8);
+  BuildMI(
+      overflowMBB, DL,
+      TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
+      NextAddrReg)
+      .addReg(OverflowDestReg)
+      .addImm(ArgSizeA8);
 
   // Store the new overflow address.
-  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
+  BuildMI(overflowMBB, DL,
+          TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64mr : X86::MOV32mr))
       .add(Base)
       .add(Scale)
       .add(Index)
@@ -33721,7 +33745,8 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
 
   case X86::VAARG_64:
-    return EmitVAARG64WithCustomInserter(MI, BB);
+  case X86::VAARG_X32:
+    return EmitVAARGWithCustomInserter(MI, BB);
 
   case X86::EH_SjLj_SetJmp32:
   case X86::EH_SjLj_SetJmp64:
X86ISelLowering.h
@@ -814,9 +814,10 @@ namespace llvm {
     /// specifies the type to store as.
     FST,
 
-    /// This instruction grabs the address of the next argument
+    /// These instructions grab the address of the next argument
     /// from a va_list. (reads and modifies the va_list in memory)
     VAARG_64,
+    VAARG_X32,
 
     // Vector truncating store with unsigned/signed saturation
     VTRUNCSTOREUS,
@@ -1581,8 +1582,7 @@ namespace llvm {
 
     // Utility function to emit the low-level va_arg code for X86-64.
     MachineBasicBlock *
-    EmitVAARG64WithCustomInserter(MachineInstr &MI,
-                                  MachineBasicBlock *MBB) const;
+    EmitVAARGWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
 
     /// Utility function to emit the xmm reg save portion of va_start.
     MachineBasicBlock *
X86InstrCompiler.td
@@ -81,17 +81,24 @@ def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                                  timm:$offset),
                                (implicit EFLAGS)]>;
 
-// The VAARG_64 pseudo-instruction takes the address of the va_list,
-// and places the address of the next argument into a register.
-let Defs = [EFLAGS] in
+// The VAARG_64 and VAARG_X32 pseudo-instructions take the address of the
+// va_list, and place the address of the next argument into a register.
+let Defs = [EFLAGS] in {
 def VAARG_64 : I<0, Pseudo,
                  (outs GR64:$dst),
                  (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                  "#VAARG_64 $dst, $ap, $size, $mode, $align",
                  [(set GR64:$dst,
                     (X86vaarg64 addr:$ap, timm:$size, timm:$mode, timm:$align)),
-                  (implicit EFLAGS)]>;
-
+                  (implicit EFLAGS)]>, Requires<[In64BitMode, IsLP64]>;
+def VAARG_X32 : I<0, Pseudo,
+                  (outs GR32:$dst),
+                  (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
+                  "#VAARG_X32 $dst, $ap, $size, $mode, $align",
+                  [(set GR32:$dst,
+                     (X86vaargx32 addr:$ap, timm:$size, timm:$mode, timm:$align)),
+                  (implicit EFLAGS)]>, Requires<[In64BitMode, NotLP64]>;
+}
 
 // When using segmented stacks these are lowered into instructions which first
 // check if the current stacklet has enough free memory. If it does, memory is
X86InstrInfo.td
@@ -94,11 +94,11 @@ def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
                                                  SDTCisVT<1, iPTR>,
                                                  SDTCisVT<2, iPTR>]>;
 
-def SDT_X86VAARG_64 : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
-                                            SDTCisPtrTy<1>,
-                                            SDTCisVT<2, i32>,
-                                            SDTCisVT<3, i8>,
-                                            SDTCisVT<4, i32>]>;
+def SDT_X86VAARG : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
+                                         SDTCisPtrTy<1>,
+                                         SDTCisVT<2, i32>,
+                                         SDTCisVT<3, i8>,
+                                         SDTCisVT<4, i32>]>;
 
 def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
 
@@ -186,7 +186,11 @@ def X86vastart_save_xmm_regs :
                            SDT_X86VASTART_SAVE_XMM_REGS,
                            [SDNPHasChain, SDNPVariadic]>;
 def X86vaarg64 :
-                 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG_64,
+                 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG,
                         [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
                          SDNPMemOperand]>;
+def X86vaargx32 :
+                 SDNode<"X86ISD::VAARG_X32", SDT_X86VAARG,
+                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
+                         SDNPMemOperand]>;
 def X86callseq_start :
x86-64-varargs.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_sp
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -code-model=large -relocation-model=static | FileCheck --check-prefix=CHECK-X64 %s
+; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck --check-prefix=CHECK-X32 %s
 
 @.str = internal constant [38 x i8] c"%d, %f, %d, %lld, %d, %f, %d, %d, %d\0A\00" ; <[38 x i8]*> [#uses=1]
 
@@ -271,6 +272,261 @@ define void @func(...) nounwind {
 ; CHECK-X64-NEXT: addq $240, %rsp
 ; CHECK-X64-NEXT: popq %rbx
 ; CHECK-X64-NEXT: retq
+;
+; CHECK-X32-LABEL: func:
+; CHECK-X32: # %bb.0: # %entry
+; CHECK-X32-NEXT: subl $216, %esp
+; CHECK-X32-NEXT: testb %al, %al
+; CHECK-X32-NEXT: je .LBB0_2
+; CHECK-X32-NEXT: # %bb.1: # %entry
+; CHECK-X32-NEXT: movaps %xmm0, 80(%esp)
+; CHECK-X32-NEXT: movaps %xmm1, 96(%esp)
+; CHECK-X32-NEXT: movaps %xmm2, 112(%esp)
+; CHECK-X32-NEXT: movaps %xmm3, 128(%esp)
+; CHECK-X32-NEXT: movaps %xmm4, 144(%esp)
+; CHECK-X32-NEXT: movaps %xmm5, 160(%esp)
+; CHECK-X32-NEXT: movaps %xmm6, 176(%esp)
+; CHECK-X32-NEXT: movaps %xmm7, 192(%esp)
+; CHECK-X32-NEXT: .LBB0_2: # %entry
+; CHECK-X32-NEXT: movq %rdi, 32(%esp)
+; CHECK-X32-NEXT: movq %rsi, 40(%esp)
+; CHECK-X32-NEXT: movq %rdx, 48(%esp)
+; CHECK-X32-NEXT: movq %rcx, 56(%esp)
+; CHECK-X32-NEXT: movq %r8, 64(%esp)
+; CHECK-X32-NEXT: movq %r9, 72(%esp)
+; CHECK-X32-NEXT: movabsq $206158430208, %rax # imm = 0x3000000000
+; CHECK-X32-NEXT: movq %rax, (%esp)
+; CHECK-X32-NEXT: leal 224(%rsp), %eax
+; CHECK-X32-NEXT: movl %eax, 8(%esp)
+; CHECK-X32-NEXT: leal 32(%rsp), %eax
+; CHECK-X32-NEXT: movl %eax, 12(%esp)
+; CHECK-X32-NEXT: movl (%esp), %ecx
+; CHECK-X32-NEXT: cmpl $48, %ecx
+; CHECK-X32-NEXT: jae .LBB0_4
+; CHECK-X32-NEXT: # %bb.3: # %entry
+; CHECK-X32-NEXT: movl 12(%esp), %eax
+; CHECK-X32-NEXT: addl %ecx, %eax
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, (%esp)
+; CHECK-X32-NEXT: jmp .LBB0_5
+; CHECK-X32-NEXT: .LBB0_4: # %entry
+; CHECK-X32-NEXT: movl 8(%esp), %eax
+; CHECK-X32-NEXT: movl %eax, %ecx
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, 8(%esp)
+; CHECK-X32-NEXT: .LBB0_5: # %entry
+; CHECK-X32-NEXT: movl (%eax), %r10d
+; CHECK-X32-NEXT: movl (%esp), %ecx
+; CHECK-X32-NEXT: cmpl $48, %ecx
+; CHECK-X32-NEXT: jae .LBB0_7
+; CHECK-X32-NEXT: # %bb.6: # %entry
+; CHECK-X32-NEXT: movl 12(%esp), %eax
+; CHECK-X32-NEXT: addl %ecx, %eax
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, (%esp)
+; CHECK-X32-NEXT: jmp .LBB0_8
+; CHECK-X32-NEXT: .LBB0_7: # %entry
+; CHECK-X32-NEXT: movl 8(%esp), %eax
+; CHECK-X32-NEXT: movl %eax, %ecx
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, 8(%esp)
+; CHECK-X32-NEXT: .LBB0_8: # %entry
+; CHECK-X32-NEXT: movl (%eax), %r11d
+; CHECK-X32-NEXT: movl (%esp), %ecx
+; CHECK-X32-NEXT: cmpl $48, %ecx
+; CHECK-X32-NEXT: jae .LBB0_10
+; CHECK-X32-NEXT: # %bb.9: # %entry
+; CHECK-X32-NEXT: movl 12(%esp), %eax
+; CHECK-X32-NEXT: addl %ecx, %eax
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, (%esp)
+; CHECK-X32-NEXT: jmp .LBB0_11
+; CHECK-X32-NEXT: .LBB0_10: # %entry
+; CHECK-X32-NEXT: movl 8(%esp), %eax
+; CHECK-X32-NEXT: movl %eax, %ecx
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, 8(%esp)
+; CHECK-X32-NEXT: .LBB0_11: # %entry
+; CHECK-X32-NEXT: movl (%eax), %r9d
+; CHECK-X32-NEXT: movq (%esp), %rax
+; CHECK-X32-NEXT: movq 8(%esp), %rcx
+; CHECK-X32-NEXT: movq %rcx, 24(%esp)
+; CHECK-X32-NEXT: movq %rax, 16(%esp)
+; CHECK-X32-NEXT: movl 4(%esp), %eax
+; CHECK-X32-NEXT: cmpl $176, %eax
+; CHECK-X32-NEXT: jae .LBB0_13
+; CHECK-X32-NEXT: # %bb.12: # %entry
+; CHECK-X32-NEXT: addl $16, %eax
+; CHECK-X32-NEXT: movl %eax, 4(%esp)
+; CHECK-X32-NEXT: jmp .LBB0_14
+; CHECK-X32-NEXT: .LBB0_13: # %entry
+; CHECK-X32-NEXT: movl 8(%esp), %eax
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, 8(%esp)
+; CHECK-X32-NEXT: .LBB0_14: # %entry
+; CHECK-X32-NEXT: movl 20(%esp), %ecx
+; CHECK-X32-NEXT: cmpl $176, %ecx
+; CHECK-X32-NEXT: jae .LBB0_16
+; CHECK-X32-NEXT: # %bb.15: # %entry
+; CHECK-X32-NEXT: movl 28(%esp), %eax
+; CHECK-X32-NEXT: addl %ecx, %eax
+; CHECK-X32-NEXT: addl $16, %ecx
+; CHECK-X32-NEXT: movl %ecx, 20(%esp)
+; CHECK-X32-NEXT: jmp .LBB0_17
+; CHECK-X32-NEXT: .LBB0_16: # %entry
+; CHECK-X32-NEXT: movl 24(%esp), %eax
+; CHECK-X32-NEXT: movl %eax, %ecx
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, 24(%esp)
+; CHECK-X32-NEXT: .LBB0_17: # %entry
+; CHECK-X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-X32-NEXT: movl (%esp), %ecx
+; CHECK-X32-NEXT: cmpl $48, %ecx
+; CHECK-X32-NEXT: jae .LBB0_19
+; CHECK-X32-NEXT: # %bb.18: # %entry
+; CHECK-X32-NEXT: movl 12(%esp), %eax
+; CHECK-X32-NEXT: addl %ecx, %eax
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, (%esp)
+; CHECK-X32-NEXT: jmp .LBB0_20
+; CHECK-X32-NEXT: .LBB0_19: # %entry
+; CHECK-X32-NEXT: movl 8(%esp), %eax
+; CHECK-X32-NEXT: movl %eax, %ecx
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, 8(%esp)
+; CHECK-X32-NEXT: .LBB0_20: # %entry
+; CHECK-X32-NEXT: movl (%eax), %r8d
+; CHECK-X32-NEXT: movl 16(%esp), %eax
+; CHECK-X32-NEXT: cmpl $48, %eax
+; CHECK-X32-NEXT: jae .LBB0_22
+; CHECK-X32-NEXT: # %bb.21: # %entry
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, 16(%esp)
+; CHECK-X32-NEXT: jmp .LBB0_23
+; CHECK-X32-NEXT: .LBB0_22: # %entry
+; CHECK-X32-NEXT: movl 24(%esp), %eax
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, 24(%esp)
+; CHECK-X32-NEXT: .LBB0_23: # %entry
+; CHECK-X32-NEXT: movl (%esp), %eax
+; CHECK-X32-NEXT: cmpl $48, %eax
+; CHECK-X32-NEXT: jae .LBB0_25
+; CHECK-X32-NEXT: # %bb.24: # %entry
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, (%esp)
+; CHECK-X32-NEXT: jmp .LBB0_26
+; CHECK-X32-NEXT: .LBB0_25: # %entry
+; CHECK-X32-NEXT: movl 8(%esp), %eax
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, 8(%esp)
+; CHECK-X32-NEXT: .LBB0_26: # %entry
+; CHECK-X32-NEXT: movl 16(%esp), %ecx
+; CHECK-X32-NEXT: cmpl $48, %ecx
+; CHECK-X32-NEXT: jae .LBB0_28
+; CHECK-X32-NEXT: # %bb.27: # %entry
+; CHECK-X32-NEXT: movl 28(%esp), %eax
+; CHECK-X32-NEXT: addl %ecx, %eax
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, 16(%esp)
+; CHECK-X32-NEXT: jmp .LBB0_29
+; CHECK-X32-NEXT: .LBB0_28: # %entry
+; CHECK-X32-NEXT: movl 24(%esp), %eax
+; CHECK-X32-NEXT: movl %eax, %ecx
+; CHECK-X32-NEXT: addl $8, %ecx
+; CHECK-X32-NEXT: movl %ecx, 24(%esp)
+; CHECK-X32-NEXT: .LBB0_29: # %entry
+; CHECK-X32-NEXT: movq (%eax), %rcx
+; CHECK-X32-NEXT: movl (%esp), %edx
+; CHECK-X32-NEXT: cmpl $48, %edx
+; CHECK-X32-NEXT: jae .LBB0_31
+; CHECK-X32-NEXT: # %bb.30: # %entry
+; CHECK-X32-NEXT: movl 12(%esp), %eax
+; CHECK-X32-NEXT: addl %edx, %eax
+; CHECK-X32-NEXT: addl $8, %edx
+; CHECK-X32-NEXT: movl %edx, (%esp)
+; CHECK-X32-NEXT: jmp .LBB0_32
+; CHECK-X32-NEXT: .LBB0_31: # %entry
+; CHECK-X32-NEXT: movl 8(%esp), %eax
+; CHECK-X32-NEXT: movl %eax, %edx
+; CHECK-X32-NEXT: addl $8, %edx
+; CHECK-X32-NEXT: movl %edx, 8(%esp)
+; CHECK-X32-NEXT: .LBB0_32: # %entry
+; CHECK-X32-NEXT: movl (%eax), %edx
+; CHECK-X32-NEXT: movl 16(%esp), %eax
+; CHECK-X32-NEXT: cmpl $48, %eax
+; CHECK-X32-NEXT: jae .LBB0_34
+; CHECK-X32-NEXT: # %bb.33: # %entry
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, 16(%esp)
+; CHECK-X32-NEXT: jmp .LBB0_35
+; CHECK-X32-NEXT: .LBB0_34: # %entry
+; CHECK-X32-NEXT: movl 24(%esp), %eax
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, 24(%esp)
+; CHECK-X32-NEXT: .LBB0_35: # %entry
+; CHECK-X32-NEXT: movl 4(%esp), %eax
+; CHECK-X32-NEXT: cmpl $176, %eax
+; CHECK-X32-NEXT: jae .LBB0_37
+; CHECK-X32-NEXT: # %bb.36: # %entry
+; CHECK-X32-NEXT: addl $16, %eax
+; CHECK-X32-NEXT: movl %eax, 4(%esp)
+; CHECK-X32-NEXT: jmp .LBB0_38
+; CHECK-X32-NEXT: .LBB0_37: # %entry
+; CHECK-X32-NEXT: movl 8(%esp), %eax
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, 8(%esp)
+; CHECK-X32-NEXT: .LBB0_38: # %entry
+; CHECK-X32-NEXT: movl 20(%esp), %esi
+; CHECK-X32-NEXT: cmpl $176, %esi
+; CHECK-X32-NEXT: jae .LBB0_40
+; CHECK-X32-NEXT: # %bb.39: # %entry
+; CHECK-X32-NEXT: movl 28(%esp), %eax
+; CHECK-X32-NEXT: addl %esi, %eax
+; CHECK-X32-NEXT: addl $16, %esi
+; CHECK-X32-NEXT: movl %esi, 20(%esp)
+; CHECK-X32-NEXT: jmp .LBB0_41
+; CHECK-X32-NEXT: .LBB0_40: # %entry
+; CHECK-X32-NEXT: movl 24(%esp), %eax
+; CHECK-X32-NEXT: movl %eax, %esi
+; CHECK-X32-NEXT: addl $8, %esi
+; CHECK-X32-NEXT: movl %esi, 24(%esp)
+; CHECK-X32-NEXT: .LBB0_41: # %entry
+; CHECK-X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-X32-NEXT: movl (%esp), %esi
+; CHECK-X32-NEXT: cmpl $48, %esi
+; CHECK-X32-NEXT: jae .LBB0_43
+; CHECK-X32-NEXT: # %bb.42: # %entry
+; CHECK-X32-NEXT: movl 12(%esp), %eax
+; CHECK-X32-NEXT: addl %esi, %eax
+; CHECK-X32-NEXT: addl $8, %esi
+; CHECK-X32-NEXT: movl %esi, (%esp)
+; CHECK-X32-NEXT: jmp .LBB0_44
+; CHECK-X32-NEXT: .LBB0_43: # %entry
+; CHECK-X32-NEXT: movl 8(%esp), %eax
+; CHECK-X32-NEXT: movl %eax, %esi
+; CHECK-X32-NEXT: addl $8, %esi
+; CHECK-X32-NEXT: movl %esi, 8(%esp)
+; CHECK-X32-NEXT: .LBB0_44: # %entry
+; CHECK-X32-NEXT: movl (%eax), %esi
+; CHECK-X32-NEXT: movl 16(%esp), %eax
+; CHECK-X32-NEXT: cmpl $48, %eax
+; CHECK-X32-NEXT: jae .LBB0_46
+; CHECK-X32-NEXT: # %bb.45: # %entry
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, 16(%esp)
+; CHECK-X32-NEXT: jmp .LBB0_47
+; CHECK-X32-NEXT: .LBB0_46: # %entry
+; CHECK-X32-NEXT: movl 24(%esp), %eax
+; CHECK-X32-NEXT: addl $8, %eax
+; CHECK-X32-NEXT: movl %eax, 24(%esp)
+; CHECK-X32-NEXT: .LBB0_47: # %entry
+; CHECK-X32-NEXT: movl $.str, %edi
+; CHECK-X32-NEXT: movb $2, %al
+; CHECK-X32-NEXT: pushq %r10
+; CHECK-X32-NEXT: pushq %r11
+; CHECK-X32-NEXT: callq printf@PLT
+; CHECK-X32-NEXT: addl $232, %esp
+; CHECK-X32-NEXT: retq
 entry:
   %ap1 = alloca %struct.va_list
   %ap2 = alloca %struct.va_list
@@ -320,6 +576,24 @@ define i32 @main() nounwind {
 ; CHECK-X64-NEXT: xorl %eax, %eax
 ; CHECK-X64-NEXT: popq %rcx
 ; CHECK-X64-NEXT: retq
+;
+; CHECK-X32-LABEL: main:
+; CHECK-X32: # %bb.0: # %entry
+; CHECK-X32-NEXT: pushq %rax
+; CHECK-X32-NEXT: movl $12, (%esp)
+; CHECK-X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-X32-NEXT: movabsq $123456677890, %r8 # imm = 0x1CBE976802
+; CHECK-X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-X32-NEXT: movl $1, %edi
+; CHECK-X32-NEXT: movl $2, %esi
+; CHECK-X32-NEXT: movl $3, %edx
+; CHECK-X32-NEXT: movl $-10, %ecx
+; CHECK-X32-NEXT: movl $120, %r9d
+; CHECK-X32-NEXT: movb $2, %al
+; CHECK-X32-NEXT: callq func
+; CHECK-X32-NEXT: xorl %eax, %eax
+; CHECK-X32-NEXT: popq %rcx
+; CHECK-X32-NEXT: retq
 entry:
   tail call void (...) @func(i32 1, i32 2, i32 3, double 4.500000e+15, i32 -10, i64 123456677890, i32 120, double 0x3FF3EB8520000000, i32 12) nounwind
   ret i32 0
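
As a closing note, the new path can be exercised end to end with any small variadic function; the example below is illustrative only and appears in neither the commit nor the test.

// va_demo.cpp: one integer and one double vararg exercise both the
// gp_offset and fp_offset branches of the va_arg lowering.
#include <cstdarg>
#include <cstdio>

void take(int n, ...) {
  va_list ap;
  va_start(ap, n);
  int i = va_arg(ap, int);       // serviced through gp_offset / reg_save_area
  double d = va_arg(ap, double); // serviced through fp_offset
  va_end(ap);
  std::printf("%d %f\n", i, d);
}

int main() {
  take(2, 42, 1.5);
  return 0;
}

Building this with a clang that targets x86_64-linux-gnux32, or running llc -mtriple=x86_64-linux-gnux32 on the matching IR as the new RUN line does, should now go through VAARG_X32 and the 32-bit address arithmetic rather than the 64-bit forms.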