[X86] Add intrinsics for reading and writing to the flags register

LLVM's targets need to know if stack pointer adjustments occur after the
prologue.  This is needed to correctly determine if the red-zone is
appropriate to use or if a frame pointer is required.

Normally, LLVM can figure this out very precisely by reasoning about the
contents of the MachineFunction.  There is an interesting corner case:
inline assembly.

The vast majority of inline assembly that performs a push or pop does so
to pair it with a pushf or popf, as appropriate.  Unfortunately,
this inline assembly doesn't mark the stack pointer as clobbered
because, well, it isn't.  The stack pointer is decremented and then
immediately incremented.  Because of this, LLVM was changed in r256456
to conservatively assume that inline assembly might contain a sequence of
stack operations.  This is unfortunate because the vast majority of
inline assembly will not end up manipulating the stack pointer in any
way at all.
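
For reference, the idiom in question looks roughly like the following IR
sketch (the 64-bit variant; the function name is illustrative, and the
inline-asm string mirrors the test updated further down in this commit):

  define i64 @read_flags_via_asm() {
  entry:
    ; pushf bumps the stack pointer and the pop immediately restores it,
    ; so the asm blob never declares rsp as clobbered.
    %flags = call i64 asm sideeffect "pushf\0A\09popq $0\0A", "=r,~{dirflag},~{fpsr},~{flags}"()
    ret i64 %flags
  }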

Instead, let's provide a more principled solution: an intrinsic.
FWIW, other compilers (MSVC and GCC among them) also provide this
functionality as an intrinsic.
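
As a minimal sketch of what the new interface looks like in IR (mirroring
the tests added below; the function name is illustrative):

  declare i64 @llvm.x86.flags.read.u64()
  declare void @llvm.x86.flags.write.u64(i64)

  define i64 @swap_flags(i64 %new) {
  entry:
    ; Lowered via the new RDFLAGS64/WRFLAGS64 pseudos to pushfq; popq and
    ; pushq; popfq, which also mark the frame as having an opaque stack
    ; pointer adjustment.
    %old = call i64 @llvm.x86.flags.read.u64()
    call void @llvm.x86.flags.write.u64(i64 %new)
    ret i64 %old
  }

The 32-bit variants (@llvm.x86.flags.read.u32 and @llvm.x86.flags.write.u32)
and the corresponding GCC-style builtins (__builtin_ia32_readeflags_u32 and
friends) are wired up the same way.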

llvm-svn: 256685
David Majnemer 2016-01-01 06:50:01 +00:00
parent 2d3c7242d3
commit 93803262f4
12 changed files with 157 additions and 34 deletions


@@ -32,6 +32,19 @@ let TargetPrefix = "x86" in {
[IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
// FLAGS.
let TargetPrefix = "x86" in {
def int_x86_flags_read_u32 : GCCBuiltin<"__builtin_ia32_readeflags_u32">,
Intrinsic<[llvm_i32_ty], [], []>;
def int_x86_flags_read_u64 : GCCBuiltin<"__builtin_ia32_readeflags_u64">,
Intrinsic<[llvm_i64_ty], [], []>;
def int_x86_flags_write_u32 : GCCBuiltin<"__builtin_ia32_writeeflags_u32">,
Intrinsic<[], [llvm_i32_ty], []>;
def int_x86_flags_write_u64 : GCCBuiltin<"__builtin_ia32_writeeflags_u64">,
Intrinsic<[], [llvm_i64_ty], []>;
}
//===----------------------------------------------------------------------===//
// Read Time Stamp Counter.
let TargetPrefix = "x86" in {


@@ -86,10 +86,6 @@ X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
static bool usesTheStack(const MachineFunction &MF) {
const MachineRegisterInfo &MRI = MF.getRegInfo();
// Conservativley assume that inline assembly might use the stack.
if (MF.hasInlineAsm())
return true;
return any_of(MRI.reg_instructions(X86::EFLAGS),
[](const MachineInstr &RI) { return RI.isCopy(); });
}


@@ -22537,6 +22537,40 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::CMOV_V64I1:
return EmitLoweredSelect(MI, BB);
case X86::RDFLAGS32:
case X86::RDFLAGS64: {
DebugLoc DL = MI->getDebugLoc();
MachineFunction *MF = BB->getParent();
MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
const TargetInstrInfo *TII = Subtarget->getInstrInfo();
unsigned PushF =
MI->getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
unsigned Pop =
MI->getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
BuildMI(*BB, MI, DL, TII->get(PushF));
BuildMI(*BB, MI, DL, TII->get(Pop), MI->getOperand(0).getReg());
MI->eraseFromParent(); // The pseudo is gone now.
return BB;
}
case X86::WRFLAGS32:
case X86::WRFLAGS64: {
DebugLoc DL = MI->getDebugLoc();
MachineFunction *MF = BB->getParent();
MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
const TargetInstrInfo *TII = Subtarget->getInstrInfo();
unsigned Push =
MI->getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
unsigned PopF =
MI->getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI->getOperand(0).getReg());
BuildMI(*BB, MI, DL, TII->get(PopF));
MI->eraseFromParent(); // The pseudo is gone now.
return BB;
}
case X86::RELEASE_FADD32mr:
case X86::RELEASE_FADD64mr:
return EmitLoweredAtomicFP(MI, BB);


@@ -1093,6 +1093,32 @@ def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[],
}
let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
SchedRW = [WriteRMW], Defs = [ESP] in {
let Uses = [ESP, EFLAGS] in
def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
[(set GR32:$dst, (int_x86_flags_read_u32))]>,
Requires<[Not64BitMode]>;
let Uses = [RSP, EFLAGS] in
def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
[(set GR64:$dst, (int_x86_flags_read_u64))]>,
Requires<[In64BitMode]>;
}
let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
SchedRW = [WriteRMW] in {
let Defs = [ESP, EFLAGS], Uses = [ESP] in
def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
[(int_x86_flags_write_u32 GR32:$src)]>,
Requires<[Not64BitMode]>;
let Defs = [RSP, EFLAGS], Uses = [RSP] in
def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
[(int_x86_flags_write_u64 GR64:$src)]>,
Requires<[In64BitMode]>;
}
let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
SchedRW = [WriteLoad] in {
def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>,


@@ -1,9 +1,7 @@
; RUN: llc -mcpu=generic -mtriple=x86_64-mingw32 < %s | FileCheck %s
; CHECK: pushq %rbp
; CHECK: subq $32, %rsp
; CHECK: leaq 32(%rsp), %rbp
; CHECK: movaps %xmm8, -16(%rbp)
; CHECK: movaps %xmm7, -32(%rbp)
; CHECK: subq $40, %rsp
; CHECK: movaps %xmm8, 16(%rsp)
; CHECK: movaps %xmm7, (%rsp)
define i32 @a() nounwind {
entry:


@@ -21,11 +21,9 @@ define void @nop() nounwind {
;
; X64-LABEL: nop:
; X64: # BB#0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: #APP
; X64-NEXT: #NO_APP
; X64-NEXT: movaps %xmm0, (%rsp)
; X64-NEXT: addq $24, %rsp
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: retq
%1 = alloca <4 x float>, align 16
%2 = call <4 x float> asm "", "=x,~{dirflag},~{fpsr},~{flags}"()


@@ -4,17 +4,15 @@
; defining %0 before it was read. This caused us to omit the
; movq -8(%rsp), %rdx
; CHECK: pushq %rax
; CHECK: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: movq %rcx, %rax
; CHECK-NEXT: movq %rax, (%rsp)
; CHECK-NEXT: movq (%rsp), %rdx
; CHECK-NEXT: movq %rax, -8(%rsp)
; CHECK-NEXT: movq -8(%rsp), %rdx
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: movq %rdx, (%rsp)
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: movq %rdx, -8(%rsp)
; CHECK-NEXT: ret
define i64 @foo() {


@@ -128,11 +128,9 @@ entry:
; CHECK: .seh_setframe 5, 0
; CHECK: .seh_endprologue
%call = call i64 asm sideeffect "pushf\0A\09popq $0\0A", "=r,~{dirflag},~{fpsr},~{flags}"()
; CHECK-NEXT: #APP
%call = call i64 @llvm.x86.flags.read.u64()
; CHECK-NEXT: pushfq
; CHECK-NEXT: popq %rax
; CHECK: #NO_APP
ret i64 %call
; CHECK-NEXT: popq %rbp
@@ -187,5 +185,6 @@ define i64 @f10(i64* %foo, i64 %bar, i64 %baz) {
}
declare i8* @llvm.returnaddress(i32) nounwind readnone
declare i64 @llvm.x86.flags.read.u64()
declare void @llvm.va_start(i8*) nounwind


@@ -0,0 +1,37 @@
; RUN: llc < %s | FileCheck %s
target triple = "x86_64-pc-win32"
declare i64 @llvm.x86.flags.read.u64()
declare void @llvm.x86.flags.write.u64(i64)
define i64 @read_flags() {
entry:
%flags = call i64 @llvm.x86.flags.read.u64()
ret i64 %flags
}
; CHECK-LABEL: read_flags:
; CHECK: pushq %rbp
; CHECK: .seh_pushreg 5
; CHECK: movq %rsp, %rbp
; CHECK: .seh_setframe 5, 0
; CHECK: .seh_endprologue
; CHECK-NEXT: pushfq
; CHECK-NEXT: popq %rax
; CHECK-NEXT: popq %rbp
define void @write_flags(i64 %arg) {
entry:
call void @llvm.x86.flags.write.u64(i64 %arg)
ret void
}
; CHECK-LABEL: write_flags:
; CHECK: pushq %rbp
; CHECK: .seh_pushreg 5
; CHECK: movq %rsp, %rbp
; CHECK: .seh_setframe 5, 0
; CHECK: .seh_endprologue
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: popfq
; CHECK-NEXT: popq %rbp


@@ -0,0 +1,31 @@
; RUN: llc < %s | FileCheck %s
target triple = "i686-pc-win32"
declare i32 @llvm.x86.flags.read.u32()
declare void @llvm.x86.flags.write.u32(i32)
define i32 @read_flags() {
entry:
%flags = call i32 @llvm.x86.flags.read.u32()
ret i32 %flags
}
; CHECK-LABEL: _read_flags:
; CHECK: pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: pushfl
; CHECK-NEXT: popl %eax
; CHECK-NEXT: popl %ebp
define x86_fastcallcc void @write_flags(i32 inreg %arg) {
entry:
call void @llvm.x86.flags.write.u32(i32 %arg)
ret void
}
; CHECK-LABEL: @write_flags@4:
; CHECK: pushl %ebp
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: pushl %ecx
; CHECK-NEXT: popfl
; CHECK-NEXT: popl %ebp


@@ -11,10 +11,8 @@ target triple = "x86_64--windows-gnu"
; etc.) prior to the return and this is forbidden for Win64.
; CHECK-LABEL: loopInfoSaveOutsideLoop:
; CHECK: push
; CHECK: push
; CHECK-NOT: popq
; CHECK: popq
; CHECK: popq
; CHECK-NOT: popq
; CHECK-NEXT: retq
define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) #0 {
@@ -57,7 +55,6 @@ if.end: ; preds = %if.else, %for.end
;
; Prologue code.
; Make sure we save the CSR used in the inline asm: rbx.
; CHECK: pushq %rbp
; CHECK: pushq %rbx
;
; DISABLE: testl %ecx, %ecx
@@ -79,7 +76,6 @@ if.end: ; preds = %if.else, %for.end
; DISABLE: jmp [[EPILOG_BB:.LBB[0-9_]+]]
;
; ENABLE-NEXT: popq %rbx
; ENABLE-NEXT: popq %rbp
; ENABLE-NEXT: retq
;
; CHECK: [[ELSE_LABEL]]: # %if.else


@@ -130,15 +130,12 @@
; X64-NEXT: .L{{.*}}:{{$}}
; X64-NEXT: [[START:.*]]:{{$}}
; X64: # BB
; X64: pushq %rbp
; X64-NEXT: subq $32, %rsp
; X64-NEXT: leaq 32(%rsp), %rbp
; X64: subq $40, %rsp
; X64-NEXT: [[ASM_LINE:.*]]:{{$}}
; X64: [[CALL_LINE:.*]]:{{$}}
; X64: callq g
; X64-NEXT: [[EPILOG_AND_RET:.*]]:
; X64: addq $32, %rsp
; X64-NEXT: popq %rbp
; X64: addq $40, %rsp
; X64-NEXT: ret
; X64-NEXT: [[END_OF_F:.*]]:
;
@@ -225,22 +222,22 @@
; OBJ64: ProcStart {
; OBJ64-NEXT: DisplayName: f
; OBJ64-NEXT: Section: f
; OBJ64-NEXT: CodeSize: 0x17
; OBJ64-NEXT: CodeSize: 0xE
; OBJ64-NEXT: }
; OBJ64-NEXT: ProcEnd
; OBJ64-NEXT: ]
; OBJ64: FunctionLineTable [
; OBJ64-NEXT: Name: f
; OBJ64-NEXT: Flags: 0x1
; OBJ64-NEXT: CodeSize: 0x17
; OBJ64-NEXT: CodeSize: 0xE
; OBJ64-NEXT: FilenameSegment [
; OBJ64-NEXT: Filename: D:\asm.c
; OBJ64-NEXT: +0x0: 3
; FIXME: An empty __asm stmt creates an extra entry.
; See PR18679 for the details.
; OBJ64-NEXT: +0xA: 4
; OBJ64-NEXT: +0xC: 5
; OBJ64-NEXT: +0x11: 6
; OBJ64-NEXT: +0x4: 4
; OBJ64-NEXT: +0x4: 5
; OBJ64-NEXT: +0x9: 6
; OBJ64-NEXT: ColStart: 0
; OBJ64-NEXT: ColEnd: 0
; OBJ64-NEXT: ColStart: 0