llvm-mirror/test/CodeGen/X86/vastart-defs-eflags.ll
Alexey Lapshin 468d967ea8 [X86][VARARG] Avoid spilling xmm registers for va_start.
This patch is extracted from D69372.
It fixes bug https://bugs.llvm.org/show_bug.cgi?id=42219.

In noimplicitfloat mode, the compiler must not generate floating-point
code unless it is explicitly asked to do so. Currently this rule is
broken for variadic function arguments. Although the compiler correctly
guards the block of code that copies the xmm vararg parameters with a
check of %al, it does not protect the spills of the xmm registers.
Such spills are therefore generated in unguarded areas and can break
code that does not expect floating-point instructions. The problem
shows up at -O0, where the fast register allocator is used: it spills
virtual registers at basic block boundaries and does not protect those
spills with additional control-flow modifications. To resolve the
problem, this patch does not copy the incoming physical xmm registers
into virtual registers; instead, it stores the incoming physical xmm
registers into memory directly.
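
To illustrate, here is a minimal sketch of the kind of input that can
expose the issue. The function name and body are assumed for this
example only; they are not part of this patch or of the test below. It
is a variadic function carrying the noimplicitfloat attribute, compiled
with llc -O0:

%struct.__va_list_tag = type { i32, i32, i8*, i8* }

; Variadic function that must not contain implicit floating-point code.
; Before this change, llc -O0 could spill the incoming xmm argument
; registers outside the "testb %al" guard that protects the va_start
; copy block; with this change, the incoming physical xmm registers are
; no longer copied into virtual registers that could be spilled outside
; that guard.
define void @use_va(i32 %n, ...) noimplicitfloat nounwind {
entry:
  %ap = alloca [1 x %struct.__va_list_tag], align 16
  %ap.cast = bitcast [1 x %struct.__va_list_tag]* %ap to i8*
  call void @llvm.va_start(i8* %ap.cast)
  call void @llvm.va_end(i8* %ap.cast)
  ret void
}

declare void @llvm.va_start(i8*) nounwind
declare void @llvm.va_end(i8*) nounwind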

Differential Revision: https://reviews.llvm.org/D80163
2021-03-06 15:25:47 +03:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc %s -o - | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
; Check that vastart handling doesn't get between testb and je for the branch.
define i32 @check_flag(i32 %flags, ...) nounwind {
; CHECK-LABEL: check_flag:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: subq $56, %rsp
; CHECK-NEXT: movq %rsi, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: je LBB0_4
; CHECK-NEXT: ## %bb.3: ## %entry
; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm5, (%rsp)
; CHECK-NEXT: movaps %xmm6, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps %xmm7, {{[0-9]+}}(%rsp)
; CHECK-NEXT: LBB0_4: ## %entry
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testl $512, %edi ## imm = 0x200
; CHECK-NEXT: je LBB0_2
; CHECK-NEXT: ## %bb.1: ## %if.then
; CHECK-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, 16
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: movq %rax, 8
; CHECK-NEXT: movl $48, 4
; CHECK-NEXT: movl $8, 0
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: LBB0_2: ## %if.end
; CHECK-NEXT: addq $56, %rsp
; CHECK-NEXT: retq
entry:
  %and = and i32 %flags, 512
  %tobool = icmp eq i32 %and, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  call void @llvm.va_start(i8* null)
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  %hasflag = phi i32 [ 1, %if.then ], [ 0, %entry ]
  ret i32 %hasflag
}

declare void @llvm.va_start(i8*) nounwind