llvm-mirror/test/CodeGen/X86/pr44396.ll
Craig Topper (835f91f74c): [X86] Swap the 0 and the fudge factor in the constant pool for the 32-bit mode i64->f32/f64/f80 uint_to_fp algorithm.

This allows us to generate better code for selecting the fixup to load.

Previously, when the sign was set we had to load offset 0, and when it was clear we had to load offset 4. This required a testl, setns, zero extend, and finally a mul by 4. By switching the offsets we can just shift the sign bit into the lsb and multiply it by 4.

2020-01-14 17:05:23 -08:00
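A minimal C sketch of the lowering described above may help; it is not LLVM's code, and the function name, table name, and the use of C are assumptions for illustration only. The 64-bit value is converted with a signed conversion (the fildll in the generated code below), and a fixup of either 0 or 2^64 is added from a two-entry table; with the fudge factor stored second, the table index is simply the sign bit, which the generated code produces with shrl $31 and scales by 4 through the fadds addressing mode.

#include <stdint.h>

/* Two 4-byte entries mirroring the swapped constant pool:
   index 0 -> 0.0 (sign bit clear), index 1 -> 2^64 (sign bit set). */
static const float fixup[2] = { 0.0f, 18446744073709551616.0f };

/* Hypothetical helper; roughly what the i386 i64 -> f64 uint_to_fp
   lowering computes for an unsigned 64-bit input. */
double u64_to_f64(uint64_t x)
{
    double d = (double)(int64_t)x;        /* signed conversion, like fildll */
    uint32_t idx = (uint32_t)(x >> 63);   /* sign bit shifted into the lsb  */
    return d + fixup[idx];                /* fadds LCPI0_0(,%edx,4)         */
}

Note that the real lowering keeps the intermediate in an 80-bit x87 register, so it rounds only once; this portable C approximation can double-round for some inputs and is meant only to show how swapping the constant-pool entries turns the fixup selection into a plain shift.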

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-apple-macosx10.15.0 -mattr=+cmov | FileCheck %s

@b = global i32 0, align 4
@a = global i64 0, align 8

define double @c() nounwind {
; CHECK-LABEL: c:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    pushl %esi
; CHECK-NEXT:    subl $16, %esp
; CHECK-NEXT:    movl _b, %eax
; CHECK-NEXT:    movl %eax, %ecx
; CHECK-NEXT:    sarl $31, %ecx
; CHECK-NEXT:    movl _a+4, %edx
; CHECK-NEXT:    movl _a, %esi
; CHECK-NEXT:    subl %eax, %esi
; CHECK-NEXT:    sbbl %ecx, %edx
; CHECK-NEXT:    setb %al
; CHECK-NEXT:    movl %esi, (%esp)
; CHECK-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; CHECK-NEXT:    shrl $31, %edx
; CHECK-NEXT:    fildll (%esp)
; CHECK-NEXT:    fadds LCPI0_0(,%edx,4)
; CHECK-NEXT:    fstpl {{[0-9]+}}(%esp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
; CHECK-NEXT:    fldz
; CHECK-NEXT:    testb %al, %al
; CHECK-NEXT:    fxch %st(1)
; CHECK-NEXT:    fcmovne %st(1), %st
; CHECK-NEXT:    fstp %st(1)
; CHECK-NEXT:    addl $16, %esp
; CHECK-NEXT:    popl %esi
; CHECK-NEXT:    retl
entry:
  %0 = load i32, i32* @b, align 4
  %conv = sext i32 %0 to i64
  %1 = load i64, i64* @a, align 8
  %cmp = icmp ult i64 %1, %conv
  %sub = sub i64 %1, %conv
  %conv3 = uitofp i64 %sub to double
  %cond = select i1 %cmp, double 0.000000e+00, double %conv3
  ret double %cond
}