1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 11:13:28 +01:00
llvm-mirror/test/CodeGen/X86/pr15309.ll
Craig Topper 835f91f74c [X86] Swap the 0 and the fudge factor in the constant pool for the 32-bit mode i64->f32/f64/f80 uint_to_fp algorithm.
This allows us to generate better code for selecting the fixup
to load.

Previously, when the sign bit was set we had to load offset 0, and when it
was clear we had to load offset 4. This required a testl, setns, zero
extend, and finally a multiply by 4. By swapping the offsets we can simply
shift the sign bit into the lsb and multiply it by 4.
2020-01-14 17:05:23 -08:00

40 lines
1.4 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-linux-pc | FileCheck %s
; PR15309: 32-bit x86 lowering of <2 x i64> uint_to_fp to <2 x float>.
; The CHECK lines are autogenerated (see NOTE above) and pin the improved
; fudge-factor selection: for each element the sign bit of the high word is
; shifted into the lsb (shrl $31) and used, scaled by 4, to index the
; constant-pool fadd operand (fadds {{\.LCPI.*}}(,%reg,4)) — replacing the
; older testl/setns/zero-extend/multiply sequence described in the commit
; message above.
define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>* nocapture %dest) nounwind {
; CHECK-LABEL: test_convert_float2_ulong2:
; CHECK: # %bb.0:
; CHECK-NEXT: pushl %edi
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: subl $20, %esp
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl 168(%ecx), %edx
; CHECK-NEXT: movl 172(%ecx), %esi
; CHECK-NEXT: movl 160(%ecx), %edi
; CHECK-NEXT: movl 164(%ecx), %ecx
; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %edi, (%esp)
; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp)
; CHECK-NEXT: shrl $31, %ecx
; CHECK-NEXT: fildll (%esp)
; CHECK-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
; CHECK-NEXT: shrl $31, %esi
; CHECK-NEXT: fildll {{[0-9]+}}(%esp)
; CHECK-NEXT: fadds {{\.LCPI.*}}(,%esi,4)
; CHECK-NEXT: fstps 84(%eax)
; CHECK-NEXT: fstps 80(%eax)
; CHECK-NEXT: addl $20, %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: retl
  ; &src[10] — matches the 160/164/168/172(%ecx) byte offsets in the CHECKs
  ; (10 * 16 bytes per <2 x i64>).
  %t0 = getelementptr <2 x i64>, <2 x i64>* %src, i32 10
  %t1 = load <2 x i64>, <2 x i64>* %t0, align 16
  ; The conversion under test: unsigned 64-bit -> float on a target with no
  ; native u64->fp instruction, hence the fildll + fadds fixup pairs above.
  %t2 = uitofp <2 x i64> %t1 to <2 x float>
  ; &dest[10] — matches the 80/84(%eax) store offsets (10 * 8 bytes per
  ; <2 x float>).
  %t3 = getelementptr <2 x float>, <2 x float>* %dest, i32 10
  store <2 x float> %t2, <2 x float>* %t3, align 8
  ret void
}