1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2025-02-01 13:11:39 +01:00
llvm-mirror/test/CodeGen/X86/constrained-fp80-trunc-ext.ll
Kevin P. Neal 3226a47280 [FPEnv] Lower STRICT_FP_EXTEND and STRICT_FP_ROUND nodes in preprocess phase of ISelLowering to mirror non-strict nodes on x86.
I recently discovered a bug on the x86 platform: The fp80 type was not handled well by x86 for constrained floating point nodes, as their regular counterparts are replaced by extending loads and truncating stores during the preprocess phase. Normally, platforms don't have this issue, as they don't typically attempt to perform such legalizations during instruction selection preprocessing. Before this change, strict_fp nodes survived until they were mutated to normal nodes, which happened shortly after preprocessing on other platforms. This modification lowers these nodes at the same phase while properly utilizing the chain.

Submitted by:	Drew Wock <drew.wock@sas.com>
Reviewed by:	Craig Topper, Kevin P. Neal
Approved by:	Craig Topper
Differential Revision:	https://reviews.llvm.org/D63271

llvm-svn: 363417
2019-06-14 16:28:55 +00:00

62 lines
2.2 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-gnu-linux < %s | FileCheck %s
; Strict (constrained) fpext float -> x86_fp80. The CHECK lines pin the
; expected lowering: spill the SSE value to the stack (movss) and reload it
; through the x87 stack with an extending load (flds), mirroring how the
; non-strict node is legalized in X86 ISel preprocessing.
define x86_fp80 @constrained_fpext_f32_as_fp80(float %mem) {
; CHECK-LABEL: constrained_fpext_f32_as_fp80:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: flds -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
entry:
; Note: constrained fpext takes only the exception-behavior metadata
; (no rounding mode), since widening is always exact.
%ext = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(
float %mem,
metadata !"fpexcept.strict")
ret x86_fp80 %ext
}
; Strict (constrained) fptrunc x86_fp80 -> float. Expected lowering: load the
; fp80 argument from the caller's stack slot onto the x87 stack (fldt),
; truncate via a store (fstps), then reload into an SSE register (movss).
define float @constrained_fptrunc_f80_to_f32(x86_fp80 %reg) {
; CHECK-LABEL: constrained_fptrunc_f80_to_f32:
; CHECK: # %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstps -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: retq
; Truncation can round, so both rounding-mode and exception-behavior
; metadata arguments are required here.
%trunc = call float @llvm.experimental.constrained.fptrunc.f32.f80(
x86_fp80 %reg,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret float %trunc
}
; Strict (constrained) fpext double -> x86_fp80. Same shape as the f32 case:
; spill the SSE value (movsd) and reload through x87 with an extending
; load (fldl).
define x86_fp80 @constrained_fpext_f64_to_f80(double %mem) {
; CHECK-LABEL: constrained_fpext_f64_to_f80:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
entry:
; Widening is exact, so only the exception-behavior metadata is passed.
%ext = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(
double %mem,
metadata !"fpexcept.strict")
ret x86_fp80 %ext
}
; Strict (constrained) fptrunc x86_fp80 -> double. Expected lowering: x87
; load of the fp80 argument (fldt), truncating store (fstpl), reload into
; an SSE register (movsd).
define double @constrained_fptrunc_f80_to_f64(x86_fp80 %reg) {
; CHECK-LABEL: constrained_fptrunc_f80_to_f64:
; CHECK: # %bb.0:
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: retq
; Truncation can round: rounding-mode and exception metadata both required.
%trunc = call double @llvm.experimental.constrained.fptrunc.f64.f80(
x86_fp80 %reg,
metadata !"round.dynamic",
metadata !"fpexcept.strict")
ret double %trunc
}
; Declarations of the constrained FP conversion intrinsics exercised above.
; fpext variants take one metadata operand (exception behavior); fptrunc
; variants take two (rounding mode, exception behavior).
declare x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float, metadata)
declare x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f80(x86_fp80, metadata, metadata)
declare double @llvm.experimental.constrained.fptrunc.f64.f80(x86_fp80, metadata, metadata)