
[X86] Autogenerate checks in fp-intrinsics.ll. Split fma tests off to fp-intrinsics-fma.ll. NFC

Craig Topper 2019-11-11 15:42:25 -08:00
parent b3b4447f0d
commit b69f476b76
2 changed files with 368 additions and 119 deletions
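
To regenerate the assertions after a codegen change, rerun the script named in the NOTE lines over each test. A minimal sketch, assuming a built llc at build/bin/llc and the old llvm-mirror test layout (the paths are illustrative, not from this commit):

$ python utils/update_llc_test_checks.py --llc-binary=build/bin/llc test/CodeGen/X86/fp-intrinsics-fma.ll
$ python utils/update_llc_test_checks.py --llc-binary=build/bin/llc test/CodeGen/X86/fp-intrinsics.ll

The script executes every RUN line, captures the assembly emitted for each function, and rewrites the CHECK blocks under the prefixes passed via --check-prefix, which is what produced the SSE/AVX bodies below.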

test/CodeGen/X86/fp-intrinsics-fma.ll (new file)

@@ -0,0 +1,91 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=COMMON --check-prefix=SSE
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX
; Verify that fma(3.5) isn't simplified when the rounding mode is
; unknown.
define float @f17() #0 {
; SSE-LABEL: f17:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: jmp fmaf # TAILCALL
;
; AVX-LABEL: f17:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; AVX-NEXT: retq
; NOFMA-LABEL: f17:
; NOFMA: # %bb.0: # %entry
; NOFMA-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; NOFMA-NEXT: vmovaps %xmm0, %xmm1
; NOFMA-NEXT: vmovaps %xmm0, %xmm2
; NOFMA-NEXT: jmp fmaf # TAILCALL
; FMA-LABEL: f17:
; FMA: # %bb.0: # %entry
; FMA-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; FMA-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA-NEXT: retq
; AVX512-LABEL: f17:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; AVX512-NEXT: retq
entry:
%result = call float @llvm.experimental.constrained.fma.f32(
float 3.5,
float 3.5,
float 3.5,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret float %result
}
; Verify that fma(42.1) isn't simplified when the rounding mode is
; unknown.
define double @f18() #0 {
; SSE-LABEL: f18:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: jmp fma # TAILCALL
;
; AVX-LABEL: f18:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; AVX-NEXT: retq
; NOFMA-LABEL: f18:
; NOFMA: # %bb.0: # %entry
; NOFMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; NOFMA-NEXT: vmovaps %xmm0, %xmm1
; NOFMA-NEXT: vmovaps %xmm0, %xmm2
; NOFMA-NEXT: jmp fma # TAILCALL
; FMA-LABEL: f18:
; FMA: # %bb.0: # %entry
; FMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; FMA-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; FMA-NEXT: retq
; AVX512-LABEL: f18:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512-NEXT: vfmadd213sd {{.*#+}} xmm0 = (xmm0 * xmm0) + xmm0
; AVX512-NEXT: retq
entry:
%result = call double @llvm.experimental.constrained.fma.f64(
double 42.1,
double 42.1,
double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret double %result
}
attributes #0 = { strictfp }
declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
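
To execute just this new test, the RUN lines can be driven through lit; a minimal sketch, assuming a configured build tree at build/ (the path is an assumption):

$ build/bin/llvm-lit -v test/CodeGen/X86/fp-intrinsics-fma.ll

Each RUN line pipes the file into llc for one subtarget, and FileCheck matches that run's output against the blocks for the prefixes the RUN line selects (COMMON plus SSE or AVX).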

test/CodeGen/X86/fp-intrinsics.ll

@@ -1,5 +1,8 @@
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck --check-prefix=COMMON --check-prefix=NO-FMA --check-prefix=FMACALL64 --check-prefix=FMACALL32 %s
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck -check-prefix=COMMON --check-prefix=HAS-FMA --check-prefix=FMA64 --check-prefix=FMA32 %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=COMMON --check-prefix=SSE
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX1
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512dq < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512DQ
; Verify that constants aren't folded to inexact results when the rounding mode
; is unknown.
@@ -9,9 +12,18 @@
; return 1.0/10.0;
; }
;
; COMMON-LABEL: f1
; COMMON: divsd
define double @f1() #0 {
; SSE-LABEL: f1:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: divsd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f1:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vdivsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%div = call double @llvm.experimental.constrained.fdiv.f64(
double 1.000000e+00,
@@ -29,9 +41,18 @@ entry:
; return a - 0;
; }
;
; COMMON-LABEL: f2
; COMMON: subsd
define double @f2(double %a) #0 {
; SSE-LABEL: f2:
; SSE: # %bb.0: # %entry
; SSE-NEXT: xorpd %xmm1, %xmm1
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f2:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%sub = call double @llvm.experimental.constrained.fsub.f64(
double %a,
@@ -50,11 +71,24 @@ entry:
; return -((-a)*b);
; }
;
; COMMON-LABEL: f3:
; COMMON: subsd
; COMMON: mulsd
; COMMON: subsd
define double @f3(double %a, double %b) #0 {
; SSE-LABEL: f3:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: subsd %xmm0, %xmm3
; SSE-NEXT: mulsd %xmm1, %xmm3
; SSE-NEXT: subsd %xmm3, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f3:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX-NEXT: vsubsd %xmm0, %xmm2, %xmm0
; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vsubsd %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
entry:
%sub = call double @llvm.experimental.constrained.fsub.f64(
double -0.000000e+00, double %a,
@@ -83,11 +117,24 @@ entry:
; }
;
;
; COMMON-LABEL: f4:
; COMMON: testl
; COMMON: jle
; COMMON: addsd
define double @f4(i32 %n, double %a) #0 {
; SSE-LABEL: f4:
; SSE: # %bb.0: # %entry
; SSE-NEXT: testl %edi, %edi
; SSE-NEXT: jle .LBB3_2
; SSE-NEXT: # %bb.1: # %if.then
; SSE-NEXT: addsd {{.*}}(%rip), %xmm0
; SSE-NEXT: .LBB3_2: # %if.end
; SSE-NEXT: retq
;
; AVX-LABEL: f4:
; AVX: # %bb.0: # %entry
; AVX-NEXT: testl %edi, %edi
; AVX-NEXT: jle .LBB3_2
; AVX-NEXT: # %bb.1: # %if.then
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: .LBB3_2: # %if.end
; AVX-NEXT: retq
entry:
%cmp = icmp sgt i32 %n, 0
br i1 %cmp, label %if.then, label %if.end
@@ -105,9 +152,18 @@ if.end:
}
; Verify that sqrt(42.0) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f5
; COMMON: sqrtsd
define double @f5() #0 {
; SSE-LABEL: f5:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: sqrtsd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f5:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call double @llvm.experimental.constrained.sqrt.f64(double 42.0,
metadata !"round.dynamic",
@@ -116,9 +172,18 @@ entry:
}
; Verify that pow(42.1, 3.0) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f6
; COMMON: pow
define double @f6() #0 {
; SSE-LABEL: f6:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: jmp pow # TAILCALL
;
; AVX-LABEL: f6:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: jmp pow # TAILCALL
entry:
%result = call double @llvm.experimental.constrained.pow.f64(double 42.1,
double 3.0,
@@ -128,9 +193,18 @@ entry:
}
; Verify that powi(42.1, 3) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f7
; COMMON: powi
define double @f7() #0 {
; SSE-LABEL: f7:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movl $3, %edi
; SSE-NEXT: jmp __powidf2 # TAILCALL
;
; AVX-LABEL: f7:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
; AVX-NEXT: jmp __powidf2 # TAILCALL
entry:
%result = call double @llvm.experimental.constrained.powi.f64(double 42.1,
i32 3,
@@ -140,9 +214,16 @@ entry:
}
; Verify that sin(42.0) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f8
; COMMON: sin
define double @f8() #0 {
; SSE-LABEL: f8:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp sin # TAILCALL
;
; AVX-LABEL: f8:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: jmp sin # TAILCALL
entry:
%result = call double @llvm.experimental.constrained.sin.f64(double 42.0,
metadata !"round.dynamic",
@@ -151,9 +232,16 @@ entry:
}
; Verify that cos(42.0) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f9
; COMMON: cos
define double @f9() #0 {
; SSE-LABEL: f9:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp cos # TAILCALL
;
; AVX-LABEL: f9:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: jmp cos # TAILCALL
entry:
%result = call double @llvm.experimental.constrained.cos.f64(double 42.0,
metadata !"round.dynamic",
@@ -162,9 +250,16 @@ entry:
}
; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f10
; COMMON: exp
define double @f10() #0 {
; SSE-LABEL: f10:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp exp # TAILCALL
;
; AVX-LABEL: f10:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: jmp exp # TAILCALL
entry:
%result = call double @llvm.experimental.constrained.exp.f64(double 42.0,
metadata !"round.dynamic",
@@ -173,9 +268,16 @@ entry:
}
; Verify that exp2(42.1) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f11
; COMMON: exp2
define double @f11() #0 {
; SSE-LABEL: f11:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp exp2 # TAILCALL
;
; AVX-LABEL: f11:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: jmp exp2 # TAILCALL
entry:
%result = call double @llvm.experimental.constrained.exp2.f64(double 42.1,
metadata !"round.dynamic",
@@ -184,9 +286,16 @@ entry:
}
; Verify that log(42.0) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f12
; COMMON: log
define double @f12() #0 {
; SSE-LABEL: f12:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp log # TAILCALL
;
; AVX-LABEL: f12:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: jmp log # TAILCALL
entry:
%result = call double @llvm.experimental.constrained.log.f64(double 42.0,
metadata !"round.dynamic",
@@ -195,9 +304,16 @@ entry:
}
; Verify that log10(42.0) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f13
; COMMON: log10
define double @f13() #0 {
; SSE-LABEL: f13:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp log10 # TAILCALL
;
; AVX-LABEL: f13:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: jmp log10 # TAILCALL
entry:
%result = call double @llvm.experimental.constrained.log10.f64(double 42.0,
metadata !"round.dynamic",
@@ -206,9 +322,16 @@ entry:
}
; Verify that log2(42.0) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f14
; COMMON: log2
define double @f14() #0 {
; SSE-LABEL: f14:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp log2 # TAILCALL
;
; AVX-LABEL: f14:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: jmp log2 # TAILCALL
entry:
%result = call double @llvm.experimental.constrained.log2.f64(double 42.0,
metadata !"round.dynamic",
@@ -217,10 +340,17 @@ entry:
}
; Verify that rint(42.1) isn't simplified when the rounding mode is unknown.
; COMMON-LABEL: f15
; NO-FMA: rint
; HAS-FMA: vroundsd
define double @f15() #0 {
; SSE-LABEL: f15:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp rint # TAILCALL
;
; AVX-LABEL: f15:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call double @llvm.experimental.constrained.rint.f64(double 42.1,
metadata !"round.dynamic",
@@ -230,10 +360,17 @@ entry:
; Verify that nearbyint(42.1) isn't simplified when the rounding mode is
; unknown.
; COMMON-LABEL: f16
; NO-FMA: nearbyint
; HAS-FMA: vroundsd
define double @f16() #0 {
; SSE-LABEL: f16:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: jmp nearbyint # TAILCALL
;
; AVX-LABEL: f16:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call double @llvm.experimental.constrained.nearbyint.f64(
double 42.1,
@@ -242,41 +379,18 @@ entry:
ret double %result
}
; Verify that fma(3.5) isn't simplified when the rounding mode is
; unknown.
; COMMON-LABEL: f17
; FMACALL32: jmp fmaf # TAILCALL
; FMA32: vfmadd213ss
define float @f17() #0 {
entry:
%result = call float @llvm.experimental.constrained.fma.f32(
float 3.5,
float 3.5,
float 3.5,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret float %result
}
; Verify that fma(42.1) isn't simplified when the rounding mode is
; unknown.
; COMMON-LABEL: f18
; FMACALL64: jmp fma # TAILCALL
; FMA64: vfmadd213sd
define double @f18() #0 {
entry:
%result = call double @llvm.experimental.constrained.fma.f64(
double 42.1,
double 42.1,
double 42.1,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret double %result
}
; COMMON-LABEL: f19
; COMMON: fmod
define double @f19() #0 {
; SSE-LABEL: f19:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: jmp fmod # TAILCALL
;
; AVX-LABEL: f19:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: jmp fmod # TAILCALL
entry:
%rem = call double @llvm.experimental.constrained.frem.f64(
double 1.000000e+00,
@@ -289,30 +403,52 @@ entry:
; Verify that fptoui(%x) isn't simplified when the rounding mode is
; unknown. The expansion should have only one conversion instruction.
; Verify that no gross errors happen.
; COMMON-LABEL: @f20u
; NO-FMA: cmpltsd
; NO-FMA: movapd
; NO-FMA: andpd
; NO-FMA: xorl
; NO-FMA: ucomisd
; NO-FMA: subsd
; NO-FMA: andnpd
; NO-FMA: orpd
; NO-FMA: cvttsd2si
; NO-FMA: setae
; NO-FMA: shll
; NO-FMA: xorl
;
; HAS-FMA: vcmpltsd
; HAS-FMA: vsubsd
; HAS-FMA: vblendvpd
; HAS-FMA: vcvttsd2si
; HAS-FMA: xorl
; HAS-FMA: vucomisd
; HAS-FMA: setae
; HAS-FMA: shll
; HAS-FMA: xorl
define i32 @f20u(double %x) #0 {
; SSE-LABEL: f20u:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: cmpltsd %xmm1, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm3
; SSE-NEXT: andpd %xmm0, %xmm2
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: ucomisd %xmm1, %xmm0
; SSE-NEXT: subsd %xmm1, %xmm0
; SSE-NEXT: andnpd %xmm0, %xmm3
; SSE-NEXT: orpd %xmm3, %xmm2
; SSE-NEXT: cvttsd2si %xmm2, %ecx
; SSE-NEXT: setae %al
; SSE-NEXT: shll $31, %eax
; SSE-NEXT: xorl %ecx, %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: f20u:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX1-NEXT: vcmpltsd %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vsubsd %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm3, %xmm2
; AVX1-NEXT: vcvttsd2si %xmm2, %ecx
; AVX1-NEXT: xorl %eax, %eax
; AVX1-NEXT: vucomisd %xmm1, %xmm0
; AVX1-NEXT: setae %al
; AVX1-NEXT: shll $31, %eax
; AVX1-NEXT: xorl %ecx, %eax
; AVX1-NEXT: retq
;
; AVX512-LABEL: f20u:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-NEXT: vcmpltsd %xmm1, %xmm0, %k1
; AVX512-NEXT: vsubsd %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vmovsd %xmm0, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vcvttsd2si %xmm2, %ecx
; AVX512-NEXT: xorl %eax, %eax
; AVX512-NEXT: vucomisd %xmm1, %xmm0
; AVX512-NEXT: setae %al
; AVX512-NEXT: shll $31, %eax
; AVX512-NEXT: xorl %ecx, %eax
; AVX512-NEXT: retq
entry:
%result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
metadata !"fpexcept.strict") #0
@@ -322,9 +458,18 @@ entry:
; Verify that round(42.1) isn't simplified when the rounding mode is
; unknown.
; Verify that no gross errors happen.
; COMMON-LABEL: @f21
; COMMON: cvtsd2ss
define float @f21() #0 {
; SSE-LABEL: f21:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: cvtsd2ss %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f21:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call float @llvm.experimental.constrained.fptrunc.f32.f64(
double 42.1,
@@ -333,18 +478,26 @@ entry:
ret float %result
}
; COMMON-LABEL: @f22
; COMMON: cvtss2sd
define double @f22(float %x) #0 {
; SSE-LABEL: f22:
; SSE: # %bb.0: # %entry
; SSE-NEXT: cvtss2sd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: f22:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
entry:
%result = call double @llvm.experimental.constrained.fpext.f64.f32(float %x,
metadata !"fpexcept.strict") #0
ret double %result
}
; COMMON-LABEL: f23
; COMMON: jmp lrint
define i32 @f23(double %x) #0 {
; COMMON-LABEL: f23:
; COMMON: # %bb.0: # %entry
; COMMON-NEXT: jmp lrint # TAILCALL
entry:
%result = call i32 @llvm.experimental.constrained.lrint.i32.f64(double %x,
metadata !"round.dynamic",
@@ -352,9 +505,10 @@ entry:
ret i32 %result
}
; COMMON-LABEL: f24
; COMMON: jmp lrintf
define i32 @f24(float %x) #0 {
; COMMON-LABEL: f24:
; COMMON: # %bb.0: # %entry
; COMMON-NEXT: jmp lrintf # TAILCALL
entry:
%result = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %x,
metadata !"round.dynamic",
@@ -362,9 +516,10 @@ entry:
ret i32 %result
}
; COMMON-LABEL: f25
; COMMON: jmp llrint
define i64 @f25(double %x) #0 {
; COMMON-LABEL: f25:
; COMMON: # %bb.0: # %entry
; COMMON-NEXT: jmp llrint # TAILCALL
entry:
%result = call i64 @llvm.experimental.constrained.llrint.i64.f64(double %x,
metadata !"round.dynamic",
@@ -372,9 +527,10 @@ entry:
ret i64 %result
}
; COMMON-LABEL: f26
; COMMON: jmp llrintf
define i64 @f26(float %x) {
; COMMON-LABEL: f26:
; COMMON: # %bb.0: # %entry
; COMMON-NEXT: jmp llrintf # TAILCALL
entry:
%result = call i64 @llvm.experimental.constrained.llrint.i64.f32(float %x,
metadata !"round.dynamic",
@@ -382,36 +538,40 @@ entry:
ret i64 %result
}
; COMMON-LABEL: f27
; COMMON: jmp lround
define i32 @f27(double %x) #0 {
; COMMON-LABEL: f27:
; COMMON: # %bb.0: # %entry
; COMMON-NEXT: jmp lround # TAILCALL
entry:
%result = call i32 @llvm.experimental.constrained.lround.i32.f64(double %x,
metadata !"fpexcept.strict") #0
ret i32 %result
}
; COMMON-LABEL: f28
; COMMON: jmp lroundf
define i32 @f28(float %x) #0 {
; COMMON-LABEL: f28:
; COMMON: # %bb.0: # %entry
; COMMON-NEXT: jmp lroundf # TAILCALL
entry:
%result = call i32 @llvm.experimental.constrained.lround.i32.f32(float %x,
metadata !"fpexcept.strict") #0
ret i32 %result
}
; COMMON-LABEL: f29
; COMMON: jmp llround
define i64 @f29(double %x) #0 {
; COMMON-LABEL: f29:
; COMMON: # %bb.0: # %entry
; COMMON-NEXT: jmp llround # TAILCALL
entry:
%result = call i64 @llvm.experimental.constrained.llround.i64.f64(double %x,
metadata !"fpexcept.strict") #0
ret i64 %result
}
; COMMON-LABEL: f30
; COMMON: jmp llroundf
define i64 @f30(float %x) #0 {
; COMMON-LABEL: f30:
; COMMON: # %bb.0: # %entry
; COMMON-NEXT: jmp llroundf # TAILCALL
entry:
%result = call i64 @llvm.experimental.constrained.llround.i64.f32(float %x,
metadata !"fpexcept.strict") #0
@@ -438,8 +598,6 @@ declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
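
For contrast, the non-strict form of these intrinsics is fair game for the constant folder. A hypothetical example, not part of this commit: without the constrained intrinsic and the strictfp attribute, a call like the one below may be folded to a constant at compile time, because the default FP environment (round-to-nearest, exceptions ignored) is assumed.

; Illustrative only; @fold_me is not in the test suite.
declare double @llvm.sqrt.f64(double)

define double @fold_me() {
entry:
  ; The optimizer may replace this call with its compile-time result.
  %r = call double @llvm.sqrt.f64(double 42.0)
  ret double %r
}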