llvm-mirror/test/CodeGen/X86/fdiv-combine-vec.ll
Qiu Chaofan, commit e59e06d663: [DAGCombiner] Require ninf for division estimation

The current implementation of division estimation isn't correct for some
cases, such as 1.0/0.0 (the result is NaN, not the expected inf).
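
For example (a minimal standalone sketch in plain C++ rather than DAG nodes;
the tiny program and its variable names are only for illustration), the
one-step Newton-Raphson refinement that the checked rcpss/mulss/subss/mulss/addss
sequence in splat_fdiv_v4f32_estimate below performs turns 1.0/0.0 into NaN:

#include <cstdio>
#include <limits>

int main() {
  float y = 0.0f;
  // rcpss(+0.0) returns +inf; model the hardware estimate with exact infinity.
  float e = std::numeric_limits<float>::infinity();
  float t = y * e;          // 0 * inf       -> NaN
  float u = 1.0f - t;       // 1 - NaN       -> NaN
  float recip = e + e * u;  // inf + inf*NaN -> NaN
  // A real division produces the expected +inf instead.
  std::printf("estimated 1/0 = %f, true 1.0f/0.0f = %f\n", recip, 1.0f / 0.0f);
  return 0;
}

This is why the estimate is now only used when the fdiv carries the ninf flag.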

This change also exposes a potential infinite loop: combineRepeatedFPDivisors
uses isConstOrConstSplatFP to check whether the divisor is a constant, but
that check does not work after the DAG has been legalized on some platforms.
This patch therefore restricts the combine to run before LegalDAG.

Reviewed By: spatel

Differential Revision: https://reviews.llvm.org/D80542
Committed: 2020-06-14 22:58:22 +08:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=sse2 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-- -mattr=avx | FileCheck %s --check-prefix=AVX

define <2 x double> @splat_fdiv_v2f64(<2 x double> %x, double %y) {
; SSE-LABEL: splat_fdiv_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: divsd %xmm1, %xmm2
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0,0]
; SSE-NEXT: mulpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splat_fdiv_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX-NEXT: vdivsd %xmm1, %xmm2, %xmm1
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vy = insertelement <2 x double> undef, double %y, i32 0
%splaty = shufflevector <2 x double> %vy, <2 x double> undef, <2 x i32> zeroinitializer
%r = fdiv fast <2 x double> %x, %splaty
ret <2 x double> %r
}

define <4 x double> @splat_fdiv_v4f64(<4 x double> %x, double %y) {
; SSE-LABEL: splat_fdiv_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
; SSE-NEXT: divsd %xmm2, %xmm3
; SSE-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0,0]
; SSE-NEXT: mulpd %xmm3, %xmm0
; SSE-NEXT: mulpd %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: splat_fdiv_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX-NEXT: vdivsd %xmm1, %xmm2, %xmm1
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%vy = insertelement <4 x double> undef, double %y, i32 0
%splaty = shufflevector <4 x double> %vy, <4 x double> undef, <4 x i32> zeroinitializer
%r = fdiv arcp <4 x double> %x, %splaty
ret <4 x double> %r
}

define <4 x float> @splat_fdiv_v4f32(<4 x float> %x, float %y) {
; SSE-LABEL: splat_fdiv_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: divss %xmm1, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT: mulps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splat_fdiv_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vdivss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vy = insertelement <4 x float> undef, float %y, i32 0
%splaty = shufflevector <4 x float> %vy, <4 x float> undef, <4 x i32> zeroinitializer
%r = fdiv arcp reassoc <4 x float> %x, %splaty
ret <4 x float> %r
}

define <8 x float> @splat_fdiv_v8f32(<8 x float> %x, float %y) {
; SSE-LABEL: splat_fdiv_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT: divss %xmm2, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-NEXT: mulps %xmm3, %xmm0
; SSE-NEXT: mulps %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: splat_fdiv_v8f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vdivss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%vy = insertelement <8 x float> undef, float %y, i32 0
%splaty = shufflevector <8 x float> %vy, <8 x float> undef, <8 x i32> zeroinitializer
%r = fdiv fast <8 x float> %x, %splaty
ret <8 x float> %r
}

define <4 x float> @splat_fdiv_v4f32_estimate(<4 x float> %x, float %y) #0 {
; SSE-LABEL: splat_fdiv_v4f32_estimate:
; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm1, %xmm2
; SSE-NEXT: mulss %xmm2, %xmm1
; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT: subss %xmm1, %xmm3
; SSE-NEXT: mulss %xmm2, %xmm3
; SSE-NEXT: addss %xmm2, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,0,0,0]
; SSE-NEXT: mulps %xmm3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splat_fdiv_v4f32_estimate:
; AVX: # %bb.0:
; AVX-NEXT: vrcpss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX-NEXT: vsubss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%vy = insertelement <4 x float> undef, float %y, i32 0
%splaty = shufflevector <4 x float> %vy, <4 x float> undef, <4 x i32> zeroinitializer
%r = fdiv arcp reassoc ninf <4 x float> %x, %splaty
ret <4 x float> %r
}

define <8 x float> @splat_fdiv_v8f32_estimate(<8 x float> %x, float %y) #0 {
; SSE-LABEL: splat_fdiv_v8f32_estimate:
; SSE: # %bb.0:
; SSE-NEXT: rcpss %xmm2, %xmm3
; SSE-NEXT: mulss %xmm3, %xmm2
; SSE-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; SSE-NEXT: subss %xmm2, %xmm4
; SSE-NEXT: mulss %xmm3, %xmm4
; SSE-NEXT: addss %xmm3, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0,0,0]
; SSE-NEXT: mulps %xmm4, %xmm0
; SSE-NEXT: mulps %xmm4, %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: splat_fdiv_v8f32_estimate:
; AVX: # %bb.0:
; AVX-NEXT: vrcpss %xmm1, %xmm1, %xmm2
; AVX-NEXT: vmulss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX-NEXT: vsubss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
%vy = insertelement <8 x float> undef, float %y, i32 0
%splaty = shufflevector <8 x float> %vy, <8 x float> undef, <8 x i32> zeroinitializer
%r = fdiv fast <8 x float> %x, %splaty
ret <8 x float> %r
}

attributes #0 = { "reciprocal-estimates"="divf,vec-divf" }