
Revert r281841, it does not work on Windows (PR30443).

llvm-svn: 281905
commit 99a8836600
parent 9c39968ce1
Author: Nico Weber
Date:   2016-09-19 15:22:04 +00:00

@@ -1,722 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
; copysign(x, c1) -> fabs(x) iff ispos(c1)
define <4 x float> @combine_vec_fcopysign_pos_constant0(<4 x float> %x) {
; SSE-LABEL: combine_vec_fcopysign_pos_constant0:
; SSE: # BB#0:
; SSE-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE-NEXT: movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: andps %xmm3, %xmm2
; SSE-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; SSE-NEXT: andps {{.*}}(%rip), %xmm4
; SSE-NEXT: orps %xmm4, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: andps %xmm3, %xmm1
; SSE-NEXT: orps %xmm4, %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: andps %xmm3, %xmm2
; SSE-NEXT: orps %xmm4, %xmm2
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm3, %xmm0
; SSE-NEXT: orps %xmm4, %xmm0
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_pos_constant0:
; AVX: # BB#0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX-NEXT: vandps {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vorps %xmm3, %xmm1, %xmm1
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm4
; AVX-NEXT: vorps %xmm3, %xmm4, %xmm4
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm2, %xmm4, %xmm4
; AVX-NEXT: vorpd %xmm3, %xmm4, %xmm4
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: vorps %xmm3, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>)
ret <4 x float> %1
}
define <4 x float> @combine_vec_fcopysign_pos_constant1(<4 x float> %x) {
; SSE-LABEL: combine_vec_fcopysign_pos_constant1:
; SSE: # BB#0:
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT: movaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: andps %xmm4, %xmm3
; SSE-NEXT: orps %xmm1, %xmm3
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: xorps %xmm5, %xmm5
; SSE-NEXT: andps %xmm2, %xmm5
; SSE-NEXT: orps %xmm5, %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2,3]
; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT: andps %xmm2, %xmm3
; SSE-NEXT: movaps %xmm0, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: orps %xmm3, %xmm5
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0],xmm1[3]
; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT: andps %xmm2, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm4, %xmm0
; SSE-NEXT: orps %xmm3, %xmm0
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_pos_constant1:
; AVX: # BB#0:
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm4, %xmm3, %xmm3
; AVX-NEXT: vorps %xmm1, %xmm3, %xmm1
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm3
; AVX-NEXT: vxorps %xmm5, %xmm5, %xmm5
; AVX-NEXT: vandps %xmm2, %xmm5, %xmm5
; AVX-NEXT: vorps %xmm5, %xmm3, %xmm3
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX-NEXT: vandps %xmm2, %xmm3, %xmm3
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm4, %xmm5, %xmm5
; AVX-NEXT: vorpd %xmm3, %xmm5, %xmm3
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX-NEXT: vandps %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm0
; AVX-NEXT: vorps %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> <float 0.0, float 2.0, float 4.0, float 8.0>)
ret <4 x float> %1
}
define <4 x float> @combine_vec_fcopysign_fabs_sgn(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fabs_sgn:
; SSE: # BB#0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fabs_sgn:
; AVX: # BB#0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %y)
%2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %1)
ret <4 x float> %2
}
; copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
define <4 x float> @combine_vec_fcopysign_neg_constant0(<4 x float> %x) {
; SSE-LABEL: combine_vec_fcopysign_neg_constant0:
; SSE: # BB#0:
; SSE-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE-NEXT: movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: andps %xmm3, %xmm2
; SSE-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; SSE-NEXT: andps {{.*}}(%rip), %xmm4
; SSE-NEXT: orps %xmm4, %xmm2
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: andps %xmm3, %xmm1
; SSE-NEXT: orps %xmm4, %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: andps %xmm3, %xmm2
; SSE-NEXT: orps %xmm4, %xmm2
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm3, %xmm0
; SSE-NEXT: orps %xmm4, %xmm0
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_neg_constant0:
; AVX: # BB#0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX-NEXT: vandps {{.*}}(%rip), %xmm3, %xmm3
; AVX-NEXT: vorps %xmm3, %xmm1, %xmm1
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm4
; AVX-NEXT: vorps %xmm3, %xmm4, %xmm4
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm2, %xmm4, %xmm4
; AVX-NEXT: vorpd %xmm3, %xmm4, %xmm4
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: vorps %xmm3, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> <float -2.0, float -2.0, float -2.0, float -2.0>)
ret <4 x float> %1
}
define <4 x float> @combine_vec_fcopysign_neg_constant1(<4 x float> %x) {
; SSE-LABEL: combine_vec_fcopysign_neg_constant1:
; SSE: # BB#0:
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT: movaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: andps %xmm4, %xmm3
; SSE-NEXT: orps %xmm1, %xmm3
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: movss {{.*#+}} xmm5 = mem[0],zero,zero,zero
; SSE-NEXT: andps %xmm2, %xmm5
; SSE-NEXT: orps %xmm5, %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[2,3]
; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT: andps %xmm2, %xmm3
; SSE-NEXT: movaps %xmm0, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: orps %xmm3, %xmm5
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0],xmm1[3]
; SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT: andps %xmm2, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm4, %xmm0
; SSE-NEXT: orps %xmm3, %xmm0
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_neg_constant1:
; AVX: # BB#0:
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm4, %xmm3, %xmm3
; AVX-NEXT: vorps %xmm1, %xmm3, %xmm1
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm3
; AVX-NEXT: vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
; AVX-NEXT: vandps %xmm2, %xmm5, %xmm5
; AVX-NEXT: vorps %xmm5, %xmm3, %xmm3
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX-NEXT: vandps %xmm2, %xmm3, %xmm3
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm4, %xmm5, %xmm5
; AVX-NEXT: vorpd %xmm3, %xmm5, %xmm3
; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; AVX-NEXT: vandps %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm0
; AVX-NEXT: vorps %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> <float -0.0, float -2.0, float -4.0, float -8.0>)
ret <4 x float> %1
}
define <4 x float> @combine_vec_fcopysign_fneg_fabs_sgn(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fneg_fabs_sgn:
; SSE: # BB#0:
; SSE-NEXT: orps {{.*}}(%rip), %xmm1
; SSE-NEXT: movaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm4, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm3 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm1, %xmm5
; SSE-NEXT: andps %xmm3, %xmm5
; SSE-NEXT: orps %xmm5, %xmm2
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movshdup {{.*#+}} xmm6 = xmm1[1,1,3,3]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[2,3]
; SSE-NEXT: movaps %xmm0, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps %xmm1, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm4, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: andps %xmm3, %xmm1
; SSE-NEXT: orps %xmm0, %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[0]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fneg_fabs_sgn:
; AVX: # BB#0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
; AVX-NEXT: vorps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm3
; AVX-NEXT: vmovaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm4, %xmm1, %xmm5
; AVX-NEXT: vorps %xmm5, %xmm3, %xmm3
; AVX-NEXT: vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX-NEXT: vandps %xmm2, %xmm5, %xmm5
; AVX-NEXT: vmovshdup {{.*#+}} xmm6 = xmm1[1,1,3,3]
; AVX-NEXT: vandps %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorps %xmm6, %xmm5, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm2, %xmm5, %xmm5
; AVX-NEXT: vpermilpd {{.*#+}} xmm6 = xmm1[1,0]
; AVX-NEXT: vandpd %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorpd %xmm6, %xmm5, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vandps %xmm4, %xmm1, %xmm1
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %y)
%2 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %1
%3 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %2)
ret <4 x float> %3
}
; copysign(fabs(x), y) -> copysign(x, y)
define <4 x float> @combine_vec_fcopysign_fabs_mag(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fabs_mag:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm1, %xmm5
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm3, %xmm2
; SSE-NEXT: orps %xmm5, %xmm2
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[2,3]
; SSE-NEXT: movaps %xmm1, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps %xmm0, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm3, %xmm0
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[0]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fabs_mag:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm3
; AVX-NEXT: vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm5
; AVX-NEXT: vorps %xmm3, %xmm5, %xmm3
; AVX-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; AVX-NEXT: vandps %xmm2, %xmm5, %xmm5
; AVX-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; AVX-NEXT: vandps %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorps %xmm5, %xmm6, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX-NEXT: vandpd %xmm2, %xmm5, %xmm5
; AVX-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorpd %xmm5, %xmm6, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm0
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x)
%2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %1, <4 x float> %y)
ret <4 x float> %2
}
; copysign(fneg(x), y) -> copysign(x, y)
define <4 x float> @combine_vec_fcopysign_fneg_mag(<4 x float> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fneg_mag:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm1, %xmm5
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm3, %xmm2
; SSE-NEXT: orps %xmm5, %xmm2
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[2,3]
; SSE-NEXT: movaps %xmm1, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps %xmm0, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm3, %xmm0
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[0]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fneg_mag:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm3
; AVX-NEXT: vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm5
; AVX-NEXT: vorps %xmm3, %xmm5, %xmm3
; AVX-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; AVX-NEXT: vandps %xmm2, %xmm5, %xmm5
; AVX-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; AVX-NEXT: vandps %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorps %xmm5, %xmm6, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX-NEXT: vandpd %xmm2, %xmm5, %xmm5
; AVX-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorpd %xmm5, %xmm6, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm0
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %x
%2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %1, <4 x float> %y)
ret <4 x float> %2
}
; copysign(copysign(x,z), y) -> copysign(x, y)
define <4 x float> @combine_vec_fcopysign_fcopysign_mag(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; SSE-LABEL: combine_vec_fcopysign_fcopysign_mag:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm1, %xmm5
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm3, %xmm2
; SSE-NEXT: orps %xmm5, %xmm2
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[2,3]
; SSE-NEXT: movaps %xmm1, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps %xmm0, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm3, %xmm0
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[0]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fcopysign_mag:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm3
; AVX-NEXT: vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm5
; AVX-NEXT: vorps %xmm3, %xmm5, %xmm3
; AVX-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; AVX-NEXT: vandps %xmm2, %xmm5, %xmm5
; AVX-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; AVX-NEXT: vandps %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorps %xmm5, %xmm6, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX-NEXT: vandpd %xmm2, %xmm5, %xmm5
; AVX-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorpd %xmm5, %xmm6, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm0
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %z)
%2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %1, <4 x float> %y)
ret <4 x float> %2
}
; copysign(x, copysign(y,z)) -> copysign(x, z)
define <4 x float> @combine_vec_fcopysign_fcopysign_sgn(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; SSE-LABEL: combine_vec_fcopysign_fcopysign_sgn:
; SSE: # BB#0:
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm2, %xmm5
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps {{.*#+}} xmm3 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: andps %xmm3, %xmm1
; SSE-NEXT: orps %xmm5, %xmm1
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[2,3]
; SSE-NEXT: movaps %xmm2, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps %xmm0, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: andps %xmm3, %xmm6
; SSE-NEXT: orps %xmm5, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE-NEXT: andps %xmm4, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm3, %xmm0
; SSE-NEXT: orps %xmm2, %xmm0
; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fcopysign_sgn:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm1, %xmm2, %xmm3
; AVX-NEXT: vmovaps {{.*#+}} xmm4 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm5
; AVX-NEXT: vorps %xmm3, %xmm5, %xmm3
; AVX-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; AVX-NEXT: vandps %xmm1, %xmm5, %xmm5
; AVX-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
; AVX-NEXT: vandps %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorps %xmm5, %xmm6, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm2[1,0]
; AVX-NEXT: vandpd %xmm1, %xmm5, %xmm5
; AVX-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm4, %xmm6, %xmm6
; AVX-NEXT: vorpd %xmm5, %xmm6, %xmm5
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm5[0],xmm3[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX-NEXT: vandps %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm4, %xmm0, %xmm0
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %y, <4 x float> %z)
%2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %1)
ret <4 x float> %2
}
; copysign(x, fp_extend(y)) -> copysign(x, y)
define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float> %y) {
; SSE-LABEL: combine_vec_fcopysign_fpext_sgn:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: cvtss2sd %xmm2, %xmm4
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; SSE-NEXT: movaps %xmm2, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movaps {{.*#+}} xmm7 = [nan,0.000000e+00]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm7, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm8 = [-0.000000e+00,0.000000e+00]
; SSE-NEXT: andps %xmm8, %xmm4
; SSE-NEXT: orps %xmm4, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: andps %xmm7, %xmm0
; SSE-NEXT: xorps %xmm4, %xmm4
; SSE-NEXT: cvtss2sd %xmm5, %xmm4
; SSE-NEXT: andps %xmm8, %xmm4
; SSE-NEXT: orps %xmm0, %xmm4
; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: andps %xmm7, %xmm0
; SSE-NEXT: cvtss2sd %xmm3, %xmm3
; SSE-NEXT: andps %xmm8, %xmm3
; SSE-NEXT: orps %xmm0, %xmm3
; SSE-NEXT: andps %xmm7, %xmm1
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtss2sd %xmm6, %xmm0
; SSE-NEXT: andps %xmm8, %xmm0
; SSE-NEXT: orps %xmm0, %xmm1
; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fpext_sgn:
; AVX: # BB#0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX-NEXT: vmovapd {{.*#+}} xmm4 = [nan,0.000000e+00]
; AVX-NEXT: vandpd %xmm4, %xmm3, %xmm3
; AVX-NEXT: vpermilps {{.*#+}} xmm5 = xmm1[3,1,2,3]
; AVX-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
; AVX-NEXT: vmovapd {{.*#+}} xmm6 = [-0.000000e+00,0.000000e+00]
; AVX-NEXT: vandpd %xmm6, %xmm5, %xmm5
; AVX-NEXT: vorpd %xmm5, %xmm3, %xmm3
; AVX-NEXT: vandpd %xmm4, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
; AVX-NEXT: vandpd %xmm6, %xmm5, %xmm5
; AVX-NEXT: vorpd %xmm5, %xmm2, %xmm2
; AVX-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX-NEXT: vandpd %xmm4, %xmm0, %xmm3
; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm5
; AVX-NEXT: vandpd %xmm6, %xmm5, %xmm5
; AVX-NEXT: vorpd %xmm5, %xmm3, %xmm3
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm4, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vandpd %xmm6, %xmm1, %xmm1
; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0]
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX-NEXT: retq
%1 = fpext <4 x float> %y to <4 x double>
%2 = call <4 x double> @llvm.copysign.v4f64(<4 x double> %x, <4 x double> %1)
ret <4 x double> %2
}
; copysign(x, fp_round(y)) -> copysign(x, y)
define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x double> %y) {
; SSE-LABEL: combine_vec_fcopysign_fptrunc_sgn:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm5 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: andps %xmm5, %xmm0
; SSE-NEXT: cvtsd2ss %xmm1, %xmm6
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; SSE-NEXT: andps %xmm4, %xmm6
; SSE-NEXT: orps %xmm6, %xmm0
; SSE-NEXT: movshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
; SSE-NEXT: andps %xmm5, %xmm6
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: cvtsd2ss %xmm1, %xmm1
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: orps %xmm6, %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: andps %xmm5, %xmm1
; SSE-NEXT: xorps %xmm6, %xmm6
; SSE-NEXT: cvtsd2ss %xmm2, %xmm6
; SSE-NEXT: andps %xmm4, %xmm6
; SSE-NEXT: orps %xmm1, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0],xmm0[3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: andps %xmm5, %xmm3
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsd2ss %xmm2, %xmm1
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: orps %xmm3, %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fptrunc_sgn:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [nan,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm3
; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm4
; AVX-NEXT: vmovaps {{.*#+}} xmm5 = [-0.000000e+00,0.000000e+00,0.000000e+00,0.000000e+00]
; AVX-NEXT: vandps %xmm5, %xmm4, %xmm4
; AVX-NEXT: vorps %xmm4, %xmm3, %xmm3
; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX-NEXT: vandps %xmm2, %xmm4, %xmm4
; AVX-NEXT: vpermilpd {{.*#+}} xmm6 = xmm1[1,0]
; AVX-NEXT: vcvtsd2ss %xmm6, %xmm6, %xmm6
; AVX-NEXT: vandps %xmm5, %xmm6, %xmm6
; AVX-NEXT: vorps %xmm6, %xmm4, %xmm4
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[2,3]
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-NEXT: vandpd %xmm2, %xmm4, %xmm4
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm6
; AVX-NEXT: vandps %xmm5, %xmm6, %xmm6
; AVX-NEXT: vorpd %xmm6, %xmm4, %xmm4
; AVX-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vcvtsd2ss %xmm1, %xmm1, %xmm1
; AVX-NEXT: vandps %xmm5, %xmm1, %xmm1
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%1 = fptrunc <4 x double> %y to <4 x float>
%2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %1)
ret <4 x float> %2
}
declare <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
declare <4 x float> @llvm.copysign.v4f32(<4 x float> %Mag, <4 x float> %Sgn)
declare <4 x double> @llvm.copysign.v4f64(<4 x double> %Mag, <4 x double> %Sgn)
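
For reference, a minimal scalar sketch (not part of the reverted test) of the per-lane expansion the checked assembly performs: each lane's magnitude bits are kept via the [nan,...] (0x7FFFFFFF) mask, the sign bit is taken via the [-0.0,...] (0x80000000) mask, and the two are recombined with an OR. The function name below is illustrative only and assumes IEEE-754 single precision.

; Illustrative sketch only; not part of the deleted test file.
define float @copysign_bitwise_sketch(float %mag, float %sgn) {
  %m = bitcast float %mag to i32
  %s = bitcast float %sgn to i32
  %mbits = and i32 %m, 2147483647   ; keep magnitude bits (0x7FFFFFFF, the [nan,...] mask above)
  %sbits = and i32 %s, -2147483648  ; keep the sign bit (0x80000000, the [-0.0,...] mask above)
  %r = or i32 %mbits, %sbits
  %f = bitcast i32 %r to float
  ret float %f
}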