
[X86] Add custom lowering for llvm.roundeven with sse4.1.

We can use the roundss/sd/ps/pd instructions like we do for
ceil/floor/trunc/rint/nearbyint.

Differential Revision: https://reviews.llvm.org/D84592
Craig Topper 2020-07-29 10:08:12 -07:00
parent 7c95515f0a
commit 0afe3fbed5
4 changed files with 76 additions and 688 deletions
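The round immediate used throughout the updated tests is $8, i.e. round-to-nearest-even with the precision (inexact) exception suppressed. As a minimal sketch of the same operation written directly with the SSE4.1 intrinsics (the function names below are illustrative, not part of the patch):

#include <smmintrin.h>

// roundss $8: round the low lane to the nearest even integer value while
// suppressing the inexact exception; the immediate is
// _MM_FROUND_TO_NEAREST_INT (0x0) | _MM_FROUND_NO_EXC (0x8) == 0x8.
static float roundeven_scalar(float x) {
  __m128 v = _mm_set_ss(x);
  v = _mm_round_ss(v, v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  return _mm_cvtss_f32(v);
}

// roundps $8: the packed form used for the vector test cases below.
static __m128 roundeven_v4f32(__m128 v) {
  return _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}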


@@ -1005,6 +1005,8 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
case ISD::STRICT_FFLOOR:
case ISD::FTRUNC:
case ISD::STRICT_FTRUNC:
case ISD::FROUNDEVEN:
case ISD::STRICT_FROUNDEVEN:
case ISD::FNEARBYINT:
case ISD::STRICT_FNEARBYINT:
case ISD::FRINT:
@@ -1020,6 +1022,8 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
case ISD::FFLOOR: Imm = 0x9; break;
case ISD::STRICT_FTRUNC:
case ISD::FTRUNC: Imm = 0xB; break;
case ISD::STRICT_FROUNDEVEN:
case ISD::FROUNDEVEN: Imm = 0x8; break;
case ISD::STRICT_FNEARBYINT:
case ISD::FNEARBYINT: Imm = 0xC; break;
case ISD::STRICT_FRINT:
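The immediates assigned above follow the 4-bit control byte of the SSE4.1 ROUND* instructions: bits [1:0] select a fixed rounding mode, bit 2 says "use the mode currently in MXCSR", and bit 3 suppresses the precision exception. A small sketch of that decomposition (the enumerator names are illustrative, not LLVM's):

// Decomposition of the ROUND* immediate; only the low four bits matter.
enum RoundImmBits : unsigned {
  ToNearestEven  = 0x0, // bits [1:0] = 00
  TowardNegInf   = 0x1, // bits [1:0] = 01
  TowardPosInf   = 0x2, // bits [1:0] = 10
  TowardZero     = 0x3, // bits [1:0] = 11
  UseMXCSRMode   = 0x4, // bit 2: rounding mode comes from MXCSR
  NoPrecisionExc = 0x8, // bit 3: do not raise the inexact exception
};
static_assert((NoPrecisionExc | ToNearestEven) == 0x8, "FROUNDEVEN");
static_assert((NoPrecisionExc | TowardNegInf)  == 0x9, "FFLOOR");
static_assert((NoPrecisionExc | TowardZero)    == 0xB, "FTRUNC");
static_assert((NoPrecisionExc | UseMXCSRMode)  == 0xC, "FNEARBYINT");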


@@ -1081,6 +1081,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::STRICT_FRINT, RoundedTy, Legal);
setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy, Legal);
setOperationAction(ISD::FROUNDEVEN, RoundedTy, Legal);
setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy, Legal);
setOperationAction(ISD::FROUND, RoundedTy, Custom);
}
@@ -1175,6 +1177,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::STRICT_FRINT, VT, Legal);
setOperationAction(ISD::FNEARBYINT, VT, Legal);
setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
setOperationAction(ISD::FROUNDEVEN, VT, Legal);
setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
setOperationAction(ISD::FROUND, VT, Custom);
@@ -1560,6 +1564,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::STRICT_FRINT, VT, Legal);
setOperationAction(ISD::FNEARBYINT, VT, Legal);
setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
setOperationAction(ISD::FROUNDEVEN, VT, Legal);
setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
setOperationAction(ISD::FROUND, VT, Custom);
}
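The three hunks above all apply the same pattern: once SSE4.1 is available, the new FROUNDEVEN/STRICT_FROUNDEVEN nodes are marked Legal for the scalar and vector types that ROUND* supports, so legalization keeps them instead of expanding them to a roundeven libcall. Condensed into a sketch (guard condition and type list abbreviated from the hunks, not copied from the tree):

if (Subtarget.hasSSE41()) {
  // Legal: SelectionDAG leaves the node alone and instruction selection
  // matches it to ROUNDSS/ROUNDSD/ROUNDPS/ROUNDPD, avoiding the libcall
  // fallback visible in the old test checks below.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    setOperationAction(ISD::FROUNDEVEN, VT, Legal);
    setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
  }
}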


@@ -11,11 +11,13 @@ define float @roundeven_f32(float %x) {
;
; SSE41-LABEL: roundeven_f32:
; SSE41: ## %bb.0:
; SSE41-NEXT: jmp _roundevenf ## TAILCALL
; SSE41-NEXT: roundss $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_f32:
; AVX: ## %bb.0:
; AVX-NEXT: jmp _roundevenf ## TAILCALL
; AVX-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = call float @llvm.roundeven.f32(float %x)
ret float %a
}
@@ -27,11 +29,13 @@ define double @roundeven_f64(double %x) {
;
; SSE41-LABEL: roundeven_f64:
; SSE41: ## %bb.0:
; SSE41-NEXT: jmp _roundeven ## TAILCALL
; SSE41-NEXT: roundsd $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_f64:
; AVX: ## %bb.0:
; AVX-NEXT: jmp _roundeven ## TAILCALL
; AVX-NEXT: vroundsd $8, %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%a = call double @llvm.roundeven.f64(double %x)
ret double %a
}
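At the source level these scalar cases correspond to roundeven(f), which now compiles to a single roundss/roundsd $8 under -msse4.1 instead of the tail calls shown in the old checks. A minimal sketch, assuming a compiler that provides the __builtin_roundeven family (function names are illustrative):

// With SSE4.1 enabled, each of these should lower to one ROUND* instruction.
static float  round_even_f32(float x)  { return __builtin_roundevenf(x); }
static double round_even_f64(double x) { return __builtin_roundeven(x); }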
@@ -67,57 +71,12 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) {
;
; SSE41-LABEL: roundeven_v4f32:
; SSE41: ## %bb.0:
; SSE41-NEXT: subq $40, %rsp
; SSE41-NEXT: .cfi_def_cfa_offset 48
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: addq $40, %rsp
; SSE41-NEXT: roundps $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v4f32:
; AVX: ## %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[1,0]
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: vroundps $8, %xmm0, %xmm0
; AVX-NEXT: retq
%a = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x)
ret <4 x float> %a
@@ -142,33 +101,12 @@ define <2 x double> @roundeven_v2f64(<2 x double> %x) {
;
; SSE41-LABEL: roundeven_v2f64:
; SSE41: ## %bb.0:
; SSE41-NEXT: subq $40, %rsp
; SSE41-NEXT: .cfi_def_cfa_offset 48
; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: addq $40, %rsp
; SSE41-NEXT: roundpd $8, %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v2f64:
; AVX: ## %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 48
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: callq _roundeven
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[1,0]
; AVX-NEXT: callq _roundeven
; AVX-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: vroundpd $8, %xmm0, %xmm0
; AVX-NEXT: retq
%a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x)
ret <2 x double> %a
@@ -227,106 +165,13 @@ define <8 x float> @roundeven_v8f32(<8 x float> %x) {
;
; SSE41-LABEL: roundeven_v8f32:
; SSE41: ## %bb.0:
; SSE41-NEXT: subq $56, %rsp
; SSE41-NEXT: .cfi_def_cfa_offset 64
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movshdup (%rsp), %xmm0 ## 16-byte Folded Reload
; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: addq $56, %rsp
; SSE41-NEXT: roundps $8, %xmm0, %xmm0
; SSE41-NEXT: roundps $8, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v8f32:
; AVX: ## %bb.0:
; AVX-NEXT: subq $88, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 96
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[1,0]
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[1,0]
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq _roundevenf
; AVX-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
; AVX-NEXT: addq $88, %rsp
; AVX-NEXT: vroundps $8, %ymm0, %ymm0
; AVX-NEXT: retq
%a = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %x)
ret <8 x float> %a
@@ -361,58 +206,13 @@ define <4 x double> @roundeven_v4f64(<4 x double> %x) {
;
; SSE41-LABEL: roundeven_v4f64:
; SSE41: ## %bb.0:
; SSE41-NEXT: subq $56, %rsp
; SSE41-NEXT: .cfi_def_cfa_offset 64
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: addq $56, %rsp
; SSE41-NEXT: roundpd $8, %xmm0, %xmm0
; SSE41-NEXT: roundpd $8, %xmm1, %xmm1
; SSE41-NEXT: retq
;
; AVX-LABEL: roundeven_v4f64:
; AVX: ## %bb.0:
; AVX-NEXT: subq $88, %rsp
; AVX-NEXT: .cfi_def_cfa_offset 96
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq _roundeven
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[1,0]
; AVX-NEXT: callq _roundeven
; AVX-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq _roundeven
; AVX-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX-NEXT: ## xmm0 = mem[1,0]
; AVX-NEXT: callq _roundeven
; AVX-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
; AVX-NEXT: addq $88, %rsp
; AVX-NEXT: vroundpd $8, %ymm0, %ymm0
; AVX-NEXT: retq
%a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x)
ret <4 x double> %a
@@ -517,306 +317,21 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) {
;
; SSE41-LABEL: roundeven_v16f32:
; SSE41: ## %bb.0:
; SSE41-NEXT: subq $88, %rsp
; SSE41-NEXT: .cfi_def_cfa_offset 96
; SSE41-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movshdup (%rsp), %xmm0 ## 16-byte Folded Reload
; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; SSE41-NEXT: ## xmm0 = mem[1,1,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[2,3]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0],xmm1[3]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: callq _roundevenf
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
; SSE41-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm0[0]
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: movaps (%rsp), %xmm2 ## 16-byte Reload
; SSE41-NEXT: addq $88, %rsp
; SSE41-NEXT: roundps $8, %xmm0, %xmm0
; SSE41-NEXT: roundps $8, %xmm1, %xmm1
; SSE41-NEXT: roundps $8, %xmm2, %xmm2
; SSE41-NEXT: roundps $8, %xmm3, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: roundeven_v16f32:
; AVX1: ## %bb.0:
; AVX1-NEXT: subq $152, %rsp
; AVX1-NEXT: .cfi_def_cfa_offset 160
; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,0]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX1-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,0]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,0]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX1-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,0]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq _roundevenf
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 ## 16-byte Folded Reload
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX1-NEXT: addq $152, %rsp
; AVX1-NEXT: vroundps $8, %ymm0, %ymm0
; AVX1-NEXT: vroundps $8, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: roundeven_v16f32:
; AVX512: ## %bb.0:
; AVX512-NEXT: subq $184, %rsp
; AVX512-NEXT: .cfi_def_cfa_offset 192
; AVX512-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 64-byte Spill
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,0]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,0]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX512-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,0]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,1,3,3]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,0]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[3,3,3,3]
; AVX512-NEXT: callq _roundevenf
; AVX512-NEXT: vmovaps (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX512-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
; AVX512-NEXT: vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 ## 32-byte Folded Reload
; AVX512-NEXT: addq $184, %rsp
; AVX512-NEXT: vrndscaleps $8, %zmm0, %zmm0
; AVX512-NEXT: retq
%a = call <16 x float> @llvm.roundeven.v16f32(<16 x float> %x)
ret <16 x float> %a
@@ -873,162 +388,21 @@ define <8 x double> @roundeven_v8f64(<8 x double> %x) {
;
; SSE41-LABEL: roundeven_v8f64:
; SSE41: ## %bb.0:
; SSE41-NEXT: subq $88, %rsp
; SSE41-NEXT: .cfi_def_cfa_offset 96
; SSE41-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps (%rsp), %xmm1 ## 16-byte Reload
; SSE41-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE41-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE41-NEXT: callq _roundeven
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 ## 16-byte Reload
; SSE41-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; SSE41-NEXT: movaps (%rsp), %xmm2 ## 16-byte Reload
; SSE41-NEXT: addq $88, %rsp
; SSE41-NEXT: roundpd $8, %xmm0, %xmm0
; SSE41-NEXT: roundpd $8, %xmm1, %xmm1
; SSE41-NEXT: roundpd $8, %xmm2, %xmm2
; SSE41-NEXT: roundpd $8, %xmm3, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: roundeven_v8f64:
; AVX1: ## %bb.0:
; AVX1-NEXT: subq $120, %rsp
; AVX1-NEXT: .cfi_def_cfa_offset 128
; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq _roundeven
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,0]
; AVX1-NEXT: callq _roundeven
; AVX1-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX1-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq _roundeven
; AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,0]
; AVX1-NEXT: callq _roundeven
; AVX1-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq _roundeven
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,0]
; AVX1-NEXT: callq _roundeven
; AVX1-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX1-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq _roundeven
; AVX1-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX1-NEXT: ## xmm0 = mem[1,0]
; AVX1-NEXT: callq _roundeven
; AVX1-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 ## 16-byte Folded Reload
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 ## 32-byte Reload
; AVX1-NEXT: addq $120, %rsp
; AVX1-NEXT: vroundpd $8, %ymm0, %ymm0
; AVX1-NEXT: vroundpd $8, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX512-LABEL: roundeven_v8f64:
; AVX512: ## %bb.0:
; AVX512-NEXT: subq $184, %rsp
; AVX512-NEXT: .cfi_def_cfa_offset 192
; AVX512-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 64-byte Spill
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq _roundeven
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,0]
; AVX512-NEXT: callq _roundeven
; AVX512-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq _roundeven
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,0]
; AVX512-NEXT: callq _roundeven
; AVX512-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 ## 16-byte Folded Reload
; AVX512-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 32-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq _roundeven
; AVX512-NEXT: vmovaps %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,0]
; AVX512-NEXT: callq _roundeven
; AVX512-NEXT: vmovapd (%rsp), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vmovapd %xmm0, (%rsp) ## 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 ## 64-byte Reload
; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq _roundeven
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
; AVX512-NEXT: ## xmm0 = mem[1,0]
; AVX512-NEXT: callq _roundeven
; AVX512-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 ## 16-byte Folded Reload
; AVX512-NEXT: vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 ## 32-byte Folded Reload
; AVX512-NEXT: addq $184, %rsp
; AVX512-NEXT: vrndscalepd $8, %zmm0, %zmm0
; AVX512-NEXT: retq
%a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x)
ret <8 x double> %a


@@ -591,19 +591,17 @@ define float @froundeven32(float %f) #0 {
; SSE41-X86-NEXT: pushl %eax
; SSE41-X86-NEXT: .cfi_def_cfa_offset 8
; SSE41-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-X86-NEXT: roundss $8, %xmm0, %xmm0
; SSE41-X86-NEXT: movss %xmm0, (%esp)
; SSE41-X86-NEXT: calll roundevenf
; SSE41-X86-NEXT: flds (%esp)
; SSE41-X86-NEXT: wait
; SSE41-X86-NEXT: popl %eax
; SSE41-X86-NEXT: .cfi_def_cfa_offset 4
; SSE41-X86-NEXT: retl
;
; SSE41-X64-LABEL: froundeven32:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pushq %rax
; SSE41-X64-NEXT: .cfi_def_cfa_offset 16
; SSE41-X64-NEXT: callq roundevenf
; SSE41-X64-NEXT: popq %rax
; SSE41-X64-NEXT: .cfi_def_cfa_offset 8
; SSE41-X64-NEXT: roundss $8, %xmm0, %xmm0
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: froundeven32:
@@ -611,19 +609,17 @@ define float @froundeven32(float %f) #0 {
; AVX-X86-NEXT: pushl %eax
; AVX-X86-NEXT: .cfi_def_cfa_offset 8
; AVX-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-X86-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
; AVX-X86-NEXT: vmovss %xmm0, (%esp)
; AVX-X86-NEXT: calll roundevenf
; AVX-X86-NEXT: flds (%esp)
; AVX-X86-NEXT: wait
; AVX-X86-NEXT: popl %eax
; AVX-X86-NEXT: .cfi_def_cfa_offset 4
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: froundeven32:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: pushq %rax
; AVX-X64-NEXT: .cfi_def_cfa_offset 16
; AVX-X64-NEXT: callq roundevenf
; AVX-X64-NEXT: popq %rax
; AVX-X64-NEXT: .cfi_def_cfa_offset 8
; AVX-X64-NEXT: vroundss $8, %xmm0, %xmm0, %xmm0
; AVX-X64-NEXT: retq
%res = call float @llvm.experimental.constrained.roundeven.f32(
float %f, metadata !"fpexcept.strict") #0
@@ -633,42 +629,50 @@ define float @froundeven32(float %f) #0 {
define double @froundevenf64(double %f) #0 {
; SSE41-X86-LABEL: froundevenf64:
; SSE41-X86: # %bb.0:
; SSE41-X86-NEXT: pushl %ebp
; SSE41-X86-NEXT: .cfi_def_cfa_offset 8
; SSE41-X86-NEXT: .cfi_offset %ebp, -8
; SSE41-X86-NEXT: movl %esp, %ebp
; SSE41-X86-NEXT: .cfi_def_cfa_register %ebp
; SSE41-X86-NEXT: andl $-8, %esp
; SSE41-X86-NEXT: subl $8, %esp
; SSE41-X86-NEXT: .cfi_def_cfa_offset 12
; SSE41-X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE41-X86-NEXT: roundsd $8, %xmm0, %xmm0
; SSE41-X86-NEXT: movsd %xmm0, (%esp)
; SSE41-X86-NEXT: calll roundeven
; SSE41-X86-NEXT: addl $8, %esp
; SSE41-X86-NEXT: .cfi_def_cfa_offset 4
; SSE41-X86-NEXT: fldl (%esp)
; SSE41-X86-NEXT: wait
; SSE41-X86-NEXT: movl %ebp, %esp
; SSE41-X86-NEXT: popl %ebp
; SSE41-X86-NEXT: .cfi_def_cfa %esp, 4
; SSE41-X86-NEXT: retl
;
; SSE41-X64-LABEL: froundevenf64:
; SSE41-X64: # %bb.0:
; SSE41-X64-NEXT: pushq %rax
; SSE41-X64-NEXT: .cfi_def_cfa_offset 16
; SSE41-X64-NEXT: callq roundeven
; SSE41-X64-NEXT: popq %rax
; SSE41-X64-NEXT: .cfi_def_cfa_offset 8
; SSE41-X64-NEXT: roundsd $8, %xmm0, %xmm0
; SSE41-X64-NEXT: retq
;
; AVX-X86-LABEL: froundevenf64:
; AVX-X86: # %bb.0:
; AVX-X86-NEXT: pushl %ebp
; AVX-X86-NEXT: .cfi_def_cfa_offset 8
; AVX-X86-NEXT: .cfi_offset %ebp, -8
; AVX-X86-NEXT: movl %esp, %ebp
; AVX-X86-NEXT: .cfi_def_cfa_register %ebp
; AVX-X86-NEXT: andl $-8, %esp
; AVX-X86-NEXT: subl $8, %esp
; AVX-X86-NEXT: .cfi_def_cfa_offset 12
; AVX-X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-X86-NEXT: vroundsd $8, %xmm0, %xmm0, %xmm0
; AVX-X86-NEXT: vmovsd %xmm0, (%esp)
; AVX-X86-NEXT: calll roundeven
; AVX-X86-NEXT: addl $8, %esp
; AVX-X86-NEXT: .cfi_def_cfa_offset 4
; AVX-X86-NEXT: fldl (%esp)
; AVX-X86-NEXT: wait
; AVX-X86-NEXT: movl %ebp, %esp
; AVX-X86-NEXT: popl %ebp
; AVX-X86-NEXT: .cfi_def_cfa %esp, 4
; AVX-X86-NEXT: retl
;
; AVX-X64-LABEL: froundevenf64:
; AVX-X64: # %bb.0:
; AVX-X64-NEXT: pushq %rax
; AVX-X64-NEXT: .cfi_def_cfa_offset 16
; AVX-X64-NEXT: callq roundeven
; AVX-X64-NEXT: popq %rax
; AVX-X64-NEXT: .cfi_def_cfa_offset 8
; AVX-X64-NEXT: vroundsd $8, %xmm0, %xmm0, %xmm0
; AVX-X64-NEXT: retq
%res = call double @llvm.experimental.constrained.roundeven.f64(
double %f, metadata !"fpexcept.strict") #0