
[SelectionDAG] unroll unsupported vector FP ops earlier to avoid libcalls on undef elements (PR38527)

This solves the motivating case from:
https://bugs.llvm.org/show_bug.cgi?id=38527

If we are legalizing an FP vector op that maps to 1 of the LLVM intrinsics that mimic libm calls, 
but we're going to end up with scalar libcalls for that vector type anyway, then we should unroll 
the vector op into scalars before widening. This avoids libcalls because we've lost the knowledge 
that some of the scalar elements are undef.
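A minimal illustration of the motivating case, adapted from the sin_v3f32 test updated in this
patch (the <4 x float> widening and the call counts assume a target, such as AArch64 or x86,
with no vector lowering for ISD::FSIN):

  declare <3 x float> @llvm.sin.v3f32(<3 x float>)

  define <3 x float> @sin_v3f32(<3 x float> %x) nounwind {
    %r = call <3 x float> @llvm.sin.v3f32(<3 x float> %x)
    ret <3 x float> %r
  }

Type legalization widens <3 x float> to <4 x float> by padding with an undef lane. Previously the
widened FSIN was expanded after widening, producing four sinf libcalls, one of them on the undef
lane; unrolling before widening produces only the three calls the original vector requires.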

Differential Revision: https://reviews.llvm.org/D50791

llvm-svn: 340469
Committed by Sanjay Patel on 2018-08-22 22:52:05 +00:00
parent a86aeb26c4
commit 917f7adc89
3 changed files with 109 additions and 294 deletions


@@ -2425,11 +2425,6 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
    Res = WidenVecRes_Convert(N);
    break;
  case ISD::BITREVERSE:
  case ISD::BSWAP:
  case ISD::CTLZ:
  case ISD::CTPOP:
  case ISD::CTTZ:
  case ISD::FABS:
  case ISD::FCEIL:
  case ISD::FCOS:
@@ -2440,12 +2435,36 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
  case ISD::FLOG10:
  case ISD::FLOG2:
  case ISD::FNEARBYINT:
  case ISD::FNEG:
  case ISD::FRINT:
  case ISD::FROUND:
  case ISD::FSIN:
  case ISD::FSQRT:
  case ISD::FTRUNC:
  case ISD::FTRUNC: {
    // We're going to widen this vector op to a legal type by padding with undef
    // elements. If the wide vector op is eventually going to be expanded to
    // scalar libcalls, then unroll into scalar ops now to avoid unnecessary
    // libcalls on the undef elements. We are assuming that if the scalar op
    // requires expanding, then the vector op needs expanding too.
    EVT VT = N->getValueType(0);
    if (TLI.isOperationExpand(N->getOpcode(), VT.getScalarType())) {
      EVT WideVecVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
      assert(!TLI.isOperationLegalOrCustom(N->getOpcode(), WideVecVT) &&
             "Target supports vector op, but scalar requires expansion?");
      Res = DAG.UnrollVectorOp(N, WideVecVT.getVectorNumElements());
      break;
    }
  }
    // If the target has custom/legal support for the scalar FP intrinsic ops
    // (they are probably not destined to become libcalls), then widen those like
    // any other unary ops.
    LLVM_FALLTHROUGH;
  case ISD::BITREVERSE:
  case ISD::BSWAP:
  case ISD::CTLZ:
  case ISD::CTPOP:
  case ISD::CTTZ:
  case ISD::FNEG:
  case ISD::FCANONICALIZE:
    Res = WidenVecRes_Unary(N);
    break;
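For reference, the gating query TLI.isOperationExpand(Op, VT) (declared in TargetLowering.h)
returns true when VT is not a legal type or when the action registered for (Op, VT) is Expand,
so the unroll path also fires when the scalar type itself is illegal. The assert documents the
assumption from the comment above: a target that expands the scalar op is not expected to have
legal or custom handling for the widened vector op. The two test diffs that follow (AArch64 and
x86 codegen, judging by the CHECK lines) show the scalar libcall counts dropping accordingly.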


@@ -35,22 +35,12 @@ declare <3 x float> @llvm.trunc.v3f32(<3 x float>)
define <1 x float> @sin_v1f32(<1 x float> %x) nounwind {
; CHECK-LABEL: sin_v1f32:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48 // =48
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: mov s0, v0.s[1]
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: bl sinf
; CHECK-NEXT: str d0, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: bl sinf
; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v0.s[1], v1.s[0]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: // kill: def $s0 killed $s0 def $d0
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%r = call <1 x float> @llvm.sin.v1f32(<1 x float> %x)
ret <1 x float> %r
@@ -100,16 +90,9 @@ define <3 x float> @sin_v3f32(<3 x float> %x) nounwind {
; CHECK-NEXT: mov s0, v0.s[2]
; CHECK-NEXT: bl sinf
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-NEXT: mov s0, v0.s[3]
; CHECK-NEXT: bl sinf
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[3], v0.s[0]
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: ret
@@ -195,28 +178,20 @@ define <5 x float> @sin_v5f32(<5 x float> %x) nounwind {
define <6 x float> @sin_v6f32(<6 x float> %x) nounwind {
; CHECK-LABEL: sin_v6f32:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #80 // =80
; CHECK-NEXT: str d12, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: mov v12.16b, v0.16b
; CHECK-NEXT: mov v0.16b, v5.16b
; CHECK-NEXT: stp d11, d10, [sp, #40] // 8-byte Folded Spill
; CHECK-NEXT: stp d9, d8, [sp, #56] // 8-byte Folded Spill
; CHECK-NEXT: str x30, [sp, #72] // 8-byte Folded Spill
; CHECK-NEXT: mov v8.16b, v4.16b
; CHECK-NEXT: mov v9.16b, v3.16b
; CHECK-NEXT: mov v10.16b, v2.16b
; CHECK-NEXT: mov v11.16b, v1.16b
; CHECK-NEXT: stp d13, d12, [sp, #-64]! // 8-byte Folded Spill
; CHECK-NEXT: stp d11, d10, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: stp d9, d8, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
; CHECK-NEXT: mov v8.16b, v5.16b
; CHECK-NEXT: mov v9.16b, v4.16b
; CHECK-NEXT: mov v10.16b, v3.16b
; CHECK-NEXT: mov v11.16b, v2.16b
; CHECK-NEXT: mov v12.16b, v1.16b
; CHECK-NEXT: bl sinf
; CHECK-NEXT: str d0, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: mov v0.16b, v8.16b
; CHECK-NEXT: bl sinf
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: mov v1.s[1], v0.s[0]
; CHECK-NEXT: mov v13.16b, v0.16b
; CHECK-NEXT: mov v0.16b, v12.16b
; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-NEXT: bl sinf
; CHECK-NEXT: mov v8.16b, v0.16b
; CHECK-NEXT: mov v12.16b, v0.16b
; CHECK-NEXT: mov v0.16b, v11.16b
; CHECK-NEXT: bl sinf
; CHECK-NEXT: mov v11.16b, v0.16b
@@ -225,18 +200,19 @@ define <6 x float> @sin_v6f32(<6 x float> %x) nounwind {
; CHECK-NEXT: mov v10.16b, v0.16b
; CHECK-NEXT: mov v0.16b, v9.16b
; CHECK-NEXT: bl sinf
; CHECK-NEXT: ldp q4, q5, [sp] // 16-byte Folded Reload
; CHECK-NEXT: mov v3.16b, v0.16b
; CHECK-NEXT: mov v9.16b, v0.16b
; CHECK-NEXT: mov v0.16b, v8.16b
; CHECK-NEXT: mov v1.16b, v11.16b
; CHECK-NEXT: mov v2.16b, v10.16b
; CHECK-NEXT: ldr x30, [sp, #72] // 8-byte Folded Reload
; CHECK-NEXT: ldp d9, d8, [sp, #56] // 8-byte Folded Reload
; CHECK-NEXT: ldp d11, d10, [sp, #40] // 8-byte Folded Reload
; CHECK-NEXT: ldr d12, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $s4 killed $s4 killed $q4
; CHECK-NEXT: // kill: def $s5 killed $s5 killed $q5
; CHECK-NEXT: add sp, sp, #80 // =80
; CHECK-NEXT: bl sinf
; CHECK-NEXT: mov v2.16b, v11.16b
; CHECK-NEXT: mov v3.16b, v10.16b
; CHECK-NEXT: mov v4.16b, v9.16b
; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
; CHECK-NEXT: ldp d9, d8, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: ldp d11, d10, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: mov v5.16b, v0.16b
; CHECK-NEXT: mov v0.16b, v13.16b
; CHECK-NEXT: mov v1.16b, v12.16b
; CHECK-NEXT: ldp d13, d12, [sp], #64 // 8-byte Folded Reload
; CHECK-NEXT: ret
%r = call <6 x float> @llvm.sin.v6f32(<6 x float> %x)
ret <6 x float> %r
@@ -306,16 +282,9 @@ define <3 x float> @cos_v3f32(<3 x float> %x) nounwind {
; CHECK-NEXT: mov s0, v0.s[2]
; CHECK-NEXT: bl cosf
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-NEXT: mov s0, v0.s[3]
; CHECK-NEXT: bl cosf
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[3], v0.s[0]
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: ret
@@ -343,16 +312,9 @@ define <3 x float> @exp_v3f32(<3 x float> %x) nounwind {
; CHECK-NEXT: mov s0, v0.s[2]
; CHECK-NEXT: bl expf
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-NEXT: mov s0, v0.s[3]
; CHECK-NEXT: bl expf
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[3], v0.s[0]
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: ret
@@ -380,16 +342,9 @@ define <3 x float> @exp2_v3f32(<3 x float> %x) nounwind {
; CHECK-NEXT: mov s0, v0.s[2]
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-NEXT: mov s0, v0.s[3]
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[3], v0.s[0]
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: ret
@@ -426,16 +381,9 @@ define <3 x float> @log_v3f32(<3 x float> %x) nounwind {
; CHECK-NEXT: mov s0, v0.s[2]
; CHECK-NEXT: bl logf
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-NEXT: mov s0, v0.s[3]
; CHECK-NEXT: bl logf
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[3], v0.s[0]
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: ret
@@ -463,16 +411,9 @@ define <3 x float> @log10_v3f32(<3 x float> %x) nounwind {
; CHECK-NEXT: mov s0, v0.s[2]
; CHECK-NEXT: bl log10f
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-NEXT: mov s0, v0.s[3]
; CHECK-NEXT: bl log10f
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[3], v0.s[0]
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: ret
@@ -500,16 +441,9 @@ define <3 x float> @log2_v3f32(<3 x float> %x) nounwind {
; CHECK-NEXT: mov s0, v0.s[2]
; CHECK-NEXT: bl log2f
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-NEXT: mov s0, v0.s[3]
; CHECK-NEXT: bl log2f
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[3], v0.s[0]
; CHECK-NEXT: mov v1.s[2], v0.s[0]
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: ret


@@ -51,26 +51,14 @@ define <2 x float> @sin_v2f32(<2 x float> %x) nounwind {
; CHECK-LABEL: sin_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
%r = call <2 x float> @llvm.sin.v2f32(<2 x float> %x)
@@ -95,12 +83,6 @@ define <3 x float> @sin_v3f32(<3 x float> %x) nounwind {
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
%r = call <3 x float> @llvm.sin.v3f32(<3 x float> %x)
@@ -142,30 +124,6 @@ define <5 x float> @sin_v5f32(<5 x float> %x) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: subq $88, %rsp
; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq sinf
@@ -187,7 +145,13 @@ define <5 x float> @sin_v5f32(<5 x float> %x) nounwind {
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT: addq $88, %rsp
; CHECK-NEXT: retq
%r = call <5 x float> @llvm.sin.v5f32(<5 x float> %x)
@@ -200,27 +164,15 @@ define <6 x float> @sin_v6f32(<6 x float> %x) nounwind {
; CHECK-NEXT: subq $88, %rsp
; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq sinf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -256,18 +208,6 @@ define <3 x double> @sin_v3f64(<3 x double> %x) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: subq $88, %rsp
; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq sin
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq sin
; CHECK-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq sin
@@ -277,7 +217,13 @@ define <3 x double> @sin_v3f64(<3 x double> %x) nounwind {
; CHECK-NEXT: callq sin
; CHECK-NEXT: vmovapd (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; CHECK-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq sin
; CHECK-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT: addq $88, %rsp
; CHECK-NEXT: retq
%r = call <3 x double> @llvm.sin.v3f64(<3 x double> %x)
@@ -306,26 +252,14 @@ define <2 x float> @cos_v2f32(<2 x float> %x) nounwind {
; CHECK-LABEL: cos_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: callq cosf
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: callq cosf
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq cosf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq cosf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq cosf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
%r = call <2 x float> @llvm.cos.v2f32(<2 x float> %x)
@@ -336,26 +270,14 @@ define <2 x float> @exp_v2f32(<2 x float> %x) nounwind {
; CHECK-LABEL: exp_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: callq expf
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: callq expf
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq expf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq expf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq expf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
%r = call <2 x float> @llvm.exp.v2f32(<2 x float> %x)
@@ -366,26 +288,14 @@ define <2 x float> @exp2_v2f32(<2 x float> %x) nounwind {
; CHECK-LABEL: exp2_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: callq exp2f
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: callq exp2f
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq exp2f
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq exp2f
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq exp2f
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
%r = call <2 x float> @llvm.exp2.v2f32(<2 x float> %x)
@@ -405,26 +315,14 @@ define <2 x float> @log_v2f32(<2 x float> %x) nounwind {
; CHECK-LABEL: log_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: callq logf
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: callq logf
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq logf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq logf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq logf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
%r = call <2 x float> @llvm.log.v2f32(<2 x float> %x)
@@ -435,26 +333,14 @@ define <2 x float> @log10_v2f32(<2 x float> %x) nounwind {
; CHECK-LABEL: log10_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: callq log10f
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: callq log10f
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq log10f
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq log10f
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq log10f
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
%r = call <2 x float> @llvm.log10.v2f32(<2 x float> %x)
@@ -465,26 +351,14 @@ define <2 x float> @log2_v2f32(<2 x float> %x) nounwind {
; CHECK-LABEL: log2_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: callq log2f
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: callq log2f
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq log2f
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq log2f
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq log2f
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
%r = call <2 x float> @llvm.log2.v2f32(<2 x float> %x)
@@ -513,26 +387,14 @@ define <2 x float> @round_v2f32(<2 x float> %x) nounwind {
; CHECK-LABEL: round_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: callq roundf
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: callq roundf
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
; CHECK-NEXT: callq roundf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[1,0]
; CHECK-NEXT: callq roundf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = mem[3,1,2,3]
; CHECK-NEXT: callq roundf
; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
%r = call <2 x float> @llvm.round.v2f32(<2 x float> %x)