
[LegalizeVectorOps][X86] Enable expansion of vector fp_to_uint in LegalizeVectorOps to avoid scalarization.

The code here isn't great in all cases, particularly v4f64->v4i32
on 64-bit AVX targets, but there is some improvement in some
configurations.

There are definitely some issues with computeNumSignBits on
X86ISD::STRICT_FCMP, as well as with not being able to propagate
sign bits through merge_values nodes that get created during
custom legalization.
Craig Topper 2020-01-04 19:18:50 -08:00
parent 6a5bb0439e
commit 15ba608bd7
4 changed files with 113 additions and 258 deletions


@@ -1178,13 +1178,15 @@ SDValue VectorLegalizer::ExpandFP_TO_UINT(SDValue Op) {
   // Attempt to expand using TargetLowering.
   SDValue Result, Chain;
   if (TLI.expandFP_TO_UINT(Op.getNode(), Result, Chain, DAG)) {
-    if (Op.getNode()->isStrictFPOpcode())
+    if (Op->isStrictFPOpcode())
       // Relink the chain
       DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Chain);
     return Result;
   }
 
   // Otherwise go ahead and unroll.
+  if (Op->isStrictFPOpcode())
+    return UnrollStrictFPOp(Op);
   return DAG.UnrollVectorOp(Op.getNode());
 }
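For context on what TLI.expandFP_TO_UINT produces when it succeeds: the expansion rebases out-of-range inputs by 2^31 so a signed conversion can do the work, then restores the high bit with an xor. A minimal scalar model of the per-element math (my sketch, not code from this patch):

```cpp
#include <cstdint>

// Scalar model of the fp_to_uint expansion: lanes below 2^31 convert
// directly with a signed conversion; larger lanes are shifted down by
// 2^31 first and the top bit is put back with an xor.
uint32_t FpToUint32(float X) {
  const float Lim = 2147483648.0f; // 2^31
  if (X < Lim)
    return (uint32_t)(int32_t)X;
  return (uint32_t)(int32_t)(X - Lim) ^ 0x80000000u;
}
```

The vector code in the tests below is the branchless form of the same thing: the compare produces a lane mask, and the mask selects both the 2^31 bias and the 0x80000000 xor constant.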
@@ -1393,6 +1395,8 @@ SDValue VectorLegalizer::ExpandFixedPointMul(SDValue Op) {
 SDValue VectorLegalizer::ExpandStrictFPOp(SDValue Op) {
   if (Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
     return ExpandUINT_TO_FLOAT(Op);
+  if (Op.getOpcode() == ISD::STRICT_FP_TO_UINT)
+    return ExpandFP_TO_UINT(Op);
   return UnrollStrictFPOp(Op);
 }
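When the target can't expand, the strict fallback has to scalarize while keeping the chain intact. Roughly what UnrollStrictFPOp does, as a simplified, hypothetical sketch (the helper name and details are illustrative, not the actual implementation):

```cpp
// Sketch: unroll a strict FP op lane by lane. Each scalar strict node
// yields {value, chain}; the per-lane chains are merged with a
// TokenFactor and the scalar results are rebuilt into a vector.
SDValue VectorLegalizer::UnrollStrictFPOpSketch(SDValue Op) {
  EVT VT = Op.getValueType();
  EVT EltVT = VT.getVectorElementType();
  SDValue Chain = Op.getOperand(0);
  SDValue Src = Op.getOperand(1);
  EVT SrcEltVT = Src.getValueType().getVectorElementType();
  SDLoc dl(Op);

  SmallVector<SDValue, 8> Elts, Chains;
  EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
    SDValue Lane = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcEltVT, Src,
                               DAG.getConstant(i, dl, IdxVT));
    SDValue Scalar = DAG.getNode(Op.getOpcode(), dl,
                                 DAG.getVTList(EltVT, MVT::Other),
                                 {Chain, Lane});
    Elts.push_back(Scalar);
    Chains.push_back(Scalar.getValue(1));
  }
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  // Relink the chain, same as ExpandFP_TO_UINT above.
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), NewChain);
  return DAG.getBuildVector(VT, dl, Elts);
}
```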


@@ -2529,136 +2529,44 @@ define <4 x i32> @strict_vector_fptosi_v4f32_to_v4i32(<4 x float> %a) #0 {
define <4 x i32> @strict_vector_fptoui_v4f32_to_v4i32(<4 x float> %a) #0 {
; SSE-32-LABEL: strict_vector_fptoui_v4f32_to_v4i32:
; SSE-32: # %bb.0:
; SSE-32-NEXT: movaps %xmm0, %xmm1
; SSE-32-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-32-NEXT: comiss %xmm3, %xmm1
; SSE-32-NEXT: xorps %xmm2, %xmm2
; SSE-32-NEXT: xorps %xmm4, %xmm4
; SSE-32-NEXT: jb .LBB21_2
; SSE-32-NEXT: # %bb.1:
; SSE-32-NEXT: movaps %xmm3, %xmm4
; SSE-32-NEXT: .LBB21_2:
; SSE-32-NEXT: setae %al
; SSE-32-NEXT: movzbl %al, %ecx
; SSE-32-NEXT: shll $31, %ecx
; SSE-32-NEXT: subss %xmm4, %xmm1
; SSE-32-NEXT: cvttss2si %xmm1, %eax
; SSE-32-NEXT: xorl %ecx, %eax
; SSE-32-NEXT: movaps %xmm0, %xmm4
; SSE-32-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE-32-NEXT: comiss %xmm3, %xmm4
; SSE-32-NEXT: xorps %xmm5, %xmm5
; SSE-32-NEXT: jb .LBB21_4
; SSE-32-NEXT: # %bb.3:
; SSE-32-NEXT: movaps %xmm3, %xmm5
; SSE-32-NEXT: .LBB21_4:
; SSE-32-NEXT: movd %eax, %xmm1
; SSE-32-NEXT: setae %al
; SSE-32-NEXT: movzbl %al, %eax
; SSE-32-NEXT: shll $31, %eax
; SSE-32-NEXT: subss %xmm5, %xmm4
; SSE-32-NEXT: cvttss2si %xmm4, %ecx
; SSE-32-NEXT: xorl %eax, %ecx
; SSE-32-NEXT: movd %ecx, %xmm4
; SSE-32-NEXT: comiss %xmm3, %xmm0
; SSE-32-NEXT: xorps %xmm5, %xmm5
; SSE-32-NEXT: jb .LBB21_6
; SSE-32-NEXT: # %bb.5:
; SSE-32-NEXT: movaps %xmm3, %xmm5
; SSE-32-NEXT: .LBB21_6:
; SSE-32-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE-32-NEXT: setae %al
; SSE-32-NEXT: movzbl %al, %eax
; SSE-32-NEXT: shll $31, %eax
; SSE-32-NEXT: movaps %xmm0, %xmm1
; SSE-32-NEXT: subss %xmm5, %xmm1
; SSE-32-NEXT: cvttss2si %xmm1, %ecx
; SSE-32-NEXT: xorl %eax, %ecx
; SSE-32-NEXT: movd %ecx, %xmm1
; SSE-32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE-32-NEXT: comiss %xmm3, %xmm0
; SSE-32-NEXT: jb .LBB21_8
; SSE-32-NEXT: # %bb.7:
; SSE-32-NEXT: movaps %xmm3, %xmm2
; SSE-32-NEXT: .LBB21_8:
; SSE-32-NEXT: setae %al
; SSE-32-NEXT: movzbl %al, %eax
; SSE-32-NEXT: shll $31, %eax
; SSE-32-NEXT: subss %xmm2, %xmm0
; SSE-32-NEXT: cvttss2si %xmm0, %ecx
; SSE-32-NEXT: xorl %eax, %ecx
; SSE-32-NEXT: movd %ecx, %xmm0
; SSE-32-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-32-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
; SSE-32-NEXT: movdqa %xmm1, %xmm0
; SSE-32-NEXT: movaps {{.*#+}} xmm2 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; SSE-32-NEXT: movaps %xmm0, %xmm3
; SSE-32-NEXT: cmpltps %xmm2, %xmm3
; SSE-32-NEXT: movaps %xmm3, %xmm1
; SSE-32-NEXT: andnps {{\.LCPI.*}}, %xmm1
; SSE-32-NEXT: andnps %xmm2, %xmm3
; SSE-32-NEXT: subps %xmm3, %xmm0
; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-32-NEXT: xorps %xmm0, %xmm1
; SSE-32-NEXT: movaps %xmm1, %xmm0
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: strict_vector_fptoui_v4f32_to_v4i32:
; SSE-64: # %bb.0:
; SSE-64-NEXT: movaps %xmm0, %xmm1
; SSE-64-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
; SSE-64-NEXT: cvttss2si %xmm1, %rax
; SSE-64-NEXT: movd %eax, %xmm1
; SSE-64-NEXT: movaps %xmm0, %xmm2
; SSE-64-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-64-NEXT: cvttss2si %xmm2, %rax
; SSE-64-NEXT: movd %eax, %xmm2
; SSE-64-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-64-NEXT: cvttss2si %xmm0, %rax
; SSE-64-NEXT: movd %eax, %xmm1
; SSE-64-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE-64-NEXT: cvttss2si %xmm0, %rax
; SSE-64-NEXT: movd %eax, %xmm0
; SSE-64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-64-NEXT: movdqa %xmm1, %xmm0
; SSE-64-NEXT: movaps {{.*#+}} xmm2 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; SSE-64-NEXT: movaps %xmm0, %xmm3
; SSE-64-NEXT: cmpltps %xmm2, %xmm3
; SSE-64-NEXT: movaps %xmm3, %xmm1
; SSE-64-NEXT: andnps {{.*}}(%rip), %xmm1
; SSE-64-NEXT: andnps %xmm2, %xmm3
; SSE-64-NEXT: subps %xmm3, %xmm0
; SSE-64-NEXT: cvttps2dq %xmm0, %xmm0
; SSE-64-NEXT: xorps %xmm0, %xmm1
; SSE-64-NEXT: movaps %xmm1, %xmm0
; SSE-64-NEXT: retq
;
; AVX-32-LABEL: strict_vector_fptoui_v4f32_to_v4i32:
; AVX-32: # %bb.0:
; AVX-32-NEXT: pushl %ebp
; AVX-32-NEXT: .cfi_def_cfa_offset 8
; AVX-32-NEXT: .cfi_offset %ebp, -8
; AVX-32-NEXT: movl %esp, %ebp
; AVX-32-NEXT: .cfi_def_cfa_register %ebp
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $32, %esp
; AVX-32-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $1, %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $2, %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $3, %xmm0, (%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds (%esp)
; AVX-32-NEXT: fisttpll (%esp)
; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrd $3, (%esp), %xmm0, %xmm0
; AVX-32-NEXT: movl %ebp, %esp
; AVX-32-NEXT: popl %ebp
; AVX-32-NEXT: .cfi_def_cfa %esp, 4
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: strict_vector_fptoui_v4f32_to_v4i32:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-64-NEXT: vcvttss2si %xmm1, %rax
; AVX-64-NEXT: vcvttss2si %xmm0, %rcx
; AVX-64-NEXT: vmovd %ecx, %xmm1
; AVX-64-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
; AVX-64-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-64-NEXT: vcvttss2si %xmm2, %rax
; AVX-64-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; AVX-64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-64-NEXT: vcvttss2si %xmm0, %rax
; AVX-64-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX-64-NEXT: retq
; AVX-LABEL: strict_vector_fptoui_v4f32_to_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX-NEXT: vcmpltps %xmm1, %xmm0, %xmm2
; AVX-NEXT: vxorps %xmm3, %xmm3, %xmm3
; AVX-NEXT: vmovaps {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; AVX-NEXT: vblendvps %xmm2, %xmm3, %xmm4, %xmm4
; AVX-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX-NEXT: vsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vxorps %xmm4, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: strict_vector_fptoui_v4f32_to_v4i32:
; AVX512F: # %bb.0:
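The rewritten SSE bodies above are the vector form of the rebasing trick: SSE2 has no variable blend, so the mask selects the bias and the xor constant with and-not operations instead. A rough intrinsics rendering of what the new sequence computes (a sketch for illustration; the hypothetical helper is not part of the test):

```cpp
#include <emmintrin.h> // SSE2

// Vector form of the 2^31 rebasing trick, mirroring the cmpltps /
// andnps / subps / cvttps2dq / xorps sequence in the checks above.
__m128i FpToUint32x4(__m128 X) {
  const __m128 Lim = _mm_set1_ps(2147483648.0f); // 2^31 as float
  const __m128 Sign = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
  __m128 Small = _mm_cmplt_ps(X, Lim);                 // all-ones where X < 2^31
  __m128 XorBit = _mm_andnot_ps(Small, Sign);          // 0x80000000 for big lanes
  __m128 Bias = _mm_andnot_ps(Small, Lim);             // 2^31 for big lanes
  __m128i Res = _mm_cvttps_epi32(_mm_sub_ps(X, Bias)); // signed convert
  return _mm_xor_si128(Res, _mm_castps_si128(XorBit)); // restore high bit
}
```

On AVX targets the same selects show up as vblendvps, which is what the combined AVX check above matches.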


@@ -1145,49 +1145,35 @@ define <4 x i32> @strict_vector_fptosi_v4f64_to_v4i32(<4 x double> %a) #0 {
define <4 x i32> @strict_vector_fptoui_v4f64_to_v4i32(<4 x double> %a) #0 {
; AVX-32-LABEL: strict_vector_fptoui_v4f64_to_v4i32:
; AVX-32: # %bb.0:
; AVX-32-NEXT: pushl %ebp
; AVX-32-NEXT: .cfi_def_cfa_offset 8
; AVX-32-NEXT: .cfi_offset %ebp, -8
; AVX-32-NEXT: movl %esp, %ebp
; AVX-32-NEXT: .cfi_def_cfa_register %ebp
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $32, %esp
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vmovhps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vmovhps %xmm0, (%esp)
; AVX-32-NEXT: fldl {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fldl {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fldl {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fldl (%esp)
; AVX-32-NEXT: fisttpll (%esp)
; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrd $3, (%esp), %xmm0, %xmm0
; AVX-32-NEXT: movl %ebp, %esp
; AVX-32-NEXT: popl %ebp
; AVX-32-NEXT: .cfi_def_cfa %esp, 4
; AVX-32-NEXT: vmovapd {{.*#+}} ymm1 = [2.147483648E+9,2.147483648E+9,2.147483648E+9,2.147483648E+9]
; AVX-32-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
; AVX-32-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX-32-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm3[0,2]
; AVX-32-NEXT: vxorps %xmm4, %xmm4, %xmm4
; AVX-32-NEXT: vmovaps {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
; AVX-32-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
; AVX-32-NEXT: vxorps %xmm4, %xmm4, %xmm4
; AVX-32-NEXT: vblendvpd %ymm2, %ymm4, %ymm1, %ymm1
; AVX-32-NEXT: vsubpd %ymm1, %ymm0, %ymm0
; AVX-32-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX-32-NEXT: vxorpd %xmm3, %xmm0, %xmm0
; AVX-32-NEXT: vzeroupper
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: strict_vector_fptoui_v4f64_to_v4i32:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-64-NEXT: vcvttsd2si %xmm1, %rax
; AVX-64-NEXT: vcvttsd2si %xmm0, %rcx
; AVX-64-NEXT: vmovd %ecx, %xmm1
; AVX-64-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-64-NEXT: vcvttsd2si %xmm0, %rax
; AVX-64-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; AVX-64-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-64-NEXT: vcvttsd2si %xmm0, %rax
; AVX-64-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX-64-NEXT: vmovapd {{.*#+}} ymm1 = [2.147483648E+9,2.147483648E+9,2.147483648E+9,2.147483648E+9]
; AVX-64-NEXT: vcmpltpd %ymm1, %ymm0, %ymm2
; AVX-64-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX-64-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm3[0,2]
; AVX-64-NEXT: vxorps %xmm4, %xmm4, %xmm4
; AVX-64-NEXT: vmovaps {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
; AVX-64-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
; AVX-64-NEXT: vxorps %xmm4, %xmm4, %xmm4
; AVX-64-NEXT: vblendvpd %ymm2, %ymm4, %ymm1, %ymm1
; AVX-64-NEXT: vsubpd %ymm1, %ymm0, %ymm0
; AVX-64-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX-64-NEXT: vxorpd %xmm3, %xmm0, %xmm0
; AVX-64-NEXT: vzeroupper
; AVX-64-NEXT: retq
;
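This v4f64->v4i32 AVX path is the case the commit message flags as not great: vcmpltpd yields a mask with four 64-bit lanes, which must be narrowed to four 32-bit lanes (the vextractf128 + vshufps pair) before it can feed the 128-bit blend and final xor. A rough intrinsics rendering of that narrowing and the surrounding math (hypothetical helpers, for illustration only):

```cpp
#include <immintrin.h> // AVX

// Narrow a 4 x double compare mask (4 x 64-bit lanes) to a 4 x 32-bit
// mask, mirroring the vextractf128 + vshufps pair in the checks above.
static __m128 NarrowMaskPdToPs(__m256d MaskPd) {
  __m256 M = _mm256_castpd_ps(MaskPd);
  __m128 Lo = _mm256_castps256_ps128(M);
  __m128 Hi = _mm256_extractf128_ps(M, 1);
  // Keep the even 32-bit element of each 64-bit lane.
  return _mm_shuffle_ps(Lo, Hi, _MM_SHUFFLE(2, 0, 2, 0));
}

__m128i FpToUint32x4FromF64(__m256d X) {
  const __m256d Lim = _mm256_set1_pd(2147483648.0); // 2^31
  __m256d Small = _mm256_cmp_pd(X, Lim, _CMP_LT_OQ); // per-lane i64 mask
  __m128 Small32 = NarrowMaskPdToPs(Small);
  __m128 Sign = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
  __m128 XorBit = _mm_blendv_ps(Sign, _mm_setzero_ps(), Small32);
  __m256d Bias = _mm256_blendv_pd(Lim, _mm256_setzero_pd(), Small);
  __m128i Res = _mm256_cvttpd_epi32(_mm256_sub_pd(X, Bias));
  return _mm_xor_si128(Res, _mm_castps_si128(XorBit));
}
```

Those extra shuffles are the overhead relative to the old four scalar vcvttsd2si conversions.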
@@ -1392,78 +1378,28 @@ define <8 x i32> @strict_vector_fptosi_v8f32_to_v8i32(<8 x float> %a) #0 {
define <8 x i32> @strict_vector_fptoui_v8f32_to_v8i32(<8 x float> %a) #0 {
; AVX-32-LABEL: strict_vector_fptoui_v8f32_to_v8i32:
; AVX-32: # %bb.0:
; AVX-32-NEXT: pushl %ebp
; AVX-32-NEXT: .cfi_def_cfa_offset 8
; AVX-32-NEXT: .cfi_offset %ebp, -8
; AVX-32-NEXT: movl %esp, %ebp
; AVX-32-NEXT: .cfi_def_cfa_register %ebp
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $64, %esp
; AVX-32-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $1, %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $2, %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $3, %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-32-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $1, %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $2, %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $3, %xmm0, (%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds {{[0-9]+}}(%esp)
; AVX-32-NEXT: fisttpll {{[0-9]+}}(%esp)
; AVX-32-NEXT: flds (%esp)
; AVX-32-NEXT: fisttpll (%esp)
; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
; AVX-32-NEXT: vpinsrd $3, (%esp), %xmm1, %xmm1
; AVX-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-32-NEXT: movl %ebp, %esp
; AVX-32-NEXT: popl %ebp
; AVX-32-NEXT: .cfi_def_cfa %esp, 4
; AVX-32-NEXT: vmovaps {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX-32-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
; AVX-32-NEXT: vxorps %xmm3, %xmm3, %xmm3
; AVX-32-NEXT: vmovaps {{.*#+}} ymm4 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
; AVX-32-NEXT: vblendvps %ymm2, %ymm3, %ymm4, %ymm4
; AVX-32-NEXT: vblendvps %ymm2, %ymm3, %ymm1, %ymm1
; AVX-32-NEXT: vsubps %ymm1, %ymm0, %ymm0
; AVX-32-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX-32-NEXT: vxorps %ymm4, %ymm0, %ymm0
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: strict_vector_fptoui_v8f32_to_v8i32:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-64-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-64-NEXT: vcvttss2si %xmm2, %rax
; AVX-64-NEXT: vcvttss2si %xmm1, %rcx
; AVX-64-NEXT: vmovd %ecx, %xmm2
; AVX-64-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
; AVX-64-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-64-NEXT: vcvttss2si %xmm3, %rax
; AVX-64-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX-64-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-64-NEXT: vcvttss2si %xmm1, %rax
; AVX-64-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
; AVX-64-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-64-NEXT: vcvttss2si %xmm2, %rax
; AVX-64-NEXT: vcvttss2si %xmm0, %rcx
; AVX-64-NEXT: vmovd %ecx, %xmm2
; AVX-64-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
; AVX-64-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-64-NEXT: vcvttss2si %xmm3, %rax
; AVX-64-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX-64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-64-NEXT: vcvttss2si %xmm0, %rax
; AVX-64-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-64-NEXT: vmovaps {{.*#+}} ymm1 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX-64-NEXT: vcmpltps %ymm1, %ymm0, %ymm2
; AVX-64-NEXT: vxorps %xmm3, %xmm3, %xmm3
; AVX-64-NEXT: vmovaps {{.*#+}} ymm4 = [2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648,2147483648]
; AVX-64-NEXT: vblendvps %ymm2, %ymm3, %ymm4, %ymm4
; AVX-64-NEXT: vblendvps %ymm2, %ymm3, %ymm1, %ymm1
; AVX-64-NEXT: vsubps %ymm1, %ymm0, %ymm0
; AVX-64-NEXT: vcvttps2dq %ymm0, %ymm0
; AVX-64-NEXT: vxorps %ymm4, %ymm0, %ymm0
; AVX-64-NEXT: retq
;
; AVX512F-LABEL: strict_vector_fptoui_v8f32_to_v8i32:


@@ -4411,29 +4411,30 @@ entry:
define <4 x i32> @constrained_vector_fptoui_v4i32_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fptoui_v4i32_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm1
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm2
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: movaps {{.*#+}} xmm0 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [4.2E+1,4.3E+1,4.4E+1,4.5E+1]
; CHECK-NEXT: movaps %xmm1, %xmm2
; CHECK-NEXT: cmpltps %xmm0, %xmm2
; CHECK-NEXT: movaps %xmm2, %xmm3
; CHECK-NEXT: andnps {{.*}}(%rip), %xmm3
; CHECK-NEXT: andnps %xmm0, %xmm2
; CHECK-NEXT: subps %xmm2, %xmm1
; CHECK-NEXT: cvttps2dq %xmm1, %xmm0
; CHECK-NEXT: xorps %xmm3, %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fptoui_v4i32_v4f32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rcx
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX1-NEXT: vcvttss2si {{.*}}(%rip), %rax
; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [4.2E+1,4.3E+1,4.4E+1,4.5E+1]
; AVX1-NEXT: vcmpltps %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vmovaps {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT: vblendvps %xmm2, %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vblendvps %xmm2, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vsubps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX1-NEXT: vxorps %xmm4, %xmm0, %xmm0
; AVX1-NEXT: retq
;
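Note that the inputs here are compile-time constants, but the constrained intrinsics are not constant folded, so the same expansion is emitted at run time. A quick scalar check of what it yields for this input, reusing the per-element model from earlier (plain C++, mine):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const float In[4] = {42.0f, 43.0f, 44.0f, 45.0f};
  for (float X : In) {
    // All inputs are below 2^31, so the mask selects the direct path:
    // bias and xor constant are both zero and the signed convert wins.
    uint32_t U = X < 2147483648.0f
                     ? (uint32_t)(int32_t)X
                     : (uint32_t)(int32_t)(X - 2147483648.0f) ^ 0x80000000u;
    std::printf("%u\n", U); // prints 42 43 44 45
  }
}
```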
; AVX512-LABEL: constrained_vector_fptoui_v4i32_v4f32:
@@ -4967,14 +4968,20 @@ define <4 x i32> @constrained_vector_fptoui_v4i32_v4f64() #0 {
;
; AVX1-LABEL: constrained_vector_fptoui_v4i32_v4f64:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rcx
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX1-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX1-NEXT: vcvttsd2si {{.*}}(%rip), %rax
; AVX1-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; AVX1-NEXT: vmovapd {{.*#+}} ymm0 = [2.147483648E+9,2.147483648E+9,2.147483648E+9,2.147483648E+9]
; AVX1-NEXT: vmovapd {{.*#+}} ymm1 = [4.2100000000000001E+1,4.2200000000000003E+1,4.2299999999999997E+1,4.2399999999999999E+1]
; AVX1-NEXT: vcmpltpd %ymm0, %ymm1, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm3[0,2]
; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vmovaps {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
; AVX1-NEXT: vblendvps %xmm3, %xmm4, %xmm5, %xmm3
; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vblendvpd %ymm2, %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vsubpd %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX1-NEXT: vxorpd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX512-LABEL: constrained_vector_fptoui_v4i32_v4f64: