
X86: Improve AVX512 fptoui lowering

Summary:
Add patterns for
  fptoui <16 x float> to <16 x i8>
  fptoui <16 x float> to <16 x i16>
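
For reference, the two conversions correspond to the following IR, mirroring the f32to16uc/f32to16us functions in the codegen test updated below:

define <16 x i8> @f32to16uc(<16 x float> %f) {
  %res = fptoui <16 x float> %f to <16 x i8>
  ret <16 x i8> %res
}

define <16 x i16> @f32to16us(<16 x float> %f) {
  %res = fptoui <16 x float> %f to <16 x i16>
  ret <16 x i16> %res
}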

Reviewers: igorb, delena, craig.topper

Reviewed By: craig.topper

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D37505

llvm-svn: 312704
Zvi Rackover 2017-09-07 07:40:34 +00:00
commit bf9b51d3fc (parent 44c5591029)
5 changed files with 21 additions and 146 deletions

lib/Target/X86/X86ISelLowering.cpp

@@ -1171,6 +1171,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
+setOperationAction(ISD::FP_TO_UINT, MVT::v16i8, Legal);
+setOperationAction(ISD::FP_TO_UINT, MVT::v16i16, Legal);
 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
 setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);

lib/Target/X86/X86InstrAVX512.td

@@ -8269,6 +8269,11 @@ defm VPMOVSWB : avx512_trunc_wb<0x20, "vpmovswb", X86vtruncs,
 defm VPMOVUSWB : avx512_trunc_wb<0x10, "vpmovuswb", X86vtruncus,
                                  truncstore_us_vi8, masked_truncstore_us_vi8>;
 
+def : Pat<(v16i16 (fp_to_uint (v16f32 VR512:$src1))),
+          (VPMOVDWZrr (v16i32 (VCVTTPS2UDQZrr VR512:$src1)))>, Requires<[HasAVX512]>;
+def : Pat<(v16i8 (fp_to_uint (v16f32 VR512:$src1))),
+          (VPMOVDBZrr (v16i32 (VCVTTPS2UDQZrr VR512:$src1)))>, Requires<[HasAVX512]>;
+
 let Predicates = [HasAVX512, NoVLX] in {
 def: Pat<(v8i16 (X86vtrunc (v8i32 VR256X:$src))),
          (v8i16 (EXTRACT_SUBREG
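
Both new patterns route through v16i32: VCVTTPS2UDQZrr performs the truncating float-to-unsigned convert on the full 512-bit vector, and VPMOVDWZrr/VPMOVDBZrr narrow the dwords to words or bytes. Per the updated CHECK lines in the codegen test below, this selects to a two-instruction sequence:

vcvttps2udq %zmm0, %zmm0
vpmovdb %zmm0, %xmm0      # vpmovdw %zmm0, %ymm0 for the <16 x i16> case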

lib/Target/X86/X86TargetTransformInfo.cpp

@@ -1170,7 +1170,11 @@ int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
 { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f32,  1 },
 { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
 { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
+{ ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  2 },
+{ ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  2 },
 { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
+{ ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 2 },
+{ ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 2 },
 };
 
 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
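
A sketch of how these entries surface in the cost model, in the style of the cost-model test updated below (the RUN line and function name are illustrative, using the legacy -cost-model -analyze interface of this era):

; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s
define <16 x i16> @cost_f32to16us(<16 x float> %f) {
; CHECK: cost of 2 {{.*}} fptoui
  %r = fptoui <16 x float> %f to <16 x i16>
  ret <16 x i16> %r
}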

test/Analysis/CostModel/X86/fptoui.ll

@@ -94,7 +94,7 @@ define i32 @fptoui_double_i16(i32 %arg) {
 ; SSE42: cost of 27 {{.*}} %V8I16 = fptoui
 ; AVX1: cost of 25 {{.*}} %V8I16 = fptoui
 ; AVX2: cost of 25 {{.*}} %V8I16 = fptoui
-; AVX512: cost of 1 {{.*}} %V8I16 = fptoui
+; AVX512: cost of 2 {{.*}} %V8I16 = fptoui
 %V8I16 = fptoui <8 x double> undef to <8 x i16>
 ret i32 undef
@@ -125,7 +125,7 @@ define i32 @fptoui_double_i8(i32 %arg) {
 ; SSE42: cost of 27 {{.*}} %V8I8 = fptoui
 ; AVX1: cost of 25 {{.*}} %V8I8 = fptoui
 ; AVX2: cost of 25 {{.*}} %V8I8 = fptoui
-; AVX512: cost of 1 {{.*}} %V8I8 = fptoui
+; AVX512: cost of 2 {{.*}} %V8I8 = fptoui
 %V8I8 = fptoui <8 x double> undef to <8 x i8>
 ret i32 undef
@@ -225,7 +225,7 @@ define i32 @fptoui_float_i16(i32 %arg) {
 ; SSE42: cost of 51 {{.*}} %V16I16 = fptoui
 ; AVX1: cost of 3 {{.*}} %V16I16 = fptoui
 ; AVX2: cost of 3 {{.*}} %V16I16 = fptoui
-; AVX512: cost of 48 {{.*}} %V16I16 = fptoui
+; AVX512: cost of 2 {{.*}} %V16I16 = fptoui
 %V16I16 = fptoui <16 x float> undef to <16 x i16>
 ret i32 undef
@@ -255,7 +255,7 @@ define i32 @fptoui_float_i8(i32 %arg) {
 ; SSE42: cost of 51 {{.*}} %V16I8 = fptoui
 ; AVX1: cost of 3 {{.*}} %V16I8 = fptoui
 ; AVX2: cost of 3 {{.*}} %V16I8 = fptoui
-; AVX512: cost of 48 {{.*}} %V16I8 = fptoui
+; AVX512: cost of 2 {{.*}} %V16I8 = fptoui
 %V16I8 = fptoui <16 x float> undef to <16 x i8>
 ret i32 undef
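
The updated AVX512 figures are consistent with a convert-plus-truncate lowering: 1 (vcvttps2udq) + 1 (vpmovdw/vpmovdb) = 2 for the <16 x float> cases, replacing the scalarized estimate of 48; the <8 x double> cases are likewise now modeled as two instructions rather than one.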

test/CodeGen/X86/avx512-cvt.ll

@@ -451,104 +451,14 @@ define <16 x i32> @f32to16ui(<16 x float> %a) nounwind {
 define <16 x i8> @f32to16uc(<16 x float> %f) {
 ; KNL-LABEL: f32to16uc:
 ; KNL: # BB#0:
-; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; KNL-NEXT: vcvttss2si %xmm1, %eax
-; KNL-NEXT: vcvttss2si %xmm0, %ecx
-; KNL-NEXT: vmovd %ecx, %xmm1
-; KNL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; KNL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; KNL-NEXT: vcvttss2si %xmm2, %eax
-; KNL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; KNL-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
-; KNL-NEXT: vcvttss2si %xmm2, %eax
-; KNL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; KNL-NEXT: vextractf128 $1, %ymm0, %xmm2
-; KNL-NEXT: vcvttss2si %xmm2, %eax
-; KNL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; KNL-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; KNL-NEXT: vcvttss2si %xmm3, %eax
-; KNL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; KNL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
-; KNL-NEXT: vcvttss2si %xmm3, %eax
-; KNL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; KNL-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; KNL-NEXT: vcvttss2si %xmm2, %eax
-; KNL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; KNL-NEXT: vextractf32x4 $2, %zmm0, %xmm2
-; KNL-NEXT: vcvttss2si %xmm2, %eax
-; KNL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; KNL-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; KNL-NEXT: vcvttss2si %xmm3, %eax
-; KNL-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; KNL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
-; KNL-NEXT: vcvttss2si %xmm3, %eax
-; KNL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; KNL-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; KNL-NEXT: vcvttss2si %xmm2, %eax
-; KNL-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; KNL-NEXT: vextractf32x4 $3, %zmm0, %xmm0
-; KNL-NEXT: vcvttss2si %xmm0, %eax
-; KNL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; KNL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; KNL-NEXT: vcvttss2si %xmm2, %eax
-; KNL-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; KNL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; KNL-NEXT: vcvttss2si %xmm2, %eax
-; KNL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; KNL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; KNL-NEXT: vcvttss2si %xmm0, %eax
-; KNL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; KNL-NEXT: vcvttps2udq %zmm0, %zmm0
+; KNL-NEXT: vpmovdb %zmm0, %xmm0
 ; KNL-NEXT: retq
 ;
 ; AVX512-LABEL: f32to16uc:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512-NEXT: vcvttss2si %xmm1, %eax
-; AVX512-NEXT: vcvttss2si %xmm0, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vcvttss2si %xmm2, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
-; AVX512-NEXT: vcvttss2si %xmm2, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX512-NEXT: vcvttss2si %xmm2, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; AVX512-NEXT: vcvttss2si %xmm3, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
-; AVX512-NEXT: vcvttss2si %xmm3, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; AVX512-NEXT: vcvttss2si %xmm2, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm2
-; AVX512-NEXT: vcvttss2si %xmm2, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; AVX512-NEXT: vcvttss2si %xmm3, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
-; AVX512-NEXT: vcvttss2si %xmm3, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; AVX512-NEXT: vcvttss2si %xmm2, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
-; AVX512-NEXT: vcvttss2si %xmm0, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT: vcvttss2si %xmm2, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vcvttss2si %xmm2, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-NEXT: vcvttss2si %xmm0, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX512-NEXT: vcvttps2udq %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
 %res = fptoui <16 x float> %f to <16 x i8>
@@ -558,54 +468,8 @@ define <16 x i8> @f32to16uc(<16 x float> %f) {
 define <16 x i16> @f32to16us(<16 x float> %f) {
 ; ALL-LABEL: f32to16us:
 ; ALL: # BB#0:
-; ALL-NEXT: vextractf32x4 $2, %zmm0, %xmm1
-; ALL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; ALL-NEXT: vcvttss2si %xmm2, %eax
-; ALL-NEXT: vcvttss2si %xmm1, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm2
-; ALL-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; ALL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
-; ALL-NEXT: vcvttss2si %xmm3, %eax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; ALL-NEXT: vcvttss2si %xmm1, %eax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm2, %xmm1
-; ALL-NEXT: vextractf32x4 $3, %zmm0, %xmm2
-; ALL-NEXT: vcvttss2si %xmm2, %eax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; ALL-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; ALL-NEXT: vcvttss2si %xmm3, %eax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; ALL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
-; ALL-NEXT: vcvttss2si %xmm3, %eax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; ALL-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; ALL-NEXT: vcvttss2si %xmm2, %eax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
-; ALL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; ALL-NEXT: vcvttss2si %xmm2, %eax
-; ALL-NEXT: vcvttss2si %xmm0, %ecx
-; ALL-NEXT: vmovd %ecx, %xmm2
-; ALL-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; ALL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; ALL-NEXT: vcvttss2si %xmm3, %eax
-; ALL-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; ALL-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
-; ALL-NEXT: vcvttss2si %xmm3, %eax
-; ALL-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
-; ALL-NEXT: vcvttss2si %xmm0, %eax
-; ALL-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; ALL-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; ALL-NEXT: vcvttss2si %xmm3, %eax
-; ALL-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; ALL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; ALL-NEXT: vcvttss2si %xmm3, %eax
-; ALL-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; ALL-NEXT: vcvttss2si %xmm0, %eax
-; ALL-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; ALL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vcvttps2udq %zmm0, %zmm0
+; ALL-NEXT: vpmovdw %zmm0, %ymm0
 ; ALL-NEXT: retq
 %res = fptoui <16 x float> %f to <16 x i16>
 ret <16 x i16> %res