
[X86][AVX] Update VBROADCAST folds to always use v2i64 X86vzload

The VBROADCAST combines and SimplifyDemandedVectorElts improvements mean that we now more consistently use shorter (128-bit) X86vzload input operands.

Follow-up to D58053.

llvm-svn: 354346
Simon Pilgrim 2019-02-19 16:33:17 +00:00
parent 72662e2e3c
commit 3377ecdd30
5 changed files with 6 additions and 9 deletions
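For context, a minimal sketch of the kind of input these folds serve (a hypothetical IR function, not taken from this commit or its tests; the name @broadcast_i64_load is made up): an i64 is loaded and splat across a vector. On 32-bit x86 the scalar i64 load cannot be done in a general-purpose register, so it is legalized to a 128-bit zero-extending vector load, and X86VBroadcast therefore sees a v2i64 X86vzload operand regardless of the destination vector width.

; Hypothetical example (not part of this commit): broadcast of a loaded i64.
; On a 32-bit target the i64 load is lowered to a 128-bit zero-extending
; vector load (X86vzload); its v2i64 result feeds X86VBroadcast, which is
; the shape the updated patterns now match.
define <4 x i64> @broadcast_i64_load(i64* %p) {
  %ld = load i64, i64* %p, align 8
  %ins = insertelement <4 x i64> undef, i64 %ld, i32 0
  %splat = shufflevector <4 x i64> %ins, <4 x i64> undef, <4 x i32> zeroinitializer
  ret <4 x i64> %splat
}

With AVX2 this should select to a single vpbroadcastq/vbroadcastsd from memory via the VPBROADCASTQYrm pattern updated below.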


@@ -1378,7 +1378,7 @@ multiclass avx512_subvec_broadcast_rm_dq<bits<8> opc, string OpcodeStr,
 let Predicates = [HasAVX512] in {
   // 32-bit targets will fail to load a i64 directly but can use ZEXT_LOAD.
-  def : Pat<(v8i64 (X86VBroadcast (v8i64 (X86vzload addr:$src)))),
+  def : Pat<(v8i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
             (VPBROADCASTQZm addr:$src)>;
 }
@@ -1386,7 +1386,7 @@ let Predicates = [HasVLX] in {
   // 32-bit targets will fail to load a i64 directly but can use ZEXT_LOAD.
   def : Pat<(v2i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
             (VPBROADCASTQZ128m addr:$src)>;
-  def : Pat<(v4i64 (X86VBroadcast (v4i64 (X86vzload addr:$src)))),
+  def : Pat<(v4i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
             (VPBROADCASTQZ256m addr:$src)>;
 }
 let Predicates = [HasVLX, HasBWI] in {


@@ -7850,7 +7850,7 @@ let Predicates = [HasAVX2, NoVLX] in {
   // 32-bit targets will fail to load a i64 directly but can use ZEXT_LOAD.
   def : Pat<(v2i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
             (VPBROADCASTQrm addr:$src)>;
-  def : Pat<(v4i64 (X86VBroadcast (v4i64 (X86vzload addr:$src)))),
+  def : Pat<(v4i64 (X86VBroadcast (v2i64 (X86vzload addr:$src)))),
             (VPBROADCASTQYrm addr:$src)>;
   def : Pat<(v4i32 (X86VBroadcast (v4i32 (scalar_to_vector (loadi32 addr:$src))))),


@@ -95,8 +95,7 @@ define <8 x i64> @insert_subvector_into_undef(i32 %x0, i32 %x1) nounwind {
 ;
 ; X86_AVX512-LABEL: insert_subvector_into_undef:
 ; X86_AVX512:       # %bb.0:
-; X86_AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86_AVX512-NEXT:    vbroadcastsd %xmm0, %zmm0
+; X86_AVX512-NEXT:    vbroadcastsd {{[0-9]+}}(%esp), %zmm0
 ; X86_AVX512-NEXT:    retl
 ;
 ; X64_AVX512-LABEL: insert_subvector_into_undef:


@@ -655,8 +655,7 @@ define <16 x i8> @combine_pshufb_insertion_as_broadcast_v2i64(i64 %a0) {
 define <8 x i32> @combine_permd_insertion_as_broadcast_v4i64(i64 %a0) {
 ; X86-LABEL: combine_permd_insertion_as_broadcast_v4i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT:    vbroadcastsd %xmm0, %ymm0
+; X86-NEXT:    vbroadcastsd {{[0-9]+}}(%esp), %ymm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_permd_insertion_as_broadcast_v4i64:


@@ -975,8 +975,7 @@ define <16 x float> @combine_vpermi2var_vpermvar_16f32_as_vperm2_zero(<16 x floa
 define <8 x i64> @combine_vpermvar_insertion_as_broadcast_v8i64(i64 %a0) {
 ; X86-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT:    vbroadcastsd %xmm0, %zmm0
+; X86-NEXT:    vbroadcastsd {{[0-9]+}}(%esp), %zmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: combine_vpermvar_insertion_as_broadcast_v8i64: