
[DAG] Optimize away degenerate INSERT_VECTOR_ELT nodes.

Summary:
Add a missing combine that reduces a vector write of a vector read, i.e. fold

  (insert_vector_elt x (extract_vector_elt x idx) idx) -> x
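
For illustration, a minimal IR reproducer (the function name is hypothetical, not part of this patch) whose SelectionDAG contains the degenerate node; feeding it straight to llc exercises the new combine:

  define <4 x i32> @insert_extract_same_lane(<4 x i32> %x) {
    ; read lane 1 and write it straight back to lane 1 of the same vector
    %e = extractelement <4 x i32> %x, i32 1
    %v = insertelement <4 x i32> %x, i32 %e, i32 1
    ret <4 x i32> %v
  }

With the combine, the insert_vector_elt node folds to its source vector %x; the test updates below show the corresponding redundant vpextrd/vpinsrd pairs disappearing from 32-bit x86 output.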

Reviewers: spatel, RKSimon, efriedma

Reviewed By: RKSimon

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D35563

llvm-svn: 308617
Nirav Dave 2017-07-20 13:48:17 +00:00
parent 8812f7a373
commit 8bbb98afeb
5 changed files with 6 additions and 14 deletions


@@ -13572,6 +13572,12 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
   EVT VT = InVec.getValueType();
 
+  // Remove redundant insertions:
+  // (insert_vector_elt x (extract_vector_elt x idx) idx) -> x
+  if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+      InVec == InVal->getOperand(0) && EltNo == InVal->getOperand(1))
+    return InVec;
+
   // Check that we know which element is being inserted
   if (!isa<ConstantSDNode>(EltNo))
     return SDValue();
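
Note that the new check sits before the ConstantSDNode guard, so the fold also fires when the index is only known at run time. A hypothetical reproducer for that case (again not from the patch):

  define <4 x i32> @insert_extract_var_lane(<4 x i32> %x, i32 %idx) {
    ; the same round trip through a lane selected at run time still folds
    %e = extractelement <4 x i32> %x, i32 %idx
    %v = insertelement <4 x i32> %x, i32 %e, i32 %idx
    ret <4 x i32> %v
  }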


@@ -708,8 +708,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ;
 ; X32-AVX1-LABEL: splatvar_shift_v4i64:
 ; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpextrd $1, %xmm1, %eax
-; X32-AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
 ; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
 ; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -724,8 +722,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ;
 ; X32-AVX2-LABEL: splatvar_shift_v4i64:
 ; X32-AVX2: # BB#0:
-; X32-AVX2-NEXT: vpextrd $1, %xmm1, %eax
-; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
 ; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2147483648,0,2147483648,0,2147483648,0,2147483648]
 ; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
 ; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0


@@ -562,8 +562,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ;
 ; X32-AVX1-LABEL: splatvar_shift_v4i64:
 ; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpextrd $1, %xmm1, %eax
-; X32-AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
 ; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
 ; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
 ; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
@@ -572,8 +570,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ;
 ; X32-AVX2-LABEL: splatvar_shift_v4i64:
 ; X32-AVX2: # BB#0:
-; X32-AVX2-NEXT: vpextrd $1, %xmm1, %eax
-; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
 ; X32-AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
 ; X32-AVX2-NEXT: retl
 %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer


@@ -506,8 +506,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ;
 ; X32-AVX1-LABEL: splatvar_shift_v4i64:
 ; X32-AVX1: # BB#0:
-; X32-AVX1-NEXT: vpextrd $1, %xmm1, %eax
-; X32-AVX1-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
 ; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
 ; X32-AVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
 ; X32-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
@@ -516,8 +514,6 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
 ;
 ; X32-AVX2-LABEL: splatvar_shift_v4i64:
 ; X32-AVX2: # BB#0:
-; X32-AVX2-NEXT: vpextrd $1, %xmm1, %eax
-; X32-AVX2-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
 ; X32-AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
 ; X32-AVX2-NEXT: retl
 %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer


@@ -2735,8 +2735,6 @@ define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) {
 ; AVX512F-32-LABEL: test_v8i64_2_5:
 ; AVX512F-32: # BB#0:
 ; AVX512F-32-NEXT: vextracti32x4 $1, %zmm0, %xmm1
-; AVX512F-32-NEXT: vpextrd $1, %xmm1, %eax
-; AVX512F-32-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
 ; AVX512F-32-NEXT: vextracti32x4 $2, %zmm0, %xmm0
 ; AVX512F-32-NEXT: vpextrd $2, %xmm0, %eax
 ; AVX512F-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
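
The trailing vpextrd $2/vpinsrd $2 pair survives because it moves a lane from %xmm0 into %xmm1: source and destination vectors differ, so the InVec == InVal->getOperand(0) condition fails and the combine correctly leaves it alone. A sketch of that non-folding shape (hypothetical name, not from the patch):

  define <4 x i32> @insert_extract_diff_vec(<4 x i32> %x, <4 x i32> %y) {
    ; reads from %x but writes into %y, so this is not a degenerate insert
    %e = extractelement <4 x i32> %x, i32 2
    %v = insertelement <4 x i32> %y, i32 %e, i32 2
    ret <4 x i32> %v
  }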