From 3f3acbc661ffe6b3148499c6a52e97bd9be0535c Mon Sep 17 00:00:00 2001 From: Max Kazantsev Date: Thu, 4 Mar 2021 15:41:22 +0700 Subject: [PATCH] [X86][CodeGenPrepare] Try to reuse IV's incremented value instead of adding the offset, part 2 This patch enables the case where we do not completely eliminate offset. Supposedly, in this case we reduce the overlap of live ranges, which should never be harmful; however, since there are doubts that this is always true, this goes in as a separate change. Differential Revision: https://reviews.llvm.org/D96399 Reviewed By: reames --- lib/CodeGen/CodeGenPrepare.cpp | 6 ++++-- test/CodeGen/X86/2020_12_02_decrementing_loop.ll | 6 ++---- test/CodeGen/X86/overflowing-iv.ll | 4 ++-- test/CodeGen/X86/usub_inc_iv.ll | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp index ecf14b1653c..5d6c8ec0569 100644 --- a/lib/CodeGen/CodeGenPrepare.cpp +++ b/lib/CodeGen/CodeGenPrepare.cpp @@ -3884,13 +3884,15 @@ bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, // In this case, we may reuse the IV increment instead of the IV Phi to // achieve the following advantages: // 1. If IV step matches the offset, we will have no need in the offset; + // 2. Even if they don't match, we will reduce the overlap of living IV + // and IV increment, that will potentially lead to better register + // assignment. 
if (AddrMode.BaseOffs) { if (auto IVStep = GetConstantStep(ScaleReg)) { Instruction *IVInc = IVStep->first; APInt Step = IVStep->second; APInt Offset = Step * AddrMode.Scale; - if (Offset.isSignedIntN(64) && TestAddrMode.BaseOffs == Offset && - DT.dominates(IVInc, MemoryInst)) { + if (Offset.isSignedIntN(64) && DT.dominates(IVInc, MemoryInst)) { TestAddrMode.InBounds = false; TestAddrMode.ScaledReg = IVInc; TestAddrMode.BaseOffs -= Offset.getLimitedValue(); diff --git a/test/CodeGen/X86/2020_12_02_decrementing_loop.ll b/test/CodeGen/X86/2020_12_02_decrementing_loop.ll index 900e12d71d6..c004523f19d 100644 --- a/test/CodeGen/X86/2020_12_02_decrementing_loop.ll +++ b/test/CodeGen/X86/2020_12_02_decrementing_loop.ll @@ -44,16 +44,14 @@ failure: ; preds = %backedge define i32 @test_01a(i32* %p, i64 %len, i32 %x) { ; CHECK-LABEL: test_01a: ; CHECK: ## %bb.0: ## %entry -; CHECK-NEXT: movq %rsi, %rax ; CHECK-NEXT: .p2align 4, 0x90 ; CHECK-NEXT: LBB1_1: ## %loop ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: subq $1, %rax +; CHECK-NEXT: subq $1, %rsi ; CHECK-NEXT: jb LBB1_4 ; CHECK-NEXT: ## %bb.2: ## %backedge ; CHECK-NEXT: ## in Loop: Header=BB1_1 Depth=1 -; CHECK-NEXT: cmpl %edx, -28(%rdi,%rsi,4) -; CHECK-NEXT: movq %rax, %rsi +; CHECK-NEXT: cmpl %edx, -24(%rdi,%rsi,4) ; CHECK-NEXT: jne LBB1_1 ; CHECK-NEXT: ## %bb.3: ## %failure ; CHECK-NEXT: ud2 diff --git a/test/CodeGen/X86/overflowing-iv.ll b/test/CodeGen/X86/overflowing-iv.ll index 1d5f3c2034b..35234470c85 100644 --- a/test/CodeGen/X86/overflowing-iv.ll +++ b/test/CodeGen/X86/overflowing-iv.ll @@ -12,10 +12,10 @@ define i32 @test_01(i32* %p, i64 %len, i32 %x) { ; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i64 [[IV]], [[LEN:%.*]] ; CHECK-NEXT: br i1 [[COND_1]], label [[EXIT:%.*]], label [[BACKEDGE]] ; CHECK: backedge: -; CHECK-NEXT: [[SUNKADDR:%.*]] = mul i64 [[IV]], 4 +; CHECK-NEXT: [[SUNKADDR:%.*]] = mul i64 [[IV_NEXT]], 4 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[P:%.*]] to i8* ; CHECK-NEXT: 
[[SUNKADDR1:%.*]] = getelementptr i8, i8* [[TMP0]], i64 [[SUNKADDR]] -; CHECK-NEXT: [[SUNKADDR2:%.*]] = getelementptr i8, i8* [[SUNKADDR1]], i64 -4 +; CHECK-NEXT: [[SUNKADDR2:%.*]] = getelementptr i8, i8* [[SUNKADDR1]], i64 -8 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[SUNKADDR2]] to i32* ; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, i32* [[TMP1]] unordered, align 4 ; CHECK-NEXT: [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]] diff --git a/test/CodeGen/X86/usub_inc_iv.ll b/test/CodeGen/X86/usub_inc_iv.ll index 7744319f412..a3097e9314d 100644 --- a/test/CodeGen/X86/usub_inc_iv.ll +++ b/test/CodeGen/X86/usub_inc_iv.ll @@ -59,10 +59,10 @@ define i32 @test_01a(i32* %p, i64 %len, i32 %x) { ; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1 ; CHECK-NEXT: br i1 [[OV]], label [[EXIT:%.*]], label [[BACKEDGE]] ; CHECK: backedge: -; CHECK-NEXT: [[SUNKADDR:%.*]] = mul i64 [[IV]], 4 +; CHECK-NEXT: [[SUNKADDR:%.*]] = mul i64 [[MATH]], 4 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[P:%.*]] to i8* ; CHECK-NEXT: [[SUNKADDR1:%.*]] = getelementptr i8, i8* [[TMP1]], i64 [[SUNKADDR]] -; CHECK-NEXT: [[SUNKADDR2:%.*]] = getelementptr i8, i8* [[SUNKADDR1]], i64 -28 +; CHECK-NEXT: [[SUNKADDR2:%.*]] = getelementptr i8, i8* [[SUNKADDR1]], i64 -24 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[SUNKADDR2]] to i32* ; CHECK-NEXT: [[LOADED:%.*]] = load atomic i32, i32* [[TMP2]] unordered, align 4 ; CHECK-NEXT: [[COND_2:%.*]] = icmp eq i32 [[LOADED]], [[X:%.*]]