
[RISCV] Restrict performANY_EXTENDCombine to prevent an infinite loop.

The sign_extend we insert here can get turned into a zero_extend if
the sign bit is known zero. That can enable a setcc combine that
shrinks compares involving the zero_extend. This reduces the use
count of the zero_extend, allowing other combines to turn it back
into an any_extend.
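
Sketched in SelectionDAG notation (node names here are illustrative,
not taken from an actual dump), the cycle looks roughly like:

    t2: i64 = any_extend t1     ; performANY_EXTENDCombine rewrites this...
    t2: i64 = sign_extend t1    ; ...the sign bit is known zero, so it
    t2: i64 = zero_extend t1    ;    becomes a zero_extend; a setcc combine
                                ;    then drops a use of the zero_extend...
    t2: i64 = any_extend t1     ; ...and the extend relaxes back, repeating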

This patch restricts the combine to cases where the result is used
by a CopyToReg, which typically means the value is live out of the
basic block (see the none_of guard in the hunk below). This works
for my original motivating case, and I hope the CopyToReg requirement
will prevent any converted extends from turning back into an
any_extend.

Reviewed By: luismarques

Differential Revision: https://reviews.llvm.org/D106754

(cherry picked from commit 54588bcc052e5b08f90e672c33d0c1ad4eda2424)
Author:  Craig Topper, 2021-07-28 08:57:02 -07:00 (committed by Tom Stellard)
Parent:  276fcebbe0
Commit:  7c9c296915

2 changed files with 72 additions and 0 deletions

lib/Target/RISCV/RISCVISelLowering.cpp

@@ -5814,6 +5814,13 @@ static SDValue performANY_EXTENDCombine(SDNode *N,
     break;
   }
 
+  // Only handle cases where the result is used by a CopyToReg that likely
+  // means the value is a liveout of the basic block. This helps prevent
+  // infinite combine loops like PR51206.
+  if (none_of(N->uses(),
+              [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
+    return SDValue();
+
   SmallVector<SDNode *, 4> SetCCs;
   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
                             UE = Src.getNode()->use_end();
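
For readers unfamiliar with the ADT helper: llvm::none_of is a
range-based wrapper around std::none_of from <algorithm>. As a
minimal standalone sketch of the same use-scan, with a hypothetical
Node type standing in for SDNode (illustration only, not the LLVM
API):

    #include <algorithm>
    #include <vector>

    enum class Opc { CopyToReg, SetCC, Other }; // stand-ins for ISD opcodes
    struct Node { Opc Opcode; };                // hypothetical SDNode stand-in

    // True when no user of the node is a CopyToReg, i.e. the case where
    // the guard above bails out of the combine.
    bool noCopyToRegUser(const std::vector<Node *> &Users) {
      return std::none_of(Users.begin(), Users.end(),
                          [](const Node *U) { return U->Opcode == Opc::CopyToReg; });
    }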

test/CodeGen/RISCV/pr51206.ll

@@ -0,0 +1,65 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64-unknown-linux-gnu -mattr=+m | FileCheck %s
; This test used to cause an infinite loop.
@global = global i8 0, align 1
@global.1 = global i32 0, align 4
@global.2 = global i8 0, align 1
@global.3 = global i32 0, align 4
define signext i32 @wobble() nounwind {
; CHECK-LABEL: wobble:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: lui a0, %hi(global)
; CHECK-NEXT: lbu a0, %lo(global)(a0)
; CHECK-NEXT: lui a1, %hi(global.2)
; CHECK-NEXT: lbu a1, %lo(global.2)(a1)
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: lui a2, %hi(global.1)
; CHECK-NEXT: sw a0, %lo(global.1)(a2)
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: lui a1, 16
; CHECK-NEXT: addiw a1, a1, -1
; CHECK-NEXT: and a1, a0, a1
; CHECK-NEXT: lui a2, 13
; CHECK-NEXT: addiw a2, a2, -819
; CHECK-NEXT: mul a1, a1, a2
; CHECK-NEXT: srli a1, a1, 18
; CHECK-NEXT: lui a2, %hi(global.3)
; CHECK-NEXT: addi a3, zero, 5
; CHECK-NEXT: sw a1, %lo(global.3)(a2)
; CHECK-NEXT: bltu a0, a3, .LBB0_2
; CHECK-NEXT: # %bb.1: # %bb10
; CHECK-NEXT: call quux@plt
; CHECK-NEXT: .LBB0_2: # %bb12
; CHECK-NEXT: mv a0, zero
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
bb:
  %tmp = load i8, i8* @global, align 1
  %tmp1 = zext i8 %tmp to i32
  %tmp2 = add nuw nsw i32 %tmp1, 1
  store i32 %tmp2, i32* @global.1, align 4
  %tmp3 = load i8, i8* @global.2, align 1
  %tmp4 = zext i8 %tmp3 to i32
  %tmp5 = mul nuw nsw i32 %tmp2, %tmp4
  %tmp6 = trunc i32 %tmp5 to i16
  %tmp7 = udiv i16 %tmp6, 5
  %tmp8 = zext i16 %tmp7 to i32
  store i32 %tmp8, i32* @global.3, align 4
  %tmp9 = icmp ult i32 %tmp5, 5
  br i1 %tmp9, label %bb12, label %bb10

bb10:                                             ; preds = %bb
  %tmp11 = tail call signext i32 bitcast (i32 (...)* @quux to i32 ()*)()
  br label %bb12

bb12:                                             ; preds = %bb10, %bb
  ret i32 undef
}
declare signext i32 @quux(...)
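
For reference, the RUN line above corresponds to roughly this manual
invocation (with %s expanded to the test path, assumed here to be
test/CodeGen/RISCV/pr51206.ll):

    llc < test/CodeGen/RISCV/pr51206.ll -mtriple=riscv64-unknown-linux-gnu \
      -mattr=+m | FileCheck test/CodeGen/RISCV/pr51206.ll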