
[RISCV] Fix ICE in isDesirableToCommuteWithShift

Summary:
Ana Pazos reported a bug where we were not checking that an APInt would
fit into 64 bits before calling `getSExtValue()`. This caused assertion
failures when compiling large constants, such as i128s, which come up when
compiling compiler-rt.

This patch adds a testcase and makes the callback less error-prone by
guarding both `getSExtValue()` calls with a `getMinSignedBits() <= 64` check.
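For reference, `APInt::getSExtValue()` asserts unless the value fits in a
signed 64-bit integer, which is why the width check has to come first. Below
is a minimal standalone sketch of the guarded pattern; it only assumes
`llvm/ADT/APInt.h`, and `isLegalAddImmediateSketch` is a hypothetical
stand-in for the target hook, not the in-tree implementation.

// Minimal sketch (not in-tree code): guard getSExtValue() with a width check,
// because APInt::getSExtValue() asserts when the value needs more than 64 bits.
#include "llvm/ADT/APInt.h"
#include <cstdio>

// Hypothetical stand-in for the isLegalAddImmediate target hook on RISC-V,
// where an ADDI immediate is a signed 12-bit value.
static bool isLegalAddImmediateSketch(int64_t Imm) {
  return Imm >= -2048 && Imm <= 2047;
}

int main() {
  // An i128 constant that needs well over 64 bits once shifted.
  llvm::APInt C1Int(128, 1);
  llvm::APInt ShiftedC1Int = C1Int << 100;

  // Calling ShiftedC1Int.getSExtValue() without the width check would trip
  // the assertion; the getMinSignedBits() test short-circuits first.
  if (ShiftedC1Int.getMinSignedBits() <= 64 &&
      isLegalAddImmediateSketch(ShiftedC1Int.getSExtValue()))
    std::printf("shifted constant fits an add immediate\n");
  else
    std::printf("shifted constant cannot be used as an add immediate\n");
  return 0;
}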

Reviewers: apazos, asb, luismarques

Reviewed By: luismarques

Subscribers: hiraditya, rbar, johnrusso, simoncook, sabuasal, niosHD, kito-cheng, shiva0217, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, s.egerton, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D66081

llvm-svn: 368572
Sam Elliott 2019-08-12 13:51:00 +00:00
parent 2430b215ca
commit 8662789349
2 changed files with 44 additions and 2 deletions

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

@@ -1031,12 +1031,14 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
       // We can materialise `c1 << c2` into an add immediate, so it's "free",
       // and the combine should happen, to potentially allow further combines
       // later.
-      if (isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
+      if (ShiftedC1Int.getMinSignedBits() <= 64 &&
+          isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
         return true;
 
       // We can materialise `c1` in an add immediate, so it's "free", and the
       // combine should be prevented.
-      if (isLegalAddImmediate(C1Int.getSExtValue()))
+      if (C1Int.getMinSignedBits() <= 64 &&
+          isLegalAddImmediate(C1Int.getSExtValue()))
         return false;
 
       // Neither constant will fit into an immediate, so find materialisation

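The hunk above only changes when the (shl (add x, c1), c2) -> (add (shl x, c2),
c1 << c2) rewrite is considered desirable. The sketch below, which assumes a
signed 12-bit ADDI immediate range and uses an illustrative fitsAddi helper
rather than the in-tree isLegalAddImmediate, shows the identity the combine
relies on and why commuting is only "free" while c1 << c2 still fits that range.

// Illustrative sketch, not LLVM code: the combine rewrites (x + c1) << c2 into
// (x << c2) + (c1 << c2); the hook only decides whether the shifted constant
// is still cheap to materialise as an add immediate.
#include <cassert>
#include <cstdint>

// Assumed RISC-V ADDI immediate range: a signed 12-bit value.
static bool fitsAddi(int64_t Imm) { return Imm >= -2048 && Imm <= 2047; }

int main() {
  int64_t x = 12345, c1 = 100, c2 = 4;

  // The rewrite itself is always sound.
  assert(((x + c1) << c2) == ((x << c2) + (c1 << c2)));

  // c1 << c2 == 1600 still fits an ADDI immediate, so commuting is "free".
  assert(fitsAddi(c1 << c2));

  // A constant that fits before the shift but not after: here the original
  // form is cheaper, so the combine should be prevented.
  int64_t c1Big = 2000;
  assert(fitsAddi(c1Big) && !fitsAddi(c1Big << c2));
  return 0;
}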
llvm/test/CodeGen/RISCV/add-before-shl.ll

@@ -91,3 +91,43 @@ define signext i24 @add_non_machine_type(i24 signext %a) nounwind {
   %2 = shl i24 %1, 12
   ret i24 %2
 }
+
+define i128 @add_wide_operand(i128 %a) nounwind {
+; RV32I-LABEL: add_wide_operand:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lw a2, 0(a1)
+; RV32I-NEXT:    srli a3, a2, 29
+; RV32I-NEXT:    lw a4, 4(a1)
+; RV32I-NEXT:    slli a5, a4, 3
+; RV32I-NEXT:    or a6, a5, a3
+; RV32I-NEXT:    srli a4, a4, 29
+; RV32I-NEXT:    lw a5, 8(a1)
+; RV32I-NEXT:    slli a3, a5, 3
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    slli a2, a2, 3
+; RV32I-NEXT:    sw a2, 0(a0)
+; RV32I-NEXT:    sw a3, 8(a0)
+; RV32I-NEXT:    sw a6, 4(a0)
+; RV32I-NEXT:    srli a2, a5, 29
+; RV32I-NEXT:    lw a1, 12(a1)
+; RV32I-NEXT:    slli a1, a1, 3
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    lui a2, 128
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    sw a1, 12(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: add_wide_operand:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 3
+; RV64I-NEXT:    srli a2, a0, 61
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    slli a2, a2, 51
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a0, a0, 3
+; RV64I-NEXT:    ret
+  %1 = add i128 %a, 5192296858534827628530496329220096
+  %2 = shl i128 %1, 3
+  ret i128 %2
+}
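
The constant in the new test above is exactly 2^112, so after the shl by 3 the
commuted constant would be 2^115, which is far too wide for any add immediate
on either RV32 or RV64. The sketch below is an illustrative check of that
arithmetic using APInt; it is not part of the test and the operand value is
arbitrary.

// Sketch checking the arithmetic behind add_wide_operand: the IR constant is
// 1 << 112, the shl by 3 moves it to bit 115, and (a + C) << 3 equals
// (a << 3) + (C << 3), the commuted form the combine would try to produce.
#include "llvm/ADT/APInt.h"
#include <cassert>

using llvm::APInt;

int main() {
  APInt C(128, "5192296858534827628530496329220096", 10);
  assert(C == APInt::getOneBitSet(128, 112));

  APInt A(128, 0x12345678u); // arbitrary operand value
  assert(((A + C) << 3) == ((A << 3) + (C << 3)));
  assert((C << 3) == APInt::getOneBitSet(128, 115));

  // Bit 115 is bit 19 of the i128's top 32-bit word (115 - 96) and bit 51 of
  // its top 64-bit word (115 - 64), matching `lui a2, 128` in the RV32I output
  // and the `addi`/`slli a2, a2, 51` sequence in the RV64I output.
  assert(C.getMinSignedBits() > 64); // too wide for getSExtValue()
  return 0;
}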