[RISCV] Improve 64-bit integer constant materialization for more cases.
For positive constants we try shifting left to remove leading zeros and fill the bottom bits with 1s. We then materialize that constant and shift it right. This patch adds a new strategy that tries filling the bottom bits with zeros instead. This catches some additional cases.
commit 6e55b005fb
parent 8dc5ea9ce9
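To make the strategy described above concrete, here is a minimal standalone sketch (an illustration only, not the patch itself or LLVM's API; the helper showCandidates and the use of the GCC/Clang builtin __builtin_clzll are assumptions made for brevity). It builds both fill candidates for a positive constant and shows that a single logical right shift recovers the original value either way; the patch keeps whichever candidate materializes in fewer instructions.

#include <cstdint>
#include <cstdio>

// Build the two shifted candidates for a positive 64-bit constant:
// fill the vacated low bits with 1s (existing strategy) or with 0s
// (this patch).  Either candidate, once materialized, is followed by
// one SRLI to recover the original value.
static void showCandidates(uint64_t Val) {
  unsigned Shift = __builtin_clzll(Val);               // leading zero count, Val > 0
  uint64_t FillOnes  = (Val << Shift) | ((1ULL << Shift) - 1);
  uint64_t FillZeros =  Val << Shift;
  std::printf("val=%#llx  ones=%#llx  zeros=%#llx  (both >> %u give val back)\n",
              (unsigned long long)Val, (unsigned long long)FillOnes,
              (unsigned long long)FillZeros, Shift);
}

int main() {
  // 0xFFFFFFFFF001 (see the imm.ll hunk below): filling with zeros yields
  // 0xFFFFFFFFF0010000, a plain LUI immediate, so LUI+SRLI beats the old
  // LUI+ADDIW+SRLI sequence.
  showCandidates(0xFFFFFFFFF001ULL);
  return 0;
}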
@@ -94,6 +94,16 @@ InstSeq generateInstSeq(int64_t Val, bool IsRV64) {
     generateInstSeqImpl(Val, IsRV64, TmpSeq);
     TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, ShiftAmount));
 
+    // Keep the new sequence if it is an improvement.
+    if (TmpSeq.size() < Res.size())
+      Res = TmpSeq;
+
+    // Some cases can benefit from filling the lower bits with zeros instead.
+    Val &= maskTrailingZeros<uint64_t>(ShiftAmount);
+    TmpSeq.clear();
+    generateInstSeqImpl(Val, IsRV64, TmpSeq);
+    TmpSeq.push_back(RISCVMatInt::Inst(RISCV::SRLI, ShiftAmount));
+
     // Keep the new sequence if it is an improvement.
     if (TmpSeq.size() < Res.size())
       Res = TmpSeq;
@@ -347,10 +347,9 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    lui a0, 256
-; RV64IF-NEXT:    addiw a0, a0, -11
-; RV64IF-NEXT:    slli a0, a0, 12
-; RV64IF-NEXT:    addi t0, a0, -1792
+; RV64IF-NEXT:    addi a0, zero, -183
+; RV64IF-NEXT:    slli a0, a0, 40
+; RV64IF-NEXT:    srli t0, a0, 32
 ; RV64IF-NEXT:    addi a0, zero, 1
 ; RV64IF-NEXT:    addi a1, zero, 2
 ; RV64IF-NEXT:    addi a2, zero, 3
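As a sanity check on the new three-instruction RV64IF sequence above (not part of the patch; plain host-side arithmetic standing in for the RISC-V semantics):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t a0 = (uint64_t)(int64_t)-183; // addi a0, zero, -183 -> 0xFFFFFFFFFFFFFF49
  a0 <<= 40;                             // slli a0, a0, 40     -> 0xFFFF490000000000
  uint64_t t0 = a0 >> 32;                // srli t0, a0, 32     -> 0x00000000FFFF4900
  assert(t0 == 0xFFFF4900ULL);           // same bits the old four-instruction sequence produced
  return 0;
}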
@@ -375,8 +375,7 @@ define i64 @imm_right_shifted_lui_1() nounwind {
 ;
 ; RV64I-LABEL: imm_right_shifted_lui_1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a0, 983072
-; RV64I-NEXT:    addiw a0, a0, -1
+; RV64I-NEXT:    lui a0, 983056
 ; RV64I-NEXT:    srli a0, a0, 16
 ; RV64I-NEXT:    ret
   ret i64 281474976706561 ; 0xFFFF_FFFF_F001
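The same kind of check works for the shortened imm_right_shifted_lui_1 sequence (again only an illustrative sketch; LUI loads its 20-bit immediate shifted left by 12 and sign-extended to 64 bits):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t a0 = 0xFFFFFFFFF0010000ULL; // lui a0, 983056: 0xF0010 << 12, sign-extended
  uint64_t r  = a0 >> 16;              // srli a0, a0, 16
  assert(r == 0xFFFFFFFFF001ULL);      // 281474976706561, the value the test returns
  return 0;
}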
@@ -118,8 +118,7 @@ li x5, 0x100004000
 # CHECK-EXPAND-NEXT: addiw t1, t1, 1
 # CHECK-EXPAND-NEXT: slli t1, t1, 32
 li x6, 0x100100000000
-# CHECK-EXPAND: lui t2, 983072
-# CHECK-EXPAND-NEXT: addiw t2, t2, -1
+# CHECK-EXPAND: lui t2, 983056
 # CHECK-EXPAND-NEXT: srli t2, t2, 16
 li x7, 0xFFFFFFFFF001
 # CHECK-EXPAND: lui s0, 65536