mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-25 12:12:47 +01:00
0a0548042f
We can often fold an ADDI into the offset of load/store instructions: (load (addi base, off1), off2) -> (load base, off1+off2) (store val, (addi base, off1), off2) -> (store val, base, off1+off2) This is possible when the off1+off2 continues to fit the 12-bit immediate. We remove the previous restriction where we would never fold the ADDIs if the load/stores had nonzero offsets. We now do the fold if the resulting constant still fits a 12-bit immediate, or if off1 is a variable's address and we know based on that variable's alignment that off1+off2 won't overflow. Differential Revision: https://reviews.llvm.org/D79690
30 lines
836 B
LLVM
30 lines
836 B
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
|
|
; RUN: | FileCheck %s -check-prefix=RV32I
|
|
|
|
; Check load/store operations on values wider than what is natively supported
|
|
|
|
; Baseline case: an i64 load through a pointer argument. On RV32 the i64
; load is legalized into two 32-bit lw instructions at offsets 0 and 4.
; The low word is loaded into a scratch register (a2) first because a0 is
; both the pointer and the low half of the return value, then moved into
; a0 after the high-word load.
define i64 @load_i64(i64 *%a) nounwind {
; RV32I-LABEL: load_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a2, 0(a0)
; RV32I-NEXT: lw a1, 4(a0)
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: ret
%1 = load i64, i64* %a
ret i64 %1
}
|
|
|
|
; 2863311530 = 0xAAAAAAAA (low word 0xAAAAAAAA, high word 0). The align 8
; matters for the test below: per the commit description, the 8-byte
; alignment lets the backend prove that %lo(val64)+4 cannot overflow the
; 12-bit immediate, enabling the %lo(val64+4) offset fold.
@val64 = local_unnamed_addr global i64 2863311530, align 8
|
|
|
|
; Loads an i64 directly from a global. This exercises the offset fold the
; commit adds: instead of materializing val64's address with lui+addi and
; loading at offsets 0/4, the addi is folded into the loads so a single
; lui feeds both lw instructions via %lo(val64) and %lo(val64+4). The
; fold is legal here because val64's 8-byte alignment guarantees the +4
; does not overflow the 12-bit %lo immediate.
define i64 @load_i64_global() nounwind {
; RV32I-LABEL: load_i64_global:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, %hi(val64)
; RV32I-NEXT: lw a0, %lo(val64)(a1)
; RV32I-NEXT: lw a1, %lo(val64+4)(a1)
; RV32I-NEXT: ret
%1 = load i64, i64* @val64
ret i64 %1
}
|