llvm-mirror/test/CodeGen/RISCV/wide-mem.ll
Sameer AbuAsal 103277308c [RISCV] Separate base from offset in lowerGlobalAddress
Summary:
When lowering a global address, lower the base as a TargetGlobalAddress first,
then create a separate SDNode for the offset and chain it to the address
calculation.

This optimization creates a DAG in which the base address of a global is
reused between different accesses. The offset can later be folded into the
immediate part of the memory access instruction.

  With this optimization we generate:

    lui a0, %hi(s)
    addi a0, a0, %lo(s) ; shared base address.

    addi a1, zero, 20 ; 2 instructions per access.
    sw a1, 44(a0)

    addi a1, zero, 10
    sw a1, 8(a0)

    addi a1, zero, 30
    sw a1, 80(a0)

    Instead of:

    lui a0, %hi(s+44) ; 3 instructions per access.
    addi a1, zero, 20
    sw a1, %lo(s+44)(a0)

    lui a0, %hi(s+8)
    addi a1, zero, 10
    sw a1, %lo(s+8)(a0)

    lui a0, %hi(s+80)
    addi a1, zero, 30
    sw a1, %lo(s+80)(a0)

    This saves one instruction per access.
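
To illustrate the mechanism, here is a minimal C++ sketch of the shape this
lowering takes. It is an illustrative sketch, not the code from this patch:
the function name is made up, and a real backend (including RISCV) additionally
wraps the TargetGlobalAddress in target-specific %hi/%lo materialisation nodes
(lui/addi) before the base is usable.

    #include "llvm/CodeGen/ISDOpcodes.h"
    #include "llvm/CodeGen/SelectionDAG.h"

    using namespace llvm;

    // Sketch only: split a global address into a reusable base plus a
    // separate ADD for any constant offset.
    static SDValue lowerGlobalAddressSketch(SDValue Op, SelectionDAG &DAG) {
      SDLoc DL(Op);
      EVT Ty = Op.getValueType();
      GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
      const GlobalValue *GV = N->getGlobal();
      int64_t Offset = N->getOffset();

      // Emit the symbol with a zero offset so every access to the same
      // global produces an identical base subtree that CSE can reuse.
      SDValue Base = DAG.getTargetGlobalAddress(GV, DL, Ty, /*Offset=*/0);

      // Chain any constant offset on as a separate ADD instead of folding
      // it into the relocation; a later peephole can still fold it into
      // the memory instruction's immediate.
      if (Offset != 0)
        return DAG.getNode(ISD::ADD, DL, Ty, Base,
                           DAG.getConstant(Offset, DL, Ty));
      return Base;
    }

Keeping the offset as a plain ADD node is what allows the lui/addi pair for
the base to be shared between accesses, while still permitting small offsets
to be folded back into the load/store immediate when that is profitable.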

Reviewers: asb, apazos

Reviewed By: asb

Subscribers: rbar, johnrusso, simoncook, jordy.potman.lists, niosHD, kito-cheng, shiva0217, zzheng, edward-jones, mgrang, apazos, asb, llvm-commits

Differential Revision: https://reviews.llvm.org/D46989

llvm-svn: 332641
2018-05-17 18:14:53 +00:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I

; Check load/store operations on values wider than what is natively supported

define i64 @load_i64(i64 *%a) nounwind {
; RV32I-LABEL: load_i64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a2, 0(a0)
; RV32I-NEXT:    lw a1, 4(a0)
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    ret
  %1 = load i64, i64* %a
  ret i64 %1
}

@val64 = local_unnamed_addr global i64 2863311530, align 8

define i64 @load_i64_global() nounwind {
; RV32I-LABEL: load_i64_global:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lui a1, %hi(val64)
; RV32I-NEXT:    lw a0, %lo(val64)(a1)
; RV32I-NEXT:    addi a1, a1, %lo(val64)
; RV32I-NEXT:    lw a1, 4(a1)
; RV32I-NEXT:    ret
  %1 = load i64, i64* @val64
  ret i64 %1
}