test/CodeGen/RISCV/rv64-large-stack.ll
commit a2254d3fcc
Author: Craig Topper
Date:   2021-07-20 09:22:06 -07:00

[RISCV] Teach RISCVMatInt about cases where it can use LUI+SLLI to replace LUI+ADDI+SLLI for large constants.
If we need to shift left anyway, we might be able to take advantage
of LUI implicitly shifting its immediate left by 12 to cover part
of the shift. This allows us to use more bits of the LUI immediate
to avoid an ADDI.
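
For illustration (an added sketch, not part of the original commit
message), take the constant 3200000000 = 390625 << 13, the shifted
part of the stack offset in the updated test below. Assuming the
typical RV64 expansion of 390625 via LUI+ADDIW, the sequences before
and after this change look like:

    # Before: materialize 390625, then perform the whole 13-bit shift.
    lui   a0, 95          # 95 << 12        = 389120
    addiw a0, a0, 1505    # 389120 + 1505   = 390625
    slli  a0, a0, 13      # 390625 << 13    = 3200000000

    # After: LUI's implicit shift by 12 covers 12 of the 13 shift bits.
    lui   a0, 390625      # 390625 << 12    = 1600000000
    slli  a0, a0, 1       # 1600000000 << 1 = 3200000000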

isDesirableToCommuteWithShift now considers compressed instruction
opportunities when deciding if commuting should be allowed.
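
As a hypothetical illustration of the compression angle (the values
here are chosen for this note, not taken from the patch): DAGCombiner
may rewrite (x + 16) << 4 as (x << 4) + 256, but doing so can trade a
compressible c.addi for a full-size one:

    # Not commuted: both instructions have 2-byte compressed forms.
    addi a0, a0, 16       # 16 fits c.addi's 6-bit immediate
    slli a0, a0, 4        # fits c.slli

    # Commuted: 16 << 4 = 256 no longer fits c.addi.
    slli a0, a0, 4        # still fits c.slli
    addi a0, a0, 256      # needs the full 4-byte addi encoding

With this change the hook can decline to commute in cases like this,
where commuting would lose a compressed encoding.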

I believe this is the same or similar to one of the optimizations
from D79492.

Reviewed By: luismarques, arcbbb

Differential Revision: https://reviews.llvm.org/D105417

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s
;
; The test case checks that RV64 can handle stack adjustment offsets
; that exceed 32 bits.
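;
; For reference (explanatory note, not autogenerated): the alloca below
; needs 100000000 * 32 = 3,200,000,000 bytes, so the whole frame comes to
; 3,200,000,032 bytes, well beyond the 32-bit signed range (2^31 =
; 2,147,483,648). Frame lowering splits the adjustment into 2032 (folded
; into addi sp) plus 3,199,998,000 = (390625 << 13) - 2000, which is
; materialized with LUI+SLLI+ADDI as checked below.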
define void @foo() nounwind {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -2032
; CHECK-NEXT: sd ra, 2024(sp) # 8-byte Folded Spill
; CHECK-NEXT: lui a0, 390625
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -2000
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: call baz@plt
; CHECK-NEXT: lui a0, 390625
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -2000
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 2032
; CHECK-NEXT: ret
entry:
%w = alloca [100000000 x { fp128, fp128 }], align 16
%arraydecay = getelementptr inbounds [100000000 x { fp128, fp128 }], [100000000 x { fp128, fp128 }]* %w, i64 0, i64 0
call void @baz({ fp128, fp128 }* nonnull %arraydecay)
ret void
}
declare void @baz({ fp128, fp128 }*)