1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-24 19:52:54 +01:00
llvm-mirror/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
Hao Liu 6c7f917b92 [AArch64]Select wide immediate offset into [Base+XReg] addressing mode
e.g. Currently we'll generate the following instructions if the immediate is too wide:
    MOV  X0, WideImmediate
    ADD  X1, BaseReg, X0
    LDR  X2, [X1, 0]

    Using [Base+XReg] addressing mode can save one ADD as following:
    MOV  X0, WideImmediate
    LDR  X2, [BaseReg, X0]

    Differential Revision: http://reviews.llvm.org/D5477

llvm-svn: 219665
2014-10-14 06:50:36 +00:00

32 lines
905 B
LLVM

; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
; Can't fold the increment by 1<<12 into a post-increment load
; rdar://10301335
;
; Regression test for the AArch64 load/store optimizer: the loop below
; advances the loaded address by 1<<12 (4096) bytes per iteration, which is
; outside the signed 9-bit immediate range of a post-indexed LDR. The
; compiler must therefore keep the address increment as a separate
; `add ..., #1, lsl #12` rather than folding it into a post-increment load.
; The CHECK lines pin exactly that: a register-offset load plus a distinct
; shifted-immediate add inside the loop body.
;
; NOTE(review): this file uses the pre-LLVM-3.8 typed-pointer syntax
; (`load volatile i32* %1` with no separate result type) — it predates the
; explicit-load-type and opaque-pointer migrations; do not "modernize" it
; without re-running the upgrade tooling.

; Global sink for the loaded values; `volatile` on the accesses keeps the
; loop body from being optimized away.
@test_data = common global i32 0, align 4

define void @t() nounwind ssp {
; CHECK-LABEL: t:
entry:
br label %for.body
for.body:
; CHECK: for.body
; Expect a [base, Xreg] register-offset load, NOT a post-indexed form:
; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, x{{[0-9]+}}]
; ...and a separate address increment of 4096 expressed as #1, lsl #12:
; CHECK: add x[[REG:[0-9]+]],
; CHECK: x[[REG]], #1, lsl #12
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
; Address = 34628173824 + iv*4096; the 1<<12 stride is what must not fold.
%0 = shl nsw i64 %indvars.iv, 12
%add = add nsw i64 %0, 34628173824
%1 = inttoptr i64 %add to i32*
%2 = load volatile i32* %1, align 4096
store volatile i32 %2, i32* @test_data, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; Loop runs 200 iterations (iv = 0..199), then exits.
%exitcond = icmp eq i32 %lftr.wideiv, 200
br i1 %exitcond, label %for.end, label %for.body
for.end:
ret void
}