llvm-mirror/test/CodeGen/ARM64/collect-loh.ll
Tim Northover (commit 2f13163a84): ARM64: initial backend import
This adds a second implementation of the AArch64 architecture to LLVM,
accessible in parallel via the "arm64" triple. The plan over the
coming weeks & months is to merge the two into a single backend,
during which time thorough code review should naturally occur.

Everything will be easier with the target in-tree though, hence this
commit.

llvm-svn: 205090
2014-03-29 10:18:08 +00:00


; RUN: llc -mtriple=arm64-apple-ios -O2 -arm64-collect-loh -arm64-collect-loh-bb-only=false < %s -o - | FileCheck %s

@a = internal unnamed_addr global i32 0, align 4
@b = external global i32
; Function Attrs: noinline nounwind ssp
define void @foo(i32 %t) {
entry:
  %tmp = load i32* @a, align 4
  %add = add nsw i32 %tmp, %t
  store i32 %add, i32* @a, align 4
  ret void
}

; Function Attrs: nounwind ssp
; Testcase for <rdar://problem/15438605>: AdrpAdrp reuse is valid only when the
; first adrp dominates the second.
; The first adrp comes from the load of 'a' and the second from the load of 'b'.
; 'a' is loaded in if.then and 'b' in if.end4; because if.end4 is also reachable
; directly from entry, if.then does not dominate if.end4, so no AdrpAdrp hint may
; be emitted here (see the sketch after the CHECK lines below).
; CHECK-LABEL: _test
; CHECK: ret
; CHECK-NOT: .loh AdrpAdrp
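;
; For context, when the dominance requirement IS met, the collect-loh pass marks
; the paired adrp instructions with labels and emits a linker optimization hint
; directive. The following is only a sketch of that shape (the Lloh label names
; and the register choice are illustrative assumptions, not taken from this
; test's actual output):
;   Lloh0:
;     adrp x8, _a@PAGE
;     ...
;   Lloh1:
;     adrp x8, _b@PAGE
;   .loh AdrpAdrp Lloh0, Lloh1
; The CHECK-NOT above asserts that no such directive is emitted for this CFG.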
define i32 @test(i32 %t) {
entry:
  %cmp = icmp sgt i32 %t, 5
  br i1 %cmp, label %if.then, label %if.end4

if.then:                                          ; preds = %entry
  %tmp = load i32* @a, align 4
  %add = add nsw i32 %tmp, %t
  %cmp1 = icmp sgt i32 %add, 12
  br i1 %cmp1, label %if.then2, label %if.end4

if.then2:                                         ; preds = %if.then
  tail call void @foo(i32 %add)
  %tmp1 = load i32* @a, align 4
  br label %if.end4

if.end4:                                          ; preds = %if.then2, %if.then, %entry
  %t.addr.0 = phi i32 [ %tmp1, %if.then2 ], [ %t, %if.then ], [ %t, %entry ]
  %tmp2 = load i32* @b, align 4
  %add5 = add nsw i32 %tmp2, %t.addr.0
  tail call void @foo(i32 %add5)
  %tmp3 = load i32* @b, align 4
  %add6 = add nsw i32 %tmp3, %t.addr.0
  ret i32 %add6
}