llvm-mirror/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
Commit ca0f4dc4f0 by Tim Northover: "AArch64/ARM64: move ARM64 into AArch64's place"
This commit starts with a "git mv ARM64 AArch64" and continues out
from there, renaming the C++ classes, intrinsics, and other
target-local objects for consistency.

"ARM64" test directories are also moved, and tests that began their
life in ARM64 use an arm64 triple, while those from AArch64 use an
aarch64 triple; the two should be equivalent.

This finishes the AArch64 merge, and everyone should feel free to
continue committing as normal now.

llvm-svn: 209577
2014-05-24 12:50:23 +00:00


; RUN: llc -O3 -mtriple arm64-apple-ios3 %s -o - | FileCheck %s
; <rdar://problem/13621857>
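;
; This file tests folding of index extensions (sext/zext) into AArch64 load
; addressing modes: extended-register forms such as "[base, wN, sxtw]" and
; scaled forms such as "[base, wN, uxtw #2]". The fold is expected when
; optimizing for size (optsize) and, ideally, avoided when the extension is
; shared by several uses (see the caveat in @fct below).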
@block = common global i8* null, align 8

define i32 @fct(i32 %i1, i32 %i2) {
; CHECK: @fct
; The sign extension is used more than once, so it should not be folded into
; the addressing mode. However, CodeGenPrepare does not share a sext across
; its uses, so each access ends up with its own single-use extension and the
; fold currently happens anyway; the check below is therefore left disabled
; (note the mangled prefix).
; _CHECK-NOT_: , sxtw]
entry:
  %idxprom = sext i32 %i1 to i64
  %0 = load i8** @block, align 8
  %arrayidx = getelementptr inbounds i8* %0, i64 %idxprom
  %1 = load i8* %arrayidx, align 1
  %idxprom1 = sext i32 %i2 to i64
  %arrayidx2 = getelementptr inbounds i8* %0, i64 %idxprom1
  %2 = load i8* %arrayidx2, align 1
  %cmp = icmp eq i8 %1, %2
  br i1 %cmp, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %cmp7 = icmp ugt i8 %1, %2
  %conv8 = zext i1 %cmp7 to i32
  br label %return

if.end:                                           ; preds = %entry
  %inc = add nsw i32 %i1, 1
  %inc9 = add nsw i32 %i2, 1
  %idxprom10 = sext i32 %inc to i64
  %arrayidx11 = getelementptr inbounds i8* %0, i64 %idxprom10
  %3 = load i8* %arrayidx11, align 1
  %idxprom12 = sext i32 %inc9 to i64
  %arrayidx13 = getelementptr inbounds i8* %0, i64 %idxprom12
  %4 = load i8* %arrayidx13, align 1
  %cmp16 = icmp eq i8 %3, %4
  br i1 %cmp16, label %if.end23, label %if.then18

if.then18:                                        ; preds = %if.end
  %cmp21 = icmp ugt i8 %3, %4
  %conv22 = zext i1 %cmp21 to i32
  br label %return

if.end23:                                         ; preds = %if.end
  %inc24 = add nsw i32 %i1, 2
  %inc25 = add nsw i32 %i2, 2
  %idxprom26 = sext i32 %inc24 to i64
  %arrayidx27 = getelementptr inbounds i8* %0, i64 %idxprom26
  %5 = load i8* %arrayidx27, align 1
  %idxprom28 = sext i32 %inc25 to i64
  %arrayidx29 = getelementptr inbounds i8* %0, i64 %idxprom28
  %6 = load i8* %arrayidx29, align 1
  %cmp32 = icmp eq i8 %5, %6
  br i1 %cmp32, label %return, label %if.then34

if.then34:                                        ; preds = %if.end23
  %cmp37 = icmp ugt i8 %5, %6
  %conv38 = zext i1 %cmp37 to i32
  br label %return

return:                                           ; preds = %if.end23, %if.then34, %if.then18, %if.then
  %retval.0 = phi i32 [ %conv8, %if.then ], [ %conv22, %if.then18 ], [ %conv38, %if.then34 ], [ 1, %if.end23 ]
  ret i32 %retval.0
}
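;
; For reference only (a rough sketch, not part of the checked output;
; registers are illustrative): a folded access uses an extended-register
; addressing mode, e.g. something like
;   ldrb w8, [x9, w0, sxtw]
; while the unfolded form materializes the extension in a register first:
;   sxtw x8, w0
;   ldrb w9, [x10, x8]
;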
define i32 @fct1(i32 %i1, i32 %i2) optsize {
; CHECK: @fct1
; Addressing modes are folded when optimizing for code size.
; CHECK: , sxtw]
; CHECK: , sxtw]
entry:
  %idxprom = sext i32 %i1 to i64
  %0 = load i8** @block, align 8
  %arrayidx = getelementptr inbounds i8* %0, i64 %idxprom
  %1 = load i8* %arrayidx, align 1
  %idxprom1 = sext i32 %i2 to i64
  %arrayidx2 = getelementptr inbounds i8* %0, i64 %idxprom1
  %2 = load i8* %arrayidx2, align 1
  %cmp = icmp eq i8 %1, %2
  br i1 %cmp, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %cmp7 = icmp ugt i8 %1, %2
  %conv8 = zext i1 %cmp7 to i32
  br label %return

if.end:                                           ; preds = %entry
  %inc = add nsw i32 %i1, 1
  %inc9 = add nsw i32 %i2, 1
  %idxprom10 = sext i32 %inc to i64
  %arrayidx11 = getelementptr inbounds i8* %0, i64 %idxprom10
  %3 = load i8* %arrayidx11, align 1
  %idxprom12 = sext i32 %inc9 to i64
  %arrayidx13 = getelementptr inbounds i8* %0, i64 %idxprom12
  %4 = load i8* %arrayidx13, align 1
  %cmp16 = icmp eq i8 %3, %4
  br i1 %cmp16, label %if.end23, label %if.then18

if.then18:                                        ; preds = %if.end
  %cmp21 = icmp ugt i8 %3, %4
  %conv22 = zext i1 %cmp21 to i32
  br label %return

if.end23:                                         ; preds = %if.end
  %inc24 = add nsw i32 %i1, 2
  %inc25 = add nsw i32 %i2, 2
  %idxprom26 = sext i32 %inc24 to i64
  %arrayidx27 = getelementptr inbounds i8* %0, i64 %idxprom26
  %5 = load i8* %arrayidx27, align 1
  %idxprom28 = sext i32 %inc25 to i64
  %arrayidx29 = getelementptr inbounds i8* %0, i64 %idxprom28
  %6 = load i8* %arrayidx29, align 1
  %cmp32 = icmp eq i8 %5, %6
  br i1 %cmp32, label %return, label %if.then34

if.then34:                                        ; preds = %if.end23
  %cmp37 = icmp ugt i8 %5, %6
  %conv38 = zext i1 %cmp37 to i32
  br label %return

return:                                           ; preds = %if.end23, %if.then34, %if.then18, %if.then
  %retval.0 = phi i32 [ %conv8, %if.then ], [ %conv22, %if.then18 ], [ %conv38, %if.then34 ], [ 1, %if.end23 ]
  ret i32 %retval.0
}
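;
; The remaining two functions exercise the same policy for a zero-extended,
; scaled index: in "[base, wN, uxtw #2]" the 32-bit index is zero-extended
; and shifted left by 2 (i.e. scaled by 4) to address the i32 array.
; Mirroring the sxtw cases above, @test (no optsize) must not use the folded
; form, while @test2 (optsize) must.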
; CHECK: @test
; CHECK-NOT: , uxtw #2]
define i32 @test(i32* %array, i8 zeroext %c, i32 %arg) {
entry:
  %conv = zext i8 %c to i32
  %add = sub i32 0, %arg
  %tobool = icmp eq i32 %conv, %add
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %idxprom = zext i8 %c to i64
  %arrayidx = getelementptr inbounds i32* %array, i64 %idxprom
  %0 = load volatile i32* %arrayidx, align 4
  %1 = load volatile i32* %arrayidx, align 4
  %add3 = add nsw i32 %1, %0
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  %res.0 = phi i32 [ %add3, %if.then ], [ 0, %entry ]
  ret i32 %res.0
}
; CHECK: @test2
; CHECK: , uxtw #2]
; CHECK: , uxtw #2]
define i32 @test2(i32* %array, i8 zeroext %c, i32 %arg) optsize {
entry:
  %conv = zext i8 %c to i32
  %add = sub i32 0, %arg
  %tobool = icmp eq i32 %conv, %add
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %idxprom = zext i8 %c to i64
  %arrayidx = getelementptr inbounds i32* %array, i64 %idxprom
  %0 = load volatile i32* %arrayidx, align 4
  %1 = load volatile i32* %arrayidx, align 4
  %add3 = add nsw i32 %1, %0
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  %res.0 = phi i32 [ %add3, %if.then ], [ 0, %entry ]
  ret i32 %res.0
}
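;
; For reference only (a rough sketch, not part of the checked output;
; registers are illustrative): the folded, scaled form that @test2 expects
; looks roughly like
;   ldr w8, [x0, w1, uxtw #2]
; with the zero-extension and the multiply-by-4 of the index both performed
; by the load itself.
;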