llvm-mirror/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-br.mir
Amara Emerson, commit 95442ef696: [AArch64][GlobalISel] Optimize conditional branches followed by unconditional branches
If we have an icmp->brcond->br sequence where the brcond just branches to the
next block, jumping over the br, while the br takes the false edge, then we can
modify the conditional branch to jump to the br's target while inverting the
condition of the incoming icmp. This means the br can be eliminated, since it
becomes an unconditional branch to the fallthrough block.
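
For example, on the test below the combine rewrites the entry block roughly as
follows (a minimal sketch using the input's block numbering; the CHECK lines
show the renumbered output blocks):

    ; before: the brcond jumps over the br to reach the next block
    %3:_(s1) = G_ICMP intpred(sgt), %0(s32), %2
    G_BRCOND %3(s1), %bb.2   ; %bb.2 is if.then, the fallthrough block
    G_BR %bb.3               ; %bb.3 is if.end

    ; after: the predicate is inverted (sgt -> sle), the conditional
    ; branch takes over the br's target, and the br is erased
    %3:_(s1) = G_ICMP intpred(sle), %0(s32), %2
    G_BRCOND %3(s1), %bb.3   ; if.then is now reached by fallthrough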

Differential Revision: https://reviews.llvm.org/D64354

llvm-svn: 365510
2019-07-09 16:05:59 +00:00

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -run-pass=aarch64-prelegalizer-combiner -global-isel -verify-machineinstrs %s -o - | FileCheck %s
--- |
  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
  target triple = "arm64-apple-ios5.0.0"

  define i32 @foo(i32 %a, i32 %b) {
  entry:
    %cmp = icmp sgt i32 %a, 0
    br i1 %cmp, label %if.then, label %if.end

  if.then:
    %add = add nsw i32 %b, %a
    %add1 = add nsw i32 %a, %b
    br label %return

  if.end:
    %mul = mul nsw i32 %b, %b
    %add2 = add nuw nsw i32 %mul, 2
    br label %return

  return:
    %retval.0 = phi i32 [ %add1, %if.then ], [ %add2, %if.end ]
    ret i32 %retval.0
  }
...
---
name: foo
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: foo
  ; CHECK: bb.0.entry:
  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; CHECK:   liveins: $w0, $w1
  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s32), [[C]]
  ; CHECK:   G_BRCOND [[ICMP]](s1), %bb.2
  ; CHECK: bb.1.if.then:
  ; CHECK:   successors: %bb.3(0x80000000)
  ; CHECK:   %5:_(s32) = nsw G_ADD [[COPY1]], [[COPY]]
  ; CHECK:   %6:_(s32) = nsw G_ADD %5, [[COPY1]]
  ; CHECK:   G_BR %bb.3
  ; CHECK: bb.2.if.end:
  ; CHECK:   successors: %bb.3(0x80000000)
  ; CHECK:   %7:_(s32) = nsw G_MUL [[COPY1]], [[COPY1]]
  ; CHECK:   %8:_(s32) = nuw nsw G_ADD %7, [[C1]]
  ; CHECK: bb.3.return:
  ; CHECK:   [[PHI:%[0-9]+]]:_(s32) = G_PHI %6(s32), %bb.1, %8(s32), %bb.2
  ; CHECK:   $w0 = COPY [[PHI]](s32)
  ; CHECK:   RET_ReallyLR implicit $w0
  bb.1.entry:
    liveins: $w0, $w1

    %0:_(s32) = COPY $w0
    %1:_(s32) = COPY $w1
    %2:_(s32) = G_CONSTANT i32 0
    %5:_(s32) = G_CONSTANT i32 2
    %3:_(s1) = G_ICMP intpred(sgt), %0(s32), %2
    G_BRCOND %3(s1), %bb.2
    G_BR %bb.3

  bb.2.if.then:
    %7:_(s32) = nsw G_ADD %1, %0
    %8:_(s32) = nsw G_ADD %7, %1
    G_BR %bb.4

  bb.3.if.end:
    %4:_(s32) = nsw G_MUL %1, %1
    %6:_(s32) = nuw nsw G_ADD %4, %5

  bb.4.return:
    %10:_(s32) = G_PHI %8(s32), %bb.2, %6(s32), %bb.3
    $w0 = COPY %10(s32)
    RET_ReallyLR implicit $w0
...