1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 10:42:39 +01:00
llvm-mirror/test/CodeGen/ARM/pr34045.ll
Roger Ferrer Ibanez 9a8d6a6c92 [ARM] Use ADDCARRY / SUBCARRY
This is a preparatory step for D34515.

This change:
 - makes nodes ISD::ADDCARRY and ISD::SUBCARRY legal for i32
 - lowering is done by first converting the boolean value into the carry flag
   using (_, C) ← (ARMISD::ADDC R, -1) and converting it back to an integer
   value using (R, _) ← (ARMISD::ADDE 0, 0, C). An ARMISD::ADDE between the
   two operations does the actual addition.
 - for subtraction, given that ISD::SUBCARRY second result is actually a
   borrow, we need to invert the value of the second operand and result before
   and after using ARMISD::SUBE. We need to invert the carry result of
   ARMISD::SUBE to preserve the semantics.
 - given that the generic combiner may lower ISD::ADDCARRY and
   ISD::SUBCARRY into ISD::UADDO and ISD::USUBO we need to update their lowering
   as well otherwise i64 operations now would require branches. This implies
   updating the corresponding test for unsigned.
 - add new combiner to remove the redundant conversions from/to carry flags
   to/from boolean values (ARMISD::ADDC (ARMISD::ADDE 0, 0, C), -1) → C
 - fixes PR34045
 - fixes PR34564
 - fixes PR35103

Differential Revision: https://reviews.llvm.org/D35192

llvm-svn: 320355
2017-12-11 12:13:45 +00:00

54 lines
1.8 KiB
LLVM

; RUN: llc < %s -mtriple thumbv7 | FileCheck %s
; ModuleID = 'bugpoint-reduced-simplified.bc'

; Regression test for PR34045: a bugpoint-reduced fragment of a Comba-style
; big-number multiplication (bn_mul_comba8). The point of the test is that
; instruction selection must complete without introducing a cycle in the
; selection DAG when combining the widening multiplies with the carry
; propagation; the CHECK lines below only sanity-check the expected
; multiply forms in the output.
; NOTE(review): the loads from `i32* undef` are artifacts of the bugpoint
; reduction, not meaningful inputs — do not "fix" them.
define hidden void @bn_mul_comba8(i32* nocapture %r, i32* nocapture readonly %a, i32* nocapture readonly %b) local_unnamed_addr {
entry:
; This test is actually checking that no cycle is introduced but at least we
; want to see a couple of umull and one umlal in the output
; CHECK: umull
; CHECK: umull
; CHECK: umlal
; First column: widening 32x32->64 multiply a[0] * b[0]; only the high half
; (%shr) is carried into the next column.
%0 = load i32, i32* %a, align 4
%conv = zext i32 %0 to i64
%1 = load i32, i32* %b, align 4
%conv2 = zext i32 %1 to i64
%mul = mul nuw i64 %conv2, %conv
%shr = lshr i64 %mul, 32
; Second column: another widening multiply accumulated with the previous
; carry (%shr); this add + lshr pair is what should select to umlal.
%2 = load i32, i32* %a, align 4
%conv13 = zext i32 %2 to i64
%3 = load i32, i32* undef, align 4
%conv15 = zext i32 %3 to i64
%mul16 = mul nuw i64 %conv15, %conv13
%add18 = add i64 %mul16, %shr
%shr20 = lshr i64 %add18, 32
%conv21 = trunc i64 %shr20 to i32
; Third partial product, accumulated with the low 32 bits of the previous
; column (masked via `and ..., 4294967295`).
%4 = load i32, i32* undef, align 4
%conv34 = zext i32 %4 to i64
%5 = load i32, i32* %b, align 4
%conv36 = zext i32 %5 to i64
%mul37 = mul nuw i64 %conv36, %conv34
%conv38 = and i64 %add18, 4294967295
%add39 = add i64 %mul37, %conv38
%shr41 = lshr i64 %add39, 32
%conv42 = trunc i64 %shr41 to i32
; Carry detection in 32-bit arithmetic: the icmp/zext pair materializes the
; unsigned-overflow bit as an integer. This is the boolean<->carry-flag
; round-trip the ADDCARRY/SUBCARRY lowering (D35192) must handle without
; creating a DAG cycle.
%add43 = add i32 %conv42, %conv21
%cmp44 = icmp ult i32 %add43, %conv42
%c1.1 = zext i1 %cmp44 to i32
; The remainder is a bugpoint-flattened chain of carry propagation steps
; (most operands reduced to 0); only the final word r[3] is stored.
%add65 = add i32 0, %c1.1
%add86 = add i32 %add65, 0
%add107 = add i32 %add86, 0
%conv124 = zext i32 %add107 to i64
%add125 = add i64 0, %conv124
%conv145 = and i64 %add125, 4294967295
%add146 = add i64 %conv145, 0
%conv166 = and i64 %add146, 4294967295
%add167 = add i64 %conv166, 0
%conv187 = and i64 %add167, 4294967295
%add188 = add i64 %conv187, 0
%conv189 = trunc i64 %add188 to i32
%arrayidx200 = getelementptr inbounds i32, i32* %r, i32 3
store i32 %conv189, i32* %arrayidx200, align 4
ret void
}