llvm-mirror/test/CodeGen/AArch64/sadd_sat_plus.ll
David Green e2e0a9d886 [Codegen] Alter the default promotion for saturating adds and subs
The default promotion for the add_sat/sub_sat nodes currently does:
    ANY_EXTEND iN to iM
    SHL by M-N
    [US][ADD|SUB]SAT
    L/ASHR by M-N

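For example, promoting @llvm.sadd.sat.i8 through i32 this way is roughly
equivalent to the following IR (a hand-written sketch with hypothetical
function names; the real transform operates on SelectionDAG nodes, and the
extension can be an any-extend because the shift discards the high bits):

    declare i32 @llvm.sadd.sat.i32(i32, i32)

    ; sketch only, not the actual DAG transform
    define i8 @sadd_sat_i8_via_shifts(i8 %x, i8 %y) {
      %xe = sext i8 %x to i32                             ; ANY_EXTEND i8 to i32
      %ye = sext i8 %y to i32
      %xs = shl i32 %xe, 24                               ; SHL by M-N = 24
      %ys = shl i32 %ye, 24
      %s = call i32 @llvm.sadd.sat.i32(i32 %xs, i32 %ys)  ; SADDSAT at i32
      %r = ashr i32 %s, 24                                ; ASHR by M-N
      %t = trunc i32 %r to i8
      ret i8 %t
    }
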
If the promoted add_sat or sub_sat node is not legal, this can produce code
that effectively does a lot of shifting (and requires large constants to be
materialised) just to use the overflow flag. It is simpler to do the
saturation manually, using the higher-bitwidth addition and a min/max clamp
against the saturating bounds. That is what this patch does.
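
The same i8 example with the new expansion is roughly (again a sketch with a
hypothetical function name):

    ; sketch only, not the actual DAG transform
    define i8 @sadd_sat_i8_via_clamp(i8 %x, i8 %y) {
      %xe = sext i8 %x to i32
      %ye = sext i8 %y to i32
      %s = add i32 %xe, %ye                   ; i32 add cannot overflow for i8 inputs
      %c1 = icmp slt i32 %s, 127
      %lo = select i1 %c1, i32 %s, i32 127    ; min(s, 127)
      %c2 = icmp sgt i32 %lo, -128
      %hi = select i1 %c2, i32 %lo, i32 -128  ; max(min(s, 127), -128)
      %t = trunc i32 %hi to i8
      ret i8 %t
    }

This matches the shape of the i16/i8/i4 tests below: a sign-extended add
followed by two compare-and-select clamps.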

Differential Revision: https://reviews.llvm.org/D68926

llvm-svn: 375211
2019-10-18 09:47:48 +00:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
declare i4 @llvm.sadd.sat.i4(i4, i4)
declare i8 @llvm.sadd.sat.i8(i8, i8)
declare i16 @llvm.sadd.sat.i16(i16, i16)
declare i32 @llvm.sadd.sat.i32(i32, i32)
declare i64 @llvm.sadd.sat.i64(i64, i64)
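
; i32 and i64 are legal saturating-add widths on AArch64: adds sets the V
; flag on signed overflow, cmp+cinv build the correct bound (INT_MAX for
; positive overflow, INT_MIN for negative) from the sign of the wrapped sum,
; and csel picks that bound when V is set.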
define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; CHECK-LABEL: func32:
; CHECK: // %bb.0:
; CHECK-NEXT: mul w8, w1, w2
; CHECK-NEXT: adds w10, w0, w8
; CHECK-NEXT: mov w9, #2147483647
; CHECK-NEXT: cmp w10, #0 // =0
; CHECK-NEXT: cinv w9, w9, ge
; CHECK-NEXT: adds w8, w0, w8
; CHECK-NEXT: csel w0, w9, w8, vs
; CHECK-NEXT: ret
%a = mul i32 %y, %z
%tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %a)
ret i32 %tmp
}
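
; Note: unlike the other tests, the multiply result %a is unused here and the
; intrinsic is called with %z directly, which is why no mul appears in the
; generated checks for func64.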
define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
; CHECK-LABEL: func64:
; CHECK: // %bb.0:
; CHECK-NEXT: adds x8, x0, x2
; CHECK-NEXT: mov x9, #9223372036854775807
; CHECK-NEXT: cmp x8, #0 // =0
; CHECK-NEXT: cinv x8, x9, ge
; CHECK-NEXT: adds x9, x0, x2
; CHECK-NEXT: csel x0, x8, x9, vs
; CHECK-NEXT: ret
%a = mul i64 %y, %z
%tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %z)
ret i64 %tmp
}
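
; i16 is promoted to i32: sign-extend, add, then clamp to [-32768, 32767]
; with cmp/csel and cmn/csel; this is the min/max expansion described in the
; commit message above.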
define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
; CHECK-LABEL: func16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
; CHECK-NEXT: mul w9, w1, w2
; CHECK-NEXT: mov w10, #32767
; CHECK-NEXT: add w8, w8, w9, sxth
; CHECK-NEXT: cmp w8, w10
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #8, lsl #12 // =32768
; CHECK-NEXT: mov w9, #-32768
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
%a = mul i16 %y, %z
%tmp = call i16 @llvm.sadd.sat.i16(i16 %x, i16 %a)
ret i16 %tmp
}
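
; Same clamp pattern at i8, using sxtb and the [-128, 127] bounds.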
define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; CHECK-LABEL: func8:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
; CHECK-NEXT: mul w9, w1, w2
; CHECK-NEXT: add w8, w8, w9, sxtb
; CHECK-NEXT: mov w10, #127
; CHECK-NEXT: cmp w8, #127 // =127
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #128 // =128
; CHECK-NEXT: mov w9, #-128
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
%a = mul i8 %y, %z
%tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %a)
ret i8 %tmp
}
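
; i4 has no native extend: sbfx sign-extends the low 4 bits of %x, the
; multiply result is sign-extended with lsl+asr by 28, and the sum is
; clamped to [-8, 7] as above.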
define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
; CHECK-LABEL: func4:
; CHECK: // %bb.0:
; CHECK-NEXT: mul w9, w1, w2
; CHECK-NEXT: sbfx w8, w0, #0, #4
; CHECK-NEXT: lsl w9, w9, #28
; CHECK-NEXT: add w8, w8, w9, asr #28
; CHECK-NEXT: mov w10, #7
; CHECK-NEXT: cmp w8, #7 // =7
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #8 // =8
; CHECK-NEXT: mov w9, #-8
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
%a = mul i4 %y, %z
%tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %a)
ret i4 %tmp
}