llvm-mirror/test/CodeGen/AArch64/sadd_sat.ll
David Green e9d7161099 [AArch64] Select saturating Neon instructions
This adds some extra patterns to select the AArch64 Neon SQADD, UQADD,
SQSUB and UQSUB instructions from the existing target-independent
sadd_sat, uadd_sat, ssub_sat and usub_sat nodes.
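For example, with these patterns a vector usub_sat selects a single
UQSUB. A sketch in the style of the test below (illustrative, not part of
this file; uadd_sat and ssub_sat similarly select UQADD and SQSUB):

declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)

define <4 x i32> @vec_usub(<4 x i32> %x, <4 x i32> %y) nounwind {
  ; Expected selection: uqsub v0.4s, v0.4s, v1.4s
  %tmp = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %tmp
}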

It does not attempt to replace the existing int_aarch64_neon_uqadd
intrinsic nodes, as they are apparently used for both scalar and vector
types, and need to be legal on scalar types for some of the patterns to
work.
On scalar types, int_aarch64_neon_uqadd would move the two integers into
floating point registers, perform a Neon uqadd, and move the value back.
I don't believe it is a good idea for uadd_sat to do the same, as the
scalar alternative is simpler (an adds with a csinv). For signed
saturation the Neon version may be smaller, but I'm not sure it is
better.
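For reference, that scalar expansion looks roughly like this (an
illustrative sketch, not part of this test; exact register allocation may
differ):

declare i32 @llvm.uadd.sat.i32(i32, i32)

define i32 @scalar_uadd(i32 %x, i32 %y) nounwind {
  ; Roughly: adds  w8, w0, w1
  ;          csinv w0, w8, wzr, lo
  %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
  ret i32 %tmp
}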

So this just adds some extra patterns for the existing vector
instructions, matching on the _sat nodes.

Differential Revision: https://reviews.llvm.org/D69374
2019-10-31 17:28:36 +00:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
declare i4 @llvm.sadd.sat.i4(i4, i4)
declare i8 @llvm.sadd.sat.i8(i8, i8)
declare i16 @llvm.sadd.sat.i16(i16, i16)
declare i32 @llvm.sadd.sat.i32(i32, i32)
declare i64 @llvm.sadd.sat.i64(i64, i64)
declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
define i32 @func(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: func:
; CHECK: // %bb.0:
; CHECK-NEXT: adds w8, w0, w1
; CHECK-NEXT: mov w9, #2147483647
; CHECK-NEXT: cmp w8, #0 // =0
; CHECK-NEXT: cinv w8, w9, ge
; CHECK-NEXT: adds w9, w0, w1
; CHECK-NEXT: csel w0, w8, w9, vs
; CHECK-NEXT: ret
  %tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %y)
  ret i32 %tmp
}
define i64 @func2(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: func2:
; CHECK: // %bb.0:
; CHECK-NEXT: adds x8, x0, x1
; CHECK-NEXT: mov x9, #9223372036854775807
; CHECK-NEXT: cmp x8, #0 // =0
; CHECK-NEXT: cinv x8, x9, ge
; CHECK-NEXT: adds x9, x0, x1
; CHECK-NEXT: csel x0, x8, x9, vs
; CHECK-NEXT: ret
  %tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y)
  ret i64 %tmp
}
define i16 @func16(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: func16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
; CHECK-NEXT: mov w9, #32767
; CHECK-NEXT: add w8, w8, w1, sxth
; CHECK-NEXT: cmp w8, w9
; CHECK-NEXT: csel w8, w8, w9, lt
; CHECK-NEXT: cmn w8, #8, lsl #12 // =32768
; CHECK-NEXT: mov w9, #-32768
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
  %tmp = call i16 @llvm.sadd.sat.i16(i16 %x, i16 %y)
  ret i16 %tmp
}
define i8 @func8(i8 %x, i8 %y) nounwind {
; CHECK-LABEL: func8:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
; CHECK-NEXT: add w8, w8, w1, sxtb
; CHECK-NEXT: mov w9, #127
; CHECK-NEXT: cmp w8, #127 // =127
; CHECK-NEXT: csel w8, w8, w9, lt
; CHECK-NEXT: cmn w8, #128 // =128
; CHECK-NEXT: mov w9, #-128
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
  %tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %y)
  ret i8 %tmp
}
define i4 @func3(i4 %x, i4 %y) nounwind {
; CHECK-LABEL: func3:
; CHECK: // %bb.0:
; CHECK-NEXT: lsl w8, w1, #28
; CHECK-NEXT: sbfx w9, w0, #0, #4
; CHECK-NEXT: add w8, w9, w8, asr #28
; CHECK-NEXT: mov w10, #7
; CHECK-NEXT: cmp w8, #7 // =7
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #8 // =8
; CHECK-NEXT: mov w9, #-8
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
  %tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %y)
  ret i4 %tmp
}
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: vec:
; CHECK: // %bb.0:
; CHECK-NEXT: sqadd v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
  %tmp = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %tmp
}