
[RISCV] Add Zbb command lines to uadd/usub/sadd/ssub tests.

The expansions of the saturating intrinsics can make use of
the min(u)/max(u) instructions in Zbb.
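For reference, this is the shape of the improvement: with Zbb, a signed saturating add lowers to a plain add clamped between the type's bounds with min/max. Below is an annotated copy of the RV64IZbb func16 check lines from the first file (comments added here for illustration):

    add   a0, a0, a1       # wide add of the sign-extended i16 inputs
    lui   a1, 8            # a1 = 32768
    addiw a1, a1, -1       # a1 = 32767 (i16 max)
    min   a0, a0, a1       # clamp against the upper bound
    lui   a1, 1048568      # a1 = 0xffff8000 = -32768 (i16 min)
    max   a0, a0, a1       # clamp against the lower bound
    ret

The unsigned variants lean on minu/maxu instead; for example, uadd.sat becomes "not a2, a1; minu a0, a0, a2; add a0, a0, a1", which saturates to all-ones because min(x, ~y) + y can never wrap.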
Craig Topper 2021-02-18 11:24:04 -08:00
parent 5d1a8a5c59
commit 4fb5ffafaa
8 changed files with 1052 additions and 0 deletions
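Each file's NOTE line records that the CHECK lines are autogenerated. For anyone reproducing these checks, a typical invocation looks like the following (the llc and test paths are illustrative assumptions; adjust for your checkout and build directory):

    llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc llvm/test/CodeGen/RISCV/sadd_sat.ll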

View File

@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV64IZbb
declare i4 @llvm.sadd.sat.i4(i4, i4)
declare i8 @llvm.sadd.sat.i8(i8, i8)
@@ -43,6 +45,33 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
; RV64I-NEXT: .LBB0_4:
; RV64I-NEXT: lui a0, 524288
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: mv a2, a0
; RV32IZbb-NEXT: add a3, a0, a1
; RV32IZbb-NEXT: lui a0, 524288
; RV32IZbb-NEXT: bgez a3, .LBB0_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: addi a0, a0, -1
; RV32IZbb-NEXT: .LBB0_2:
; RV32IZbb-NEXT: slt a2, a3, a2
; RV32IZbb-NEXT: slti a1, a1, 0
; RV32IZbb-NEXT: xor a1, a1, a2
; RV32IZbb-NEXT: bnez a1, .LBB0_4
; RV32IZbb-NEXT: # %bb.3:
; RV32IZbb-NEXT: mv a0, a3
; RV32IZbb-NEXT: .LBB0_4:
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: lui a1, 524288
; RV64IZbb-NEXT: addiw a2, a1, -1
; RV64IZbb-NEXT: min a0, a0, a2
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %y);
ret i32 %tmp;
}
@@ -113,6 +142,72 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB1_4:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func2:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: mv a4, a0
; RV32IZbb-NEXT: add a5, a1, a3
; RV32IZbb-NEXT: add a0, a0, a2
; RV32IZbb-NEXT: sltu a2, a0, a4
; RV32IZbb-NEXT: add a2, a5, a2
; RV32IZbb-NEXT: addi a6, zero, -1
; RV32IZbb-NEXT: addi a7, zero, 1
; RV32IZbb-NEXT: addi a4, zero, 1
; RV32IZbb-NEXT: beqz a2, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: slt a4, a6, a2
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: addi a5, zero, 1
; RV32IZbb-NEXT: beqz a1, .LBB1_4
; RV32IZbb-NEXT: # %bb.3:
; RV32IZbb-NEXT: slt a5, a6, a1
; RV32IZbb-NEXT: .LBB1_4:
; RV32IZbb-NEXT: xor a1, a5, a4
; RV32IZbb-NEXT: snez a1, a1
; RV32IZbb-NEXT: beqz a3, .LBB1_6
; RV32IZbb-NEXT: # %bb.5:
; RV32IZbb-NEXT: slt a7, a6, a3
; RV32IZbb-NEXT: .LBB1_6:
; RV32IZbb-NEXT: xor a3, a5, a7
; RV32IZbb-NEXT: seqz a3, a3
; RV32IZbb-NEXT: and a3, a3, a1
; RV32IZbb-NEXT: bnez a3, .LBB1_10
; RV32IZbb-NEXT: # %bb.7:
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: bltz a2, .LBB1_11
; RV32IZbb-NEXT: .LBB1_8:
; RV32IZbb-NEXT: beqz a3, .LBB1_12
; RV32IZbb-NEXT: .LBB1_9:
; RV32IZbb-NEXT: ret
; RV32IZbb-NEXT: .LBB1_10:
; RV32IZbb-NEXT: srai a0, a2, 31
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: bgez a2, .LBB1_8
; RV32IZbb-NEXT: .LBB1_11:
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: bnez a3, .LBB1_9
; RV32IZbb-NEXT: .LBB1_12:
; RV32IZbb-NEXT: mv a1, a2
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func2:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: mv a2, a0
; RV64IZbb-NEXT: add a3, a0, a1
; RV64IZbb-NEXT: addi a0, zero, -1
; RV64IZbb-NEXT: slli a0, a0, 63
; RV64IZbb-NEXT: bgez a3, .LBB1_2
; RV64IZbb-NEXT: # %bb.1:
; RV64IZbb-NEXT: addi a0, a0, -1
; RV64IZbb-NEXT: .LBB1_2:
; RV64IZbb-NEXT: slt a2, a3, a2
; RV64IZbb-NEXT: slti a1, a1, 0
; RV64IZbb-NEXT: xor a1, a1, a2
; RV64IZbb-NEXT: bnez a1, .LBB1_4
; RV64IZbb-NEXT: # %bb.3:
; RV64IZbb-NEXT: mv a0, a3
; RV64IZbb-NEXT: .LBB1_4:
; RV64IZbb-NEXT: ret
%tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y);
ret i64 %tmp;
}
@@ -155,6 +250,26 @@ define signext i16 @func16(i16 signext %x, i16 signext %y) nounwind {
; RV64I-NEXT: .LBB2_4:
; RV64I-NEXT: lui a0, 1048568
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func16:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: lui a1, 8
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: lui a1, 1048568
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: lui a1, 8
; RV64IZbb-NEXT: addiw a1, a1, -1
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: lui a1, 1048568
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i16 @llvm.sadd.sat.i16(i16 %x, i16 %y);
ret i16 %tmp;
}
@@ -195,6 +310,24 @@ define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
; RV64I-NEXT: .LBB3_4:
; RV64I-NEXT: addi a0, zero, -128
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 127
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, -128
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 127
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, -128
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %y);
ret i8 %tmp;
}
@@ -235,6 +368,24 @@ define signext i4 @func3(i4 signext %x, i4 signext %y) nounwind {
; RV64I-NEXT: .LBB4_4:
; RV64I-NEXT: addi a0, zero, -8
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func3:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 7
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, -8
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func3:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 7
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, -8
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %y);
ret i4 %tmp;
}

View File

@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV64IZbb
declare i4 @llvm.sadd.sat.i4(i4, i4)
declare i8 @llvm.sadd.sat.i8(i8, i8)
@@ -47,6 +49,37 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; RV64I-NEXT: .LBB0_4:
; RV64I-NEXT: lui a0, 524288
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func32:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: mul a2, a1, a2
; RV32IZbb-NEXT: add a1, a0, a2
; RV32IZbb-NEXT: slt a0, a1, a0
; RV32IZbb-NEXT: slti a2, a2, 0
; RV32IZbb-NEXT: xor a2, a2, a0
; RV32IZbb-NEXT: lui a0, 524288
; RV32IZbb-NEXT: bltz a1, .LBB0_3
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: beqz a2, .LBB0_4
; RV32IZbb-NEXT: .LBB0_2:
; RV32IZbb-NEXT: ret
; RV32IZbb-NEXT: .LBB0_3:
; RV32IZbb-NEXT: addi a0, a0, -1
; RV32IZbb-NEXT: bnez a2, .LBB0_2
; RV32IZbb-NEXT: .LBB0_4:
; RV32IZbb-NEXT: mv a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func32:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.w a0, a0
; RV64IZbb-NEXT: mulw a1, a1, a2
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: lui a1, 524288
; RV64IZbb-NEXT: addiw a2, a1, -1
; RV64IZbb-NEXT: min a0, a0, a2
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i32 %y, %z
%tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %a)
ret i32 %tmp
@@ -118,6 +151,72 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB1_4:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func64:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: mv a2, a0
; RV32IZbb-NEXT: add a3, a1, a5
; RV32IZbb-NEXT: add a0, a0, a4
; RV32IZbb-NEXT: sltu a2, a0, a2
; RV32IZbb-NEXT: add a2, a3, a2
; RV32IZbb-NEXT: addi a6, zero, -1
; RV32IZbb-NEXT: addi a7, zero, 1
; RV32IZbb-NEXT: addi a3, zero, 1
; RV32IZbb-NEXT: beqz a2, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: slt a3, a6, a2
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: addi a4, zero, 1
; RV32IZbb-NEXT: beqz a1, .LBB1_4
; RV32IZbb-NEXT: # %bb.3:
; RV32IZbb-NEXT: slt a4, a6, a1
; RV32IZbb-NEXT: .LBB1_4:
; RV32IZbb-NEXT: xor a1, a4, a3
; RV32IZbb-NEXT: snez a1, a1
; RV32IZbb-NEXT: beqz a5, .LBB1_6
; RV32IZbb-NEXT: # %bb.5:
; RV32IZbb-NEXT: slt a7, a6, a5
; RV32IZbb-NEXT: .LBB1_6:
; RV32IZbb-NEXT: xor a3, a4, a7
; RV32IZbb-NEXT: seqz a3, a3
; RV32IZbb-NEXT: and a3, a3, a1
; RV32IZbb-NEXT: bnez a3, .LBB1_10
; RV32IZbb-NEXT: # %bb.7:
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: bltz a2, .LBB1_11
; RV32IZbb-NEXT: .LBB1_8:
; RV32IZbb-NEXT: beqz a3, .LBB1_12
; RV32IZbb-NEXT: .LBB1_9:
; RV32IZbb-NEXT: ret
; RV32IZbb-NEXT: .LBB1_10:
; RV32IZbb-NEXT: srai a0, a2, 31
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: bgez a2, .LBB1_8
; RV32IZbb-NEXT: .LBB1_11:
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: bnez a3, .LBB1_9
; RV32IZbb-NEXT: .LBB1_12:
; RV32IZbb-NEXT: mv a1, a2
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func64:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: mv a1, a0
; RV64IZbb-NEXT: add a3, a0, a2
; RV64IZbb-NEXT: addi a0, zero, -1
; RV64IZbb-NEXT: slli a0, a0, 63
; RV64IZbb-NEXT: bgez a3, .LBB1_2
; RV64IZbb-NEXT: # %bb.1:
; RV64IZbb-NEXT: addi a0, a0, -1
; RV64IZbb-NEXT: .LBB1_2:
; RV64IZbb-NEXT: slt a1, a3, a1
; RV64IZbb-NEXT: slti a2, a2, 0
; RV64IZbb-NEXT: xor a1, a2, a1
; RV64IZbb-NEXT: bnez a1, .LBB1_4
; RV64IZbb-NEXT: # %bb.3:
; RV64IZbb-NEXT: mv a0, a3
; RV64IZbb-NEXT: .LBB1_4:
; RV64IZbb-NEXT: ret
%a = mul i64 %y, %z
%tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %z)
ret i64 %tmp
@@ -171,6 +270,32 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
; RV64I-NEXT: .LBB2_4:
; RV64I-NEXT: lui a0, 1048568
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func16:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sext.h a0, a0
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: sext.h a1, a1
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: lui a1, 8
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: lui a1, 1048568
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.h a0, a0
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: sext.h a1, a1
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: lui a1, 8
; RV64IZbb-NEXT: addiw a1, a1, -1
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: lui a1, 1048568
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i16 %y, %z
%tmp = call i16 @llvm.sadd.sat.i16(i16 %x, i16 %a)
ret i16 %tmp
@@ -222,6 +347,30 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; RV64I-NEXT: .LBB3_4:
; RV64I-NEXT: addi a0, zero, -128
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sext.b a0, a0
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: sext.b a1, a1
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 127
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, -128
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.b a0, a0
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: sext.b a1, a1
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 127
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, -128
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i8 %y, %z
%tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %a)
ret i8 %tmp
@@ -273,6 +422,34 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
; RV64I-NEXT: .LBB4_4:
; RV64I-NEXT: addi a0, zero, -8
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func4:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: slli a0, a0, 28
; RV32IZbb-NEXT: srai a0, a0, 28
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: slli a1, a1, 28
; RV32IZbb-NEXT: srai a1, a1, 28
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 7
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, -8
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func4:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: slli a0, a0, 60
; RV64IZbb-NEXT: srai a0, a0, 60
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: slli a1, a1, 60
; RV64IZbb-NEXT: srai a1, a1, 60
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 7
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, -8
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i4 %y, %z
%tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %a)
ret i4 %tmp

View File

@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV64IZbb
declare i4 @llvm.ssub.sat.i4(i4, i4)
declare i8 @llvm.ssub.sat.i8(i8, i8)
@@ -44,6 +46,34 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
; RV64I-NEXT: .LBB0_4:
; RV64I-NEXT: lui a0, 524288
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sgtz a2, a1
; RV32IZbb-NEXT: sub a1, a0, a1
; RV32IZbb-NEXT: slt a0, a1, a0
; RV32IZbb-NEXT: xor a2, a2, a0
; RV32IZbb-NEXT: lui a0, 524288
; RV32IZbb-NEXT: bltz a1, .LBB0_3
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: beqz a2, .LBB0_4
; RV32IZbb-NEXT: .LBB0_2:
; RV32IZbb-NEXT: ret
; RV32IZbb-NEXT: .LBB0_3:
; RV32IZbb-NEXT: addi a0, a0, -1
; RV32IZbb-NEXT: bnez a2, .LBB0_2
; RV32IZbb-NEXT: .LBB0_4:
; RV32IZbb-NEXT: mv a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: lui a1, 524288
; RV64IZbb-NEXT: addiw a2, a1, -1
; RV64IZbb-NEXT: min a0, a0, a2
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y);
ret i32 %tmp;
}
@@ -114,6 +144,72 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
; RV64I-NEXT: .LBB1_4:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func2:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sltu a4, a0, a2
; RV32IZbb-NEXT: sub a5, a1, a3
; RV32IZbb-NEXT: sub t0, a5, a4
; RV32IZbb-NEXT: addi a6, zero, -1
; RV32IZbb-NEXT: addi a7, zero, 1
; RV32IZbb-NEXT: addi a4, zero, 1
; RV32IZbb-NEXT: beqz t0, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: slt a4, a6, t0
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: addi a5, zero, 1
; RV32IZbb-NEXT: beqz a1, .LBB1_4
; RV32IZbb-NEXT: # %bb.3:
; RV32IZbb-NEXT: slt a5, a6, a1
; RV32IZbb-NEXT: .LBB1_4:
; RV32IZbb-NEXT: xor a1, a5, a4
; RV32IZbb-NEXT: snez a1, a1
; RV32IZbb-NEXT: beqz a3, .LBB1_6
; RV32IZbb-NEXT: # %bb.5:
; RV32IZbb-NEXT: slt a7, a6, a3
; RV32IZbb-NEXT: .LBB1_6:
; RV32IZbb-NEXT: xor a3, a5, a7
; RV32IZbb-NEXT: snez a3, a3
; RV32IZbb-NEXT: and a3, a3, a1
; RV32IZbb-NEXT: bnez a3, .LBB1_8
; RV32IZbb-NEXT: # %bb.7:
; RV32IZbb-NEXT: sub a0, a0, a2
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: bltz t0, .LBB1_9
; RV32IZbb-NEXT: j .LBB1_10
; RV32IZbb-NEXT: .LBB1_8:
; RV32IZbb-NEXT: srai a0, t0, 31
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: bgez t0, .LBB1_10
; RV32IZbb-NEXT: .LBB1_9:
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: .LBB1_10:
; RV32IZbb-NEXT: beqz a3, .LBB1_12
; RV32IZbb-NEXT: # %bb.11:
; RV32IZbb-NEXT: ret
; RV32IZbb-NEXT: .LBB1_12:
; RV32IZbb-NEXT: mv a1, t0
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func2:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sgtz a2, a1
; RV64IZbb-NEXT: sub a1, a0, a1
; RV64IZbb-NEXT: slt a0, a1, a0
; RV64IZbb-NEXT: xor a2, a2, a0
; RV64IZbb-NEXT: addi a0, zero, -1
; RV64IZbb-NEXT: slli a0, a0, 63
; RV64IZbb-NEXT: bltz a1, .LBB1_3
; RV64IZbb-NEXT: # %bb.1:
; RV64IZbb-NEXT: beqz a2, .LBB1_4
; RV64IZbb-NEXT: .LBB1_2:
; RV64IZbb-NEXT: ret
; RV64IZbb-NEXT: .LBB1_3:
; RV64IZbb-NEXT: addi a0, a0, -1
; RV64IZbb-NEXT: bnez a2, .LBB1_2
; RV64IZbb-NEXT: .LBB1_4:
; RV64IZbb-NEXT: mv a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
ret i64 %tmp;
}
@@ -156,6 +252,26 @@ define signext i16 @func16(i16 signext %x, i16 signext %y) nounwind {
; RV64I-NEXT: .LBB2_4:
; RV64I-NEXT: lui a0, 1048568
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func16:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: lui a1, 8
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: lui a1, 1048568
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: lui a1, 8
; RV64IZbb-NEXT: addiw a1, a1, -1
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: lui a1, 1048568
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i16 @llvm.ssub.sat.i16(i16 %x, i16 %y);
ret i16 %tmp;
}
@@ -196,6 +312,24 @@ define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
; RV64I-NEXT: .LBB3_4:
; RV64I-NEXT: addi a0, zero, -128
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 127
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, -128
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 127
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, -128
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i8 @llvm.ssub.sat.i8(i8 %x, i8 %y);
ret i8 %tmp;
}
@@ -236,6 +370,24 @@ define signext i4 @func3(i4 signext %x, i4 signext %y) nounwind {
; RV64I-NEXT: .LBB4_4:
; RV64I-NEXT: addi a0, zero, -8
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func3:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 7
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, -8
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func3:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 7
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, -8
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y);
ret i4 %tmp;
}

View File

@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV64IZbb
declare i4 @llvm.ssub.sat.i4(i4, i4)
declare i8 @llvm.ssub.sat.i8(i8, i8)
@@ -47,6 +49,37 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; RV64I-NEXT: .LBB0_4:
; RV64I-NEXT: lui a0, 524288
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func32:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: sgtz a2, a1
; RV32IZbb-NEXT: sub a1, a0, a1
; RV32IZbb-NEXT: slt a0, a1, a0
; RV32IZbb-NEXT: xor a2, a2, a0
; RV32IZbb-NEXT: lui a0, 524288
; RV32IZbb-NEXT: bltz a1, .LBB0_3
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: beqz a2, .LBB0_4
; RV32IZbb-NEXT: .LBB0_2:
; RV32IZbb-NEXT: ret
; RV32IZbb-NEXT: .LBB0_3:
; RV32IZbb-NEXT: addi a0, a0, -1
; RV32IZbb-NEXT: bnez a2, .LBB0_2
; RV32IZbb-NEXT: .LBB0_4:
; RV32IZbb-NEXT: mv a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func32:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.w a0, a0
; RV64IZbb-NEXT: mulw a1, a1, a2
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: lui a1, 524288
; RV64IZbb-NEXT: addiw a2, a1, -1
; RV64IZbb-NEXT: min a0, a0, a2
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i32 %y, %z
%tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %a)
ret i32 %tmp
@@ -118,6 +151,72 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
; RV64I-NEXT: .LBB1_4:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func64:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sltu a2, a0, a4
; RV32IZbb-NEXT: sub a3, a1, a5
; RV32IZbb-NEXT: sub t0, a3, a2
; RV32IZbb-NEXT: addi a6, zero, -1
; RV32IZbb-NEXT: addi a7, zero, 1
; RV32IZbb-NEXT: addi a2, zero, 1
; RV32IZbb-NEXT: beqz t0, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: slt a2, a6, t0
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: addi a3, zero, 1
; RV32IZbb-NEXT: beqz a1, .LBB1_4
; RV32IZbb-NEXT: # %bb.3:
; RV32IZbb-NEXT: slt a3, a6, a1
; RV32IZbb-NEXT: .LBB1_4:
; RV32IZbb-NEXT: xor a1, a3, a2
; RV32IZbb-NEXT: snez a1, a1
; RV32IZbb-NEXT: beqz a5, .LBB1_6
; RV32IZbb-NEXT: # %bb.5:
; RV32IZbb-NEXT: slt a7, a6, a5
; RV32IZbb-NEXT: .LBB1_6:
; RV32IZbb-NEXT: xor a2, a3, a7
; RV32IZbb-NEXT: snez a2, a2
; RV32IZbb-NEXT: and a3, a2, a1
; RV32IZbb-NEXT: bnez a3, .LBB1_8
; RV32IZbb-NEXT: # %bb.7:
; RV32IZbb-NEXT: sub a0, a0, a4
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: bltz t0, .LBB1_9
; RV32IZbb-NEXT: j .LBB1_10
; RV32IZbb-NEXT: .LBB1_8:
; RV32IZbb-NEXT: srai a0, t0, 31
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: bgez t0, .LBB1_10
; RV32IZbb-NEXT: .LBB1_9:
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: .LBB1_10:
; RV32IZbb-NEXT: beqz a3, .LBB1_12
; RV32IZbb-NEXT: # %bb.11:
; RV32IZbb-NEXT: ret
; RV32IZbb-NEXT: .LBB1_12:
; RV32IZbb-NEXT: mv a1, t0
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func64:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sgtz a3, a2
; RV64IZbb-NEXT: sub a1, a0, a2
; RV64IZbb-NEXT: slt a0, a1, a0
; RV64IZbb-NEXT: xor a2, a3, a0
; RV64IZbb-NEXT: addi a0, zero, -1
; RV64IZbb-NEXT: slli a0, a0, 63
; RV64IZbb-NEXT: bltz a1, .LBB1_3
; RV64IZbb-NEXT: # %bb.1:
; RV64IZbb-NEXT: beqz a2, .LBB1_4
; RV64IZbb-NEXT: .LBB1_2:
; RV64IZbb-NEXT: ret
; RV64IZbb-NEXT: .LBB1_3:
; RV64IZbb-NEXT: addi a0, a0, -1
; RV64IZbb-NEXT: bnez a2, .LBB1_2
; RV64IZbb-NEXT: .LBB1_4:
; RV64IZbb-NEXT: mv a0, a1
; RV64IZbb-NEXT: ret
%a = mul i64 %y, %z
%tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %z)
ret i64 %tmp
@@ -171,6 +270,32 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
; RV64I-NEXT: .LBB2_4:
; RV64I-NEXT: lui a0, 1048568
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func16:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sext.h a0, a0
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: sext.h a1, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: lui a1, 8
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: lui a1, 1048568
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.h a0, a0
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: sext.h a1, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: lui a1, 8
; RV64IZbb-NEXT: addiw a1, a1, -1
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: lui a1, 1048568
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i16 %y, %z
%tmp = call i16 @llvm.ssub.sat.i16(i16 %x, i16 %a)
ret i16 %tmp
@@ -222,6 +347,30 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; RV64I-NEXT: .LBB3_4:
; RV64I-NEXT: addi a0, zero, -128
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sext.b a0, a0
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: sext.b a1, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 127
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, -128
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.b a0, a0
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: sext.b a1, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 127
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, -128
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i8 %y, %z
%tmp = call i8 @llvm.ssub.sat.i8(i8 %x, i8 %a)
ret i8 %tmp
@@ -273,6 +422,34 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
; RV64I-NEXT: .LBB4_4:
; RV64I-NEXT: addi a0, zero, -8
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func4:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: slli a0, a0, 28
; RV32IZbb-NEXT: srai a0, a0, 28
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: slli a1, a1, 28
; RV32IZbb-NEXT: srai a1, a1, 28
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 7
; RV32IZbb-NEXT: min a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, -8
; RV32IZbb-NEXT: max a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func4:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: slli a0, a0, 60
; RV64IZbb-NEXT: srai a0, a0, 60
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: slli a1, a1, 60
; RV64IZbb-NEXT: srai a1, a1, 60
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 7
; RV64IZbb-NEXT: min a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, -8
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i4 %y, %z
%tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %a)
ret i4 %tmp

View File

@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV64IZbb
declare i4 @llvm.uadd.sat.i4(i4, i4)
declare i8 @llvm.uadd.sat.i8(i8, i8)
@@ -36,6 +38,27 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: not a2, a1
; RV32IZbb-NEXT: minu a0, a0, a2
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: slli a1, a1, 32
; RV64IZbb-NEXT: srli a1, a1, 32
; RV64IZbb-NEXT: slli a0, a0, 32
; RV64IZbb-NEXT: srli a0, a0, 32
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 1
; RV64IZbb-NEXT: slli a1, a1, 32
; RV64IZbb-NEXT: addi a1, a1, -1
; RV64IZbb-NEXT: minu a0, a0, a1
; RV64IZbb-NEXT: sext.w a0, a0
; RV64IZbb-NEXT: ret
%tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y);
ret i32 %tmp;
}
@@ -70,6 +93,32 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func2:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: add a3, a1, a3
; RV32IZbb-NEXT: add a2, a0, a2
; RV32IZbb-NEXT: sltu a4, a2, a0
; RV32IZbb-NEXT: add a3, a3, a4
; RV32IZbb-NEXT: beq a3, a1, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: sltu a4, a3, a1
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: addi a0, zero, -1
; RV32IZbb-NEXT: addi a1, zero, -1
; RV32IZbb-NEXT: bnez a4, .LBB1_4
; RV32IZbb-NEXT: # %bb.3:
; RV32IZbb-NEXT: mv a0, a2
; RV32IZbb-NEXT: mv a1, a3
; RV32IZbb-NEXT: .LBB1_4:
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func2:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: not a2, a1
; RV64IZbb-NEXT: minu a0, a0, a2
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %y);
ret i64 %tmp;
}
@@ -96,6 +145,22 @@ define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB2_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func16:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: lui a1, 16
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: minu a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: lui a1, 16
; RV64IZbb-NEXT: addiw a1, a1, -1
; RV64IZbb-NEXT: minu a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %y);
ret i16 %tmp;
}
@@ -120,6 +185,20 @@ define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
; RV64I-NEXT: addi a0, zero, 255
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 255
; RV32IZbb-NEXT: minu a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 255
; RV64IZbb-NEXT: minu a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y);
ret i8 %tmp;
}
@@ -144,6 +223,20 @@ define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
; RV64I-NEXT: addi a0, zero, 15
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func3:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 15
; RV32IZbb-NEXT: minu a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func3:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 15
; RV64IZbb-NEXT: minu a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y);
ret i4 %tmp;
}

View File

@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV64IZbb
declare i4 @llvm.uadd.sat.i4(i4, i4)
declare i8 @llvm.uadd.sat.i8(i8, i8)
@@ -37,6 +39,28 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func32:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: not a2, a1
; RV32IZbb-NEXT: minu a0, a0, a2
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func32:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: slli a0, a0, 32
; RV64IZbb-NEXT: srli a0, a0, 32
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: slli a1, a1, 32
; RV64IZbb-NEXT: srli a1, a1, 32
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 1
; RV64IZbb-NEXT: slli a1, a1, 32
; RV64IZbb-NEXT: addi a1, a1, -1
; RV64IZbb-NEXT: minu a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i32 %y, %z
%tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %a)
ret i32 %tmp
@@ -72,6 +96,32 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func64:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: add a3, a1, a5
; RV32IZbb-NEXT: add a2, a0, a4
; RV32IZbb-NEXT: sltu a4, a2, a0
; RV32IZbb-NEXT: add a3, a3, a4
; RV32IZbb-NEXT: beq a3, a1, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: sltu a4, a3, a1
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: addi a0, zero, -1
; RV32IZbb-NEXT: addi a1, zero, -1
; RV32IZbb-NEXT: bnez a4, .LBB1_4
; RV32IZbb-NEXT: # %bb.3:
; RV32IZbb-NEXT: mv a0, a2
; RV32IZbb-NEXT: mv a1, a3
; RV32IZbb-NEXT: .LBB1_4:
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func64:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: not a1, a2
; RV64IZbb-NEXT: minu a0, a0, a1
; RV64IZbb-NEXT: add a0, a0, a2
; RV64IZbb-NEXT: ret
%a = mul i64 %y, %z
%tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %z)
ret i64 %tmp
@@ -105,6 +155,28 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB2_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func16:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: zext.h a0, a0
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: zext.h a1, a1
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: lui a1, 16
; RV32IZbb-NEXT: addi a1, a1, -1
; RV32IZbb-NEXT: minu a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: zext.h a0, a0
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: zext.h a1, a1
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: lui a1, 16
; RV64IZbb-NEXT: addiw a1, a1, -1
; RV64IZbb-NEXT: minu a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i16 %y, %z
%tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %a)
ret i16 %tmp
@@ -136,6 +208,26 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; RV64I-NEXT: addi a0, zero, 255
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: andi a0, a0, 255
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: andi a1, a1, 255
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 255
; RV32IZbb-NEXT: minu a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: andi a0, a0, 255
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: andi a1, a1, 255
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 255
; RV64IZbb-NEXT: minu a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i8 %y, %z
%tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %a)
ret i8 %tmp
@@ -167,6 +259,26 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
; RV64I-NEXT: addi a0, zero, 15
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func4:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: andi a0, a0, 15
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: andi a1, a1, 15
; RV32IZbb-NEXT: add a0, a0, a1
; RV32IZbb-NEXT: addi a1, zero, 15
; RV32IZbb-NEXT: minu a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func4:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: andi a0, a0, 15
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: andi a1, a1, 15
; RV64IZbb-NEXT: add a0, a0, a1
; RV64IZbb-NEXT: addi a1, zero, 15
; RV64IZbb-NEXT: minu a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i4 %y, %z
%tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %a)
ret i4 %tmp

View File

@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV64IZbb
declare i4 @llvm.usub.sat.i4(i4, i4)
declare i8 @llvm.usub.sat.i8(i8, i8)
@@ -35,6 +37,22 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: sext.w a0, a1
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: maxu a0, a0, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: slli a2, a1, 32
; RV64IZbb-NEXT: srli a2, a2, 32
; RV64IZbb-NEXT: slli a0, a0, 32
; RV64IZbb-NEXT: srli a0, a0, 32
; RV64IZbb-NEXT: maxu a0, a0, a2
; RV64IZbb-NEXT: subw a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y);
ret i32 %tmp;
}
@@ -72,6 +90,34 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func2:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sltu a4, a0, a2
; RV32IZbb-NEXT: sub a3, a1, a3
; RV32IZbb-NEXT: sub a3, a3, a4
; RV32IZbb-NEXT: sub a2, a0, a2
; RV32IZbb-NEXT: beq a3, a1, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: sltu a4, a1, a3
; RV32IZbb-NEXT: j .LBB1_3
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: sltu a4, a0, a2
; RV32IZbb-NEXT: .LBB1_3:
; RV32IZbb-NEXT: mv a0, zero
; RV32IZbb-NEXT: mv a1, zero
; RV32IZbb-NEXT: bnez a4, .LBB1_5
; RV32IZbb-NEXT: # %bb.4:
; RV32IZbb-NEXT: mv a0, a2
; RV32IZbb-NEXT: mv a1, a3
; RV32IZbb-NEXT: .LBB1_5:
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func2:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: ret
%tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y);
ret i64 %tmp;
}
@@ -104,6 +150,20 @@ define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func16:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: maxu a0, a0, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: zext.h a0, a0
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: zext.h a0, a0
; RV64IZbb-NEXT: ret
%tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %y);
ret i16 %tmp;
}
@@ -132,6 +192,20 @@ define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: andi a0, a0, 255
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: maxu a0, a0, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: andi a0, a0, 255
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: andi a0, a0, 255
; RV64IZbb-NEXT: ret
%tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %y);
ret i8 %tmp;
}
@@ -160,6 +234,20 @@ define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: andi a0, a0, 15
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func3:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: maxu a0, a0, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: andi a0, a0, 15
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func3:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: andi a0, a0, 15
; RV64IZbb-NEXT: ret
%tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %y);
ret i4 %tmp;
}

View File

@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-zbb | FileCheck %s --check-prefix=RV64IZbb
declare i4 @llvm.usub.sat.i4(i4, i4)
declare i8 @llvm.usub.sat.i8(i8, i8)
@@ -36,6 +38,24 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func32:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: maxu a0, a0, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func32:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: slli a0, a0, 32
; RV64IZbb-NEXT: srli a0, a0, 32
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: slli a1, a1, 32
; RV64IZbb-NEXT: srli a1, a1, 32
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i32 %y, %z
%tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %a)
ret i32 %tmp
@@ -74,6 +94,34 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func64:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: sltu a2, a0, a4
; RV32IZbb-NEXT: sub a3, a1, a5
; RV32IZbb-NEXT: sub a2, a3, a2
; RV32IZbb-NEXT: sub a3, a0, a4
; RV32IZbb-NEXT: beq a2, a1, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: sltu a4, a1, a2
; RV32IZbb-NEXT: j .LBB1_3
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: sltu a4, a0, a3
; RV32IZbb-NEXT: .LBB1_3:
; RV32IZbb-NEXT: mv a0, zero
; RV32IZbb-NEXT: mv a1, zero
; RV32IZbb-NEXT: bnez a4, .LBB1_5
; RV32IZbb-NEXT: # %bb.4:
; RV32IZbb-NEXT: mv a0, a3
; RV32IZbb-NEXT: mv a1, a2
; RV32IZbb-NEXT: .LBB1_5:
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func64:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: maxu a0, a0, a2
; RV64IZbb-NEXT: sub a0, a0, a2
; RV64IZbb-NEXT: ret
%a = mul i64 %y, %z
%tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %z)
ret i64 %tmp
@@ -111,6 +159,24 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB2_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func16:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: zext.h a0, a0
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: zext.h a1, a1
; RV32IZbb-NEXT: maxu a0, a0, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func16:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: zext.h a0, a0
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: zext.h a1, a1
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i16 %y, %z
%tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %a)
ret i16 %tmp
@@ -144,6 +210,24 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func8:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: andi a0, a0, 255
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: andi a1, a1, 255
; RV32IZbb-NEXT: maxu a0, a0, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func8:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: andi a0, a0, 255
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: andi a1, a1, 255
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i8 %y, %z
%tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %a)
ret i8 %tmp
@@ -177,6 +261,24 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: ret
;
; RV32IZbb-LABEL: func4:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: andi a0, a0, 15
; RV32IZbb-NEXT: mul a1, a1, a2
; RV32IZbb-NEXT: andi a1, a1, 15
; RV32IZbb-NEXT: maxu a0, a0, a1
; RV32IZbb-NEXT: sub a0, a0, a1
; RV32IZbb-NEXT: ret
;
; RV64IZbb-LABEL: func4:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: andi a0, a0, 15
; RV64IZbb-NEXT: mul a1, a1, a2
; RV64IZbb-NEXT: andi a1, a1, 15
; RV64IZbb-NEXT: maxu a0, a0, a1
; RV64IZbb-NEXT: sub a0, a0, a1
; RV64IZbb-NEXT: ret
%a = mul i4 %y, %z
%tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %a)
ret i4 %tmp