Stars: 1 · Forks: 0 — mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-21 18:22:53 +01:00)

[AArch64][x86] add tests for add-with-overflow folds; NFC

There's a generic combine for these, but no test coverage.
It's not clear if this is actually a good fold.
The combine was added with D58874, but it has a bug that
can cause crashing ( https://llvm.org/PR51238 ).

(cherry picked from commit e427077ec10ea18ac21f5065342183481d87783a)
This commit is contained in:
Sanjay Patel 2021-07-28 14:53:47 -04:00 committed by Tom Stellard
parent b92c9f9565
commit a0686462c3
2 changed files with 96 additions and 20 deletions

View File

@@ -223,4 +223,48 @@ test6:
ret:
ret void
}
; Signed add-with-overflow intrinsic: returns {sum, overflow-bit}.
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
; sadd_add: sadd.with.overflow(~a, b); stores sum+1 to %p and returns the
; overflow bit.  Since ~a == -a-1, sum+1 == b-a — confirmed by the checked
; "sub w9, w1, w0" below.  Exercises the (add (sadd.ov (xor a,-1), b), 1)
; combine from D58874 (see PR51238 in the commit message).
; NOTE(review): the CHECK lines appear autogenerated (update_llc_test_checks.py
; style) — regenerate rather than hand-editing them.
define i1 @sadd_add(i32 %a, i32 %b, i32* %p) {
; CHECK-LABEL: sadd_add:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn w8, w0
; CHECK-NEXT: cmn w8, w1
; CHECK-NEXT: cset w8, vs
; CHECK-NEXT: sub w9, w1, w0
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: str w9, [x2]
; CHECK-NEXT: ret
%nota = xor i32 %a, -1
%a0 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %nota, i32 %b)
%e0 = extractvalue {i32, i1} %a0, 0
%e1 = extractvalue {i32, i1} %a0, 1
%res = add i32 %e0, 1
store i32 %res, i32* %p
ret i1 %e1
}
; Unsigned add-with-overflow intrinsic (i8): returns {sum, carry-out bit}.
declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
; uadd_add: uadd.with.overflow(~a, b); stores sum+1 (== b-a, per the checked
; "sub w9, w1, w0") to %p and returns the carry bit.  i8 width exercises the
; fold on a sub-register type (note the uxtb/#0xff masking and the carry
; extracted via "lsr w8, w8, #8" in the checked asm).
; NOTE(review): CHECK lines appear autogenerated — regenerate, don't hand-edit.
define i1 @uadd_add(i8 %a, i8 %b, i8* %p) {
; CHECK-LABEL: uadd_add:
; CHECK: // %bb.0:
; CHECK-NEXT: mvn w8, w0
; CHECK-NEXT: and w8, w8, #0xff
; CHECK-NEXT: add w8, w8, w1, uxtb
; CHECK-NEXT: lsr w8, w8, #8
; CHECK-NEXT: sub w9, w1, w0
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: strb w9, [x2]
; CHECK-NEXT: ret
%nota = xor i8 %a, -1
%a0 = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %nota, i8 %b)
%e0 = extractvalue {i8, i1} %a0, 0
%e1 = extractvalue {i8, i1} %a0, 1
%res = add i8 %e0, 1
store i8 %res, i8* %p
ret i1 %e1
}
; TODO: adds/subs

View File

@@ -1,16 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX
; fold (add x, 0) -> x
; combine_vec_add_to_zero: adding zeroinitializer must be folded away entirely,
; so the generated code is a bare ret on both SSE and AVX.
; NOTE(review): both the old per-prefix (SSE/AVX) check blocks and the new
; unified CHECK block appear below — this looks like a diff whose -/+ markers
; were stripped; in the updated file only the CHECK block should remain.
define <4 x i32> @combine_vec_add_to_zero(<4 x i32> %a) {
; SSE-LABEL: combine_vec_add_to_zero:
; SSE: # %bb.0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_add_to_zero:
; AVX: # %bb.0:
; AVX-NEXT: retq
; CHECK-LABEL: combine_vec_add_to_zero:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
%1 = add <4 x i32> %a, zeroinitializer
ret <4 x i32> %1
}
@@ -352,17 +348,11 @@ define <4 x i32> @combine_vec_add_sextinreg(<4 x i32> %a0, <4 x i32> %a1) {
; (add (add (xor a, -1), b), 1) -> (sub b, a)
define i32 @combine_add_add_not(i32 %a, i32 %b) {
; SSE-LABEL: combine_add_add_not:
; SSE: # %bb.0:
; SSE-NEXT: movl %esi, %eax
; SSE-NEXT: subl %edi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: combine_add_add_not:
; AVX: # %bb.0:
; AVX-NEXT: movl %esi, %eax
; AVX-NEXT: subl %edi, %eax
; AVX-NEXT: retq
; CHECK-LABEL: combine_add_add_not:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: subl %edi, %eax
; CHECK-NEXT: retq
%nota = xor i32 %a, -1
%add = add i32 %nota, %b
%r = add i32 %add, 1
@@ -385,3 +375,45 @@ define <4 x i32> @combine_vec_add_add_not(<4 x i32> %a, <4 x i32> %b) {
%r = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %r
}
; Signed add-with-overflow intrinsic: returns {sum, overflow-bit}.
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
; sadd_add: sadd.with.overflow(~a, b); stores sum+1 to %p and returns the
; overflow flag (seto).  Since ~a == -a-1, sum+1 == b-a — confirmed by the
; checked "subl %edi, %esi" / store to (%rdx).  Exercises the
; (add (sadd.ov (xor a,-1), b), 1) combine from D58874 (crash bug: PR51238).
; Assertions are autogenerated by update_llc_test_checks.py (file header) —
; regenerate rather than hand-editing the CHECK lines.
define i1 @sadd_add(i32 %a, i32 %b, i32* %p) {
; CHECK-LABEL: sadd_add:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: addl %esi, %eax
; CHECK-NEXT: seto %al
; CHECK-NEXT: subl %edi, %esi
; CHECK-NEXT: movl %esi, (%rdx)
; CHECK-NEXT: retq
%nota = xor i32 %a, -1
%a0 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %nota, i32 %b)
%e0 = extractvalue {i32, i1} %a0, 0
%e1 = extractvalue {i32, i1} %a0, 1
%res = add i32 %e0, 1
store i32 %res, i32* %p
ret i1 %e1
}
; Unsigned add-with-overflow intrinsic (i8): returns {sum, carry-out bit}.
declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
; uadd_add: uadd.with.overflow(~a, b); stores sum+1 (== b-a, per the checked
; "subb %dil, %sil") to %p and returns the carry flag (setb).  i8 width
; exercises the fold on a sub-register type (notb/addb on %al/%sil).
; Assertions are autogenerated by update_llc_test_checks.py (file header) —
; regenerate rather than hand-editing the CHECK lines.
define i1 @uadd_add(i8 %a, i8 %b, i8* %p) {
; CHECK-LABEL: uadd_add:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notb %al
; CHECK-NEXT: addb %sil, %al
; CHECK-NEXT: setb %al
; CHECK-NEXT: subb %dil, %sil
; CHECK-NEXT: movb %sil, (%rdx)
; CHECK-NEXT: retq
%nota = xor i8 %a, -1
%a0 = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %nota, i8 %b)
%e0 = extractvalue {i8, i1} %a0, 0
%e1 = extractvalue {i8, i1} %a0, 1
%res = add i8 %e0, 1
store i8 %res, i8* %p
ret i1 %e1
}