; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone

declare {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
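
; Each *.with.overflow intrinsic returns a {result, overflow} pair; every test
; below selects between the sum and a fallback value based on the overflow bit,
; so the checks verify both the arithmetic and the overflow-flag combines.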

; fold (sadd x, 0) -> x
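; Adding zero can never overflow, so the overflow bit folds to false and the
; select collapses to the first operand: a single register move in the scalar
; case, and no instructions at all in the vector case.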
define i32 @combine_sadd_zero(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_sadd_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    movl %edi, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_sadd_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    movl %edi, %eax
; AVX-NEXT:    retq
  %1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
  %2 = extractvalue {i32, i1} %1, 0
  %3 = extractvalue {i32, i1} %1, 1
  %4 = select i1 %3, i32 %a1, i32 %2
  ret i32 %4
}

define <4 x i32> @combine_vec_sadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_sadd_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_sadd_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = call {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
  ret <4 x i32> %4
}

; fold (uadd x, 0) -> x
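; The unsigned case is identical: x + 0 cannot carry, so the overflow bit is
; known false and only the move of %a0 survives.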
define i32 @combine_uadd_zero(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_uadd_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    movl %edi, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_uadd_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    movl %edi, %eax
; AVX-NEXT:    retq
  %1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
  %2 = extractvalue {i32, i1} %1, 0
  %3 = extractvalue {i32, i1} %1, 1
  %4 = select i1 %3, i32 %a1, i32 %2
  ret i32 %4
}

define <4 x i32> @combine_vec_uadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_uadd_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_uadd_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
  ret <4 x i32> %4
}

; fold (uadd (xor a, -1), 1) -> (usub 0, a) and flip carry
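; ~a + 1 is the two's complement negation 0 - a. The uadd carries only when
; a == 0, while the usub borrows only when a != 0, so the carry must be
; flipped: negl sets CF for any nonzero input, and cmovael (CF == 0) picks the
; fallback exactly when the original uadd would have overflowed.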
define i32 @combine_uadd_not(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_uadd_not:
; SSE:       # %bb.0:
; SSE-NEXT:    movl %edi, %eax
; SSE-NEXT:    negl %eax
; SSE-NEXT:    cmovael %esi, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_uadd_not:
; AVX:       # %bb.0:
; AVX-NEXT:    movl %edi, %eax
; AVX-NEXT:    negl %eax
; AVX-NEXT:    cmovael %esi, %eax
; AVX-NEXT:    retq
  %1 = xor i32 %a0, -1
  %2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 1)
  %3 = extractvalue {i32, i1} %2, 0
  %4 = extractvalue {i32, i1} %2, 1
  %5 = select i1 %4, i32 %a1, i32 %3
  ret i32 %5
}
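
; The vector form has no flags register, so "negation != 0" is materialized as
; an unsigned comparison: max(x, 1) == x holds exactly when x != 0, which is
; what the pmaxud/pcmpeqd (vpmaxud/vpcmpeqd) pair below computes to drive the
; blend between the negation result and the fallback vector.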
define <4 x i32> @combine_vec_uadd_not(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_uadd_not:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    psubd %xmm0, %xmm2
; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE-NEXT:    pmaxud %xmm2, %xmm0
; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE-NEXT:    blendvps %xmm0, %xmm2, %xmm1
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_uadd_not:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX-NEXT:    vpmaxud %xmm2, %xmm0, %xmm2
; AVX-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm2
; AVX-NEXT:    vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = xor <4 x i32> %a0, <i32 -1, i32 -1, i32 -1, i32 -1>
  %2 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
  %3 = extractvalue {<4 x i32>, <4 x i1>} %2, 0
  %4 = extractvalue {<4 x i32>, <4 x i1>} %2, 1
  %5 = select <4 x i1> %4, <4 x i32> %a1, <4 x i32> %3
  ret <4 x i32> %5
}

; if uaddo never overflows, replace with add
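; Both operands are shifted right by 16, so each fits in 16 bits and the
; 32-bit sum cannot wrap: the overflow bit is known false, the select folds
; away, and the uaddo lowers to a plain add (lea / paddd).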
define i32 @combine_uadd_no_overflow(i32 %a0, i32 %a1, i32 %a2) {
; SSE-LABEL: combine_uadd_no_overflow:
; SSE:       # %bb.0:
; SSE-NEXT:    # kill: def $edx killed $edx def $rdx
; SSE-NEXT:    # kill: def $esi killed $esi def $rsi
; SSE-NEXT:    shrl $16, %esi
; SSE-NEXT:    shrl $16, %edx
; SSE-NEXT:    leal (%rdx,%rsi), %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_uadd_no_overflow:
; AVX:       # %bb.0:
; AVX-NEXT:    # kill: def $edx killed $edx def $rdx
; AVX-NEXT:    # kill: def $esi killed $esi def $rsi
; AVX-NEXT:    shrl $16, %esi
; AVX-NEXT:    shrl $16, %edx
; AVX-NEXT:    leal (%rdx,%rsi), %eax
; AVX-NEXT:    retq
  %1 = lshr i32 %a1, 16
  %2 = lshr i32 %a2, 16
  %3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %1, i32 %2)
  %4 = extractvalue {i32, i1} %3, 0
  %5 = extractvalue {i32, i1} %3, 1
  %6 = select i1 %5, i32 %a2, i32 %4
  ret i32 %6
}
define <4 x i32> @combine_vec_uadd_no_overflow(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; SSE-LABEL: combine_vec_uadd_no_overflow:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    psrld $16, %xmm1
; SSE-NEXT:    psrld $16, %xmm0
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_uadd_no_overflow:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $16, %xmm1, %xmm0
; AVX-NEXT:    vpsrld $16, %xmm2, %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %a1, <i32 16, i32 16, i32 16, i32 16>
  %2 = lshr <4 x i32> %a2, <i32 16, i32 16, i32 16, i32 16>
  %3 = call {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %1, <4 x i32> %2)
  %4 = extractvalue {<4 x i32>, <4 x i1>} %3, 0
  %5 = extractvalue {<4 x i32>, <4 x i1>} %3, 1
  %6 = select <4 x i1> %5, <4 x i32> %a2, <4 x i32> %4
  ret <4 x i32> %6
}