commit 7c243aea4b

Summary:
This patch adds two intrinsics, llvm.sshl.sat and llvm.ushl.sat, which
perform signed and unsigned saturating left shift, respectively. These are
useful for implementing the Embedded-C fixed-point support in Clang,
originally discussed in
http://lists.llvm.org/pipermail/llvm-dev/2018-August/125433.html and
http://lists.llvm.org/pipermail/cfe-dev/2018-May/058019.html

Reviewers: leonardchan, craig.topper, bjope, jdoerfert

Subscribers: hiraditya, jdoerfert, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D83216
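
For context, the new intrinsics are overloaded on the operand type, so scalar
forms follow the same naming scheme as the v4i32 declaration exercised in the
test below. A minimal scalar sketch (the function name @example and the value
names are illustrative, not part of the patch):

declare i32 @llvm.ushl.sat.i32(i32, i32)

define i32 @example(i32 %x, i32 %y) {
  ; Unsigned saturating shift: %x << %y, except that if any set bits would
  ; be shifted out, the result clamps to the unsigned maximum (all ones)
  ; instead of wrapping.
  %r = call i32 @llvm.ushl.sat.i32(i32 %x, i32 %y)
  ret i32 %r
}

The signed variant, @llvm.sshl.sat, instead clamps to the signed minimum or
maximum according to the sign of the first operand.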
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=X86

declare <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32>, <4 x i32>)
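
; Neither RUN configuration has a native vector saturating-shift instruction,
; so the operation is scalarized: each lane is extracted into a GPR, shifted
; left, shifted back right by the same amount, and compared against the
; original value. A mismatch means bits were shifted out, and cmovne
; substitutes -1 (all ones, the unsigned maximum) for that lane. The
; "# kill" lines are register-liveness annotations, not instructions.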
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; X64-LABEL: vec:
; X64:       # %bb.0:
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; X64-NEXT:    movd %xmm2, %eax
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
; X64-NEXT:    movd %xmm2, %ecx
; X64-NEXT:    movl %eax, %edx
; X64-NEXT:    shll %cl, %edx
; X64-NEXT:    movl %edx, %esi
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shrl %cl, %esi
; X64-NEXT:    cmpl %esi, %eax
; X64-NEXT:    movl $-1, %eax
; X64-NEXT:    cmovnel %eax, %edx
; X64-NEXT:    movd %edx, %xmm2
; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; X64-NEXT:    movd %xmm3, %edx
; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; X64-NEXT:    movd %xmm3, %ecx
; X64-NEXT:    movl %edx, %esi
; X64-NEXT:    shll %cl, %esi
; X64-NEXT:    movl %esi, %edi
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shrl %cl, %edi
; X64-NEXT:    cmpl %edi, %edx
; X64-NEXT:    cmovnel %eax, %esi
; X64-NEXT:    movd %esi, %xmm3
; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X64-NEXT:    movd %xmm0, %edx
; X64-NEXT:    movd %xmm1, %ecx
; X64-NEXT:    movl %edx, %esi
; X64-NEXT:    shll %cl, %esi
; X64-NEXT:    movl %esi, %edi
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shrl %cl, %edi
; X64-NEXT:    cmpl %edi, %edx
; X64-NEXT:    cmovnel %eax, %esi
; X64-NEXT:    movd %esi, %xmm2
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X64-NEXT:    movd %xmm0, %edx
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; X64-NEXT:    movd %xmm0, %ecx
; X64-NEXT:    movl %edx, %esi
; X64-NEXT:    shll %cl, %esi
; X64-NEXT:    movl %esi, %edi
; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
; X64-NEXT:    shrl %cl, %edi
; X64-NEXT:    cmpl %edi, %edx
; X64-NEXT:    cmovnel %eax, %esi
; X64-NEXT:    movd %esi, %xmm0
; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; X64-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; X64-NEXT:    movdqa %xmm2, %xmm0
; X64-NEXT:    retq
;
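; On i686 the <4 x i32> result is returned through a hidden pointer argument
; (sret): each lane is stored to the out-pointer loaded into %eax, and
; "retl $4" pops that hidden argument on return.
;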
; X86-LABEL: vec:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
; X86-NEXT:    movb {{[0-9]+}}(%esp), %ah
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    shll %cl, %ebp
; X86-NEXT:    movl %ebp, %esi
; X86-NEXT:    shrl %cl, %esi
; X86-NEXT:    cmpl %esi, %edx
; X86-NEXT:    movl $-1, %edx
; X86-NEXT:    cmovnel %edx, %ebp
; X86-NEXT:    movl %edi, %ebx
; X86-NEXT:    movb %ah, %cl
; X86-NEXT:    shll %cl, %ebx
; X86-NEXT:    movl %ebx, %esi
; X86-NEXT:    shrl %cl, %esi
; X86-NEXT:    cmpl %esi, %edi
; X86-NEXT:    cmovnel %edx, %ebx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movb %ch, %cl
; X86-NEXT:    shll %cl, %esi
; X86-NEXT:    movl %esi, %edi
; X86-NEXT:    shrl %cl, %edi
; X86-NEXT:    cmpl %edi, {{[0-9]+}}(%esp)
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    cmovnel %edx, %esi
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    shll %cl, %edi
; X86-NEXT:    movl %edi, %eax
; X86-NEXT:    shrl %cl, %eax
; X86-NEXT:    cmpl %eax, {{[0-9]+}}(%esp)
; X86-NEXT:    cmovnel %edx, %edi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %edi, 12(%eax)
; X86-NEXT:    movl %esi, 8(%eax)
; X86-NEXT:    movl %ebx, 4(%eax)
; X86-NEXT:    movl %ebp, (%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
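; The vector form operates lane-wise: lane i of %x is shifted by lane i of %y.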
  %tmp = call <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %tmp
}