llvm-mirror/test/CodeGen/X86/shift-and.ll
[x86] reduce 64-bit mask constant to 32-bits by right shifting

Author: Sanjay Patel
Commit: 8fe52dcc51
This is a follow-up to D38181 (r314023). We have to put 64-bit
constants into a register using a separate instruction, so we
should try harder to avoid that.

From what I see, we're not likely to encounter this pattern in the
DAG because the upstream setcc combines don't (usually?) produce it.
If we fix that, this fold will become more relevant. Since the cost
of handling the case is just loosening the predicate of the existing
fold, we might as well do it now.
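
A rough sketch of the intended transform (the big_mask_constant test
below exercises it; the "before" asm is illustrative rather than the
exact output of any particular revision):

  (X & 0x400000000) >> 7  -->  (X >> 7) & 0x8000000

  before:  movabsq $17179869184, %rax   # 64-bit imm needs its own mov
           andq    %rax, %rdi
           shrq    $7, %rdi
  after:   shrq    $7, %rdi
           andl    $134217728, %edi     # 0x8000000 fits in an imm32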

llvm-svn: 314064
Date: 2017-09-23 14:32:07 +00:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-unknown | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
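
; t1: the 'and ..., 31' on the shift amount is redundant: x86 32-bit shifts
; already mask the count in %cl to 5 bits, so no 'and' should be emitted.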
define i32 @t1(i32 %t, i32 %val) nounwind {
; X32-LABEL: t1:
; X32: # BB#0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shll %cl, %eax
; X32-NEXT: retl
;
; X64-LABEL: t1:
; X64: # BB#0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shll %cl, %esi
; X64-NEXT: movl %esi, %eax
; X64-NEXT: retq
%shamt = and i32 %t, 31
%res = shl i32 %val, %shamt
ret i32 %res
}
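
; t2: a mask of 63 is also removable for an i32 shift: its low 5 bits are
; all ones, so the hardware's 5-bit count masking subsumes it.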
define i32 @t2(i32 %t, i32 %val) nounwind {
; X32-LABEL: t2:
; X32: # BB#0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shll %cl, %eax
; X32-NEXT: retl
;
; X64-LABEL: t2:
; X64: # BB#0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shll %cl, %esi
; X64-NEXT: movl %esi, %eax
; X64-NEXT: retq
%shamt = and i32 %t, 63
%res = shl i32 %val, %shamt
ret i32 %res
}
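
; t3: same for an i16 shift; x86 masks the count to 5 bits for 16-bit shifts
; too, and the load/modify/store of @X folds into a memory-operand 'sarw'.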
@X = internal global i16 0
define void @t3(i16 %t) nounwind {
; X32-LABEL: t3:
; X32: # BB#0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: sarw %cl, X
; X32-NEXT: retl
;
; X64-LABEL: t3:
; X64: # BB#0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: sarw %cl, {{.*}}(%rip)
; X64-NEXT: retq
%shamt = and i16 %t, 31
%tmp = load i16, i16* @X
%tmp1 = ashr i16 %tmp, %shamt
store i16 %tmp1, i16* @X
ret void
}
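
; t4: for a 64-bit shift, x86-64 masks the count to 6 bits, so 'and ..., 63'
; is redundant. On i386 the i64 shift is expanded (shrdl plus a test of bit 5
; of the count), still with no explicit 'and'.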
define i64 @t4(i64 %t, i64 %val) nounwind {
; X32-LABEL: t4:
; X32: # BB#0:
; X32-NEXT: pushl %esi
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, %edx
; X32-NEXT: shrl %cl, %edx
; X32-NEXT: shrdl %cl, %esi, %eax
; X32-NEXT: testb $32, %cl
; X32-NEXT: je .LBB3_2
; X32-NEXT: # BB#1:
; X32-NEXT: movl %edx, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: .LBB3_2:
; X32-NEXT: popl %esi
; X32-NEXT: retl
;
; X64-LABEL: t4:
; X64: # BB#0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shrq %cl, %rsi
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: retq
%shamt = and i64 %t, 63
%res = lshr i64 %val, %shamt
ret i64 %res
}
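
; t5: a mask of 191 (0b10111111) is also removable: its low 6 bits are all
; ones, so the hardware's 6-bit count masking subsumes it.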
define i64 @t5(i64 %t, i64 %val) nounwind {
; X32-LABEL: t5:
; X32: # BB#0:
; X32-NEXT: pushl %esi
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, %edx
; X32-NEXT: shrl %cl, %edx
; X32-NEXT: shrdl %cl, %esi, %eax
; X32-NEXT: testb $32, %cl
; X32-NEXT: je .LBB4_2
; X32-NEXT: # BB#1:
; X32-NEXT: movl %edx, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: .LBB4_2:
; X32-NEXT: popl %esi
; X32-NEXT: retl
;
; X64-LABEL: t5:
; X64: # BB#0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shrq %cl, %rsi
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: retq
%shamt = and i64 %t, 191
%res = lshr i64 %val, %shamt
ret i64 %res
}
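
; t5ptr: same as t5 but through memory; on x86-64 the load/shift/store folds
; into a single memory-operand 'shrq'.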
define void @t5ptr(i64 %t, i64* %ptr) nounwind {
; X32-LABEL: t5ptr:
; X32: # BB#0:
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl (%eax), %edx
; X32-NEXT: movl 4(%eax), %edi
; X32-NEXT: movl %edi, %esi
; X32-NEXT: shrl %cl, %esi
; X32-NEXT: shrdl %cl, %edi, %edx
; X32-NEXT: testb $32, %cl
; X32-NEXT: je .LBB5_2
; X32-NEXT: # BB#1:
; X32-NEXT: movl %esi, %edx
; X32-NEXT: xorl %esi, %esi
; X32-NEXT: .LBB5_2:
; X32-NEXT: movl %esi, 4(%eax)
; X32-NEXT: movl %edx, (%eax)
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: retl
;
; X64-LABEL: t5ptr:
; X64: # BB#0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: shrq %cl, (%rsi)
; X64-NEXT: retq
%shamt = and i64 %t, 191
%tmp = load i64, i64* %ptr
%tmp1 = lshr i64 %tmp, %shamt
store i64 %tmp1, i64* %ptr
ret void
}
; rdar://11866926
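; t6: computes (*val + (2^61 - 1)) & (key >> 3). Since 'key >> 3' has its top
; three bits clear, only the low 61 bits of the add are demanded, so the
; 64-bit constant folds to -1 ('decq' on x64, add/adc of -1 on x86).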
define i64 @t6(i64 %key, i64* nocapture %val) nounwind {
; X32-LABEL: t6:
; X32: # BB#0:
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrdl $3, %eax, %esi
; X32-NEXT: movl %eax, %edi
; X32-NEXT: shrl $3, %edi
; X32-NEXT: movl (%ecx), %eax
; X32-NEXT: movl 4(%ecx), %edx
; X32-NEXT: addl $-1, %eax
; X32-NEXT: adcl $-1, %edx
; X32-NEXT: andl %esi, %eax
; X32-NEXT: andl %edi, %edx
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: retl
;
; X64-LABEL: t6:
; X64: # BB#0:
; X64-NEXT: shrq $3, %rdi
; X64-NEXT: movq (%rsi), %rax
; X64-NEXT: decq %rax
; X64-NEXT: andq %rdi, %rax
; X64-NEXT: retq
%shr = lshr i64 %key, 3
%1 = load i64, i64* %val, align 8
%sub = add i64 %1, 2305843009213693951
%and = and i64 %sub, %shr
ret i64 %and
}
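
; big_mask_constant: the case targeted by this patch. Shifting first turns
; the 64-bit mask 0x400000000 into the 32-bit immediate 0x8000000, avoiding
; a 'movabsq' to materialize the constant.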
define i64 @big_mask_constant(i64 %x) nounwind {
; X32-LABEL: big_mask_constant:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $4, %eax
; X32-NEXT: shll $25, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: retl
;
; X64-LABEL: big_mask_constant:
; X64: # BB#0:
; X64-NEXT: shrq $7, %rdi
; X64-NEXT: andl $134217728, %edi # imm = 0x8000000
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
%and = and i64 %x, 17179869184 ; 0x400000000
%sh = lshr i64 %and, 7
ret i64 %sh
}