; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown < %s | FileCheck %s --check-prefix=X32
; RUN: llc -mtriple=x86_64-unknown < %s | FileCheck %s --check-prefix=X64
@array = weak global [4 x i32] zeroinitializer
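
; (x >> 2) & 3 indexes a 4-element i32 array, so the byte offset is x & 12;
; on X32 the shift and the 4-byte index scale should fold into a single andl.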
define i32 @test_lshr_and(i32 %x) {
; X32-LABEL: test_lshr_and:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $12, %eax
; X32-NEXT: movl array(%eax), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_lshr_and:
; X64: # BB#0:
; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; X64-NEXT: shrl $2, %edi
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl array(,%rdi,4), %eax
; X64-NEXT: retq
%tmp2 = lshr i32 %x, 2
%tmp3 = and i32 %tmp2, 3
%tmp4 = getelementptr [4 x i32], [4 x i32]* @array, i32 0, i32 %tmp3
%tmp5 = load i32, i32* %tmp4, align 4
ret i32 %tmp5
}
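
; ashr exact by 3 feeding a 4-byte GEP scale: the exact flag lets the shift
; fold into the pointer scaling, so X32 should end up with a single sar by 1.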
define i32* @test_exact1(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact1:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: sarl %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact1:
; X64: # BB#0:
; X64-NEXT: subl %edi, %esi
; X64-NEXT: sarl $3, %esi
; X64-NEXT: movslq %esi, %rax
; X64-NEXT: leaq (%rdx,%rax,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = ashr exact i32 %sub, 3
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
ret i32* %gep
}
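
; Appears to exercise the same ashr exact fold as test_exact1.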
define i32* @test_exact2(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact2:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: sarl %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact2:
; X64: # BB#0:
; X64-NEXT: subl %edi, %esi
; X64-NEXT: sarl $3, %esi
; X64-NEXT: movslq %esi, %rax
; X64-NEXT: leaq (%rdx,%rax,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = ashr exact i32 %sub, 3
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
ret i32* %gep
}
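
; ashr exact by 2 cancels exactly against the 4-byte GEP scale, so on X32 only
; the subtract and the pointer add should remain.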
define i32* @test_exact3(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact3:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact3:
; X64: # BB#0:
; X64-NEXT: subl %edi, %esi
; X64-NEXT: sarl $2, %esi
; X64-NEXT: movslq %esi, %rax
; X64-NEXT: leaq (%rdx,%rax,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = ashr exact i32 %sub, 2
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
ret i32* %gep
}
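
; Same idea with lshr exact by 3: the scale-by-4 should reduce it to a single
; logical shift right by 1 on X32.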
define i32* @test_exact4(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact4:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrl %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact4:
; X64: # BB#0:
; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = lshr exact i32 %sub, 3
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
ret i32* %gep
}
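
; Appears to duplicate the lshr exact fold checked in test_exact4.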
define i32* @test_exact5(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact5:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrl %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact5:
; X64: # BB#0:
; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = lshr exact i32 %sub, 3
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
ret i32* %gep
}
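
; lshr exact by 2 cancels against the 4-byte scale, so only the subtract and
; the pointer add should remain.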
define i32* @test_exact6(i32 %a, i32 %b, i32* %x) {
; X32-LABEL: test_exact6:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact6:
; X64: # BB#0:
; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: leaq (%rsi,%rdx), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = lshr exact i32 %sub, 2
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
ret i32* %gep
}