
[x86] try to make test immune to better div optimization; NFCI

llvm-svn: 345640
Sanjay Patel 2018-10-30 20:44:54 +00:00
parent f849d7b423
commit 80bcaeb444

@@ -308,47 +308,46 @@ bb1:
 ; Use a particular instruction pattern in order to lower to the post-RA pseudo
 ; used to lower SETB into an SBB pattern in order to make sure that kind of
 ; usage of a copied EFLAGS continues to work.
-define void @PR37431(i32* %arg1, i8* %arg2, i8* %arg3) {
+define void @PR37431(i32* %arg1, i8* %arg2, i8* %arg3, i32 %x) nounwind {
 ; X32-LABEL: PR37431:
 ; X32:       # %bb.0: # %entry
+; X32-NEXT:    pushl %edi
 ; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %esi, -8
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl (%eax), %eax
 ; X32-NEXT:    movl %eax, %ecx
 ; X32-NEXT:    sarl $31, %ecx
 ; X32-NEXT:    cmpl %eax, %eax
 ; X32-NEXT:    sbbl %ecx, %eax
-; X32-NEXT:    setb %al
-; X32-NEXT:    sbbb %cl, %cl
+; X32-NEXT:    setb %cl
+; X32-NEXT:    sbbb %dl, %dl
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movb %cl, (%edx)
-; X32-NEXT:    movzbl %al, %eax
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    subl %eax, %ecx
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    idivl %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    movb %dl, (%edi)
+; X32-NEXT:    movzbl %cl, %ecx
+; X32-NEXT:    xorl %edi, %edi
+; X32-NEXT:    subl %ecx, %edi
+; X32-NEXT:    cltd
+; X32-NEXT:    idivl %edi
 ; X32-NEXT:    movb %dl, (%esi)
 ; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 4
+; X32-NEXT:    popl %edi
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: PR37431:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    movq %rdx, %rcx
-; X64-NEXT:    movslq (%rdi), %rax
-; X64-NEXT:    cmpq %rax, %rax
-; X64-NEXT:    sbbb %dl, %dl
-; X64-NEXT:    cmpq %rax, %rax
-; X64-NEXT:    movb %dl, (%rsi)
-; X64-NEXT:    sbbl %esi, %esi
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    xorl %edx, %edx
-; X64-NEXT:    idivl %esi
-; X64-NEXT:    movb %dl, (%rcx)
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:    movq %rdx, %r8
+; X64-NEXT:    movslq (%rdi), %rdx
+; X64-NEXT:    cmpq %rdx, %rax
+; X64-NEXT:    sbbb %cl, %cl
+; X64-NEXT:    cmpq %rdx, %rax
+; X64-NEXT:    movb %cl, (%rsi)
+; X64-NEXT:    sbbl %ecx, %ecx
+; X64-NEXT:    cltd
+; X64-NEXT:    idivl %ecx
+; X64-NEXT:    movb %dl, (%r8)
 ; X64-NEXT:    retq
 entry:
   %tmp = load i32, i32* %arg1
@@ -358,7 +357,7 @@ entry:
   %tmp4 = sub i8 0, %tmp3
   store i8 %tmp4, i8* %arg2
   %tmp5 = sext i8 %tmp4 to i32
-  %tmp6 = srem i32 0, %tmp5
+  %tmp6 = srem i32 %x, %tmp5
   %tmp7 = trunc i32 %tmp6 to i8
   store i8 %tmp7, i8* %arg3
   ret void
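
Note (not part of the commit): the old test was fragile because its dividend was the constant 0, and `srem i32 0, %tmp5` can legally be simplified to 0, which would delete the idiv these check lines depend on. Routing the new `%x` argument into the dividend keeps the division, and the copied-EFLAGS lowering around it, in the output. A minimal sketch of that fold, with a hypothetical function name, as instsimplify/instcombine would perform it:

; Assumed example, not taken from this commit.
define i32 @srem_zero_dividend(i32 %d) {
  %r = srem i32 0, %d   ; 0 % d is 0 whenever the operation is defined
  ret i32 %r            ; simplifies to `ret i32 0`, dropping the divide
}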