; Source: mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-23)
; Commit b8b5181de7: "As discussed in the review, that fold is only valid for
; positive divisors, so while we can negate negative divisors, we have to
; special-case INT_MIN."  llvm-svn: 367294
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,X64

;------------------------------------------------------------------------------;
; Odd divisors
;------------------------------------------------------------------------------;
define i32 @test_srem_odd(i32 %X) nounwind {
; X86-LABEL: test_srem_odd:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $1717986919, %edx # imm = 0x66666667
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    leal (%edx,%edx,4), %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_odd:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    imulq $1717986919, %rcx, %rax # imm = 0x66666667
; X64-NEXT:    movq %rax, %rdx
; X64-NEXT:    shrq $63, %rdx
; X64-NEXT:    sarq $33, %rax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    leal (%rax,%rax,4), %edx
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %edx, %ecx
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 5
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}
define i32 @test_srem_odd_25(i32 %X) nounwind {
; X86-LABEL: test_srem_odd_25:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $1374389535, %edx # imm = 0x51EB851F
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl $3, %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    leal (%edx,%edx,4), %eax
; X86-NEXT:    leal (%eax,%eax,4), %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_odd_25:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    imulq $1374389535, %rcx, %rax # imm = 0x51EB851F
; X64-NEXT:    movq %rax, %rdx
; X64-NEXT:    shrq $63, %rdx
; X64-NEXT:    sarq $35, %rax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    leal (%rax,%rax,4), %eax
; X64-NEXT:    leal (%rax,%rax,4), %edx
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %edx, %ecx
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 25
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

; This is like test_srem_odd, except the divisor has bit 30 set.
define i32 @test_srem_odd_bit30(i32 %X) nounwind {
; X86-LABEL: test_srem_odd_bit30:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $536870911, %edx # imm = 0x1FFFFFFF
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl $27, %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    imull $1073741827, %edx, %edx # imm = 0x40000003
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_odd_bit30:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    movq %rcx, %rax
; X64-NEXT:    shlq $29, %rax
; X64-NEXT:    subq %rcx, %rax
; X64-NEXT:    movq %rax, %rdx
; X64-NEXT:    shrq $63, %rdx
; X64-NEXT:    sarq $59, %rax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    imull $1073741827, %eax, %edx # imm = 0x40000003
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %edx, %ecx
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 1073741827
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

; This is like test_srem_odd, except the divisor has bit 31 set.
define i32 @test_srem_odd_bit31(i32 %X) nounwind {
; X86-LABEL: test_srem_odd_bit31:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $-536870913, %edx # imm = 0xDFFFFFFF
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl $28, %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    imull $-2147483645, %edx, %edx # imm = 0x80000003
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_odd_bit31:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    movq %rcx, %rax
; X64-NEXT:    shlq $29, %rax
; X64-NEXT:    addq %rcx, %rax
; X64-NEXT:    negq %rax
; X64-NEXT:    movq %rax, %rdx
; X64-NEXT:    shrq $63, %rdx
; X64-NEXT:    sarq $60, %rax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    imull $-2147483645, %eax, %edx # imm = 0x80000003
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %edx, %ecx
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 2147483651
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

;------------------------------------------------------------------------------;
; Even divisors
;------------------------------------------------------------------------------;
define i16 @test_srem_even(i16 %X) nounwind {
; X86-LABEL: test_srem_even:
; X86:       # %bb.0:
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    imull $18725, %ecx, %eax # imm = 0x4925
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shrl $31, %edx
; X86-NEXT:    sarl $18, %eax
; X86-NEXT:    addl %edx, %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shll $4, %edx
; X86-NEXT:    subl %eax, %edx
; X86-NEXT:    subl %eax, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpw %dx, %cx
; X86-NEXT:    setne %al
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_even:
; X64:       # %bb.0:
; X64-NEXT:    movswl %di, %ecx
; X64-NEXT:    imull $18725, %ecx, %eax # imm = 0x4925
; X64-NEXT:    movl %eax, %edx
; X64-NEXT:    shrl $31, %edx
; X64-NEXT:    sarl $18, %eax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    movl %eax, %edx
; X64-NEXT:    shll $4, %edx
; X64-NEXT:    subl %eax, %edx
; X64-NEXT:    subl %eax, %edx
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpw %dx, %cx
; X64-NEXT:    setne %al
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    retq
  %srem = srem i16 %X, 14
  %cmp = icmp ne i16 %srem, 0
  %ret = zext i1 %cmp to i16
  ret i16 %ret
}
define i32 @test_srem_even_100(i32 %X) nounwind {
; X86-LABEL: test_srem_even_100:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $1374389535, %edx # imm = 0x51EB851F
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl $5, %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    imull $100, %edx, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_even_100:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    imulq $1374389535, %rcx, %rax # imm = 0x51EB851F
; X64-NEXT:    movq %rax, %rdx
; X64-NEXT:    shrq $63, %rdx
; X64-NEXT:    sarq $37, %rax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    imull $100, %eax, %edx
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %edx, %ecx
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 100
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

; This is like test_srem_even, except the divisor has bit 30 set.
define i32 @test_srem_even_bit30(i32 %X) nounwind {
; X86-LABEL: test_srem_even_bit30:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $1073741721, %edx # imm = 0x3FFFFF99
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl $28, %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    imull $1073741928, %edx, %edx # imm = 0x40000068
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_even_bit30:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    imulq $1073741721, %rcx, %rax # imm = 0x3FFFFF99
; X64-NEXT:    movq %rax, %rdx
; X64-NEXT:    shrq $63, %rdx
; X64-NEXT:    sarq $60, %rax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    imull $1073741928, %eax, %edx # imm = 0x40000068
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %edx, %ecx
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 1073741928
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

; This is like test_srem_even, except the divisor has bit 31 set.
define i32 @test_srem_even_bit31(i32 %X) nounwind {
; X86-LABEL: test_srem_even_bit31:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $2147483545, %edx # imm = 0x7FFFFF99
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    subl %ecx, %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl $30, %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    imull $-2147483546, %edx, %edx # imm = 0x80000066
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_even_bit31:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    imulq $2147483545, %rcx, %rax # imm = 0x7FFFFF99
; X64-NEXT:    shrq $32, %rax
; X64-NEXT:    subl %ecx, %eax
; X64-NEXT:    movl %eax, %edx
; X64-NEXT:    shrl $31, %edx
; X64-NEXT:    sarl $30, %eax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    imull $-2147483546, %eax, %edx # imm = 0x80000066
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %edx, %ecx
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 2147483750
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

;------------------------------------------------------------------------------;
; Special case
;------------------------------------------------------------------------------;

; 'NE' predicate is fine too.
define i32 @test_srem_odd_setne(i32 %X) nounwind {
; X86-LABEL: test_srem_odd_setne:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $1717986919, %edx # imm = 0x66666667
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    leal (%edx,%edx,4), %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    setne %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_odd_setne:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    imulq $1717986919, %rcx, %rax # imm = 0x66666667
; X64-NEXT:    movq %rax, %rdx
; X64-NEXT:    shrq $63, %rdx
; X64-NEXT:    sarq $33, %rax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    leal (%rax,%rax,4), %edx
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %edx, %ecx
; X64-NEXT:    setne %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 5
  %cmp = icmp ne i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

; The fold is only valid for positive divisors; negative ones should be negated.
define i32 @test_srem_negative_odd(i32 %X) nounwind {
; X86-LABEL: test_srem_negative_odd:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $-1717986919, %edx # imm = 0x99999999
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    leal (%edx,%edx,4), %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    setne %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_negative_odd:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    imulq $-1717986919, %rcx, %rax # imm = 0x99999999
; X64-NEXT:    movq %rax, %rdx
; X64-NEXT:    shrq $63, %rdx
; X64-NEXT:    sarq $33, %rax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    leal (%rax,%rax,4), %edx
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    addl %edx, %ecx
; X64-NEXT:    setne %al
; X64-NEXT:    retq
  %srem = srem i32 %X, -5
  %cmp = icmp ne i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}
define i32 @test_srem_negative_even(i32 %X) nounwind {
; X86-LABEL: test_srem_negative_even:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $1840700269, %edx # imm = 0x6DB6DB6D
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    imull %edx
; X86-NEXT:    subl %ecx, %edx
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    shrl $31, %eax
; X86-NEXT:    sarl $3, %edx
; X86-NEXT:    addl %eax, %edx
; X86-NEXT:    imull $-14, %edx, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    setne %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_negative_even:
; X64:       # %bb.0:
; X64-NEXT:    movslq %edi, %rcx
; X64-NEXT:    imulq $1840700269, %rcx, %rax # imm = 0x6DB6DB6D
; X64-NEXT:    shrq $32, %rax
; X64-NEXT:    subl %ecx, %eax
; X64-NEXT:    movl %eax, %edx
; X64-NEXT:    shrl $31, %edx
; X64-NEXT:    sarl $3, %eax
; X64-NEXT:    addl %edx, %eax
; X64-NEXT:    imull $-14, %eax, %edx
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %edx, %ecx
; X64-NEXT:    setne %al
; X64-NEXT:    retq
  %srem = srem i32 %X, -14
  %cmp = icmp ne i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

;------------------------------------------------------------------------------;
; Negative tests
;------------------------------------------------------------------------------;

; We can lower remainder of division by one much better elsewhere.
define i32 @test_srem_one(i32 %X) nounwind {
; CHECK-LABEL: test_srem_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    ret{{[l|q]}}
  %srem = srem i32 %X, 1
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

; We can lower remainder of division by powers of two much better elsewhere.
define i32 @test_srem_pow2(i32 %X) nounwind {
; X86-LABEL: test_srem_pow2:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %ecx, %edx
; X86-NEXT:    sarl $31, %edx
; X86-NEXT:    shrl $28, %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    andl $-16, %edx
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    cmpl %edx, %ecx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_pow2:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %ecx
; X64-NEXT:    sarl $31, %ecx
; X64-NEXT:    shrl $28, %ecx
; X64-NEXT:    addl %edi, %ecx
; X64-NEXT:    andl $-16, %ecx
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    cmpl %ecx, %edi
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 16
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

; The fold is only valid for positive divisors, and we can't negate INT_MIN.
define i32 @test_srem_int_min(i32 %X) nounwind {
; X86-LABEL: test_srem_int_min:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %ecx, %edx
; X86-NEXT:    sarl $31, %edx
; X86-NEXT:    shrl %edx
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    andl $-2147483648, %edx # imm = 0x80000000
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    addl %ecx, %edx
; X86-NEXT:    sete %al
; X86-NEXT:    retl
;
; X64-LABEL: test_srem_int_min:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %ecx
; X64-NEXT:    sarl $31, %ecx
; X64-NEXT:    shrl %ecx
; X64-NEXT:    addl %edi, %ecx
; X64-NEXT:    andl $-2147483648, %ecx # imm = 0x80000000
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:    addl %edi, %ecx
; X64-NEXT:    sete %al
; X64-NEXT:    retq
  %srem = srem i32 %X, 2147483648
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}

; We can lower remainder of division by all-ones much better elsewhere.
define i32 @test_srem_allones(i32 %X) nounwind {
; CHECK-LABEL: test_srem_allones:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    ret{{[l|q]}}
  %srem = srem i32 %X, 4294967295
  %cmp = icmp eq i32 %srem, 0
  %ret = zext i1 %cmp to i32
  ret i32 %ret
}