
[DAGCombiner] Add X / X -> 1 & X % X -> 0 folds (test tweaks)

Adjust tests to avoid the X / X -> 1 & X % X -> 0 folds while keeping their original purposes.

Differential Revision: https://reviews.llvm.org/D50636

llvm-svn: 340916
Simon Pilgrim 2018-08-29 11:18:14 +00:00
parent fe3bd488d4
commit 4d12fd47a1
3 changed files with 63 additions and 50 deletions
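
For context: with D50636 applied, the DAGCombiner folds a division or remainder whose two operands are the same value, so a test that wrote "X op X" merely to exercise a divide silently loses its division during instruction selection. A minimal sketch of the two folds (illustrative function names, not part of this commit):

define i16 @div_self(i16 %x) {
entry:
  ; X / X folds to 1 (division by zero is undefined behaviour,
  ; so the fold is safe even when %x could be 0).
  %d = sdiv i16 %x, %x
  ret i16 %d
}

define i16 @rem_self(i16 %x) {
entry:
  ; X % X folds to 0 for the same reason.
  %r = srem i16 %x, %x
  ret i16 %r
}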


@@ -433,9 +433,10 @@ entry:
 ; CHECK: call #__mspabi_divi
   %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = sdiv i16 %0, %0
-  ret i16 %1
+  %1 = load volatile i16, i16* @g_i16, align 8
+  %2 = sdiv i16 %0, %1
+  ret i16 %2
 }
 define i32 @divli() #0 {
@@ -444,9 +445,10 @@ entry:
 ; CHECK: call #__mspabi_divli
   %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = sdiv i32 %0, %0
-  ret i32 %1
+  %1 = load volatile i32, i32* @g_i32, align 8
+  %2 = sdiv i32 %0, %1
+  ret i32 %2
 }
 define i64 @divlli() #0 {
@@ -455,9 +457,10 @@ entry:
 ; CHECK: call #__mspabi_divlli
   %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = sdiv i64 %0, %0
-  ret i64 %1
+  %1 = load volatile i64, i64* @g_i64, align 8
+  %2 = sdiv i64 %0, %1
+  ret i64 %2
 }
 define i16 @divu() #0 {
@@ -466,9 +469,10 @@ entry:
 ; CHECK: call #__mspabi_divu
   %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = udiv i16 %0, %0
-  ret i16 %1
+  %1 = load volatile i16, i16* @g_i16, align 8
+  %2 = udiv i16 %0, %1
+  ret i16 %2
 }
 define i32 @divul() #0 {
@@ -477,9 +481,10 @@ entry:
 ; CHECK: call #__mspabi_divul
   %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = udiv i32 %0, %0
-  ret i32 %1
+  %1 = load volatile i32, i32* @g_i32, align 8
+  %2 = udiv i32 %0, %1
+  ret i32 %2
 }
 define i64 @divull() #0 {
@@ -488,9 +493,10 @@ entry:
 ; CHECK: call #__mspabi_divull
   %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = udiv i64 %0, %0
-  ret i64 %1
+  %1 = load volatile i64, i64* @g_i64, align 8
+  %2 = udiv i64 %0, %1
+  ret i64 %2
 }
 define i16 @remi() #0 {
@@ -499,9 +505,10 @@ entry:
 ; CHECK: call #__mspabi_remi
   %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = srem i16 %0, %0
-  ret i16 %1
+  %1 = load volatile i16, i16* @g_i16, align 8
+  %2 = srem i16 %0, %1
+  ret i16 %2
 }
 define i32 @remli() #0 {
@@ -510,9 +517,10 @@ entry:
 ; CHECK: call #__mspabi_remli
   %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = srem i32 %0, %0
-  ret i32 %1
+  %1 = load volatile i32, i32* @g_i32, align 8
+  %2 = srem i32 %0, %1
+  ret i32 %2
 }
 define i64 @remlli() #0 {
@@ -521,9 +529,10 @@ entry:
 ; CHECK: call #__mspabi_remlli
   %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = srem i64 %0, %0
-  ret i64 %1
+  %1 = load volatile i64, i64* @g_i64, align 8
+  %2 = srem i64 %0, %1
+  ret i64 %2
 }
 define i16 @remu() #0 {
@@ -532,9 +541,10 @@ entry:
 ; CHECK: call #__mspabi_remu
   %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = urem i16 %0, %0
-  ret i16 %1
+  %1 = load volatile i16, i16* @g_i16, align 8
+  %2 = urem i16 %0, %1
+  ret i16 %2
 }
 define i32 @remul() #0 {
@@ -543,9 +553,10 @@ entry:
 ; CHECK: call #__mspabi_remul
   %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = urem i32 %0, %0
-  ret i32 %1
+  %1 = load volatile i32, i32* @g_i32, align 8
+  %2 = urem i32 %0, %1
+  ret i32 %2
 }
 define i64 @remull() #0 {
@@ -554,9 +565,10 @@ entry:
 ; CHECK: call #__mspabi_remull
   %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = urem i64 %0, %0
-  ret i64 %1
+  %1 = load volatile i64, i64* @g_i64, align 8
+  %2 = urem i64 %0, %1
+  ret i64 %2
 }
 define i16 @mpyi() #0 {

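The MSP430 changes above all use the usual idiom for keeping a libcall alive: a second volatile load of the same global. Volatile loads may not be merged, so the two operands of the sdiv/udiv/srem/urem are distinct values, the new folds cannot fire, and each function still exercises exactly one __mspabi_* libcall as before. A minimal sketch of the idiom (same shape as the rewritten tests, assuming the @g_i16 global from this file):

define i16 @keep_the_divide() {
entry:
  ; Two volatile loads of one address cannot be CSE'd into a single
  ; value, so this is no longer "X / X" as far as DAGCombine can tell.
  %a = load volatile i16, i16* @g_i16, align 8
  %b = load volatile i16, i16* @g_i16, align 8
  %q = sdiv i16 %a, %b
  ret i16 %q
}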

@@ -4,10 +4,7 @@
 define void @pr32372(i8*) {
 ; CHECK-LABEL: pr32372:
 ; CHECK: # %bb.0: # %BB
-; CHECK-NEXT: llc %r1, 0(%r2)
 ; CHECK-NEXT: mvhhi 0(%r1), -3825
-; CHECK-NEXT: llill %r0, 0
-; CHECK-NEXT: dlr %r0, %r1
 ; CHECK-NEXT: .LBB0_1: # %CF251
 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: j .LBB0_1
@@ -15,7 +12,8 @@ BB:
   %L = load i8, i8* %0
   store i16 -3825, i16* undef
   %L5 = load i8, i8* %0
-  %B9 = urem i8 %L5, %L
+  %B8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %L5, i8 %L)
+  %B9 = extractvalue {i8, i1} %B8, 0
   %I107 = insertelement <8 x i8> zeroinitializer, i8 %B9, i32 7
   %ZE141 = zext i8 %L5 to i16
   br label %CF251
@@ -29,3 +27,5 @@ CF258: ; preds = %CF251
   %Shuff230 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
   br label %CF251
 }
+
+declare {i8, i1} @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone

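In the SystemZ test above, the two non-volatile loads of %0 are CSE'd to the same value, so the original %B9 = urem i8 %L5, %L would become X % X and fold to 0, changing the DAG this reproducer was constructed to exercise. Swapping in llvm.umul.with.overflow keeps both loaded values feeding a real two-operand operation that the new combines leave alone. A minimal sketch of the intrinsic (illustrative names):

define i8 @mul_overflow(i8 %a, i8 %b) {
entry:
  ; The intrinsic returns the product and an overflow flag as a pair;
  ; the i8 result is extracted from the first element.
  %pair = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
  %prod = extractvalue { i8, i1 } %pair, 0
  ret i8 %prod
}

declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8)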

@@ -187,7 +187,7 @@ define void @g() {
 ; X64-NEXT: movb (%rax), %al
 ; X64-NEXT: movzbl %al, %eax
 ; X64-NEXT: # kill: def $eax killed $eax def $ax
-; X64-NEXT: divb %al
+; X64-NEXT: divb (%rax)
 ; X64-NEXT: movl %eax, %r8d
 ; X64-NEXT: xorl %eax, %eax
 ; X64-NEXT: xorl %edx, %edx
@@ -239,47 +239,47 @@ define void @g() {
 ; X86-NEXT: .cfi_offset %edi, -16
 ; X86-NEXT: .cfi_offset %ebx, -12
 ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: shll $30, %eax
-; X86-NEXT: sarl $30, %eax
+; X86-NEXT: movl %esi, %ecx
+; X86-NEXT: shll $30, %ecx
+; X86-NEXT: sarl $30, %ecx
 ; X86-NEXT: movl (%esp), %edi
-; X86-NEXT: movb (%eax), %bl
-; X86-NEXT: pushl %eax
+; X86-NEXT: movb (%eax), %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: # kill: def $eax killed $eax def $ax
+; X86-NEXT: divb (%eax)
+; X86-NEXT: movl %eax, %ebx
+; X86-NEXT: pushl %ecx
 ; X86-NEXT: pushl %edi
 ; X86-NEXT: pushl $0
 ; X86-NEXT: pushl $0
 ; X86-NEXT: calll __moddi3
 ; X86-NEXT: addl $16, %esp
 ; X86-NEXT: andl $3, %edx
-; X86-NEXT: testb %al, %al
-; X86-NEXT: setne (%eax)
 ; X86-NEXT: cmpl %eax, %edi
 ; X86-NEXT: sbbl %edx, %esi
-; X86-NEXT: setb %dl
-; X86-NEXT: setae %dh
+; X86-NEXT: setae %dl
+; X86-NEXT: sbbb %cl, %cl
 ; X86-NEXT: testb %al, %al
-; X86-NEXT: setne %bh
-; X86-NEXT: movzbl %bl, %eax
-; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: subb %dl, %cl
-; X86-NEXT: # kill: def $eax killed $eax def $ax
-; X86-NEXT: divb %bl
-; X86-NEXT: negb %dh
+; X86-NEXT: setne (%eax)
 ; X86-NEXT: cmpb %al, %al
+; X86-NEXT: setne %ch
+; X86-NEXT: negb %dl
+; X86-NEXT: cmpb %bl, %al
 ; X86-NEXT: setle %al
 ; X86-NEXT: negb %al
 ; X86-NEXT: cbtw
-; X86-NEXT: idivb %dh
+; X86-NEXT: idivb %dl
 ; X86-NEXT: movsbl %ah, %eax
+; X86-NEXT: movzbl %al, %eax
 ; X86-NEXT: andl $1, %eax
 ; X86-NEXT: shll $3, %eax
 ; X86-NEXT: negl %eax
-; X86-NEXT: negb %bh
+; X86-NEXT: negb %ch
 ; X86-NEXT: leal -8(%esp,%eax), %eax
 ; X86-NEXT: movl %eax, (%eax)
 ; X86-NEXT: movl %ecx, %eax
 ; X86-NEXT: cbtw
-; X86-NEXT: idivb %bh
+; X86-NEXT: idivb %ch
 ; X86-NEXT: movsbl %ah, %eax
 ; X86-NEXT: andb $1, %al
 ; X86-NEXT: movb %al, (%eax)
@@ -295,8 +295,9 @@ BB:
   %L17 = load i34, i34* %A30
   %B20 = and i34 %L17, -1
   %G2 = getelementptr i34, i34* %A30, i1 true
-  %L10 = load i8, i8* undef
-  %B6 = udiv i8 %L10, %L10
+  %L10 = load volatile i8, i8* undef
+  %L11 = load volatile i8, i8* undef
+  %B6 = udiv i8 %L10, %L11
  %C15 = icmp eq i8 undef, 0
   %B8 = srem i34 0, %B20
   %C2 = icmp ule i34 %B8, %B20