e3e67d4a0a
This changes the SelectionDAG scheduling preference to source order. Soon, the SelectionDAG scheduler can be bypassed, saving a nice chunk of compile time.

Performance differences that result from this change are often a consequence of register coalescing. The register coalescer is far from perfect; bugs can be filed for deficiencies.

On x86 SandyBridge/Haswell, the source-order schedule is often preserved, particularly for small blocks. Register pressure is generally improved over the SD scheduler's ILP mode. However, we are still able to handle large blocks that require latency hiding, unlike the SD scheduler's BURR mode. The MI scheduler also attempts to discover the critical path in single-block loops and adjust heuristics accordingly.

The MI scheduler relies on the new machine model. This is currently unimplemented for AVX, so we may not be generating the best code yet.

Unit tests are updated so they don't depend on SD scheduling heuristics.

llvm-svn: 192750
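(For comparison, both schedulers could be selected explicitly from the llc command line; as far as I recall, -pre-RA-sched=source picked the source-order SelectionDAG scheduler and -enable-misched turned on the MachineScheduler. Those flag names are recalled from the toolchain of this period rather than taken from this commit.)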
; RUN: llc < %s -mcpu=generic -march=x86 | FileCheck %s -check-prefix=X32
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-win32 | FileCheck %s -check-prefix=X64

; The immediate can be encoded in a smaller way if the
; instruction is a sub instead of an add: +128 does not fit in a
; sign-extended 8-bit immediate, but -128 does.

define i32 @test1(i32 inreg %a) nounwind {
  %b = add i32 %a, 128
  ret i32 %b
; X32: subl $-128, %eax
; X64: subl $-128,
}
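; test2: +2147483648 does not fit in a sign-extended 32-bit immediate, but
; -2147483648 does, so the 64-bit add is expected to become a subq; the 32-bit
; lowering just adds the constant to the low half.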
define i64 @test2(i64 inreg %a) nounwind {
  %b = add i64 %a, 2147483648
  ret i64 %b
; X32: addl $-2147483648, %eax
; X64: subq $-2147483648,
}
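; test3: the same trick at the 8-bit immediate width, but only for the 64-bit
; target; the 32-bit lowering keeps addl $128 because the high half is combined
; with the carry out of the low-half add, and a sub would not set the carry
; flag the same way.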
define i64 @test3(i64 inreg %a) nounwind {
  %b = add i64 %a, 128
  ret i64 %b

; X32: addl $128, %eax
; X64: subq $-128,
}

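; test4: signed add-with-overflow becomes a plain addl whose overflow flag is
; consumed by jo. The A0/A1 captures in the checks match either the SysV
; (edi/esi) or the Win64 (ecx/edx) argument registers, so one set of 64-bit
; checks covers both x86_64 triples.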
define i1 @test4(i32 %v1, i32 %v2, i32* %X) nounwind {
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %normal

normal:
  store i32 0, i32* %X
  br label %overflow

overflow:
  ret i1 false

; X32-LABEL: test4:
; X32: addl
; X32-NEXT: jo

; X64-LABEL: test4:
; X64: addl %e[[A1:si|dx]], %e[[A0:di|cx]]
; X64-NEXT: jo
}

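; test5: the unsigned flavor of the same pattern; the carry flag is tested, so
; the branch is jb rather than jo.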
define i1 @test5(i32 %v1, i32 %v2, i32* %X) nounwind {
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %carry, label %normal

normal:
  store i32 0, i32* %X
  br label %carry

carry:
  ret i1 false

; X32-LABEL: test5:
; X32: addl
; X32-NEXT: jb

; X64-LABEL: test5:
; X64: addl %e[[A1]], %e[[A0]]
; X64-NEXT: jb
}

declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)

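; test6: a 32-bit value zero-extended and shifted into the high half of an
; i64. On x86-64 this should survive as shlq + leaq; on x86-32 the shifted
; operand simply becomes the high word, so only the high half gets an addl.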
define i64 @test6(i64 %A, i32 %B) nounwind {
  %tmp12 = zext i32 %B to i64 ; <i64> [#uses=1]
  %tmp3 = shl i64 %tmp12, 32 ; <i64> [#uses=1]
  %tmp5 = add i64 %tmp3, %A ; <i64> [#uses=1]
  ret i64 %tmp5

; X32-LABEL: test6:
; X32: movl 4(%esp), %eax
; X32-NEXT: movl 12(%esp), %edx
; X32-NEXT: addl 8(%esp), %edx
; X32-NEXT: ret

; X64-LABEL: test6:
; X64: shlq $32, %r[[A1]]
; X64: leaq (%r[[A1]],%r[[A0]]), %rax
; X64: ret
}

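; test7: the uadd.with.overflow pair is returned directly, so the overflow bit
; has to be materialized with setb into the second return register instead of
; feeding a branch.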
define {i32, i1} @test7(i32 %v1, i32 %v2) nounwind {
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  ret {i32, i1} %t
}

; X64-LABEL: test7:
; X64: addl %e[[A1]], %e
; X64-NEXT: setb %dl
; X64: ret

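; test8 (PR5443): the i65 widening idiom for computing a 64-bit sum plus its
; carry-out should be recognized and lowered to addq + setb instead of being
; legalized as multi-word arithmetic.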
; PR5443
define {i64, i1} @test8(i64 %left, i64 %right) nounwind {
entry:
  %extleft = zext i64 %left to i65
  %extright = zext i64 %right to i65
  %sum = add i65 %extleft, %extright
  %res.0 = trunc i65 %sum to i64
  %overflow = and i65 %sum, -18446744073709551616
  %res.1 = icmp ne i65 %overflow, 0
  %final0 = insertvalue {i64, i1} undef, i64 %res.0, 0
  %final1 = insertvalue {i64, i1} %final0, i1 %res.1, 1
  ret {i64, i1} %final1
}

; X64-LABEL: test8:
; X64: addq
; X64-NEXT: setb
; X64: ret

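; test9: adding the sign-extended i1 (0 or -1) is the same as subtracting the
; zero-extended bit, so the expected sequence is cmpl/sete/subl with no
; explicit sign extension.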
define i32 @test9(i32 %x, i32 %y) nounwind readnone {
  %cmp = icmp eq i32 %x, 10
  %sub = sext i1 %cmp to i32
  %cond = add i32 %sub, %y
  ret i32 %cond
; X64-LABEL: test9:
; X64: cmpl $10
; X64: sete
; X64: subl
; X64: ret
}

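; test10: only the overflow bit of an add of constant 1 is used, so the
; expected lowering is incl followed by seto on both targets.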
define i1 @test10(i32 %x) nounwind {
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 1)
  %obit = extractvalue {i32, i1} %t, 1
  ret i1 %obit

; X32-LABEL: test10:
; X32: incl
; X32-NEXT: seto

; X64-LABEL: test10:
; X64: incl
; X64-NEXT: seto
}