llvm-mirror/test/CodeGen/X86/peep-setb.ll
commit d654e7d40c by Simon Pilgrim: [X86] Handle COPYs of physregs better (regalloc hints)
Enable enableMultipleCopyHints() on X86.

Original Patch by @jonpa:

While enabling the mischeduler for SystemZ, it was discovered that for some reason a test needed one extra, seemingly needless COPY (test/CodeGen/SystemZ/call-03.ll). Handling that case resulted in this patch, which improves register coalescing by providing not just one copy hint, but a sorted list of copy hints. On SystemZ, this gives ~12,500 fewer register moves on SPEC, as well as marginally less spilling.

Instead of improving just the SystemZ backend, the improvement has been implemented in common code (calculateSpillWeightAndHint()). This causes a lot of test failures, but since this should be a general improvement, I hope that the involved targets will help and review the test updates.
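
As a rough illustration of the idea, here is a minimal, self-contained C++ sketch; CopyHint and sortedCopyHints are invented names for this sketch, not the actual calculateSpillWeightAndHint() implementation:

    #include <algorithm>
    #include <vector>

    // Hypothetical: one copy-related register plus the accumulated block
    // frequency of the COPY instructions connecting it to the vreg.
    struct CopyHint {
      unsigned Reg;
      float Weight;
    };

    // Instead of keeping only the single best copy hint, return every
    // copy-related register ordered by how much honoring the hint would
    // save, so the allocator has fallbacks if the best candidate is taken.
    std::vector<unsigned> sortedCopyHints(std::vector<CopyHint> Hints) {
      std::sort(Hints.begin(), Hints.end(),
                [](const CopyHint &A, const CopyHint &B) {
                  if (A.Weight != B.Weight)
                    return A.Weight > B.Weight; // heavier copies first
                  return A.Reg < B.Reg;         // deterministic tie-break
                });
      std::vector<unsigned> Order;
      Order.reserve(Hints.size());
      for (const CopyHint &H : Hints)
        Order.push_back(H.Reg);
      return Order;
    }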

Differential Revision: https://reviews.llvm.org/D38128

llvm-svn: 342578
2018-09-19 18:59:08 +00:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
; These tests use cmp+adc/sbb in place of test+set+add/sub. Should this transform
; be enabled by micro-architecture rather than as part of generic lowering/isel?
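
; Tests 1-3: b + zext(a <u b). The compare computes a - b and sets CF exactly
; when a <u b, so the conditional +1 is folded into a single adc-with-0
; instead of materializing the flag with setb and adding it.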
define i8 @test1(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: test1:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: cmpb %al, %dil
; CHECK-NEXT: adcb $0, %al
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%cmp = icmp ult i8 %a, %b
%cond = zext i1 %cmp to i8
%add = add i8 %cond, %b
ret i8 %add
}

define i32 @test2(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test2:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: adcl $0, %eax
; CHECK-NEXT: retq
%cmp = icmp ult i32 %a, %b
%cond = zext i1 %cmp to i32
%add = add i32 %cond, %b
ret i32 %add
}

define i64 @test3(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test3:
; CHECK: # %bb.0:
; CHECK-NEXT: movq %rsi, %rax
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: adcq $0, %rax
; CHECK-NEXT: retq
%cmp = icmp ult i64 %a, %b
%conv = zext i1 %cmp to i64
%add = add i64 %conv, %b
ret i64 %add
}
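
; Tests 4-6: b - zext(a <u b). The same fold in the other direction: the
; borrow produced by the compare is consumed directly by sbb-with-0 (b - CF)
; instead of setb + sub.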
define i8 @test4(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: test4:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: cmpb %al, %dil
; CHECK-NEXT: sbbb $0, %al
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%cmp = icmp ult i8 %a, %b
%cond = zext i1 %cmp to i8
%sub = sub i8 %b, %cond
ret i8 %sub
}

define i32 @test5(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test5:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: sbbl $0, %eax
; CHECK-NEXT: retq
%cmp = icmp ult i32 %a, %b
%cond = zext i1 %cmp to i32
%sub = sub i32 %b, %cond
ret i32 %sub
}

define i64 @test6(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test6:
; CHECK: # %bb.0:
; CHECK-NEXT: movq %rsi, %rax
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: sbbq $0, %rax
; CHECK-NEXT: retq
%cmp = icmp ult i64 %a, %b
%conv = zext i1 %cmp to i64
%sub = sub i64 %b, %conv
ret i64 %sub
}
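
; Tests 7-9: b - sext(a <u b). The sign-extended i1 is 0 or -1, and since
; b - (-1) == b + 1 this reduces to the same cmp + adc-with-0 pattern as
; tests 1-3.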
define i8 @test7(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: test7:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: cmpb %al, %dil
; CHECK-NEXT: adcb $0, %al
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%cmp = icmp ult i8 %a, %b
%cond = sext i1 %cmp to i8
%sub = sub i8 %b, %cond
ret i8 %sub
}

define i32 @test8(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test8:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: adcl $0, %eax
; CHECK-NEXT: retq
%cmp = icmp ult i32 %a, %b
%cond = sext i1 %cmp to i32
%sub = sub i32 %b, %cond
ret i32 %sub
}

define i64 @test9(i64 %a, i64 %b) nounwind {
; CHECK-LABEL: test9:
; CHECK: # %bb.0:
; CHECK-NEXT: movq %rsi, %rax
; CHECK-NEXT: cmpq %rsi, %rdi
; CHECK-NEXT: adcq $0, %rax
; CHECK-NEXT: retq
%cmp = icmp ult i64 %a, %b
%conv = sext i1 %cmp to i64
%sub = sub i64 %b, %conv
ret i64 %sub
}