llvm-mirror/test/CodeGen/X86/fast-isel-select-cmov.ll
Simon Pilgrim d654e7d40c [X86] Handle COPYs of physregs better (regalloc hints)
Enable enableMultipleCopyHints() on X86.

Original Patch by @jonpa:

While enabling the machine scheduler for SystemZ, it was discovered that, for some reason, a test needed one extra, seemingly needless COPY (test/CodeGen/SystemZ/call-03.ll). Handling that case resulted in this patch, which improves register coalescing by providing not just one copy hint, but a sorted list of copy hints. On SystemZ, this gives ~12,500 fewer register moves on SPEC, as well as marginally less spilling.

Instead of improving just the SystemZ backend, the improvement has been implemented in common code (calculateSpillWeightAndHint()). This causes a lot of test failures, but since this should be a general improvement, I hope that the involved targets will help review the test updates.

Differential Revision: https://reviews.llvm.org/D38128

llvm-svn: 342578
2018-09-19 18:59:08 +00:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=CHECK --check-prefix=NOAVX512
; RUN: llc < %s -fast-isel -fast-isel-abort=1 -mtriple=x86_64-apple-darwin10 -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512

; Test conditional move for the supported types (i16, i32, and i64) and
; condition input (argument or cmp). Currently i8 is not supported.

define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: select_cmov_i16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    testb $1, %dil
; CHECK-NEXT:    cmovew %dx, %si
; CHECK-NEXT:    movzwl %si, %eax
; CHECK-NEXT:    retq
  %1 = select i1 %cond, i16 %a, i16 %b
  ret i16 %1
}

define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
; CHECK-LABEL: select_cmp_cmov_i16:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    cmpw %si, %di
; CHECK-NEXT:    cmovbw %di, %si
; CHECK-NEXT:    movzwl %si, %eax
; CHECK-NEXT:    retq
  %1 = icmp ult i16 %a, %b
  %2 = select i1 %1, i16 %a, i16 %b
  ret i16 %2
}

define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
; CHECK-LABEL: select_cmov_i32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movl %esi, %eax
; CHECK-NEXT:    testb $1, %dil
; CHECK-NEXT:    cmovel %edx, %eax
; CHECK-NEXT:    retq
  %1 = select i1 %cond, i32 %a, i32 %b
  ret i32 %1
}

define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
; CHECK-LABEL: select_cmp_cmov_i32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movl %esi, %eax
; CHECK-NEXT:    cmpl %esi, %edi
; CHECK-NEXT:    cmovbl %edi, %eax
; CHECK-NEXT:    retq
  %1 = icmp ult i32 %a, %b
  %2 = select i1 %1, i32 %a, i32 %b
  ret i32 %2
}

define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
; CHECK-LABEL: select_cmov_i64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq %rsi, %rax
; CHECK-NEXT:    testb $1, %dil
; CHECK-NEXT:    cmoveq %rdx, %rax
; CHECK-NEXT:    retq
  %1 = select i1 %cond, i64 %a, i64 %b
  ret i64 %1
}

define i64 @select_cmp_cmov_i64(i64 %a, i64 %b) {
; CHECK-LABEL: select_cmp_cmov_i64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq %rsi, %rax
; CHECK-NEXT:    cmpq %rsi, %rdi
; CHECK-NEXT:    cmovbq %rdi, %rax
; CHECK-NEXT:    retq
  %1 = icmp ult i64 %a, %b
  %2 = select i1 %1, i64 %a, i64 %b
  ret i64 %2
}
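
The "i8 is not supported" note in the header comment refers to the cmov lowering these checks exercise: x86 has no 8-bit cmovcc encoding, so an i8 select cannot be turned into the kind of cmov sequence checked above. A hypothetical i8 variant (not part of the original test, shown only to illustrate the excluded case) would have to be lowered some other way, and its exact codegen is deliberately not asserted here:

; Hypothetical i8 variant, for illustration only -- not in the original file.
; There is no 8-bit cmovcc on x86, so this select cannot be lowered to the
; cmov pattern the functions above check for; exact codegen is not asserted.
define zeroext i8 @select_cmov_i8(i1 zeroext %cond, i8 zeroext %a, i8 zeroext %b) {
  %1 = select i1 %cond, i8 %a, i8 %b
  ret i8 %1
}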