Mirror of https://github.com/RPCS3/llvm-mirror.git
d654e7d40c
Enable enableMultipleCopyHints() on X86.

Original patch by @jonpa:

While enabling the machine scheduler for SystemZ, it was discovered that one test (test/CodeGen/SystemZ/call-03.ll) needed an extra, seemingly needless COPY. Handling that case resulted in this patch, which improves register coalescing by providing not just one copy hint but a sorted list of copy hints. On SystemZ, this gives roughly 12,500 fewer register moves on SPEC, as well as marginally less spilling.

Instead of improving just the SystemZ backend, the improvement has been implemented in common code (calculateSpillWeightAndHint()). This causes a lot of test failures, but since this should be a general improvement, I hope the involved targets will help review the test updates.

Differential Revision: https://reviews.llvm.org/D38128

llvm-svn: 342578
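The central idea described above, keeping a weighted list of copy hints per virtual register and trying them in order instead of remembering only a single hint, can be sketched with a small standalone example. This is a simplified illustration only, not LLVM's actual calculateSpillWeightAndHint() code; the CopyHint struct, the sortedHints() helper, and the weights are hypothetical.

#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical model: each copy connecting a virtual register to another
// register is a hint; its weight reflects how often the copy executes
// (e.g. scaled by loop depth or block frequency).
struct CopyHint {
  unsigned Reg;  // the other register involved in the copy
  float Weight;  // accumulated frequency of copies to/from Reg
};

// Return the hints sorted so the most profitable coalescing target comes
// first. With a single hint (the old behaviour) a useful secondary target
// is lost; with a sorted list the allocator can fall back to the next-best
// register when the best one is unavailable.
static std::vector<CopyHint> sortedHints(std::vector<CopyHint> Hints) {
  std::sort(Hints.begin(), Hints.end(),
            [](const CopyHint &A, const CopyHint &B) {
              return A.Weight > B.Weight; // prefer hotter copies
            });
  return Hints;
}

int main() {
  // Two copies involve the same virtual register: one inside a loop (hot),
  // one outside the loop (cold). The hot hint should be tried first.
  std::vector<CopyHint> Hints = {{/*Reg=*/5, /*Weight=*/1.0f},
                                 {/*Reg=*/42, /*Weight=*/8.0f}};
  for (const CopyHint &H : sortedHints(Hints))
    std::printf("try register %u (weight %.1f)\n", H.Reg, H.Weight);
  return 0;
}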
46 lines · 1.2 KiB · LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64

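; Negating a logical shift right by the sign-bit position (the lshr yields
; 0 or 1, so the negation yields 0 or -1) is equivalent to an arithmetic
; shift right by 31, so the checks expect a single sarl.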
define i32 @neg_lshr_signbit(i32 %x) {
; X64-LABEL: neg_lshr_signbit:
; X64:       # %bb.0:
; X64-NEXT:    movl %edi, %eax
; X64-NEXT:    sarl $31, %eax
; X64-NEXT:    retq
  %sh = lshr i32 %x, 31
  %neg = sub i32 0, %sh
  ret i32 %neg
}

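; The reverse direction: negating an arithmetic shift right by 63 (which
; yields 0 or -1) is equivalent to a logical shift right by 63, so the
; checks expect shrq rather than sarq.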
define i64 @neg_ashr_signbit(i64 %x) {
; X64-LABEL: neg_ashr_signbit:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    shrq $63, %rax
; X64-NEXT:    retq
  %sh = ashr i64 %x, 63
  %neg = sub i64 0, %sh
  ret i64 %neg
}

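; Vector form of the ashr case: per-lane -(x ashr 31) becomes a logical
; shift right, so a single psrld is expected with no separate negation.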
define <4 x i32> @neg_ashr_signbit_vec(<4 x i32> %x) {
; X64-LABEL: neg_ashr_signbit_vec:
; X64:       # %bb.0:
; X64-NEXT:    psrld $31, %xmm0
; X64-NEXT:    retq
  %sh = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %neg = sub <4 x i32> zeroinitializer, %sh
  ret <4 x i32> %neg
}

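; Vector form of the lshr case: per-lane -(x lshr 15) becomes an arithmetic
; shift right, so a single psraw is expected.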
define <8 x i16> @neg_lshr_signbit_vec(<8 x i16> %x) {
; X64-LABEL: neg_lshr_signbit_vec:
; X64:       # %bb.0:
; X64-NEXT:    psraw $15, %xmm0
; X64-NEXT:    retq
  %sh = lshr <8 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %neg = sub <8 x i16> zeroinitializer, %sh
  ret <8 x i16> %neg
}