; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
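; Masking the negated zero-extended bool with 1 gives back the bool itself,
; so only a register move and an 'and $1' to clear the upper bits are expected.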
define i32 @mask_negated_zext_bool1(i1 %x) {
; CHECK-LABEL: mask_negated_zext_bool1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
  %ext = zext i1 %x to i32
  %neg = sub i32 0, %ext
  %and = and i32 %neg, 1
  ret i32 %and
}
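; With a zeroext argument the incoming register already holds 0 or 1,
; so the whole expression folds to a plain register move.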
define i32 @mask_negated_zext_bool2(i1 zeroext %x) {
; CHECK-LABEL: mask_negated_zext_bool2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %ext = zext i1 %x to i32
  %neg = sub i32 0, %ext
  %and = and i32 %neg, 1
  ret i32 %and
}
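; Vector form of the same fold: a single 'and' with the constant <1,1,1,1>
; taken from a RIP-relative constant-pool load.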
define <4 x i32> @mask_negated_zext_bool_vec(<4 x i1> %x) {
; CHECK-LABEL: mask_negated_zext_bool_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
  %ext = zext <4 x i1> %x to <4 x i32>
  %neg = sub <4 x i32> zeroinitializer, %ext
  %and = and <4 x i32> %neg, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}
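; sub 0, (sext i1) yields 0 or 1, so masking with 1 again reduces to a
; zero-extend of the bool: a register move plus 'and $1'.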
define i32 @mask_negated_sext_bool1(i1 %x) {
; CHECK-LABEL: mask_negated_sext_bool1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
  %ext = sext i1 %x to i32
  %neg = sub i32 0, %ext
  %and = and i32 %neg, 1
  ret i32 %and
}
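; Same sext fold with a zeroext argument: the result is the argument itself,
; so a plain register move suffices.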
define i32 @mask_negated_sext_bool2(i1 zeroext %x) {
; CHECK-LABEL: mask_negated_sext_bool2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    retq
  %ext = sext i1 %x to i32
  %neg = sub i32 0, %ext
  %and = and i32 %neg, 1
  ret i32 %and
}
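; Vector sext variant: negating the sign-extended mask and masking with 1
; also reduces to a single 'and' with <1,1,1,1>.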
define <4 x i32> @mask_negated_sext_bool_vec(<4 x i1> %x) {
; CHECK-LABEL: mask_negated_sext_bool_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
  %ext = sext <4 x i1> %x to <4 x i32>
  %neg = sub <4 x i32> zeroinitializer, %ext
  %and = and <4 x i32> %neg, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}