; Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2025-01-31 12:41:49 +01:00)
; Upstream commit 8ba52a75ed (llvm-svn: 348508). Commit message summary:
; The AVX512 diffs are neutral, but the bswap test shows a clear overreach in
; hoistLogicOpWithSameOpcodeHands(). If we don't check for other uses, we can
; increase the instruction count. This could also fight with transforms trying
; to go in the opposite direction and possibly blow up/infinite loop. This might
; be enough to solve the bug noted here:
; http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20181203/608593.html
; hasOneUse() checks were not added to all opcodes because of a perf regression
; seen for at least one opcode.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
; NOT of an i8 viewed as an <8 x i1> mask. The CHECK lines (autogenerated by
; update_llc_test_checks.py) pin that this lowers to a scalar notb in a GPR
; rather than a round trip through an AVX512 mask (k) register.
define i8 @mask8(i8 %x) {
; CHECK-LABEL: mask8:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: notb %al
; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
  %m0 = bitcast i8 %x to <8 x i1>
  ; xor with all-ones <8 x i1> == bitwise NOT of the mask
  %m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
  %ret = bitcast <8 x i1> %m1 to i8
  ret i8 %ret
}
; In-memory variant of mask8: load an i8 mask, NOT it, store it back.
; The CHECK lines pin that this version stays in mask registers
; (kmovb/knotb/kmovb) instead of using a scalar GPR notb.
define void @mask8_mem(i8* %ptr) {
; CHECK-LABEL: mask8_mem:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovb (%rdi), %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: kmovb %k0, (%rdi)
; CHECK-NEXT: retq
  %x = load i8, i8* %ptr, align 4
  %m0 = bitcast i8 %x to <8 x i1>
  ; xor with all-ones <8 x i1> == bitwise NOT of the mask
  %m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
  %ret = bitcast <8 x i1> %m1 to i8
  store i8 %ret, i8* %ptr, align 4
  ret void
}
; (x & y) | (x ^ y) computed on i8 values viewed as <8 x i1> masks.
; The CHECK lines pin that, with both inputs arriving in GPRs, the whole
; computation is done with scalar andb/xorb/orb — no k-register traffic.
define i8 @mand8(i8 %x, i8 %y) {
; CHECK-LABEL: mand8:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: andb %sil, %cl
; CHECK-NEXT: xorb %sil, %al
; CHECK-NEXT: orb %cl, %al
; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
  %ma = bitcast i8 %x to <8 x i1>
  %mb = bitcast i8 %y to <8 x i1>
  %mc = and <8 x i1> %ma, %mb
  %md = xor <8 x i1> %ma, %mb
  %me = or <8 x i1> %mc, %md
  %ret = bitcast <8 x i1> %me to i8
  ret i8 %ret
}
; Same (a & b) | (a ^ b) computation as mand8, but the <8 x i1> masks are
; loaded from memory. The CHECK lines pin that this version is done entirely
; in mask registers (kmovb/kandb/kxorb/korb) before moving the result to eax.
define i8 @mand8_mem(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: mand8_mem:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovb (%rdi), %k0
; CHECK-NEXT: kmovb (%rsi), %k1
; CHECK-NEXT: kandb %k1, %k0, %k2
; CHECK-NEXT: kxorb %k1, %k0, %k0
; CHECK-NEXT: korb %k0, %k2, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
  %ma = load <8 x i1>, <8 x i1>* %x
  %mb = load <8 x i1>, <8 x i1>* %y
  %mc = and <8 x i1> %ma, %mb
  %md = xor <8 x i1> %ma, %mb
  %me = or <8 x i1> %mc, %md
  %ret = bitcast <8 x i1> %me to i8
  ret i8 %ret
}