llvm-mirror/test/CodeGen/X86/avx512dq-mask-op.ll
Craig Topper d41951c118 [AVX-512] Fix accidental uses of AH/BH/CH/DH after copies to/from mask registers
We've had several bugs (PR32256, PR32241) recently that resulted from uses of AH/BH/CH/DH either before or after a copy to/from a mask register.

This ultimately occurs because we create COPY_TO_REGCLASS with VK1 and GR8. Then, in CopyToFromAsymmetricReg in X86InstrInfo, we find a 32-bit super register for the GR8 so we can emit the KMOV. But as these tests demonstrate, it's possible for the GR8 register to be a high register, and we end up doing an accidental extract or insert from bits 15:8.
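
To make the failure mode concrete (a hypothetical MIR-style sketch, not taken from a real reproducer): suppose the GR8 side of the copy is allocated to %ah. Widening gives the super register %eax, and the KMOV then moves the mask through bits 7:0 of %eax rather than bits 15:8 where %ah actually lives:

  %1:gr8 = COPY %0:vk1   ; %1 happens to be allocated to %ah
  kmovw %k0, %eax        ; mask lands in %al (bits 7:0), not %ah (bits 15:8)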

I think the best way forward is to stop making copies directly between mask registers and GR8/GR16. Instead, I think we should restrict ourselves to copies between mask registers and GR32/GR64, and use EXTRACT_SUBREG/INSERT_SUBREG to handle the conversion from GR32 to GR16/GR8 or vice versa.
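
Roughly, the intended lowering changes like this (a hedged MIR-style sketch; the exact spelling in the backend may differ):

  ; before: direct copy between mismatched sizes
  %1:gr8 = COPY %0:vk1

  ; after: copy only between VK* and GR32, then narrow explicitly
  %1:gr32 = COPY %0:vk1
  %2:gr8  = EXTRACT_SUBREG %1:gr32, sub_8bit

with INSERT_SUBREG (from an IMPLICIT_DEF) widening GR8 back to GR32 for the other direction.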

Unfortunately, this complicates fastisel a bit more, since it now has to create subreg extracts where we used to create GR8 copies. We can probably make a helper function to bring down the repetition.

This does result in KMOVD being used for copies when BWI is available, because we don't know the original mask register size. This caused a lot of test deltas, because we have to split the checks for KMOVD vs. KMOVW based on BWI.
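
A test that used to check a single KMOV may now need its RUN lines split on BWI, along these lines (hypothetical prefixes and triple, not taken from this test):

  ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512dq | FileCheck %s --check-prefixes=CHECK,NOBWI
  ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512dq,+avx512bw | FileCheck %s --check-prefixes=CHECK,BWI
  ; NOBWI: kmovw %edi, %k0
  ; BWI:   kmovd %edi, %k0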

Differential Revision: https://reviews.llvm.org/D30968

llvm-svn: 298928
2017-03-28 16:35:29 +00:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
define i8 @mask8(i8 %x) {
; CHECK-LABEL: mask8:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <8 x i1> %m1 to i8
ret i8 %ret
}
define void @mask8_mem(i8* %ptr) {
; CHECK-LABEL: mask8_mem:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovb (%rdi), %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: kmovb %k0, (%rdi)
; CHECK-NEXT: retq
%x = load i8, i8* %ptr, align 4
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <8 x i1> %m1 to i8
store i8 %ret, i8* %ptr, align 4
ret void
}
define i8 @mand8(i8 %x, i8 %y) {
; CHECK-LABEL: mand8:
; CHECK: ## BB#0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl %esi, %eax
; CHECK-NEXT: andl %esi, %edi
; CHECK-NEXT: orl %eax, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
%ma = bitcast i8 %x to <8 x i1>
%mb = bitcast i8 %y to <8 x i1>
%mc = and <8 x i1> %ma, %mb
%md = xor <8 x i1> %ma, %mb
%me = or <8 x i1> %mc, %md
%ret = bitcast <8 x i1> %me to i8
ret i8 %ret
}
define i8 @mand8_mem(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: mand8_mem:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovb (%rdi), %k0
; CHECK-NEXT: kmovb (%rsi), %k1
; CHECK-NEXT: kandb %k1, %k0, %k2
; CHECK-NEXT: kxorb %k1, %k0, %k0
; CHECK-NEXT: korb %k0, %k2, %k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%ma = load <8 x i1>, <8 x i1>* %x
%mb = load <8 x i1>, <8 x i1>* %y
%mc = and <8 x i1> %ma, %mb
%md = xor <8 x i1> %ma, %mb
%me = or <8 x i1> %mc, %md
%ret = bitcast <8 x i1> %me to i8
ret i8 %ret
}