Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-10-22 04:22:57 +02:00)

[AVX-512] Add EVEX encoded VPCMPEQ and VPCMPGT to the load folding tables.

llvm-svn: 280581
This commit is contained in:
Craig Topper 2016-09-03 04:37:50 +00:00
parent 268d2fea8e
commit 5a9e876f61
2 changed files with 72 additions and 0 deletions

View File

@ -1769,6 +1769,14 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VPMULUDQZrr, X86::VPMULUDQZrm, 0 },
{ X86::VBROADCASTSSZrkz, X86::VBROADCASTSSZmkz, TB_NO_REVERSE },
{ X86::VBROADCASTSDZrkz, X86::VBROADCASTSDZmkz, TB_NO_REVERSE },
{ X86::VPCMPEQBZrr, X86::VPCMPEQBZrm, 0 },
{ X86::VPCMPEQDZrr, X86::VPCMPEQDZrm, 0 },
{ X86::VPCMPEQQZrr, X86::VPCMPEQQZrm, 0 },
{ X86::VPCMPEQWZrr, X86::VPCMPEQWZrm, 0 },
{ X86::VPCMPGTBZrr, X86::VPCMPGTBZrm, 0 },
{ X86::VPCMPGTDZrr, X86::VPCMPGTDZrm, 0 },
{ X86::VPCMPGTQZrr, X86::VPCMPGTQZrm, 0 },
{ X86::VPCMPGTWZrr, X86::VPCMPGTWZrm, 0 },
// AVX-512{F,VL} foldable instructions
{ X86::VBROADCASTSSZ256rkz, X86::VBROADCASTSSZ256mkz, TB_NO_REVERSE },
@ -1836,6 +1844,22 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VCMPPDZ256rri, X86::VCMPPDZ256rmi, 0 },
{ X86::VCMPPSZ128rri, X86::VCMPPSZ128rmi, 0 },
{ X86::VCMPPSZ256rri, X86::VCMPPSZ256rmi, 0 },
{ X86::VPCMPEQBZ128rr, X86::VPCMPEQBZ128rm, 0 },
{ X86::VPCMPEQBZ256rr, X86::VPCMPEQBZ256rm, 0 },
{ X86::VPCMPEQDZ128rr, X86::VPCMPEQDZ128rm, 0 },
{ X86::VPCMPEQDZ256rr, X86::VPCMPEQDZ256rm, 0 },
{ X86::VPCMPEQQZ128rr, X86::VPCMPEQQZ128rm, 0 },
{ X86::VPCMPEQQZ256rr, X86::VPCMPEQQZ256rm, 0 },
{ X86::VPCMPEQWZ128rr, X86::VPCMPEQWZ128rm, 0 },
{ X86::VPCMPEQWZ256rr, X86::VPCMPEQWZ256rm, 0 },
{ X86::VPCMPGTBZ128rr, X86::VPCMPGTBZ128rm, 0 },
{ X86::VPCMPGTBZ256rr, X86::VPCMPGTBZ256rm, 0 },
{ X86::VPCMPGTDZ128rr, X86::VPCMPGTDZ128rm, 0 },
{ X86::VPCMPGTDZ256rr, X86::VPCMPGTDZ256rm, 0 },
{ X86::VPCMPGTQZ128rr, X86::VPCMPGTQZ128rm, 0 },
{ X86::VPCMPGTQZ256rr, X86::VPCMPGTQZ256rm, 0 },
{ X86::VPCMPGTWZ128rr, X86::VPCMPGTWZ128rm, 0 },
{ X86::VPCMPGTWZ256rr, X86::VPCMPGTWZ256rm, 0 },
// AES foldable instructions
{ X86::AESDECLASTrr, X86::AESDECLASTrm, TB_ALIGN_16 },

View File

@ -0,0 +1,48 @@
; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
; Stack reload folding tests.
;
; By including a nop call with side effects we can force a partial register spill of the
; relevant registers and check that the reload is correctly folded into the instruction.
; Verify that a reloaded <16 x i8> operand is folded into VPCMPEQB's memory
; operand (AVX-512BW form writing a k-register), per the new folding-table entry.
define i16 @stack_fold_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1) {
;CHECK-LABEL: stack_fold_pcmpeqb
;CHECK: vpcmpeqb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
; The asm clobbers xmm2-xmm31, forcing the arguments to be spilled to the stack.
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
%2 = icmp eq <16 x i8> %a0, %a1
%3 = bitcast <16 x i1> %2 to i16 ; 16-lane compare mask packed into an i16
ret i16 %3
}
; Verify that a reloaded <4 x i32> operand is folded into VPCMPEQD's memory
; operand (AVX-512VL form writing a k-register), per the new folding-table entry.
define i8 @stack_fold_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1) {
;CHECK-LABEL: stack_fold_pcmpeqd
;CHECK: vpcmpeqd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
; The asm clobbers xmm2-xmm31, forcing the arguments to be spilled to the stack.
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
%2 = icmp eq <4 x i32> %a0, %a1
; Widen the 4-lane mask to 8 lanes (indices 4-7 select from the undef operand)
; so it can be bitcast to an i8 return value.
%3 = shufflevector <4 x i1> %2, <4 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%4 = bitcast <8 x i1> %3 to i8
ret i8 %4
}
; Verify that a reloaded <2 x i64> operand is folded into VPCMPEQQ's memory
; operand (AVX-512VL form writing a k-register), per the new folding-table entry.
define i8 @stack_fold_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1) {
;CHECK-LABEL: stack_fold_pcmpeqq
;CHECK: vpcmpeqq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
; The asm clobbers xmm2-xmm31, forcing the arguments to be spilled to the stack.
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
%2 = icmp eq <2 x i64> %a0, %a1
; Widen the 2-lane mask to 8 lanes; indices 2-3 select from the undef second
; operand, so only lanes 0-1 of the resulting i8 are meaningful.
%3 = shufflevector <2 x i1> %2, <2 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
%4 = bitcast <8 x i1> %3 to i8
ret i8 %4
}
; Verify that a reloaded <8 x i16> operand is folded into VPCMPEQW's memory
; operand (AVX-512BW form writing a k-register), per the new folding-table entry.
define i8 @stack_fold_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1) {
;CHECK-LABEL: stack_fold_pcmpeqw
;CHECK: vpcmpeqw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
; The asm clobbers xmm2-xmm31, forcing the arguments to be spilled to the stack.
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
%2 = icmp eq <8 x i16> %a0, %a1
%3 = bitcast <8 x i1> %2 to i8 ; 8-lane compare mask packed into an i8
ret i8 %3
}