Stars: 1 · Forks: 0
Mirror of https://github.com/RPCS3/llvm-mirror.git — synced 2024-11-23 11:13:28 +01:00

[X86] Standardize VPSLLDQ/VPSRLDQ enum names (PR31079)

Tweak the EVEX implementation names so they match the other variants.
This commit is contained in:
Simon Pilgrim 2020-02-08 14:51:10 +00:00
parent 3db641e8fe
commit f4b16e412a
4 changed files with 32 additions and 33 deletions

View File

@ -669,14 +669,14 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSLLDQri:
case X86::VPSLLDQri:
case X86::VPSLLDQYri:
case X86::VPSLLDQZ128rr:
case X86::VPSLLDQZ256rr:
case X86::VPSLLDQZrr:
case X86::VPSLLDQZ128ri:
case X86::VPSLLDQZ256ri:
case X86::VPSLLDQZri:
Src1Name = getRegName(MI->getOperand(1).getReg());
LLVM_FALLTHROUGH;
case X86::VPSLLDQZ128rm:
case X86::VPSLLDQZ256rm:
case X86::VPSLLDQZrm:
case X86::VPSLLDQZ128mi:
case X86::VPSLLDQZ256mi:
case X86::VPSLLDQZmi:
DestName = getRegName(MI->getOperand(0).getReg());
if (MI->getOperand(NumOperands - 1).isImm())
DecodePSLLDQMask(getRegOperandNumElts(MI, 8, 0),
@ -687,14 +687,14 @@ bool llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS,
case X86::PSRLDQri:
case X86::VPSRLDQri:
case X86::VPSRLDQYri:
case X86::VPSRLDQZ128rr:
case X86::VPSRLDQZ256rr:
case X86::VPSRLDQZrr:
case X86::VPSRLDQZ128ri:
case X86::VPSRLDQZ256ri:
case X86::VPSRLDQZri:
Src1Name = getRegName(MI->getOperand(1).getReg());
LLVM_FALLTHROUGH;
case X86::VPSRLDQZ128rm:
case X86::VPSRLDQZ256rm:
case X86::VPSRLDQZrm:
case X86::VPSRLDQZ128mi:
case X86::VPSRLDQZ256mi:
case X86::VPSRLDQZmi:
DestName = getRegName(MI->getOperand(0).getReg());
if (MI->getOperand(NumOperands - 1).isImm())
DecodePSRLDQMask(getRegOperandNumElts(MI, 8, 0),

View File

@ -10972,16 +10972,15 @@ defm VSHUFPD: avx512_shufp<"vshufpd", avx512vl_i64_info, avx512vl_f64_info>, PD,
// AVX-512 - Byte shift Left/Right
//===----------------------------------------------------------------------===//
// FIXME: The SSE/AVX names are PSLLDQri etc. - should we add the i here as well?
multiclass avx512_shift_packed<bits<8> opc, SDNode OpNode, Format MRMr,
Format MRMm, string OpcodeStr,
X86FoldableSchedWrite sched, X86VectorVTInfo _>{
def rr : AVX512<opc, MRMr,
def ri : AVX512<opc, MRMr,
(outs _.RC:$dst), (ins _.RC:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst,(_.VT (OpNode _.RC:$src1, (i8 timm:$src2))))]>,
Sched<[sched]>;
def rm : AVX512<opc, MRMm,
def mi : AVX512<opc, MRMm,
(outs _.RC:$dst), (ins _.MemOp:$src1, u8imm:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst,(_.VT (OpNode

View File

@ -1100,9 +1100,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{ X86::VPSHUFLWZ256ri, X86::VPSHUFLWZ256mi, 0 },
{ X86::VPSHUFLWZri, X86::VPSHUFLWZmi, 0 },
{ X86::VPSHUFLWri, X86::VPSHUFLWmi, 0 },
{ X86::VPSLLDQZ128rr, X86::VPSLLDQZ128rm, 0 },
{ X86::VPSLLDQZ256rr, X86::VPSLLDQZ256rm, 0 },
{ X86::VPSLLDQZrr, X86::VPSLLDQZrm, 0 },
{ X86::VPSLLDQZ128ri, X86::VPSLLDQZ128mi, 0 },
{ X86::VPSLLDQZ256ri, X86::VPSLLDQZ256mi, 0 },
{ X86::VPSLLDQZri, X86::VPSLLDQZmi, 0 },
{ X86::VPSLLDZ128ri, X86::VPSLLDZ128mi, 0 },
{ X86::VPSLLDZ256ri, X86::VPSLLDZ256mi, 0 },
{ X86::VPSLLDZri, X86::VPSLLDZmi, 0 },
@ -1121,9 +1121,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{ X86::VPSRAWZ128ri, X86::VPSRAWZ128mi, 0 },
{ X86::VPSRAWZ256ri, X86::VPSRAWZ256mi, 0 },
{ X86::VPSRAWZri, X86::VPSRAWZmi, 0 },
{ X86::VPSRLDQZ128rr, X86::VPSRLDQZ128rm, 0 },
{ X86::VPSRLDQZ256rr, X86::VPSRLDQZ256rm, 0 },
{ X86::VPSRLDQZrr, X86::VPSRLDQZrm, 0 },
{ X86::VPSRLDQZ128ri, X86::VPSRLDQZ128mi, 0 },
{ X86::VPSRLDQZ256ri, X86::VPSRLDQZ256mi, 0 },
{ X86::VPSRLDQZri, X86::VPSRLDQZmi, 0 },
{ X86::VPSRLDZ128ri, X86::VPSRLDZ128mi, 0 },
{ X86::VPSRLDZ256ri, X86::VPSRLDZ256mi, 0 },
{ X86::VPSRLDZri, X86::VPSRLDZmi, 0 },

View File

@ -620,7 +620,7 @@ body: |
; CHECK: $ymm0 = VPSRAWYrr $ymm0, $xmm1
$ymm0 = VPSRAWZ256rr $ymm0, $xmm1
; CHECK: $ymm0 = VPSRLDQYri $ymm0, 7
$ymm0 = VPSRLDQZ256rr $ymm0, 7
$ymm0 = VPSRLDQZ256ri $ymm0, 7
; CHECK: $ymm0 = VPSRLDYri $ymm0, 7
$ymm0 = VPSRLDZ256ri $ymm0, 7
; CHECK: $ymm0 = VPSRLDYrm $ymm0, $rip, 1, $rax, 0, $noreg
@ -780,7 +780,7 @@ body: |
; CHECK: $ymm0 = VPERMQYri $ymm0, 7
$ymm0 = VPERMQZ256ri $ymm0, 7
; CHECK: $ymm0 = VPSLLDQYri $ymm0, 14
$ymm0 = VPSLLDQZ256rr $ymm0, 14
$ymm0 = VPSLLDQZ256ri $ymm0, 14
; CHECK: $ymm0 = VPSLLDYri $ymm0, 7
$ymm0 = VPSLLDZ256ri $ymm0, 7
; CHECK: $ymm0 = VPSLLDYrm $ymm0, $rip, 1, $rax, 0, $noreg
@ -1610,7 +1610,7 @@ body: |
; CHECK: $xmm0 = VPSRAWrr $xmm0, $xmm0
$xmm0 = VPSRAWZ128rr $xmm0, $xmm0
; CHECK: $xmm0 = VPSRLDQri $xmm0, 14
$xmm0 = VPSRLDQZ128rr $xmm0, 14
$xmm0 = VPSRLDQZ128ri $xmm0, 14
; CHECK: $xmm0 = VPSRLDri $xmm0, 7
$xmm0 = VPSRLDZ128ri $xmm0, 7
; CHECK: $xmm0 = VPSRLDrm $xmm0, $rip, 1, $rax, 0, $noreg
@ -1726,7 +1726,7 @@ body: |
; CHECK: $xmm0 = VPSHUFLWri $xmm0, -24
$xmm0 = VPSHUFLWZ128ri $xmm0, -24
; CHECK: $xmm0 = VPSLLDQri $xmm0, 7
$xmm0 = VPSLLDQZ128rr $xmm0, 7
$xmm0 = VPSLLDQZ128ri $xmm0, 7
; CHECK: $xmm0 = VSHUFPDrmi $xmm0, $rip, 1, $rax, 0, $noreg, -24
$xmm0 = VSHUFPDZ128rmi $xmm0, $rip, 1, $rax, 0, $noreg, -24
; CHECK: $xmm0 = VSHUFPDrri $xmm0, $xmm1, -24
@ -2982,8 +2982,8 @@ body: |
$ymm16 = VPSRAWZ256rm $ymm16, $rip, 1, $rax, 0, $noreg
; CHECK: $ymm16 = VPSRAWZ256rr $ymm16, $xmm1
$ymm16 = VPSRAWZ256rr $ymm16, $xmm1
; CHECK: $ymm16 = VPSRLDQZ256rr $ymm16, 7
$ymm16 = VPSRLDQZ256rr $ymm16, 7
; CHECK: $ymm16 = VPSRLDQZ256ri $ymm16, 7
$ymm16 = VPSRLDQZ256ri $ymm16, 7
; CHECK: $ymm16 = VPSRLDZ256ri $ymm16, 7
$ymm16 = VPSRLDZ256ri $ymm16, 7
; CHECK: $ymm16 = VPSRLDZ256rm $ymm16, $rip, 1, $rax, 0, $noreg
@ -3142,8 +3142,8 @@ body: |
$ymm16 = VPERMQZ256mi $rdi, 1, $noreg, 0, $noreg, 7
; CHECK: $ymm16 = VPERMQZ256ri $ymm16, 7
$ymm16 = VPERMQZ256ri $ymm16, 7
; CHECK: $ymm16 = VPSLLDQZ256rr $ymm16, 14
$ymm16 = VPSLLDQZ256rr $ymm16, 14
; CHECK: $ymm16 = VPSLLDQZ256ri $ymm16, 14
$ymm16 = VPSLLDQZ256ri $ymm16, 14
; CHECK: $ymm16 = VPSLLDZ256ri $ymm16, 7
$ymm16 = VPSLLDZ256ri $ymm16, 7
; CHECK: $ymm16 = VPSLLDZ256rm $ymm16, $rip, 1, $rax, 0, $noreg
@ -3980,8 +3980,8 @@ body: |
$xmm16 = VPSRAWZ128rm $xmm16, $rip, 1, $rax, 0, $noreg
; CHECK: $xmm16 = VPSRAWZ128rr $xmm16, $xmm16
$xmm16 = VPSRAWZ128rr $xmm16, $xmm16
; CHECK: $xmm16 = VPSRLDQZ128rr $xmm16, 14
$xmm16 = VPSRLDQZ128rr $xmm16, 14
; CHECK: $xmm16 = VPSRLDQZ128ri $xmm16, 14
$xmm16 = VPSRLDQZ128ri $xmm16, 14
; CHECK: $xmm16 = VPSRLDZ128ri $xmm16, 7
$xmm16 = VPSRLDZ128ri $xmm16, 7
; CHECK: $xmm16 = VPSRLDZ128rm $xmm16, $rip, 1, $rax, 0, $noreg
@ -4096,8 +4096,8 @@ body: |
$xmm16 = VPSHUFLWZ128mi $rdi, 1, $noreg, 0, $noreg, -24
; CHECK: $xmm16 = VPSHUFLWZ128ri $xmm16, -24
$xmm16 = VPSHUFLWZ128ri $xmm16, -24
; CHECK: $xmm16 = VPSLLDQZ128rr $xmm16, 1
$xmm16 = VPSLLDQZ128rr $xmm16, 1
; CHECK: $xmm16 = VPSLLDQZ128ri $xmm16, 1
$xmm16 = VPSLLDQZ128ri $xmm16, 1
; CHECK: $xmm16 = VSHUFPDZ128rmi $xmm16, $rip, 1, $rax, 0, $noreg, -24
$xmm16 = VSHUFPDZ128rmi $xmm16, $rip, 1, $rax, 0, $noreg, -24
; CHECK: $xmm16 = VSHUFPDZ128rri $xmm16, $xmm1, -24