
[X86][AVX512] Use the proper load/store for AVX512 registers.

When loading or storing AVX512 registers, we were not using the AVX512
variant of the load and store for VR128- and VR256-like register classes.
Thus, we ended up with the wrong encoding and were actually dropping the
high bits of the register encoding. The result was that we loaded or
stored the wrong register. The effect is visible only when we emit the
object file directly and disassemble it: the disassembler's output then
does not match the assembly input.

This is related to llvm.org/PR27481.

llvm-svn: 269001
Quentin Colombet 2016-05-10 01:09:14 +00:00
parent aba7343f5c
commit 4c5a2694f9
4 changed files with 44 additions and 10 deletions
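
For illustration, a minimal standalone C++ sketch (not the actual LLVM code) of the opcode choice the patch makes for 256-bit spills and reloads: the VEX-encoded VMOVAPSY/VMOVUPSY forms can only name ymm0-ymm15, so ymm16-ymm31 must use the EVEX-encoded VMOVAPSZ256/VMOVUPSZ256 forms, otherwise the extra register bit is dropped and the wrong register is accessed. The needsEVEX helper and the string return values below are hypothetical stand-ins for the real register-class checks and opcode enums in getLoadStoreRegOpcode.

#include <cstdio>

// ymm16-ymm31 cannot be encoded with a VEX prefix; they require EVEX (AVX-512).
static bool needsEVEX(unsigned YmmReg) { return YmmReg >= 16; }

// Returns the name of the move instruction to use for spilling (store) or
// reloading (load) the given ymm register, mirroring the patched selection.
static const char *getYmmSpillOpcode(unsigned YmmReg, bool Load,
                                     bool IsStackAligned) {
  if (!needsEVEX(YmmReg)) {
    // ymm0-ymm15: the VEX-encoded AVX forms are sufficient.
    if (IsStackAligned)
      return Load ? "VMOVAPSYrm" : "VMOVAPSYmr";
    return Load ? "VMOVUPSYrm" : "VMOVUPSYmr";
  }
  // ymm16-ymm31: use the EVEX-encoded AVX-512 forms; the VEX forms cannot
  // represent the fifth register bit, so the encoding would silently refer
  // to ymm(N % 16).
  if (IsStackAligned)
    return Load ? "VMOVAPSZ256rm" : "VMOVAPSZ256mr";
  return Load ? "VMOVUPSZ256rm" : "VMOVUPSZ256mr";
}

int main() {
  // Before the fix, an unaligned store of ymm31 was emitted with the VEX
  // form, and the object file effectively stored ymm15 instead.
  std::printf("%s\n", getYmmSpillOpcode(31, /*Load=*/false, /*IsStackAligned=*/false));
  std::printf("%s\n", getYmmSpillOpcode(3, /*Load=*/true, /*IsStackAligned=*/true));
  return 0;
}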

View File

@@ -899,6 +899,8 @@ def CSR_64_AllRegs : CalleeSavedRegs<(add CSR_64_MostRegs, RAX, RSP,
 def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX, RSP,
                                                    (sequence "YMM%u", 0, 15)),
                                               (sequence "XMM%u", 0, 15))>;
+def CSR_64_AllRegs_AVX512 : CalleeSavedRegs<(add CSR_64_AllRegs_AVX,
+                                                 (sequence "YMM%u", 16, 31))>;
 
 // Standard C + YMM6-15
 def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,

View File

@@ -4645,23 +4645,35 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
     assert((X86::VR128RegClass.hasSubClassEq(RC) ||
             X86::VR128XRegClass.hasSubClassEq(RC))&& "Unknown 16-byte regclass");
     // If stack is realigned we can use aligned stores.
+    if (X86::VR128RegClass.hasSubClassEq(RC)) {
+      if (isStackAligned)
+        return load ? (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
+                    : (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
+      else
+        return load ? (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
+                    : (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
+    }
+    assert(STI.hasAVX512() && "Using extended register requires AVX512");
     if (isStackAligned)
-      return load ?
-        (HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm) :
-        (HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
+      return load ? X86::VMOVAPSZ128rm : X86::VMOVAPSZ128mr;
     else
-      return load ?
-        (HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm) :
-        (HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
+      return load ? X86::VMOVUPSZ128rm : X86::VMOVUPSZ128mr;
   }
   case 32:
     assert((X86::VR256RegClass.hasSubClassEq(RC) ||
             X86::VR256XRegClass.hasSubClassEq(RC)) && "Unknown 32-byte regclass");
     // If stack is realigned we can use aligned stores.
+    if (X86::VR256RegClass.hasSubClassEq(RC)) {
+      if (isStackAligned)
+        return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
+      else
+        return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
+    }
+    assert(STI.hasAVX512() && "Using extended register requires AVX512");
     if (isStackAligned)
-      return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
+      return load ? X86::VMOVAPSZ256rm : X86::VMOVAPSZ256mr;
     else
-      return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
+      return load ? X86::VMOVUPSZ256rm : X86::VMOVUPSZ256mr;
   case 64:
     assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
     if (isStackAligned)

View File

@@ -294,10 +294,11 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
     return CSR_64_SaveList;
   case CallingConv::X86_INTR:
     if (Is64Bit) {
+      if (HasAVX512)
+        return CSR_64_AllRegs_AVX512_SaveList;
       if (HasAVX)
         return CSR_64_AllRegs_AVX_SaveList;
-      else
-        return CSR_64_AllRegs_SaveList;
+      return CSR_64_AllRegs_SaveList;
     } else {
       if (HasSSE)
         return CSR_32_AllRegs_SSE_SaveList;

View File

@@ -0,0 +1,19 @@
+; RUN: llc -verify-machineinstrs -mtriple=x86_64-apple-macosx -show-mc-encoding -mattr=+avx512f < %s | FileCheck %s
+; Make sure we spill the high numbered YMM registers with the right encoding.
+; CHECK-LABEL: foo
+; CHECK: movups %ymm31, {{.+}}
+; CHECK: encoding: [0x62,0x61,0x7c,0x28,0x11,0xbc,0x24,0xf0,0x03,0x00,0x00]
+; ymm30 is used as an anchor for the previous regexp.
+; CHECK-NEXT: movups %ymm30
+; CHECK: call
+; CHECK: iret
+define x86_intrcc void @foo(i8* %frame) {
+  call void @bar()
+  ret void
+}
+declare void @bar()