//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//
#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"
static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
cl::desc("Enable use of a base pointer for complex stack frames"));
X86RegisterInfo::X86RegisterInfo(const Triple &TT)
: X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
X86_MC::getDwarfRegFlavour(TT, false),
X86_MC::getDwarfRegFlavour(TT, true),
(TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
X86_MC::InitLLVM2SEHRegisterMapping(this);
// Cache some information.
Is64Bit = TT.isArch64Bit();
IsWin64 = Is64Bit && TT.isOSWindows();
// Use a callee-saved register as the base pointer. These registers must
// not conflict with any ABI requirements. For example, in 32-bit PIC mode
// the GOT pointer must be in EBX before function calls made via the PLT.
if (Is64Bit) {
SlotSize = 8;
// This matches the simplified 32-bit pointer code in the data layout
// computation.
// FIXME: Should use the data layout?
bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
} else {
SlotSize = 4;
StackPtr = X86::ESP;
FramePtr = X86::EBP;
BasePtr = X86::ESI;
}
}
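// For illustration, the registers selected above for a few common triples
// (read directly from the constructor logic; not an exhaustive list):
//   x86_64-linux-gnu        : SlotSize = 8, SP = RSP, FP = RBP, Base = RBX
//   x86_64-linux-gnux32     : SlotSize = 8, SP = ESP, FP = EBP, Base = EBX
//   i386-linux-gnu (32-bit) : SlotSize = 4, SP = ESP, FP = EBP, Base = ESI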
bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
// ExeDepsFixer and PostRAScheduler require liveness.
return true;
}
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
return getEncodingValue(i);
}
const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
unsigned Idx) const {
// The sub_8bit sub-register index is more constrained in 32-bit mode.
// It behaves just like the sub_8bit_hi index.
if (!Is64Bit && Idx == X86::sub_8bit)
Idx = X86::sub_8bit_hi;
// Forward to TableGen's default version.
return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
const TargetRegisterClass *B,
unsigned SubIdx) const {
// The sub_8bit sub-register index is more constrained in 32-bit mode.
if (!Is64Bit && SubIdx == X86::sub_8bit) {
A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
if (!A)
return nullptr;
}
return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}
const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
const MachineFunction &MF) const {
// Don't allow super-classes of GR8_NOREX. This class is only used after
// extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
// to the full GR8 register class in 64-bit mode, so we cannot allow the
// register class inflation.
//
// The GR8_NOREX class is always used in a way that won't be constrained to a
// sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
// full GR8 class.
if (RC == &X86::GR8_NOREXRegClass)
return RC;
const TargetRegisterClass *Super = RC;
TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
do {
switch (Super->getID()) {
case X86::GR8RegClassID:
case X86::GR16RegClassID:
case X86::GR32RegClassID:
case X86::GR64RegClassID:
case X86::FR32RegClassID:
case X86::FR64RegClassID:
case X86::RFP32RegClassID:
case X86::RFP64RegClassID:
case X86::RFP80RegClassID:
case X86::VR128RegClassID:
case X86::VR256RegClassID:
// Don't return a super-class that would shrink the spill size.
// That can happen with the vector and float classes.
if (Super->getSize() == RC->getSize())
return Super;
}
Super = *I++;
} while (Super);
return RC;
}
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
unsigned Kind) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
switch (Kind) {
default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
case 0: // Normal GPRs.
if (Subtarget.isTarget64BitLP64())
return &X86::GR64RegClass;
return &X86::GR32RegClass;
case 1: // Normal GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOSPRegClass;
return &X86::GR32_NOSPRegClass;
case 2: // NOREX GPRs.
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOREXRegClass;
return &X86::GR32_NOREXRegClass;
case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
if (Subtarget.isTarget64BitLP64())
return &X86::GR64_NOREX_NOSPRegClass;
return &X86::GR32_NOREX_NOSPRegClass;
case 4: // Available for tailcall (not callee-saved GPRs).
return getGPRsForTailCall(MF);
}
}
const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
const Function *F = MF.getFunction();
if (IsWin64 || (F && F->getCallingConv() == CallingConv::X86_64_Win64))
return &X86::GR64_TCW64RegClass;
else if (Is64Bit)
return &X86::GR64_TCRegClass;
bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
if (hasHipeCC)
return &X86::GR32RegClass;
return &X86::GR32_TCRegClass;
}
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
if (RC == &X86::CCRRegClass) {
if (Is64Bit)
return &X86::GR64RegClass;
else
return &X86::GR32RegClass;
}
return RC;
}
unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const {
const X86FrameLowering *TFI = getFrameLowering(MF);
unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
switch (RC->getID()) {
default:
return 0;
case X86::GR32RegClassID:
return 4 - FPDiff;
case X86::GR64RegClassID:
return 12 - FPDiff;
case X86::VR128RegClassID:
return Is64Bit ? 10 : 4;
case X86::VR64RegClassID:
return 4;
}
}
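// For example, when a frame pointer is in use the limits above come out to
// 3 for GR32 and 11 for GR64; without one they are 4 and 12 respectively.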
const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
assert(MF && "MachineFunction required");
const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
bool CallsEHReturn = MF->getMMI().callsEHReturn();
switch (MF->getFunction()->getCallingConv()) {
case CallingConv::GHC:
case CallingConv::HiPE:
return CSR_NoRegs_SaveList;
case CallingConv::AnyReg:
if (HasAVX)
return CSR_64_AllRegs_AVX_SaveList;
return CSR_64_AllRegs_SaveList;
case CallingConv::PreserveMost:
return CSR_64_RT_MostRegs_SaveList;
case CallingConv::PreserveAll:
if (HasAVX)
return CSR_64_RT_AllRegs_AVX_SaveList;
return CSR_64_RT_AllRegs_SaveList;
case CallingConv::Intel_OCL_BI: {
if (HasAVX512 && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
if (HasAVX512 && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX512_SaveList;
if (HasAVX && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
if (HasAVX && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX_SaveList;
if (!HasAVX && !IsWin64 && Is64Bit)
return CSR_64_Intel_OCL_BI_SaveList;
break;
}
case CallingConv::HHVM:
return CSR_64_HHVM_SaveList;
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_SaveList;
break;
case CallingConv::X86_64_Win64:
return CSR_Win64_SaveList;
case CallingConv::X86_64_SysV:
if (CallsEHReturn)
return CSR_64EHRet_SaveList;
return CSR_64_SaveList;
default:
break;
}
if (Is64Bit) {
if (IsWin64)
return CSR_Win64_SaveList;
if (CallsEHReturn)
return CSR_64EHRet_SaveList;
return CSR_64_SaveList;
}
if (CallsEHReturn)
return CSR_32EHRet_SaveList;
return CSR_32_SaveList;
}
const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
switch (CC) {
case CallingConv::GHC:
case CallingConv::HiPE:
return CSR_NoRegs_RegMask;
case CallingConv::AnyReg:
if (HasAVX)
return CSR_64_AllRegs_AVX_RegMask;
return CSR_64_AllRegs_RegMask;
case CallingConv::PreserveMost:
return CSR_64_RT_MostRegs_RegMask;
case CallingConv::PreserveAll:
if (HasAVX)
return CSR_64_RT_AllRegs_AVX_RegMask;
return CSR_64_RT_AllRegs_RegMask;
case CallingConv::Intel_OCL_BI: {
if (HasAVX512 && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
if (HasAVX512 && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX512_RegMask;
if (HasAVX && IsWin64)
return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
if (HasAVX && Is64Bit)
return CSR_64_Intel_OCL_BI_AVX_RegMask;
if (!HasAVX && !IsWin64 && Is64Bit)
return CSR_64_Intel_OCL_BI_RegMask;
break;
}
case CallingConv::HHVM:
return CSR_64_HHVM_RegMask;
case CallingConv::Cold:
if (Is64Bit)
return CSR_64_MostRegs_RegMask;
break;
default:
break;
case CallingConv::X86_64_Win64:
return CSR_Win64_RegMask;
case CallingConv::X86_64_SysV:
return CSR_64_RegMask;
}
// Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
// callsEHReturn().
if (Is64Bit) {
if (IsWin64)
return CSR_Win64_RegMask;
return CSR_64_RegMask;
}
return CSR_32_RegMask;
}
const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
return CSR_NoRegs_RegMask;
}
const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
return CSR_64_TLS_Darwin_RegMask;
}
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
const X86FrameLowering *TFI = getFrameLowering(MF);
// Set the stack-pointer register and its aliases as reserved.
for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
++I)
Reserved.set(*I);
// Set the instruction pointer register and its aliases as reserved.
for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
++I)
Reserved.set(*I);
// Set the frame-pointer register and its aliases as reserved if needed.
if (TFI->hasFP(MF)) {
for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
++I)
Reserved.set(*I);
}
// Set the base-pointer register and its aliases as reserved if needed.
if (hasBasePointer(MF)) {
CallingConv::ID CC = MF.getFunction()->getCallingConv();
const uint32_t *RegMask = getCallPreservedMask(MF, CC);
if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
report_fatal_error(
"Stack realignment in presence of dynamic allocas is not supported "
"with this calling convention.");
unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), MVT::i64,
false);
for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
I.isValid(); ++I)
Reserved.set(*I);
}
// Mark the segment registers as reserved.
Reserved.set(X86::CS);
Reserved.set(X86::SS);
Reserved.set(X86::DS);
Reserved.set(X86::ES);
Reserved.set(X86::FS);
Reserved.set(X86::GS);
// Mark the floating point stack registers as reserved.
for (unsigned n = 0; n != 8; ++n)
Reserved.set(X86::ST0 + n);
// Reserve the registers that only exist in 64-bit mode.
if (!Is64Bit) {
// These 8-bit registers are part of the x86-64 extension even though their
// super-registers are the old 32-bit registers.
Reserved.set(X86::SIL);
Reserved.set(X86::DIL);
Reserved.set(X86::BPL);
Reserved.set(X86::SPL);
for (unsigned n = 0; n != 8; ++n) {
// R8, R9, ...
for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
// XMM8, XMM9, ...
for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
}
}
if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
for (unsigned n = 16; n != 32; ++n) {
for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
Reserved.set(*AI);
}
}
return Reserved;
}
void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
// Check if the EFLAGS register is marked as live-out. This shouldn't happen,
// because the calling convention defines the EFLAGS register as NOT
// preserved.
//
// Unfortunately, EFLAGS can show up as live-out after branch folding. Add
// an assert to track this, and clear the register afterwards to avoid
// unnecessary crashes during release builds.
assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
"EFLAGS are not live-out from a patchpoint.");
// Also clean other registers that don't need preserving (IP).
for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
Mask[Reg / 32] &= ~(1U << (Reg % 32));
}
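// The mask is a packed array of 32-bit words, one bit per register number.
// For example, a register numbered 45 lives in Mask[1], bit 13
// (45 / 32 == 1, 45 % 32 == 13), which is the bit the arithmetic above clears.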
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
static bool CantUseSP(const MachineFrameInfo *MFI) {
return MFI->hasVarSizedObjects() || MFI->hasOpaqueSPAdjustment();
}
bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
if (!EnableBasePointer)
return false;
// When we need stack realignment, we can't address the stack from the frame
// pointer. When we have dynamic allocas or stack-adjusting inline asm, we
// can't address variables from the stack pointer. MS inline asm can
// reference locals while also adjusting the stack pointer. When we can't
// use both the SP and the FP, we need a separate base pointer register.
bool CantUseFP = needsStackRealignment(MF);
return CantUseFP && CantUseSP(MFI);
}
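// As a concrete case: a function that over-aligns a local variable (forcing
// stack realignment, so the FP cannot address locals) and also calls alloca
// (variable-sized objects, so the SP cannot either) satisfies both conditions
// above and is assigned the base pointer.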
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
if (!TargetRegisterInfo::canRealignStack(MF))
return false;
const MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
// Stack realignment requires a frame pointer. If we already started
// register allocation with frame pointer elimination, it is too late now.
if (!MRI->canReserveReg(FramePtr))
return false;
// If a base pointer is necessary, check that it isn't too late to reserve
// it.
if (CantUseSP(MFI))
return MRI->canReserveReg(BasePtr);
return true;
}
bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
unsigned Reg, int &FrameIdx) const {
// Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
// this function is neither used nor tested.
llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}
void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const {
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
const X86FrameLowering *TFI = getFrameLowering(MF);
int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
unsigned BasePtr;
unsigned Opc = MI.getOpcode();
bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm ||
Opc == X86::TCRETURNmi || Opc == X86::TCRETURNmi64;
if (hasBasePointer(MF))
BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
else if (needsStackRealignment(MF))
BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
else if (AfterFPPop)
BasePtr = StackPtr;
else
BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);
// LOCAL_ESCAPE uses a single offset, with no register. It only works in the
// simple FP case, and doesn't work with stack realignment. On 32-bit, the
// offset is from the traditional base pointer location. On 64-bit, the
// offset is from the SP at the end of the prologue, not the FP location. This
// matches the behavior of llvm.frameaddress.
unsigned IgnoredFrameReg;
if (Opc == TargetOpcode::LOCAL_ESCAPE) {
MachineOperand &FI = MI.getOperand(FIOperandNum);
int Offset;
Offset = TFI->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
FI.ChangeToImmediate(Offset);
return;
}
// For LEA64_32r, when BasePtr is 32 bits (X32) we can use the full-size
// 64-bit register as the source operand; the semantics are the same and the
// destination is still 32 bits. This saves one byte per LEA since the 0x67
// prefix is avoided.
if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
BasePtr = getX86SubSuperRegister(BasePtr, MVT::i64, false);
// This must be part of a four-operand memory reference. Replace the
// FrameIndex operand with the base register.
MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
// Now add the frame object offset to the offset from EBP.
int FIOffset;
if (AfterFPPop) {
// Tail call jmp happens after FP is popped.
const MachineFrameInfo *MFI = MF.getFrameInfo();
FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
} else
FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
if (BasePtr == StackPtr)
FIOffset += SPAdj;
// The frame index format for stackmaps and patchpoints is different from the
// X86 format. It only has a FI and an offset.
if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
assert(BasePtr == FramePtr && "Expected the FP as base register");
int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
return;
}
if (MI.getOperand(FIOperandNum+3).isImm()) {
// Offset is a 32-bit integer.
int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
int Offset = FIOffset + Imm;
assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
"Requesting 64-bit offset in 32-bit immediate!");
MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
} else {
// Offset is symbolic. This is extremely rare.
uint64_t Offset = FIOffset +
(uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
MI.getOperand(FIOperandNum + 3).setOffset(Offset);
}
}
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const X86FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
unsigned FrameReg = getFrameRegister(MF);
if (Subtarget.isTarget64BitILP32())
FrameReg = getX86SubSuperRegister(FrameReg, MVT::i32, false);
return FrameReg;
}
namespace llvm {
unsigned getX86SubSuperRegisterOrZero(unsigned Reg, MVT::SimpleValueType VT,
bool High) {
switch (VT) {
default: return 0;
case MVT::i8:
if (High) {
switch (Reg) {
default: return getX86SubSuperRegister(Reg, MVT::i64);
case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
return X86::SI;
case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
return X86::DI;
case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
return X86::BP;
case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
return X86::SP;
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::AH;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
return X86::DH;
case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
return X86::CH;
case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
return X86::BH;
}
} else {
switch (Reg) {
default: return 0;
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::AL;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
return X86::DL;
case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
return X86::CL;
case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
return X86::BL;
case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
return X86::SIL;
case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
return X86::DIL;
case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
return X86::BPL;
case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
return X86::SPL;
case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
return X86::R8B;
case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
return X86::R9B;
case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
return X86::R10B;
case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
return X86::R11B;
case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
return X86::R12B;
case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
return X86::R13B;
case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
return X86::R14B;
case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
return X86::R15B;
}
}
case MVT::i16:
switch (Reg) {
default: return 0;
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::AX;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
return X86::DX;
case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
return X86::CX;
case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
return X86::BX;
case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
return X86::SI;
case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
return X86::DI;
case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
return X86::BP;
case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
return X86::SP;
case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
return X86::R8W;
case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
return X86::R9W;
case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
return X86::R10W;
case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
return X86::R11W;
case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
return X86::R12W;
case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
return X86::R13W;
case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
return X86::R14W;
case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
return X86::R15W;
}
case MVT::i32:
switch (Reg) {
default: return 0;
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::EAX;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
return X86::EDX;
case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
return X86::ECX;
case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
return X86::EBX;
case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
return X86::ESI;
case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
return X86::EDI;
case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
return X86::EBP;
case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
return X86::ESP;
case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
return X86::R8D;
case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
return X86::R9D;
case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
return X86::R10D;
case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
return X86::R11D;
case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
return X86::R12D;
case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
return X86::R13D;
case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
return X86::R14D;
case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
return X86::R15D;
}
case MVT::i64:
switch (Reg) {
default: return 0;
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
return X86::RAX;
case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
return X86::RDX;
case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
return X86::RCX;
case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
return X86::RBX;
case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
return X86::RSI;
case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
return X86::RDI;
case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
return X86::RBP;
case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
return X86::RSP;
case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
return X86::R8;
case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
return X86::R9;
case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
return X86::R10;
case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
return X86::R11;
case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
return X86::R12;
case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
return X86::R13;
case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
return X86::R14;
case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
return X86::R15;
}
}
}
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
bool High) {
unsigned Res = getX86SubSuperRegisterOrZero(Reg, VT, High);
if (Res == 0)
llvm_unreachable("Unexpected register or VT");
return Res;
}
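// A few illustrative mappings, read straight from the tables above:
//   getX86SubSuperRegister(X86::EAX, MVT::i8,  /*High=*/false) == X86::AL
//   getX86SubSuperRegister(X86::EAX, MVT::i8,  /*High=*/true)  == X86::AH
//   getX86SubSuperRegister(X86::AL,  MVT::i64, /*High=*/false) == X86::RAX
//   getX86SubSuperRegister(X86::R9D, MVT::i16, /*High=*/false) == X86::R9W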
unsigned get512BitSuperRegister(unsigned Reg) {
if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
return X86::ZMM0 + (Reg - X86::XMM0);
if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
return X86::ZMM0 + (Reg - X86::YMM0);
if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
return Reg;
llvm_unreachable("Unexpected SIMD register");
}
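// For example, get512BitSuperRegister(X86::XMM5) and
// get512BitSuperRegister(X86::YMM5) both return X86::ZMM5.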
}