llvm-mirror/lib/Target/Mips/MipsRegisterInfo.cpp
//===-- MipsRegisterInfo.cpp - MIPS Register Information -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the MIPS implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "MipsRegisterInfo.h"
#include "Mips.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsInstrInfo.h"
#include "MipsMachineFunction.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
#define DEBUG_TYPE "mips-reg-info"
#define GET_REGINFO_TARGET_DESC
#include "MipsGenRegisterInfo.inc"
MipsRegisterInfo::MipsRegisterInfo() : MipsGenRegisterInfo(Mips::RA) {}
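// In PIC code the MIPS ABIs expect the address of the called function to be
// in T9 ($25) at the point of the call.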
unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; }
const TargetRegisterClass *
MipsRegisterInfo::getPointerRegClass(const MachineFunction &MF,
unsigned Kind) const {
MipsABIInfo ABI = MF.getSubtarget<MipsSubtarget>().getABI();
return ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
}
unsigned
MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const {
switch (RC->getID()) {
default:
return 0;
case Mips::GPR32RegClassID:
case Mips::GPR64RegClassID:
case Mips::DSPRRegClassID: {
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
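// 28 = 32 GPRs minus the always-reserved ZERO, K0, K1 and SP; reserving a
// frame pointer removes one more register from the pool.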
return 28 - TFI->hasFP(MF);
}
case Mips::FGR32RegClassID:
return 32;
case Mips::AFGR64RegClassID:
return 16;
case Mips::FGR64RegClassID:
return 32;
}
}
//===----------------------------------------------------------------------===//
// Callee Saved Registers methods
//===----------------------------------------------------------------------===//
/// Mips Callee Saved Registers
const MCPhysReg *
MipsRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const MipsSubtarget &Subtarget = MF->getSubtarget<MipsSubtarget>();
const Function *F = MF->getFunction();
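// Interrupt handlers can be entered at any point, so their save lists also
// cover registers that the normal calling convention treats as caller-saved.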
if (F->hasFnAttribute("interrupt")) {
if (Subtarget.hasMips64())
return Subtarget.hasMips64r6() ? CSR_Interrupt_64R6_SaveList
: CSR_Interrupt_64_SaveList;
else
return Subtarget.hasMips32r6() ? CSR_Interrupt_32R6_SaveList
: CSR_Interrupt_32_SaveList;
}
if (Subtarget.isSingleFloat())
return CSR_SingleFloatOnly_SaveList;
if (Subtarget.isABI_N64())
return CSR_N64_SaveList;
if (Subtarget.isABI_N32())
return CSR_N32_SaveList;
if (Subtarget.isFP64bit())
return CSR_O32_FP64_SaveList;
if (Subtarget.isFPXX())
return CSR_O32_FPXX_SaveList;
return CSR_O32_SaveList;
}
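// The call-preserved masks parallel the save-list selection above; they are
// the register-mask form attached to call instructions.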
const uint32_t *
MipsRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID) const {
const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
if (Subtarget.isSingleFloat())
return CSR_SingleFloatOnly_RegMask;
if (Subtarget.isABI_N64())
return CSR_N64_RegMask;
if (Subtarget.isABI_N32())
return CSR_N32_RegMask;
if (Subtarget.isFP64bit())
return CSR_O32_FP64_RegMask;
if (Subtarget.isFPXX())
return CSR_O32_FPXX_RegMask;
return CSR_O32_RegMask;
}
const uint32_t *MipsRegisterInfo::getMips16RetHelperMask() {
return CSR_Mips16RetHelper_RegMask;
}
BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
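// ZERO, the kernel temporaries K0/K1 and the stack pointer are never
// allocatable, in both the 32-bit and 64-bit register files.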
static const MCPhysReg ReservedGPR32[] = {
Mips::ZERO, Mips::K0, Mips::K1, Mips::SP
};
static const MCPhysReg ReservedGPR64[] = {
Mips::ZERO_64, Mips::K0_64, Mips::K1_64, Mips::SP_64
};
BitVector Reserved(getNumRegs());
const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
typedef TargetRegisterClass::const_iterator RegIter;
for (unsigned I = 0; I < array_lengthof(ReservedGPR32); ++I)
Reserved.set(ReservedGPR32[I]);
// Reserve registers for the NaCl sandbox.
if (Subtarget.isTargetNaCl()) {
Reserved.set(Mips::T6); // Reserved for control flow mask.
Reserved.set(Mips::T7); // Reserved for memory access mask.
Reserved.set(Mips::T8); // Reserved for thread pointer.
}
for (unsigned I = 0; I < array_lengthof(ReservedGPR64); ++I)
Reserved.set(ReservedGPR64[I]);
// For mno-abicalls, GP is a program invariant!
if (!Subtarget.isABICalls()) {
Reserved.set(Mips::GP);
Reserved.set(Mips::GP_64);
}
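// AFGR64 (even/odd pairs of 32-bit FPRs, FR=0) and FGR64 (true 64-bit FPRs,
// FR=1) overlap, so whichever view does not match the current FP mode is
// reserved.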
if (Subtarget.isFP64bit()) {
// Reserve all registers in AFGR64.
for (RegIter Reg = Mips::AFGR64RegClass.begin(),
EReg = Mips::AFGR64RegClass.end(); Reg != EReg; ++Reg)
Reserved.set(*Reg);
} else {
// Reserve all registers in FGR64.
for (RegIter Reg = Mips::FGR64RegClass.begin(),
EReg = Mips::FGR64RegClass.end(); Reg != EReg; ++Reg)
Reserved.set(*Reg);
}
// Reserve FP if this function should have a dedicated frame pointer register.
if (Subtarget.getFrameLowering()->hasFP(MF)) {
if (Subtarget.inMips16Mode())
Reserved.set(Mips::S0);
else {
Reserved.set(Mips::FP);
Reserved.set(Mips::FP_64);
// Reserve the base register if we need to both realign the stack and
// allocate variable-sized objects at runtime. This should test the
// same conditions as MipsFrameLowering::hasBP().
if (needsStackRealignment(MF) &&
MF.getFrameInfo()->hasVarSizedObjects()) {
Reserved.set(Mips::S7);
Reserved.set(Mips::S7_64);
}
}
}
// Reserve hardware registers.
Reserved.set(Mips::HWR29);
// Reserve the DSP control registers.
Reserved.set(Mips::DSPPos);
Reserved.set(Mips::DSPSCount);
Reserved.set(Mips::DSPCarry);
Reserved.set(Mips::DSPEFI);
Reserved.set(Mips::DSPOutFlag);
// Reserve MSA control registers.
Reserved.set(Mips::MSAIR);
Reserved.set(Mips::MSACSR);
Reserved.set(Mips::MSAAccess);
Reserved.set(Mips::MSASave);
Reserved.set(Mips::MSAModify);
Reserved.set(Mips::MSARequest);
Reserved.set(Mips::MSAMap);
Reserved.set(Mips::MSAUnmap);
// Reserve RA if in mips16 mode.
if (Subtarget.inMips16Mode()) {
const MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
Reserved.set(Mips::RA);
Reserved.set(Mips::RA_64);
Reserved.set(Mips::T0);
Reserved.set(Mips::T1);
if (MF.getFunction()->hasFnAttribute("saveS2") || MipsFI->hasSaveS2())
Reserved.set(Mips::S2);
}
// Reserve GP if the small data section is in use.
if (Subtarget.useSmallSection()) {
Reserved.set(Mips::GP);
Reserved.set(Mips::GP_64);
}
if (Subtarget.isABI_O32() && !Subtarget.useOddSPReg()) {
for (const auto &Reg : Mips::OddSPRegClass)
Reserved.set(Reg);
}
return Reserved;
}
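// Frame-index elimination may need a spare register to materialize large
// stack offsets, so always make the register scavenger available.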
bool
MipsRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
return true;
}
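// Liveness is kept accurate after register allocation so that late passes
// such as branch folding and post-RA scheduling can trust block live-ins.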
bool
MipsRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
return true;
}
// FrameIndex operands represent objects inside an abstract stack.
// We must replace each FrameIndex with a direct stack- or frame-pointer
// reference.
void MipsRegisterInfo::
eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
unsigned FIOperandNum, RegScavenger *RS) const {
MachineInstr &MI = *II;
MachineFunction &MF = *MI.getParent()->getParent();
DEBUG(errs() << "\nFunction : " << MF.getName() << "\n";
errs() << "<--------->\n" << MI);
int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
uint64_t stackSize = MF.getFrameInfo()->getStackSize();
int64_t spOffset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n"
<< "spOffset : " << spOffset << "\n"
<< "stackSize : " << stackSize << "\n");
eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset);
}
unsigned MipsRegisterInfo::
getFrameRegister(const MachineFunction &MF) const {
const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
bool IsN64 =
static_cast<const MipsTargetMachine &>(MF.getTarget()).getABI().IsN64();
if (Subtarget.inMips16Mode())
return TFI->hasFP(MF) ? Mips::S0 : Mips::SP;
else
return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) :
(IsN64 ? Mips::SP_64 : Mips::SP);
}
bool MipsRegisterInfo::canRealignStack(const MachineFunction &MF) const {
// Avoid realigning functions that explicitly do not want to be realigned.
// Normally, we would report an error when a function should be dynamically
// realigned but also has the attribute no-realign-stack. Unfortunately,
// with this attribute, MachineFrameInfo clamps each new object's alignment
// to the stack alignment specified by the ABI. As a result, whether any
// object has an alignment requirement larger than the stack alignment is
// already lost at this point.
if (!TargetRegisterInfo::canRealignStack(MF))
return false;
const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
unsigned FP = Subtarget.isGP32bit() ? Mips::FP : Mips::FP_64;
unsigned BP = Subtarget.isGP32bit() ? Mips::S7 : Mips::S7_64;
// Support dynamic stack realignment only for targets with standard encoding.
if (!Subtarget.hasStandardEncoding())
return false;
// We can't perform dynamic stack realignment if we can't reserve the
// frame pointer register.
if (!MF.getRegInfo().canReserveReg(FP))
return false;
// We can realign the stack if we know the maximum call frame size and we
// don't have variable sized objects.
if (Subtarget.getFrameLowering()->hasReservedCallFrame(MF))
return true;
// We have to reserve the base pointer register in the presence of variable
// sized objects.
return MF.getRegInfo().canReserveReg(BP);
}