//===-- XCoreRegisterInfo.cpp - XCore Register Information ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the XCore implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "XCoreRegisterInfo.h"
#include "XCore.h"
#include "XCoreInstrInfo.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
#define DEBUG_TYPE "xcore-reg-info"
#define GET_REGINFO_TARGET_DESC
#include "XCoreGenRegisterInfo.inc"
XCoreRegisterInfo::XCoreRegisterInfo()
  : XCoreGenRegisterInfo(XCore::LR) {
}
// helper functions
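// The isImm* predicates check whether a (word-scaled) offset fits the
// immediate field of the corresponding XCore encodings: the short "us"
// field holds 0-11, "u6" holds 6 bits, and the long "lru6" forms used
// below accept up to 16 bits.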
static inline bool isImmUs(unsigned val) {
  return val <= 11;
}

static inline bool isImmU6(unsigned val) {
  return val < (1 << 6);
}

static inline bool isImmU16(unsigned val) {
  return val < (1 << 16);
}
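
// Rewrite an LDWFI/STWFI/LDAWFI pseudo into the frame-pointer-relative
// "2rus" form, which is only valid when the word offset fits the short
// unsigned immediate (0-11).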
static void InsertFPImmInst(MachineBasicBlock::iterator II,
                            const XCoreInstrInfo &TII,
                            unsigned Reg, unsigned FrameReg, int Offset) {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc dl = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case XCore::LDWFI:
    BuildMI(MBB, II, dl, TII.get(XCore::LDW_2rus), Reg)
        .addReg(FrameReg)
        .addImm(Offset)
        .addMemOperand(*MI.memoperands_begin());
    break;
  case XCore::STWFI:
    BuildMI(MBB, II, dl, TII.get(XCore::STW_2rus))
        .addReg(Reg, getKillRegState(MI.getOperand(0).isKill()))
        .addReg(FrameReg)
        .addImm(Offset)
        .addMemOperand(*MI.memoperands_begin());
    break;
  case XCore::LDAWFI:
    BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l2rus), Reg)
        .addReg(FrameReg)
        .addImm(Offset);
    break;
  default:
    llvm_unreachable("Unexpected Opcode");
  }
}
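
// Frame-pointer-relative access with an offset that does not fit an
// immediate: scavenge a scratch register, materialize the offset into it
// with loadImmediate, and use the register+register ("3r") forms instead.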
static void InsertFPConstInst(MachineBasicBlock::iterator II,
                              const XCoreInstrInfo &TII,
                              unsigned Reg, unsigned FrameReg,
                              int Offset, RegScavenger *RS) {
  assert(RS && "requiresRegisterScavenging failed");
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc dl = MI.getDebugLoc();
  unsigned ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
  RS->setRegUsed(ScratchOffset);
  TII.loadImmediate(MBB, II, ScratchOffset, Offset);

  switch (MI.getOpcode()) {
  case XCore::LDWFI:
    BuildMI(MBB, II, dl, TII.get(XCore::LDW_3r), Reg)
        .addReg(FrameReg)
        .addReg(ScratchOffset, RegState::Kill)
        .addMemOperand(*MI.memoperands_begin());
    break;
  case XCore::STWFI:
    BuildMI(MBB, II, dl, TII.get(XCore::STW_l3r))
        .addReg(Reg, getKillRegState(MI.getOperand(0).isKill()))
        .addReg(FrameReg)
        .addReg(ScratchOffset, RegState::Kill)
        .addMemOperand(*MI.memoperands_begin());
    break;
  case XCore::LDAWFI:
    BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l3r), Reg)
        .addReg(FrameReg)
        .addReg(ScratchOffset, RegState::Kill);
    break;
  default:
    llvm_unreachable("Unexpected Opcode");
  }
}
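
// Stack-pointer-relative access with an encodable offset: pick the short
// "ru6" encoding when the word offset fits 6 bits, otherwise the long
// "lru6" encoding.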
static void InsertSPImmInst(MachineBasicBlock::iterator II,
                            const XCoreInstrInfo &TII,
                            unsigned Reg, int Offset) {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc dl = MI.getDebugLoc();
  bool isU6 = isImmU6(Offset);

  switch (MI.getOpcode()) {
  int NewOpcode;
  case XCore::LDWFI:
    NewOpcode = (isU6) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
    BuildMI(MBB, II, dl, TII.get(NewOpcode), Reg)
        .addImm(Offset)
        .addMemOperand(*MI.memoperands_begin());
    break;
  case XCore::STWFI:
    NewOpcode = (isU6) ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
    BuildMI(MBB, II, dl, TII.get(NewOpcode))
        .addReg(Reg, getKillRegState(MI.getOperand(0).isKill()))
        .addImm(Offset)
        .addMemOperand(*MI.memoperands_begin());
    break;
  case XCore::LDAWFI:
    NewOpcode = (isU6) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
    BuildMI(MBB, II, dl, TII.get(NewOpcode), Reg)
        .addImm(Offset);
    break;
  default:
    llvm_unreachable("Unexpected Opcode");
  }
}
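
// Stack-pointer-relative access with an offset too large even for the long
// encodings: copy the SP value into a base register with LDAWSP, materialize
// the offset into a second scavenged register, and use the "3r" forms.
// Loads can reuse the destination register as the base; stores need a
// scavenged base as well.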
static void InsertSPConstInst(MachineBasicBlock::iterator II,
                              const XCoreInstrInfo &TII,
                              unsigned Reg, int Offset, RegScavenger *RS) {
  assert(RS && "requiresRegisterScavenging failed");
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc dl = MI.getDebugLoc();
  unsigned OpCode = MI.getOpcode();

  unsigned ScratchBase;
  if (OpCode == XCore::STWFI) {
    ScratchBase = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
    RS->setRegUsed(ScratchBase);
  } else
    ScratchBase = Reg;
  BuildMI(MBB, II, dl, TII.get(XCore::LDAWSP_ru6), ScratchBase).addImm(0);
  unsigned ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
  RS->setRegUsed(ScratchOffset);
  TII.loadImmediate(MBB, II, ScratchOffset, Offset);

  switch (OpCode) {
  case XCore::LDWFI:
    BuildMI(MBB, II, dl, TII.get(XCore::LDW_3r), Reg)
        .addReg(ScratchBase, RegState::Kill)
        .addReg(ScratchOffset, RegState::Kill)
        .addMemOperand(*MI.memoperands_begin());
    break;
  case XCore::STWFI:
    BuildMI(MBB, II, dl, TII.get(XCore::STW_l3r))
        .addReg(Reg, getKillRegState(MI.getOperand(0).isKill()))
        .addReg(ScratchBase, RegState::Kill)
        .addReg(ScratchOffset, RegState::Kill)
        .addMemOperand(*MI.memoperands_begin());
    break;
  case XCore::LDAWFI:
    BuildMI(MBB, II, dl, TII.get(XCore::LDAWF_l3r), Reg)
        .addReg(ScratchBase, RegState::Kill)
        .addReg(ScratchOffset, RegState::Kill);
    break;
  default:
    llvm_unreachable("Unexpected Opcode");
  }
}
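
// Frame moves (CFI) are only needed when debug information is being emitted
// or the function requires an unwind table entry.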
bool XCoreRegisterInfo::needsFrameMoves(const MachineFunction &MF) {
  return MF.getMMI().hasDebugInfo() ||
         MF.getFunction()->needsUnwindTableEntry();
}
const MCPhysReg *
XCoreRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  // The callee saved registers LR & FP are explicitly handled during
  // emitPrologue & emitEpilogue and related functions.
  static const MCPhysReg CalleeSavedRegs[] = {
    XCore::R4, XCore::R5, XCore::R6, XCore::R7,
    XCore::R8, XCore::R9, XCore::R10,
    0
  };
  static const MCPhysReg CalleeSavedRegsFP[] = {
    XCore::R4, XCore::R5, XCore::R6, XCore::R7,
    XCore::R8, XCore::R9,
    0
  };
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  if (TFI->hasFP(*MF))
    return CalleeSavedRegsFP;
  return CalleeSavedRegs;
}
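
// CP (constant pool pointer), DP (data pointer), SP and LR are never
// allocatable; R10 is also reserved when it is used as the frame pointer.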
BitVector XCoreRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  Reserved.set(XCore::CP);
  Reserved.set(XCore::DP);
  Reserved.set(XCore::SP);
  Reserved.set(XCore::LR);
  if (TFI->hasFP(MF)) {
    Reserved.set(XCore::R10);
  }
  return Reserved;
}
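
// Frame-index elimination may need scratch registers (see InsertFPConstInst
// and InsertSPConstInst above), so the register scavenger must be available.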
bool
XCoreRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}
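// Keep live-in lists accurate after register allocation so that post-RA
// passes such as branch folding and the post-RA scheduler can rely on them.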
bool
XCoreRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}
bool
XCoreRegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  return false;
}
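
// Replace an abstract frame index with a concrete FP- or SP-relative access.
// The byte offset is converted to a word offset (XCore load/store immediates
// are scaled by 4); if it fits the relevant immediate field, one of
// InsertFPImmInst/InsertSPImmInst is used, otherwise the offset is
// materialized into a scavenged register by
// InsertFPConstInst/InsertSPConstInst.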
void
XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                       int SPAdj, unsigned FIOperandNum,
                                       RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");
  MachineInstr &MI = *II;
  MachineOperand &FrameOp = MI.getOperand(FIOperandNum);
  int FrameIndex = FrameOp.getIndex();

  MachineFunction &MF = *MI.getParent()->getParent();
  const XCoreInstrInfo &TII =
      *static_cast<const XCoreInstrInfo *>(MF.getSubtarget().getInstrInfo());

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
  int StackSize = MF.getFrameInfo()->getStackSize();

#ifndef NDEBUG
  DEBUG(errs() << "\nFunction : " << MF.getName() << "\n");
  DEBUG(errs() << "<--------->\n");
  DEBUG(MI.print(errs()));
  DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n");
  DEBUG(errs() << "FrameOffset : " << Offset << "\n");
  DEBUG(errs() << "StackSize : " << StackSize << "\n");
#endif

  Offset += StackSize;

  unsigned FrameReg = getFrameRegister(MF);

  // Special handling of DBG_VALUE instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  // fold constant into offset.
  Offset += MI.getOperand(FIOperandNum + 1).getImm();
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);

  assert(Offset % 4 == 0 && "Misaligned stack offset");
  DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");
  Offset /= 4;

  unsigned Reg = MI.getOperand(0).getReg();
  assert(XCore::GRRegsRegClass.contains(Reg) && "Unexpected register operand");

  if (TFI->hasFP(MF)) {
    if (isImmUs(Offset))
      InsertFPImmInst(II, TII, Reg, FrameReg, Offset);
    else
      InsertFPConstInst(II, TII, Reg, FrameReg, Offset, RS);
  } else {
    if (isImmU16(Offset))
      InsertSPImmInst(II, TII, Reg, Offset);
    else
      InsertSPConstInst(II, TII, Reg, Offset, RS);
  }

  // Erase old instruction.
  MachineBasicBlock &MBB = *MI.getParent();
  MBB.erase(II);
}
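
// R10 doubles as the frame pointer when the function needs one; otherwise
// frame objects are addressed relative to SP.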
unsigned XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  return TFI->hasFP(MF) ? XCore::R10 : XCore::SP;
}