
Checkpoint Thumb2 Instr info work. Generalized base code so that it can be shared between ARM and Thumb2. Not yet activated because register information must be generalized first.

llvm-svn: 75010
David Goodwin 2009-07-08 16:09:28 +00:00
parent f3d675f3dd
commit 5bdef4b3f7
12 changed files with 1417 additions and 1118 deletions
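The core of the change is the getOpcode(ARMII::Op) indirection added below: ARMBaseInstrInfo never names a concrete ARM opcode directly, it asks its subclass for one, which is what lets the same branch-analysis, spill and fold code eventually serve both ARM and Thumb2. As a rough illustration of how the not-yet-written Thumb2 side could plug into this hook, here is a minimal sketch; the class name Thumb2InstrInfo and its coverage are assumptions, and only t2B, t2Bcc and t2BR_JTr are opcodes that actually appear in this diff:

// Hypothetical sketch only -- this commit adds just the ARM mapping
// (ARMInstrInfo::getOpcode, further down). A Thumb2 subclass would
// translate the abstract ARMII ops into t2* opcodes the same way.
unsigned Thumb2InstrInfo::getOpcode(ARMII::Op Op) const {
  switch (Op) {
  case ARMII::B:      return ARM::t2B;      // unconditional branch
  case ARMII::Bcc:    return ARM::t2Bcc;    // conditional branch
  case ARMII::BR_JTr: return ARM::t2BR_JTr; // jumptable branch
  // ... remaining ARMII::Op values would map to their t2 counterparts ...
  default:            return 0;             // no mapping known
  }
}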


@@ -0,0 +1,816 @@
//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
cl::desc("Enable ARM 2-addr to 3-addr conv"));
static inline
const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
}
static inline
const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
return MIB.addReg(0);
}
ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI)
: TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)) {
}
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const {
if (!EnableARM3Addr)
return NULL;
MachineInstr *MI = MBBI;
MachineFunction &MF = *MI->getParent()->getParent();
unsigned TSFlags = MI->getDesc().TSFlags;
bool isPre = false;
switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
default: return NULL;
case ARMII::IndexModePre:
isPre = true;
break;
case ARMII::IndexModePost:
break;
}
// Try splitting an indexed load/store to an un-indexed one plus an add/sub
// operation.
unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
if (MemOpc == 0)
return NULL;
MachineInstr *UpdateMI = NULL;
MachineInstr *MemMI = NULL;
unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
const TargetInstrDesc &TID = MI->getDesc();
unsigned NumOps = TID.getNumOperands();
bool isLoad = !TID.mayStore();
const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
const MachineOperand &Base = MI->getOperand(2);
const MachineOperand &Offset = MI->getOperand(NumOps-3);
unsigned WBReg = WB.getReg();
unsigned BaseReg = Base.getReg();
unsigned OffReg = Offset.getReg();
unsigned OffImm = MI->getOperand(NumOps-2).getImm();
ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
switch (AddrMode) {
default:
assert(false && "Unknown indexed op!");
return NULL;
case ARMII::AddrMode2: {
bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
unsigned Amt = ARM_AM::getAM2Offset(OffImm);
if (OffReg == 0) {
int SOImmVal = ARM_AM::getSOImmVal(Amt);
if (SOImmVal == -1)
// Can't encode it in a so_imm operand. This transformation will
// add more than 1 instruction. Abandon!
return NULL;
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? getOpcode(ARMII::SUBri) :
getOpcode(ARMII::ADDri)), WBReg)
.addReg(BaseReg).addImm(SOImmVal)
.addImm(Pred).addReg(0).addReg(0);
} else if (Amt != 0) {
ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? getOpcode(ARMII::SUBrs) :
getOpcode(ARMII::ADDrs)), WBReg)
.addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
.addImm(Pred).addReg(0).addReg(0);
} else
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? getOpcode(ARMII::SUBrr) :
getOpcode(ARMII::ADDrr)), WBReg)
.addReg(BaseReg).addReg(OffReg)
.addImm(Pred).addReg(0).addReg(0);
break;
}
case ARMII::AddrMode3 : {
bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
unsigned Amt = ARM_AM::getAM3Offset(OffImm);
if (OffReg == 0)
// Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? getOpcode(ARMII::SUBri) :
getOpcode(ARMII::ADDri)), WBReg)
.addReg(BaseReg).addImm(Amt)
.addImm(Pred).addReg(0).addReg(0);
else
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? getOpcode(ARMII::SUBrr) :
getOpcode(ARMII::ADDrr)), WBReg)
.addReg(BaseReg).addReg(OffReg)
.addImm(Pred).addReg(0).addReg(0);
break;
}
}
std::vector<MachineInstr*> NewMIs;
if (isPre) {
if (isLoad)
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc), MI->getOperand(0).getReg())
.addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
else
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc)).addReg(MI->getOperand(1).getReg())
.addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
NewMIs.push_back(MemMI);
NewMIs.push_back(UpdateMI);
} else {
if (isLoad)
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc), MI->getOperand(0).getReg())
.addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
else
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc)).addReg(MI->getOperand(1).getReg())
.addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
if (WB.isDead())
UpdateMI->getOperand(0).setIsDead();
NewMIs.push_back(UpdateMI);
NewMIs.push_back(MemMI);
}
// Transfer LiveVariables states, kill / dead info.
if (LV) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.getReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
unsigned Reg = MO.getReg();
LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
if (MO.isDef()) {
MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
if (MO.isDead())
LV->addVirtualRegisterDead(Reg, NewMI);
}
if (MO.isUse() && MO.isKill()) {
for (unsigned j = 0; j < 2; ++j) {
// Look at the two new MI's in reverse order.
MachineInstr *NewMI = NewMIs[j];
if (!NewMI->readsRegister(Reg))
continue;
LV->addVirtualRegisterKilled(Reg, NewMI);
if (VI.removeKill(MI))
VI.Kills.push_back(NewMI);
break;
}
}
}
}
}
MFI->insert(MBBI, NewMIs[1]);
MFI->insert(MBBI, NewMIs[0]);
return NewMIs[0];
}
// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
return false;
// Get the last instruction in the block.
MachineInstr *LastInst = I;
// If there is only one terminator instruction, process it.
unsigned LastOpc = LastInst->getOpcode();
if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
if (LastOpc == getOpcode(ARMII::B)) {
TBB = LastInst->getOperand(0).getMBB();
return false;
}
if (LastOpc == getOpcode(ARMII::Bcc)) {
// Block ends with fall-through condbranch.
TBB = LastInst->getOperand(0).getMBB();
Cond.push_back(LastInst->getOperand(1));
Cond.push_back(LastInst->getOperand(2));
return false;
}
return true; // Can't handle indirect branch.
}
// Get the instruction before it if it is a terminator.
MachineInstr *SecondLastInst = I;
// If there are three terminators, we don't know what sort of block this is.
if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
return true;
// If the block ends with an ARMII::Bcc followed by an ARMII::B, handle it.
unsigned SecondLastOpc = SecondLastInst->getOpcode();
if ((SecondLastOpc == getOpcode(ARMII::Bcc)) &&
(LastOpc == getOpcode(ARMII::B))) {
TBB = SecondLastInst->getOperand(0).getMBB();
Cond.push_back(SecondLastInst->getOperand(1));
Cond.push_back(SecondLastInst->getOperand(2));
FBB = LastInst->getOperand(0).getMBB();
return false;
}
// If the block ends with two unconditional branches, handle it. The second
// one is not executed, so remove it.
if ((SecondLastOpc == getOpcode(ARMII::B)) &&
(LastOpc == getOpcode(ARMII::B))) {
TBB = SecondLastInst->getOperand(0).getMBB();
I = LastInst;
if (AllowModify)
I->eraseFromParent();
return false;
}
// ...likewise if it ends with a branch table followed by an unconditional
// branch. The branch folder can create these, and we must get rid of them for
// correctness of Thumb constant islands.
if (((SecondLastOpc == getOpcode(ARMII::BR_JTr)) ||
(SecondLastOpc == getOpcode(ARMII::BR_JTm)) ||
(SecondLastOpc == getOpcode(ARMII::BR_JTadd))) &&
(LastOpc == getOpcode(ARMII::B))) {
I = LastInst;
if (AllowModify)
I->eraseFromParent();
return true;
}
// Otherwise, can't handle this.
return true;
}
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
int BOpc = getOpcode(ARMII::B);
int BccOpc = getOpcode(ARMII::Bcc);
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin()) return 0;
--I;
if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
return 0;
// Remove the branch.
I->eraseFromParent();
I = MBB.end();
if (I == MBB.begin()) return 1;
--I;
if (I->getOpcode() != BccOpc)
return 1;
// Remove the branch.
I->eraseFromParent();
return 2;
}
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond) const {
// FIXME this should probably have a DebugLoc argument
DebugLoc dl = DebugLoc::getUnknownLoc();
int BOpc = getOpcode(ARMII::B);
int BccOpc = getOpcode(ARMII::Bcc);
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
"ARM branch conditions have two components!");
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch?
BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
else
BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
return 1;
}
// Two-way conditional branch.
BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
return 2;
}
bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
Cond[0].setImm(ARMCC::getOppositeCondition(CC));
return false;
}
bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
int PIdx = MI->findFirstPredOperandIdx();
return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const {
unsigned Opc = MI->getOpcode();
if (Opc == getOpcode(ARMII::B)) {
MI->setDesc(get(getOpcode(ARMII::Bcc)));
MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
return true;
}
int PIdx = MI->findFirstPredOperandIdx();
if (PIdx != -1) {
MachineOperand &PMO = MI->getOperand(PIdx);
PMO.setImm(Pred[0].getImm());
MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
return true;
}
return false;
}
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
const SmallVectorImpl<MachineOperand> &Pred2) const {
if (Pred1.size() > 2 || Pred2.size() > 2)
return false;
ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
if (CC1 == CC2)
return true;
switch (CC1) {
default:
return false;
case ARMCC::AL:
return true;
case ARMCC::HS:
return CC2 == ARMCC::HI;
case ARMCC::LS:
return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
case ARMCC::GE:
return CC2 == ARMCC::GT;
case ARMCC::LE:
return CC2 == ARMCC::LT;
}
}
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
return false;
bool Found = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.getReg() == ARM::CPSR) {
Pred.push_back(MO);
Found = true;
}
}
return Found;
}
/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
unsigned JTI) {
return JT[JTI].MBBs.size();
}
/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const MachineBasicBlock &MBB = *MI->getParent();
const MachineFunction *MF = MBB.getParent();
const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();
// Basic size info comes from the TSFlags field.
const TargetInstrDesc &TID = MI->getDesc();
unsigned TSFlags = TID.TSFlags;
switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
default: {
// If this machine instr is an inline asm, measure it.
if (MI->getOpcode() == ARM::INLINEASM)
return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
if (MI->isLabel())
return 0;
switch (MI->getOpcode()) {
default:
assert(0 && "Unknown or unset size field for instr!");
break;
case TargetInstrInfo::IMPLICIT_DEF:
case TargetInstrInfo::DECLARE:
case TargetInstrInfo::DBG_LABEL:
case TargetInstrInfo::EH_LABEL:
return 0;
}
break;
}
case ARMII::Size8Bytes: return 8; // Arm instruction x 2.
case ARMII::Size4Bytes: return 4; // Arm instruction.
case ARMII::Size2Bytes: return 2; // Thumb instruction.
case ARMII::SizeSpecial: {
switch (MI->getOpcode()) {
case ARM::CONSTPOOL_ENTRY:
// If this machine instr is a constant pool entry, its size is recorded as
// operand #2.
return MI->getOperand(2).getImm();
case ARM::Int_eh_sjlj_setjmp: return 12;
case ARM::BR_JTr:
case ARM::BR_JTm:
case ARM::BR_JTadd:
case ARM::t2BR_JTr:
case ARM::t2BR_JTm:
case ARM::t2BR_JTadd:
case ARM::tBR_JTr: {
// These are jumptable branches, i.e. a branch followed by an inlined
// jumptable. The size is 4 + 4 * number of entries.
unsigned NumOps = TID.getNumOperands();
MachineOperand JTOP =
MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
unsigned JTI = JTOP.getIndex();
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
assert(JTI < JT.size());
// Thumb instructions are 2 byte aligned, but JT entries are 4 byte
// aligned. The assembler / linker may add 2 byte padding just before
// the JT entries. The size does not include this padding; the
// constant islands pass does separate bookkeeping for it.
// FIXME: If we know the size of the function is less than (1 << 16) *2
// bytes, we can use 16-bit entries instead. Then there won't be an
// alignment issue.
return getNumJTEntries(JT, JTI) * 4 +
((MI->getOpcode()==ARM::tBR_JTr) ? 2 : 4);
}
default:
// Otherwise, pseudo-instruction sizes are zero.
return 0;
}
}
}
return 0; // Not reached
}
/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
SrcSubIdx = DstSubIdx = 0; // No sub-registers.
unsigned oc = MI.getOpcode();
if ((oc == getOpcode(ARMII::FCPYS)) ||
(oc == getOpcode(ARMII::FCPYD)) ||
(oc == getOpcode(ARMII::VMOVD)) ||
(oc == getOpcode(ARMII::VMOVQ))) {
SrcReg = MI.getOperand(1).getReg();
DstReg = MI.getOperand(0).getReg();
return true;
}
else if (oc == getOpcode(ARMII::MOVr)) {
assert(MI.getDesc().getNumOperands() >= 2 &&
MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg() &&
"Invalid ARM MOV instruction");
SrcReg = MI.getOperand(1).getReg();
DstReg = MI.getOperand(0).getReg();
return true;
}
return false;
}
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
unsigned oc = MI->getOpcode();
if (oc == getOpcode(ARMII::LDR)) {
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isReg() &&
MI->getOperand(3).isImm() &&
MI->getOperand(2).getReg() == 0 &&
MI->getOperand(3).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
}
else if ((oc == getOpcode(ARMII::FLDD)) ||
(oc == getOpcode(ARMII::FLDS))) {
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isImm() &&
MI->getOperand(2).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
}
return 0;
}
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
unsigned oc = MI->getOpcode();
if (oc == getOpcode(ARMII::STR)) {
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isReg() &&
MI->getOperand(3).isImm() &&
MI->getOperand(2).getReg() == 0 &&
MI->getOperand(3).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
}
else if ((oc == getOpcode(ARMII::FSTD)) ||
(oc == getOpcode(ARMII::FSTS))) {
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isImm() &&
MI->getOperand(2).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
}
return 0;
}
bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
if (DestRC != SrcRC) {
// Not yet supported!
return false;
}
if (DestRC == ARM::GPRRegisterClass)
AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::MOVr)), DestReg)
.addReg(SrcReg)));
else if (DestRC == ARM::SPRRegisterClass)
AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FCPYS)), DestReg)
.addReg(SrcReg));
else if (DestRC == ARM::DPRRegisterClass)
AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FCPYD)), DestReg)
.addReg(SrcReg));
else if (DestRC == ARM::QPRRegisterClass)
BuildMI(MBB, I, DL, get(getOpcode(ARMII::VMOVQ)), DestReg).addReg(SrcReg);
else
return false;
return true;
}
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
if (RC == ARM::GPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::STR)))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addReg(0).addImm(0));
} else if (RC == ARM::DPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FSTD)))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0));
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FSTS)))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0));
}
}
void
ARMBaseInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const{
DebugLoc DL = DebugLoc::getUnknownLoc();
unsigned Opc = 0;
if (RC == ARM::GPRRegisterClass) {
Opc = getOpcode(ARMII::STR);
} else if (RC == ARM::DPRRegisterClass) {
Opc = getOpcode(ARMII::FSTD);
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
Opc = getOpcode(ARMII::FSTS);
}
MachineInstrBuilder MIB =
BuildMI(MF, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.addOperand(Addr[i]);
AddDefaultPred(MIB);
NewMIs.push_back(MIB);
return;
}
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
const TargetRegisterClass *RC) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
if (RC == ARM::GPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::LDR)), DestReg)
.addFrameIndex(FI).addReg(0).addImm(0));
} else if (RC == ARM::DPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FLDD)), DestReg)
.addFrameIndex(FI).addImm(0));
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::FLDS)), DestReg)
.addFrameIndex(FI).addImm(0));
}
}
void ARMBaseInstrInfo::
loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
unsigned Opc = 0;
if (RC == ARM::GPRRegisterClass) {
Opc = getOpcode(ARMII::LDR);
} else if (RC == ARM::DPRRegisterClass) {
Opc = getOpcode(ARMII::FLDD);
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
Opc = getOpcode(ARMII::FLDS);
}
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.addOperand(Addr[i]);
AddDefaultPred(MIB);
NewMIs.push_back(MIB);
return;
}
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops, int FI) const {
if (Ops.size() != 1) return NULL;
unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = NULL;
if (Opc == getOpcode(ARMII::MOVr)) {
// If it is updating CPSR, then it cannot be folded.
if (MI->getOperand(4).getReg() != ARM::CPSR) {
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
bool isKill = MI->getOperand(1).isKill();
bool isUndef = MI->getOperand(1).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::STR)))
.addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
.addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
bool isDead = MI->getOperand(0).isDead();
bool isUndef = MI->getOperand(0).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::LDR)))
.addReg(DstReg,
RegState::Define |
getDeadRegState(isDead) |
getUndefRegState(isUndef))
.addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
}
}
}
else if (Opc == getOpcode(ARMII::FCPYS)) {
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
bool isKill = MI->getOperand(1).isKill();
bool isUndef = MI->getOperand(1).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FSTS)))
.addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
.addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
bool isDead = MI->getOperand(0).isDead();
bool isUndef = MI->getOperand(0).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FLDS)))
.addReg(DstReg,
RegState::Define |
getDeadRegState(isDead) |
getUndefRegState(isUndef))
.addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
}
}
else if (Opc == getOpcode(ARMII::FCPYD)) {
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
bool isKill = MI->getOperand(1).isKill();
bool isUndef = MI->getOperand(1).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FSTD)))
.addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
.addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
bool isDead = MI->getOperand(0).isDead();
bool isUndef = MI->getOperand(0).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::FLDD)))
.addReg(DstReg,
RegState::Define |
getDeadRegState(isDead) |
getUndefRegState(isUndef))
.addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
}
}
return NewMI;
}
MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
unsigned Opc = MI->getOpcode();
if (Opc == getOpcode(ARMII::MOVr)) {
// If it is updating CPSR, then it cannot be folded.
return MI->getOperand(4).getReg() != ARM::CPSR;
}
else if ((Opc == getOpcode(ARMII::FCPYS)) ||
(Opc == getOpcode(ARMII::FCPYD))) {
return true;
}
else if ((Opc == getOpcode(ARMII::VMOVD)) ||
(Opc == getOpcode(ARMII::VMOVQ))) {
return false; // FIXME
}
return false;
}
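A pattern worth calling out in the file above is the AddDefaultPred / AddDefaultCC pair: they append the implicit AL predicate (condition code plus a null predicate register) and the optional CPSR-def operand that most predicable ARM instructions carry. Below is a minimal usage sketch mirroring what copyRegToReg() does for a GPR copy; the wrapper function is hypothetical, and it assumes the two helpers are visible at the call site (they are file-local in ARMBaseInstrInfo.cpp):

// Sketch: build "MOVr DestReg, SrcReg" predicated with AL and with the
// optional 's' (CPSR-def) operand left empty, as copyRegToReg() does.
static void emitGPRCopy(const ARMBaseInstrInfo &TII, MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator I, DebugLoc DL,
                        unsigned DestReg, unsigned SrcReg) {
  AddDefaultCC(                 // trailing %reg0 for the optional CPSR def
      AddDefaultPred(           // ARMCC::AL + %reg0 predicate register
          BuildMI(MBB, I, DL, TII.get(TII.getOpcode(ARMII::MOVr)), DestReg)
              .addReg(SrcReg)));
}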


@@ -0,0 +1,298 @@
//===- ARMBaseInstrInfo.h - ARM Base Instruction Information -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#ifndef ARMBASEINSTRUCTIONINFO_H
#define ARMBASEINSTRUCTIONINFO_H
#include "llvm/Target/TargetInstrInfo.h"
#include "ARMRegisterInfo.h"
#include "ARM.h"
namespace llvm {
class ARMSubtarget;
/// ARMII - This namespace holds all of the target specific flags that
/// instruction info tracks.
///
namespace ARMII {
enum {
//===------------------------------------------------------------------===//
// Instruction Flags.
//===------------------------------------------------------------------===//
// This four-bit field describes the addressing mode used.
AddrModeMask = 0xf,
AddrModeNone = 0,
AddrMode1 = 1,
AddrMode2 = 2,
AddrMode3 = 3,
AddrMode4 = 4,
AddrMode5 = 5,
AddrMode6 = 6,
AddrModeT1_1 = 7,
AddrModeT1_2 = 8,
AddrModeT1_4 = 9,
AddrModeT1_s = 10, // i8 * 4 for pc and sp relative data
AddrModeT2_i12 = 11,
AddrModeT2_i8 = 12,
AddrModeT2_so = 13,
AddrModeT2_pc = 14, // +/- i12 for pc relative data
AddrModeT2_i8s4 = 15, // i8 * 4
// Size* - Flags to keep track of the size of an instruction.
SizeShift = 4,
SizeMask = 7 << SizeShift,
SizeSpecial = 1, // 0 byte pseudo or special case.
Size8Bytes = 2,
Size4Bytes = 3,
Size2Bytes = 4,
// IndexMode - Unindexed, pre-indexed, or post-indexed. Only valid for load
// and store ops.
IndexModeShift = 7,
IndexModeMask = 3 << IndexModeShift,
IndexModePre = 1,
IndexModePost = 2,
//===------------------------------------------------------------------===//
// Instruction encoding formats.
//
FormShift = 9,
FormMask = 0x3f << FormShift,
// Pseudo instructions
Pseudo = 0 << FormShift,
// Multiply instructions
MulFrm = 1 << FormShift,
// Branch instructions
BrFrm = 2 << FormShift,
BrMiscFrm = 3 << FormShift,
// Data Processing instructions
DPFrm = 4 << FormShift,
DPSoRegFrm = 5 << FormShift,
// Load and Store
LdFrm = 6 << FormShift,
StFrm = 7 << FormShift,
LdMiscFrm = 8 << FormShift,
StMiscFrm = 9 << FormShift,
LdStMulFrm = 10 << FormShift,
// Miscellaneous arithmetic instructions
ArithMiscFrm = 11 << FormShift,
// Extend instructions
ExtFrm = 12 << FormShift,
// VFP formats
VFPUnaryFrm = 13 << FormShift,
VFPBinaryFrm = 14 << FormShift,
VFPConv1Frm = 15 << FormShift,
VFPConv2Frm = 16 << FormShift,
VFPConv3Frm = 17 << FormShift,
VFPConv4Frm = 18 << FormShift,
VFPConv5Frm = 19 << FormShift,
VFPLdStFrm = 20 << FormShift,
VFPLdStMulFrm = 21 << FormShift,
VFPMiscFrm = 22 << FormShift,
// Thumb format
ThumbFrm = 23 << FormShift,
// NEON format
NEONFrm = 24 << FormShift,
NEONGetLnFrm = 25 << FormShift,
NEONSetLnFrm = 26 << FormShift,
NEONDupFrm = 27 << FormShift,
//===------------------------------------------------------------------===//
// Misc flags.
// UnaryDP - Indicates this is a unary data processing instruction, i.e.
// it doesn't have a Rn operand.
UnaryDP = 1 << 15,
// Xform16Bit - Indicates this Thumb2 instruction may be transformed into
// a 16-bit Thumb instruction if certain conditions are met.
Xform16Bit = 1 << 16,
//===------------------------------------------------------------------===//
// Field shifts - these shifts are used to set fields when generating
// machine instructions.
M_BitShift = 5,
ShiftImmShift = 5,
ShiftShift = 7,
N_BitShift = 7,
ImmHiShift = 8,
SoRotImmShift = 8,
RegRsShift = 8,
ExtRotImmShift = 10,
RegRdLoShift = 12,
RegRdShift = 12,
RegRdHiShift = 16,
RegRnShift = 16,
S_BitShift = 20,
W_BitShift = 21,
AM3_I_BitShift = 22,
D_BitShift = 22,
U_BitShift = 23,
P_BitShift = 24,
I_BitShift = 25,
CondShift = 28
};
/// ARMII::Op - Holds all of the instruction types required by the
/// target-specific instruction and register code. ARMBaseInstrInfo
/// subclasses return the concrete opcode that implements each of these
/// instruction types via getOpcode().
///
enum Op {
ADDri,
ADDrs,
ADDrr,
B,
Bcc,
BR_JTr,
BR_JTm,
BR_JTadd,
FCPYS,
FCPYD,
FLDD,
FLDS,
FSTD,
FSTS,
LDR,
MOVr,
STR,
SUBri,
SUBrs,
SUBrr,
VMOVD,
VMOVQ
};
}
class ARMBaseInstrInfo : public TargetInstrInfoImpl {
protected:
// Can only be subclassed.
explicit ARMBaseInstrInfo(const ARMSubtarget &STI);
public:
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is no such opcode.
virtual unsigned getUnindexedOpcode(unsigned Opc) const =0;
// Return the opcode that implements 'Op', or 0 if there is no such opcode.
virtual unsigned getOpcode(ARMII::Op Op) const =0;
// Return true if the block does not fall through.
virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const =0;
virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const;
virtual const ARMBaseRegisterInfo &getRegisterInfo() const =0;
// Branch analysis.
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const;
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond) const;
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
// Predication support.
virtual bool isPredicated(const MachineInstr *MI) const;
ARMCC::CondCodes getPredicate(const MachineInstr *MI) const {
int PIdx = MI->findFirstPredOperandIdx();
return PIdx != -1 ? (ARMCC::CondCodes)MI->getOperand(PIdx).getImm()
: ARMCC::AL;
}
virtual
bool PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const;
virtual
bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
const SmallVectorImpl<MachineOperand> &Pred2) const;
virtual bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const;
/// GetInstSize - Returns the size of the specified MachineInstr.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr* MI) const;
/// Return true if the instruction is a register to register move and return
/// the source and dest operands and their sub-register indices by reference.
virtual bool isMoveInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC) const;
virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC) const;
virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual bool canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const;
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
};
}
#endif
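The bit layout declared above is what ARMBaseInstrInfo.cpp reads back out of TSFlags: the addressing mode occupies the low four bits, the size field sits above it, and the index mode above that. Here is a small sketch of the decoding idiom used by convertToThreeAddress() and GetInstSizeInBytes(); the helper name is made up for illustration:

// Sketch: extract the ARMII fields from an instruction descriptor's
// TSFlags, exactly as the .cpp code above does.
static void decodeARMIIFlags(const TargetInstrDesc &TID) {
  unsigned TSFlags   = TID.TSFlags;
  unsigned AddrMode  = TSFlags & ARMII::AddrModeMask;                    // e.g. ARMII::AddrMode2
  unsigned Size      = (TSFlags & ARMII::SizeMask) >> ARMII::SizeShift;  // ARMII::Size4Bytes, ...
  unsigned IndexMode = (TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
  bool isPreIndexed  = (IndexMode == ARMII::IndexModePre);
  (void)AddrMode; (void)Size; (void)isPreIndexed; // these values drive the decisions above
}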


@@ -25,48 +25,12 @@
#include "llvm/Support/CommandLine.h"
using namespace llvm;
static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
cl::desc("Enable ARM 2-addr to 3-addr conv"));
static inline
const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
}
static inline
const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
return MIB.addReg(0);
}
ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI)
: TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)) {
}
ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
: ARMBaseInstrInfo(STI), RI(*this, STI) {
}
void ARMInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg,
const MachineInstr *Orig) const {
DebugLoc dl = Orig->getDebugLoc();
if (Orig->getOpcode() == ARM::MOVi2pieces) {
RI.emitLoadConstPool(MBB, I, this, dl,
DestReg,
Orig->getOperand(1).getImm(),
(ARMCC::CondCodes)Orig->getOperand(2).getImm(),
Orig->getOperand(3).getReg());
return;
}
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
MI->getOperand(0).setReg(DestReg);
MBB.insert(I, MI);
}
static unsigned getUnindexedOpcode(unsigned Opc) {
unsigned ARMInstrInfo::
getUnindexedOpcode(unsigned Opc) const {
switch (Opc) {
default: break;
case ARM::LDR_PRE:
@@ -94,820 +58,77 @@ static unsigned getUnindexedOpcode(unsigned Opc) {
case ARM::STRB_POST:
return ARM::STRB;
}
return 0;
}
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const {
if (!EnableARM3Addr)
return NULL;
MachineInstr *MI = MBBI;
MachineFunction &MF = *MI->getParent()->getParent();
unsigned TSFlags = MI->getDesc().TSFlags;
bool isPre = false;
switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
default: return NULL;
case ARMII::IndexModePre:
isPre = true;
break;
case ARMII::IndexModePost:
break;
}
// Try splitting an indexed load/store to an un-indexed one plus an add/sub
// operation.
unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
if (MemOpc == 0)
return NULL;
MachineInstr *UpdateMI = NULL;
MachineInstr *MemMI = NULL;
unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
const TargetInstrDesc &TID = MI->getDesc();
unsigned NumOps = TID.getNumOperands();
bool isLoad = !TID.mayStore();
const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
const MachineOperand &Base = MI->getOperand(2);
const MachineOperand &Offset = MI->getOperand(NumOps-3);
unsigned WBReg = WB.getReg();
unsigned BaseReg = Base.getReg();
unsigned OffReg = Offset.getReg();
unsigned OffImm = MI->getOperand(NumOps-2).getImm();
ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
switch (AddrMode) {
unsigned ARMInstrInfo::
getOpcode(ARMII::Op Op) const {
switch (Op) {
case ARMII::ADDri: return ARM::ADDri;
case ARMII::ADDrs: return ARM::ADDrs;
case ARMII::ADDrr: return ARM::ADDrr;
case ARMII::B: return ARM::B;
case ARMII::Bcc: return ARM::Bcc;
case ARMII::BR_JTr: return ARM::BR_JTr;
case ARMII::BR_JTm: return ARM::BR_JTm;
case ARMII::BR_JTadd: return ARM::BR_JTadd;
case ARMII::FCPYS: return ARM::FCPYS;
case ARMII::FCPYD: return ARM::FCPYD;
case ARMII::FLDD: return ARM::FLDD;
case ARMII::FLDS: return ARM::FLDS;
case ARMII::FSTD: return ARM::FSTD;
case ARMII::FSTS: return ARM::FSTS;
case ARMII::LDR: return ARM::LDR;
case ARMII::MOVr: return ARM::MOVr;
case ARMII::STR: return ARM::STR;
case ARMII::SUBri: return ARM::SUBri;
case ARMII::SUBrs: return ARM::SUBrs;
case ARMII::SUBrr: return ARM::SUBrr;
case ARMII::VMOVD: return ARM::VMOVD;
case ARMII::VMOVQ: return ARM::VMOVQ;
default:
assert(false && "Unknown indexed op!");
return NULL;
case ARMII::AddrMode2: {
bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
unsigned Amt = ARM_AM::getAM2Offset(OffImm);
if (OffReg == 0) {
int SOImmVal = ARM_AM::getSOImmVal(Amt);
if (SOImmVal == -1)
// Can't encode it in a so_imm operand. This transformation will
// add more than 1 instruction. Abandon!
return NULL;
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
.addReg(BaseReg).addImm(SOImmVal)
.addImm(Pred).addReg(0).addReg(0);
} else if (Amt != 0) {
ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
.addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
.addImm(Pred).addReg(0).addReg(0);
} else
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
.addReg(BaseReg).addReg(OffReg)
.addImm(Pred).addReg(0).addReg(0);
break;
}
case ARMII::AddrMode3 : {
bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
unsigned Amt = ARM_AM::getAM3Offset(OffImm);
if (OffReg == 0)
// Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
.addReg(BaseReg).addImm(Amt)
.addImm(Pred).addReg(0).addReg(0);
else
UpdateMI = BuildMI(MF, MI->getDebugLoc(),
get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
.addReg(BaseReg).addReg(OffReg)
.addImm(Pred).addReg(0).addReg(0);
break;
}
}
std::vector<MachineInstr*> NewMIs;
if (isPre) {
if (isLoad)
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc), MI->getOperand(0).getReg())
.addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
else
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc)).addReg(MI->getOperand(1).getReg())
.addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
NewMIs.push_back(MemMI);
NewMIs.push_back(UpdateMI);
} else {
if (isLoad)
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc), MI->getOperand(0).getReg())
.addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
else
MemMI = BuildMI(MF, MI->getDebugLoc(),
get(MemOpc)).addReg(MI->getOperand(1).getReg())
.addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
if (WB.isDead())
UpdateMI->getOperand(0).setIsDead();
NewMIs.push_back(UpdateMI);
NewMIs.push_back(MemMI);
}
// Transfer LiveVariables states, kill / dead info.
if (LV) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.getReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
unsigned Reg = MO.getReg();
LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
if (MO.isDef()) {
MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
if (MO.isDead())
LV->addVirtualRegisterDead(Reg, NewMI);
}
if (MO.isUse() && MO.isKill()) {
for (unsigned j = 0; j < 2; ++j) {
// Look at the two new MI's in reverse order.
MachineInstr *NewMI = NewMIs[j];
if (!NewMI->readsRegister(Reg))
continue;
LV->addVirtualRegisterKilled(Reg, NewMI);
if (VI.removeKill(MI))
VI.Kills.push_back(NewMI);
break;
}
}
}
}
}
MFI->insert(MBBI, NewMIs[1]);
MFI->insert(MBBI, NewMIs[0]);
return NewMIs[0];
return 0;
}
// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
return false;
// Get the last instruction in the block.
MachineInstr *LastInst = I;
// If there is only one terminator instruction, process it.
unsigned LastOpc = LastInst->getOpcode();
if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
if (LastOpc == ARM::B || LastOpc == ARM::tB || LastOpc == ARM::t2B) {
TBB = LastInst->getOperand(0).getMBB();
return false;
}
if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc || LastOpc == ARM::t2Bcc) {
// Block ends with fall-through condbranch.
TBB = LastInst->getOperand(0).getMBB();
Cond.push_back(LastInst->getOperand(1));
Cond.push_back(LastInst->getOperand(2));
return false;
}
return true; // Can't handle indirect branch.
}
// Get the instruction before it if it is a terminator.
MachineInstr *SecondLastInst = I;
// If there are three terminators, we don't know what sort of block this is.
if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
return true;
// If the block ends with ARM::B/ARM::tB/ARM::t2B and a
// ARM::Bcc/ARM::tBcc/ARM::t2Bcc, handle it.
unsigned SecondLastOpc = SecondLastInst->getOpcode();
if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) ||
(SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB) ||
(SecondLastOpc == ARM::t2Bcc && LastOpc == ARM::t2B)) {
TBB = SecondLastInst->getOperand(0).getMBB();
Cond.push_back(SecondLastInst->getOperand(1));
Cond.push_back(SecondLastInst->getOperand(2));
FBB = LastInst->getOperand(0).getMBB();
return false;
}
// If the block ends with two unconditional branches, handle it. The second
// one is not executed, so remove it.
if ((SecondLastOpc == ARM::B || SecondLastOpc==ARM::tB ||
SecondLastOpc==ARM::t2B) &&
(LastOpc == ARM::B || LastOpc == ARM::tB || LastOpc == ARM::t2B)) {
TBB = SecondLastInst->getOperand(0).getMBB();
I = LastInst;
if (AllowModify)
I->eraseFromParent();
return false;
}
// ...likewise if it ends with a branch table followed by an unconditional
// branch. The branch folder can create these, and we must get rid of them for
// correctness of Thumb constant islands.
if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc==ARM::BR_JTm ||
SecondLastOpc == ARM::BR_JTadd || SecondLastOpc==ARM::tBR_JTr ||
SecondLastOpc == ARM::t2BR_JTr || SecondLastOpc==ARM::t2BR_JTm ||
SecondLastOpc == ARM::t2BR_JTadd) &&
(LastOpc == ARM::B || LastOpc == ARM::tB || LastOpc == ARM::t2B)) {
I = LastInst;
if (AllowModify)
I->eraseFromParent();
return true;
}
// Otherwise, can't handle this.
return true;
}
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
int BOpc = AFI->isThumbFunction() ?
(AFI->isThumb2Function() ? ARM::t2B : ARM::tB) : ARM::B;
int BccOpc = AFI->isThumbFunction() ?
(AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin()) return 0;
--I;
if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
return 0;
// Remove the branch.
I->eraseFromParent();
I = MBB.end();
if (I == MBB.begin()) return 1;
--I;
if (I->getOpcode() != BccOpc)
return 1;
// Remove the branch.
I->eraseFromParent();
return 2;
}
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond) const {
// FIXME this should probably have a DebugLoc argument
DebugLoc dl = DebugLoc::getUnknownLoc();
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
int BOpc = AFI->isThumbFunction() ?
(AFI->isThumb2Function() ? ARM::t2B : ARM::tB) : ARM::B;
int BccOpc = AFI->isThumbFunction() ?
(AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
"ARM branch conditions have two components!");
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch?
BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
else
BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
return 1;
}
// Two-way conditional branch.
BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
return 2;
}
bool
ARMBaseInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
bool ARMInstrInfo::
BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
switch (MBB.back().getOpcode()) {
case ARM::BX_RET: // Return.
case ARM::LDM_RET:
case ARM::tBX_RET:
case ARM::tBX_RET_vararg:
case ARM::tPOP_RET:
case ARM::B:
case ARM::tB:
case ARM::t2B: // Uncond branch.
case ARM::tBR_JTr:
case ARM::t2BR_JTr:
case ARM::BR_JTr: // Jumptable branch.
case ARM::t2BR_JTm:
case ARM::BR_JTm: // Jumptable branch through mem.
case ARM::t2BR_JTadd:
case ARM::BR_JTadd: // Jumptable branch add to pc.
return true;
default: return false;
}
}
bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
Cond[0].setImm(ARMCC::getOppositeCondition(CC));
return false;
}
bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
int PIdx = MI->findFirstPredOperandIdx();
return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const {
unsigned Opc = MI->getOpcode();
if (Opc == ARM::B || Opc == ARM::tB || Opc == ARM::t2B) {
MI->setDesc(get((Opc == ARM::B) ? ARM::Bcc :
((Opc == ARM::tB) ? ARM::tBcc : ARM::t2Bcc)));
MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
return true;
}
int PIdx = MI->findFirstPredOperandIdx();
if (PIdx != -1) {
MachineOperand &PMO = MI->getOperand(PIdx);
PMO.setImm(Pred[0].getImm());
MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
return true;
}
return false;
}
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
const SmallVectorImpl<MachineOperand> &Pred2) const {
if (Pred1.size() > 2 || Pred2.size() > 2)
return false;
ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
if (CC1 == CC2)
return true;
switch (CC1) {
default:
return false;
case ARMCC::AL:
return true;
case ARMCC::HS:
return CC2 == ARMCC::HI;
case ARMCC::LS:
return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
case ARMCC::GE:
return CC2 == ARMCC::GT;
case ARMCC::LE:
return CC2 == ARMCC::LT;
}
}
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
return false;
bool Found = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.getReg() == ARM::CPSR) {
Pred.push_back(MO);
Found = true;
}
}
return Found;
}
/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
unsigned JTI) {
return JT[JTI].MBBs.size();
}
/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const MachineBasicBlock &MBB = *MI->getParent();
const MachineFunction *MF = MBB.getParent();
const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();
// Basic size info comes from the TSFlags field.
const TargetInstrDesc &TID = MI->getDesc();
unsigned TSFlags = TID.TSFlags;
switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
default: {
// If this machine instr is an inline asm, measure it.
if (MI->getOpcode() == ARM::INLINEASM)
return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
if (MI->isLabel())
return 0;
switch (MI->getOpcode()) {
default:
assert(0 && "Unknown or unset size field for instr!");
break;
case TargetInstrInfo::IMPLICIT_DEF:
case TargetInstrInfo::DECLARE:
case TargetInstrInfo::DBG_LABEL:
case TargetInstrInfo::EH_LABEL:
return 0;
}
break;
}
case ARMII::Size8Bytes: return 8; // Arm instruction x 2.
case ARMII::Size4Bytes: return 4; // Arm instruction.
case ARMII::Size2Bytes: return 2; // Thumb instruction.
case ARMII::SizeSpecial: {
switch (MI->getOpcode()) {
case ARM::CONSTPOOL_ENTRY:
// If this machine instr is a constant pool entry, its size is recorded as
// operand #2.
return MI->getOperand(2).getImm();
case ARM::Int_eh_sjlj_setjmp: return 12;
case ARM::BR_JTr:
case ARM::BR_JTm:
case ARM::BR_JTadd:
case ARM::t2BR_JTr:
case ARM::t2BR_JTm:
case ARM::t2BR_JTadd:
case ARM::tBR_JTr: {
// These are jumptable branches, i.e. a branch followed by an inlined
// jumptable. The size is 4 + 4 * number of entries.
unsigned NumOps = TID.getNumOperands();
MachineOperand JTOP =
MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
unsigned JTI = JTOP.getIndex();
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
assert(JTI < JT.size());
// Thumb instructions are 2 byte aligned, but JT entries are 4 byte
// aligned. The assembler / linker may add 2 byte padding just before
// the JT entries. The size does not include this padding; the
// constant islands pass does separate bookkeeping for it.
// FIXME: If we know the size of the function is less than (1 << 16) *2
// bytes, we can use 16-bit entries instead. Then there won't be an
// alignment issue.
return getNumJTEntries(JT, JTI) * 4 +
((MI->getOpcode()==ARM::tBR_JTr) ? 2 : 4);
}
default:
// Otherwise, pseudo-instruction sizes are zero.
return 0;
}
}
}
return 0; // Not reached
}
/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
SrcSubIdx = DstSubIdx = 0; // No sub-registers.
unsigned oc = MI.getOpcode();
switch (oc) {
default:
return false;
case ARM::FCPYS:
case ARM::FCPYD:
case ARM::VMOVD:
case ARM::VMOVQ:
SrcReg = MI.getOperand(1).getReg();
DstReg = MI.getOperand(0).getReg();
return true;
case ARM::MOVr:
assert(MI.getDesc().getNumOperands() >= 2 &&
MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg() &&
"Invalid ARM MOV instruction");
SrcReg = MI.getOperand(1).getReg();
DstReg = MI.getOperand(0).getReg();
return true;
}
}
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
case ARM::LDR:
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isReg() &&
MI->getOperand(3).isImm() &&
MI->getOperand(2).getReg() == 0 &&
MI->getOperand(3).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
break;
case ARM::FLDD:
case ARM::FLDS:
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isImm() &&
MI->getOperand(2).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
break;
}
return 0;
}
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
case ARM::STR:
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isReg() &&
MI->getOperand(3).isImm() &&
MI->getOperand(2).getReg() == 0 &&
MI->getOperand(3).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
break;
case ARM::FSTD:
case ARM::FSTS:
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isImm() &&
MI->getOperand(2).getImm() == 0) {
FrameIndex = MI->getOperand(1).getIndex();
return MI->getOperand(0).getReg();
}
break;
}
return 0;
}
bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
if (DestRC != SrcRC) {
// Not yet supported!
return false;
}
if (DestRC == ARM::GPRRegisterClass)
AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
.addReg(SrcReg)));
else if (DestRC == ARM::SPRRegisterClass)
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYS), DestReg)
.addReg(SrcReg));
else if (DestRC == ARM::DPRRegisterClass)
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYD), DestReg)
.addReg(SrcReg));
else if (DestRC == ARM::QPRRegisterClass)
BuildMI(MBB, I, DL, get(ARM::VMOVQ), DestReg).addReg(SrcReg);
else
return false;
return true;
}
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
if (RC == ARM::GPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addReg(0).addImm(0));
} else if (RC == ARM::DPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTD))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0));
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTS))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0));
}
}
void
ARMBaseInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const{
DebugLoc DL = DebugLoc::getUnknownLoc();
unsigned Opc = 0;
if (RC == ARM::GPRRegisterClass) {
Opc = ARM::STR;
} else if (RC == ARM::DPRRegisterClass) {
Opc = ARM::FSTD;
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
Opc = ARM::FSTS;
}
MachineInstrBuilder MIB =
BuildMI(MF, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.addOperand(Addr[i]);
AddDefaultPred(MIB);
NewMIs.push_back(MIB);
return;
}
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
const TargetRegisterClass *RC) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
if (RC == ARM::GPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
.addFrameIndex(FI).addReg(0).addImm(0));
} else if (RC == ARM::DPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg)
.addFrameIndex(FI).addImm(0));
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDS), DestReg)
.addFrameIndex(FI).addImm(0));
}
}
void ARMBaseInstrInfo::
loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
unsigned Opc = 0;
if (RC == ARM::GPRRegisterClass) {
Opc = ARM::LDR;
} else if (RC == ARM::DPRRegisterClass) {
Opc = ARM::FLDD;
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
Opc = ARM::FLDS;
}
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.addOperand(Addr[i]);
AddDefaultPred(MIB);
NewMIs.push_back(MIB);
return;
}
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops, int FI) const {
if (Ops.size() != 1) return NULL;
unsigned OpNum = Ops[0];
unsigned Opc = MI->getOpcode();
MachineInstr *NewMI = NULL;
switch (Opc) {
default: break;
case ARM::MOVr: {
if (MI->getOperand(4).getReg() == ARM::CPSR)
// If it is updating CPSR, then it cannot be folded.
break;
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
bool isKill = MI->getOperand(1).isKill();
bool isUndef = MI->getOperand(1).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
.addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
.addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
bool isDead = MI->getOperand(0).isDead();
bool isUndef = MI->getOperand(0).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
.addReg(DstReg,
RegState::Define |
getDeadRegState(isDead) |
getUndefRegState(isUndef))
.addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
case ARM::FCPYS: {
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
bool isKill = MI->getOperand(1).isKill();
bool isUndef = MI->getOperand(1).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTS))
.addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
.addFrameIndex(FI)
.addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
bool isDead = MI->getOperand(0).isDead();
bool isUndef = MI->getOperand(0).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDS))
.addReg(DstReg,
RegState::Define |
getDeadRegState(isDead) |
getUndefRegState(isUndef))
.addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
case ARM::FCPYD: {
unsigned Pred = MI->getOperand(2).getImm();
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
bool isKill = MI->getOperand(1).isKill();
bool isUndef = MI->getOperand(1).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTD))
.addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
.addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
bool isDead = MI->getOperand(0).isDead();
bool isUndef = MI->getOperand(0).isUndef();
NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDD))
.addReg(DstReg,
RegState::Define |
getDeadRegState(isDead) |
getUndefRegState(isUndef))
.addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
}
return NewMI;
}
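// Illustrative sketch only, not part of this patch: a hypothetical driver for
// the folding hooks. It checks canFoldMemoryOperand (defined below) before
// rewriting a register move; operand index 0 folds the def, turning the move
// into a store to frame index FI.
static MachineInstr *tryFoldMoveToStore(MachineFunction &MF, MachineInstr *MI,
                                        int FI, const ARMBaseInstrInfo &TII) {
  SmallVector<unsigned, 1> Ops;
  Ops.push_back(0);                        // fold the destination operand
  if (!TII.canFoldMemoryOperand(MI, Ops))
    return 0;                              // e.g. a MOVr that updates CPSR
  return TII.foldMemoryOperandImpl(MF, MI, Ops, FI);
}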
MachineInstr*
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
unsigned Opc = MI->getOpcode();
switch (Opc) {
default: break;
case ARM::MOVr:
// If it is updating CPSR, then it cannot be folded.
return MI->getOperand(4).getReg() != ARM::CPSR;
case ARM::FCPYS:
case ARM::FCPYD:
return true;
case ARM::VMOVD:
case ARM::VMOVQ:
return false; // FIXME
}
return false;
}
void ARMInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg,
const MachineInstr *Orig) const {
DebugLoc dl = Orig->getDebugLoc();
if (Orig->getOpcode() == ARM::MOVi2pieces) {
RI.emitLoadConstPool(MBB, I, this, dl,
DestReg,
Orig->getOperand(1).getImm(),
(ARMCC::CondCodes)Orig->getOperand(2).getImm(),
Orig->getOperand(3).getReg());
return;
}
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
MI->getOperand(0).setReg(DestReg);
MBB.insert(I, MI);
}

View File

@@ -15,251 +15,29 @@
#define ARMINSTRUCTIONINFO_H
#include "llvm/Target/TargetInstrInfo.h"
#include "ARMBaseInstrInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARM.h"
namespace llvm {
class ARMSubtarget;
/// ARMII - This namespace holds all of the target-specific flags that the
/// instruction info tracks (a short decode sketch follows the namespace).
///
namespace ARMII {
enum {
//===------------------------------------------------------------------===//
// Instruction Flags.
//===------------------------------------------------------------------===//
// This four-bit field describes the addressing mode used.
AddrModeMask = 0xf,
AddrModeNone = 0,
AddrMode1 = 1,
AddrMode2 = 2,
AddrMode3 = 3,
AddrMode4 = 4,
AddrMode5 = 5,
AddrMode6 = 6,
AddrModeT1_1 = 7,
AddrModeT1_2 = 8,
AddrModeT1_4 = 9,
AddrModeT1_s = 10, // i8 * 4 for pc and sp relative data
AddrModeT2_i12 = 11,
AddrModeT2_i8 = 12,
AddrModeT2_so = 13,
AddrModeT2_pc = 14, // +/- i12 for pc relative data
AddrModeT2_i8s4 = 15, // i8 * 4
// Size* - Flags to keep track of the size of an instruction.
SizeShift = 4,
SizeMask = 7 << SizeShift,
SizeSpecial = 1, // 0 byte pseudo or special case.
Size8Bytes = 2,
Size4Bytes = 3,
Size2Bytes = 4,
// IndexMode - Unindexed, pre-indexed, or post-indexed. Only valid for load
// and store ops.
IndexModeShift = 7,
IndexModeMask = 3 << IndexModeShift,
IndexModePre = 1,
IndexModePost = 2,
//===------------------------------------------------------------------===//
// Instruction encoding formats.
//
FormShift = 9,
FormMask = 0x3f << FormShift,
// Pseudo instructions
Pseudo = 0 << FormShift,
// Multiply instructions
MulFrm = 1 << FormShift,
// Branch instructions
BrFrm = 2 << FormShift,
BrMiscFrm = 3 << FormShift,
// Data Processing instructions
DPFrm = 4 << FormShift,
DPSoRegFrm = 5 << FormShift,
// Load and Store
LdFrm = 6 << FormShift,
StFrm = 7 << FormShift,
LdMiscFrm = 8 << FormShift,
StMiscFrm = 9 << FormShift,
LdStMulFrm = 10 << FormShift,
// Miscellaneous arithmetic instructions
ArithMiscFrm = 11 << FormShift,
// Extend instructions
ExtFrm = 12 << FormShift,
// VFP formats
VFPUnaryFrm = 13 << FormShift,
VFPBinaryFrm = 14 << FormShift,
VFPConv1Frm = 15 << FormShift,
VFPConv2Frm = 16 << FormShift,
VFPConv3Frm = 17 << FormShift,
VFPConv4Frm = 18 << FormShift,
VFPConv5Frm = 19 << FormShift,
VFPLdStFrm = 20 << FormShift,
VFPLdStMulFrm = 21 << FormShift,
VFPMiscFrm = 22 << FormShift,
// Thumb format
ThumbFrm = 23 << FormShift,
// NEON format
NEONFrm = 24 << FormShift,
NEONGetLnFrm = 25 << FormShift,
NEONSetLnFrm = 26 << FormShift,
NEONDupFrm = 27 << FormShift,
//===------------------------------------------------------------------===//
// Misc flags.
// UnaryDP - Indicates this is a unary data processing instruction, i.e.
// it doesn't have an Rn operand.
UnaryDP = 1 << 15,
// Xform16Bit - Indicates this Thumb2 instruction may be transformed into
// a 16-bit Thumb instruction if certain conditions are met.
Xform16Bit = 1 << 16,
//===------------------------------------------------------------------===//
// Field shifts - such shifts are used to set fields while generating
// machine instructions.
M_BitShift = 5,
ShiftImmShift = 5,
ShiftShift = 7,
N_BitShift = 7,
ImmHiShift = 8,
SoRotImmShift = 8,
RegRsShift = 8,
ExtRotImmShift = 10,
RegRdLoShift = 12,
RegRdShift = 12,
RegRdHiShift = 16,
RegRnShift = 16,
S_BitShift = 20,
W_BitShift = 21,
AM3_I_BitShift = 22,
D_BitShift = 22,
U_BitShift = 23,
P_BitShift = 24,
I_BitShift = 25,
CondShift = 28
};
}
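// Illustrative sketch only, not part of this patch: decoding the packed TSFlags
// fields defined above. The TargetInstrDesc::TSFlags member is assumed; the
// shift/mask pattern follows the ARMII constants directly.
static bool isPreIndexedLoadStore(const MachineInstr &MI) {
  unsigned TSFlags = MI.getDesc().TSFlags;
  unsigned AddrMode = TSFlags & ARMII::AddrModeMask;
  unsigned IdxMode  = (TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
  return AddrMode != ARMII::AddrModeNone && IdxMode == ARMII::IndexModePre;
}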
class ARMBaseInstrInfo : public TargetInstrInfoImpl {
protected:
// Can be only subclassed.
explicit ARMBaseInstrInfo(const ARMSubtarget &STI);
public:
virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const;
virtual const ARMBaseRegisterInfo &getRegisterInfo() const =0;
// Branch analysis.
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const;
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond) const;
virtual bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
// Predication support (a usage sketch follows this class).
virtual bool isPredicated(const MachineInstr *MI) const;
ARMCC::CondCodes getPredicate(const MachineInstr *MI) const {
int PIdx = MI->findFirstPredOperandIdx();
return PIdx != -1 ? (ARMCC::CondCodes)MI->getOperand(PIdx).getImm()
: ARMCC::AL;
}
virtual
bool PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const;
virtual
bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
const SmallVectorImpl<MachineOperand> &Pred2) const;
virtual bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const;
/// GetInstSize - Returns the size of the specified MachineInstr.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr* MI) const;
/// Return true if the instruction is a register to register move and return
/// the source and dest operands and their sub-register indices by reference.
virtual bool isMoveInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC) const;
virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC) const;
virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual bool canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const;
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
};
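// Illustrative sketch only, not part of this patch: predicating an instruction
// through the hooks declared above. The two-operand predicate layout (condition
// immediate followed by the CPSR flag register) is an assumption about how
// PredicateInstruction consumes Pred; ARMCC::EQ is just an example condition.
static bool guardWithEQ(MachineInstr *MI, const ARMBaseInstrInfo &TII) {
  if (TII.isPredicated(MI))
    return false;                                    // already guarded
  SmallVector<MachineOperand, 2> Pred;
  Pred.push_back(MachineOperand::CreateImm(ARMCC::EQ));
  Pred.push_back(MachineOperand::CreateReg(ARM::CPSR, /*isDef=*/false));
  return TII.PredicateInstruction(MI, Pred);
}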
class ARMInstrInfo : public ARMBaseInstrInfo {
ARMRegisterInfo RI;
public:
explicit ARMInstrInfo(const ARMSubtarget &STI);
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is no such opcode.
unsigned getUnindexedOpcode(unsigned Opc) const;
// Return the opcode that implements 'Op', or 0 if there is no such opcode.
unsigned getOpcode(ARMII::Op Op) const;
// Return true if the block does not fall through.
bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).

View File

@@ -157,29 +157,29 @@ def tADDspi : T1It<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
let isReturn = 1, isTerminator = 1 in {
def tBX_RET : TI<(outs), (ins), "bx lr", [(ARMretflag)]>;
// Alternative return instruction used by vararg functions.
def tBX_RET_vararg : TI<(outs), (ins tGPR:$target), "bx $target", []>;
def tBX_RET_vararg : T1I<(outs), (ins tGPR:$target), "bx $target", []>;
}
// FIXME: remove when we have a way of marking an MI with these properties.
let isReturn = 1, isTerminator = 1 in
def tPOP_RET : TI<(outs reglist:$dst1, variable_ops), (ins),
def tPOP_RET : T1I<(outs reglist:$dst1, variable_ops), (ins),
"pop $dst1", []>;
let isCall = 1,
Defs = [R0, R1, R2, R3, LR,
D0, D1, D2, D3, D4, D5, D6, D7] in {
def tBL : TIx2<(outs), (ins i32imm:$func, variable_ops),
def tBL : T1Ix2<(outs), (ins i32imm:$func, variable_ops),
"bl ${func:call}",
[(ARMtcall tglobaladdr:$func)]>;
// ARMv5T and above
def tBLXi : TIx2<(outs), (ins i32imm:$func, variable_ops),
def tBLXi : T1Ix2<(outs), (ins i32imm:$func, variable_ops),
"blx ${func:call}",
[(ARMcall tglobaladdr:$func)]>, Requires<[HasV5T]>;
def tBLXr : TI<(outs), (ins tGPR:$func, variable_ops),
def tBLXr : T1I<(outs), (ins tGPR:$func, variable_ops),
"blx $func",
[(ARMtcall tGPR:$func)]>, Requires<[HasV5T]>;
// ARMv4T
def tBX : TIx2<(outs), (ins tGPR:$func, variable_ops),
def tBX : T1Ix2<(outs), (ins tGPR:$func, variable_ops),
"cpy lr, pc\n\tbx $func",
[(ARMcall_nolink tGPR:$func)]>;
}
@@ -284,11 +284,11 @@ def tSpill : T1Is<(outs), (ins tGPR:$src, t_addrmode_sp:$addr),
// TODO: A7-44: LDMIA - load multiple
let mayLoad = 1 in
def tPOP : TI<(outs reglist:$dst1, variable_ops), (ins),
def tPOP : T1I<(outs reglist:$dst1, variable_ops), (ins),
"pop $dst1", []>;
let mayStore = 1 in
def tPUSH : TI<(outs), (ins reglist:$src1, variable_ops),
def tPUSH : T1I<(outs), (ins reglist:$src1, variable_ops),
"push $src1", []>;
//===----------------------------------------------------------------------===//
@@ -577,14 +577,14 @@ let usesCustomDAGSchedInserter = 1 in // Expanded by the scheduler.
// tLEApcrel - Load a pc-relative address into a register without offending the
// assembler.
def tLEApcrel : TIx2<(outs tGPR:$dst), (ins i32imm:$label),
def tLEApcrel : T1Ix2<(outs tGPR:$dst), (ins i32imm:$label),
!strconcat(!strconcat(".set PCRELV${:uid}, ($label-(",
"${:private}PCRELL${:uid}+4))\n"),
!strconcat("\tmov $dst, #PCRELV${:uid}\n",
"${:private}PCRELL${:uid}:\n\tadd $dst, pc")),
[]>;
def tLEApcrelJT : TIx2<(outs tGPR:$dst), (ins i32imm:$label, i32imm:$id),
def tLEApcrelJT : T1Ix2<(outs tGPR:$dst), (ins i32imm:$label, i32imm:$id),
!strconcat(!strconcat(".set PCRELV${:uid}, (${label}_${id:no_hash}-(",
"${:private}PCRELL${:uid}+4))\n"),
!strconcat("\tmov $dst, #PCRELV${:uid}\n",
@@ -598,7 +598,7 @@ def tLEApcrelJT : TIx2<(outs tGPR:$dst), (ins i32imm:$label, i32imm:$id),
// __aeabi_read_tp preserves the registers r1-r3.
let isCall = 1,
Defs = [R0, LR] in {
def tTPsoft : TIx2<(outs), (ins),
def tTPsoft : T1Ix2<(outs), (ins),
"bl __aeabi_read_tp",
[(set R0, ARMthread_pointer)]>;
}

View File

@@ -1047,10 +1047,51 @@ def t2MOVCCi : T2I<(outs GPR:$dst), (ins GPR:$false, t2_so_imm:$true),
[/*(set GPR:$dst, (ARMcmov GPR:$false, t2_so_imm:$true, imm:$cc, CCR:$ccr))*/]>,
RegConstraint<"$false = $dst">;
//===----------------------------------------------------------------------===//
// TLS Instructions
//
// __aeabi_read_tp preserves the registers r1-r3.
let isCall = 1,
Defs = [R0, R12, LR, CPSR] in {
def t2TPsoft : T2XI<(outs), (ins),
"bl __aeabi_read_tp",
[(set R0, ARMthread_pointer)]>;
}
//===----------------------------------------------------------------------===//
// Control-Flow Instructions
//
//let isReturn = 1, isTerminator = 1 in
// def t2BX_RET : T2XI<(outs), (ins), "bx lr", [(ARMretflag)]>;
//
// On non-Darwin platforms R9 is callee-saved.
//let isCall = 1,
// Defs = [R0, R1, R2, R3, R12, LR,
// D0, D1, D2, D3, D4, D5, D6, D7, CPSR] in {
//def t2BL : T2XI<(outs), (ins i32imm:$func, variable_ops),
// "bl ${func:call}",
// [(ARMcall tglobaladdr:$func)]>, Requires<[IsNotDarwin]>;
//
//def t2BLX : T2XI<(outs), (ins GPR:$func, variable_ops),
// "blx $func",
// [(ARMcall GPR:$func)]>, Requires<[IsNotDarwin]>;
//}
// On Darwin R9 is call-clobbered.
//let isCall = 1,
// Defs = [R0, R1, R2, R3, R9, R12, LR,
// D0, D1, D2, D3, D4, D5, D6, D7, CPSR] in {
//def t2BLr9 : T2XI<(outs), (ins i32imm:$func, variable_ops),
// "bl ${func:call}",
// [(ARMcall tglobaladdr:$func)]>, Requires<[IsDarwin]>;
//
//def t2BLXr9 : T2XI<(outs), (ins GPR:$func, variable_ops),
// "blx $func",
// [(ARMcall GPR:$func)]>, Requires<[IsDarwin]>;
//}
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
let isPredicable = 1 in
def t2B : T2XI<(outs), (ins brtarget:$target),

View File

@@ -12,6 +12,7 @@ tablegen(ARMGenCallingConv.inc -gen-callingconv)
tablegen(ARMGenSubtarget.inc -gen-subtarget)
add_llvm_target(ARMCodeGen
ARMBaseInstrInfo.cpp
ARMCodeEmitter.cpp
ARMConstantIslandPass.cpp
ARMConstantPoolValue.cpp

View File

@@ -26,6 +26,61 @@ Thumb1InstrInfo::Thumb1InstrInfo(const ARMSubtarget &STI)
: ARMBaseInstrInfo(STI), RI(*this, STI) {
}
unsigned Thumb1InstrInfo::
getUnindexedOpcode(unsigned Opc) const {
return 0;
}
unsigned Thumb1InstrInfo::
getOpcode(ARMII::Op Op) const {
switch (Op) {
case ARMII::ADDri: return ARM::tADDi8;
case ARMII::ADDrs: return 0;
case ARMII::ADDrr: return ARM::tADDrr;
case ARMII::B: return ARM::tB;
case ARMII::Bcc: return ARM::tBcc;
case ARMII::BR_JTr: return ARM::tBR_JTr;
case ARMII::BR_JTm: return 0;
case ARMII::BR_JTadd: return 0;
case ARMII::FCPYS: return 0;
case ARMII::FCPYD: return 0;
case ARMII::FLDD: return 0;
case ARMII::FLDS: return 0;
case ARMII::FSTD: return 0;
case ARMII::FSTS: return 0;
case ARMII::LDR: return ARM::tLDR;
case ARMII::MOVr: return ARM::tMOVr;
case ARMII::STR: return ARM::tSTR;
case ARMII::SUBri: return ARM::tSUBi8;
case ARMII::SUBrs: return 0;
case ARMII::SUBrr: return ARM::tSUBrr;
case ARMII::VMOVD: return 0;
case ARMII::VMOVQ: return 0;
default:
break;
}
return 0;
}
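// Illustrative sketch only, not part of this patch: shared ARM/Thumb code is
// expected to query getOpcode() and cope with the 0 ("not available") answers
// above, e.g. Thumb1 has no VFP copy instructions. The helper is hypothetical.
static bool subtargetHasFPCopy(const Thumb1InstrInfo &TII) {
  return TII.getOpcode(ARMII::FCPYS) != 0 && TII.getOpcode(ARMII::FCPYD) != 0;
}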
bool
Thumb1InstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
switch (MBB.back().getOpcode()) {
case ARM::tBX_RET:
case ARM::tBX_RET_vararg:
case ARM::tPOP_RET:
case ARM::tB:
case ARM::tBR_JTr:
return true;
default:
break;
}
return false;
}
bool Thumb1InstrInfo::isMoveInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned& SrcSubIdx, unsigned& DstSubIdx) const {

View File

@@ -27,6 +27,16 @@ class Thumb1InstrInfo : public ARMBaseInstrInfo {
public:
explicit Thumb1InstrInfo(const ARMSubtarget &STI);
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is no such opcode.
unsigned getUnindexedOpcode(unsigned Opc) const;
// Return the opcode that implements 'Op', or 0 if there is no such opcode.
unsigned getOpcode(ARMII::Op Op) const;
// Return true if the block does not fall through.
bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).

View File

@@ -26,6 +26,104 @@ Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
: ARMBaseInstrInfo(STI), RI(*this, STI) {
}
unsigned Thumb2InstrInfo::
getUnindexedOpcode(unsigned Opc) const {
// FIXME
return 0;
}
unsigned Thumb2InstrInfo::
getOpcode(ARMII::Op Op) const {
switch (Op) {
case ARMII::ADDri: return ARM::t2ADDri;
case ARMII::ADDrs: return ARM::t2ADDrs;
case ARMII::ADDrr: return ARM::t2ADDrr;
case ARMII::B: return ARM::t2B;
case ARMII::Bcc: return ARM::t2Bcc;
case ARMII::BR_JTr: return ARM::t2BR_JTr;
case ARMII::BR_JTm: return ARM::t2BR_JTm;
case ARMII::BR_JTadd: return ARM::t2BR_JTadd;
case ARMII::FCPYS: return ARM::FCPYS;
case ARMII::FCPYD: return ARM::FCPYD;
case ARMII::FLDD: return ARM::FLDD;
case ARMII::FLDS: return ARM::FLDS;
case ARMII::FSTD: return ARM::FSTD;
case ARMII::FSTS: return ARM::FSTS;
case ARMII::LDR: return ARM::LDR; // FIXME
case ARMII::MOVr: return ARM::t2MOVr;
case ARMII::STR: return ARM::STR; // FIXME
case ARMII::SUBri: return ARM::t2SUBri;
case ARMII::SUBrs: return ARM::t2SUBrs;
case ARMII::SUBrr: return ARM::t2SUBrr;
case ARMII::VMOVD: return ARM::VMOVD;
case ARMII::VMOVQ: return ARM::VMOVQ;
default:
break;
}
return 0;
}
bool
Thumb2InstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
// FIXME
switch (MBB.back().getOpcode()) {
//case ARM::t2BX_RET:
// case ARM::LDM_RET:
case ARM::t2B: // Uncond branch.
case ARM::t2BR_JTr: // Jumptable branch.
case ARM::t2BR_JTm: // Jumptable branch through mem.
case ARM::t2BR_JTadd: // Jumptable branch add to pc.
return true;
case ARM::tBX_RET:
case ARM::tBX_RET_vararg:
case ARM::tPOP_RET:
case ARM::tB:
case ARM::tBR_JTr:
return true;
default:
break;
}
return false;
}
bool Thumb2InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
if (DestRC == ARM::GPRRegisterClass) {
if (SrcRC == ARM::GPRRegisterClass) {
return ARMBaseInstrInfo::copyRegToReg(MBB, I, DestReg, SrcReg, DestRC, SrcRC);
} else if (SrcRC == ARM::tGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVlor2hir), DestReg).addReg(SrcReg);
return true;
}
} else if (DestRC == ARM::tGPRRegisterClass) {
if (SrcRC == ARM::GPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVhir2lor), DestReg).addReg(SrcReg);
return true;
} else if (SrcRC == ARM::tGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
return true;
}
}
return false;
}
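// Illustrative sketch only, not part of this patch: copies that cross the
// GPR / tGPR (low-register) boundary select the hi/lo move opcodes above. The
// concrete registers are hypothetical (R8 is a high register, R0 a low one).
static bool copyHighToLow(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                          const Thumb2InstrInfo &TII) {
  return TII.copyRegToReg(MBB, I, /*DestReg=*/ARM::R0, /*SrcReg=*/ARM::R8,
                          ARM::tGPRRegisterClass, ARM::GPRRegisterClass);
}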
bool Thumb2InstrInfo::isMoveInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
@@ -35,7 +133,6 @@ bool Thumb2InstrInfo::isMoveInstr(const MachineInstr &MI,
switch (oc) {
default:
return false;
// FIXME: Thumb2
case ARM::tMOVr:
case ARM::tMOVhir2lor:
case ARM::tMOVlor2hir:
@@ -54,7 +151,6 @@ unsigned Thumb2InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
// FIXME: Thumb2
case ARM::tRestore:
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isImm() &&
@@ -71,7 +167,6 @@ unsigned Thumb2InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
// FIXME: Thumb2
case ARM::tSpill:
if (MI->getOperand(1).isFI() &&
MI->getOperand(2).isImm() &&
@@ -84,36 +179,6 @@ unsigned Thumb2InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
return 0;
}
bool Thumb2InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
// FIXME: Thumb2
if (DestRC == ARM::GPRRegisterClass) {
if (SrcRC == ARM::GPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVhir2hir), DestReg).addReg(SrcReg);
return true;
} else if (SrcRC == ARM::tGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVlor2hir), DestReg).addReg(SrcReg);
return true;
}
} else if (DestRC == ARM::tGPRRegisterClass) {
if (SrcRC == ARM::GPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVhir2lor), DestReg).addReg(SrcReg);
return true;
} else if (SrcRC == ARM::tGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
return true;
}
}
return false;
}
bool Thumb2InstrInfo::
canFoldMemoryOperand(const MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops) const {
@@ -154,7 +219,6 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
assert(RC == ARM::tGPRRegisterClass && "Unknown regclass!");
// FIXME: Thumb2
if (RC == ARM::tGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tSpill))
.addReg(SrcReg, getKillRegState(isKill))
@@ -170,7 +234,6 @@ void Thumb2InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
DebugLoc DL = DebugLoc::getUnknownLoc();
unsigned Opc = 0;
// FIXME: Thumb2. Is GPRRegClass here correct?
assert(RC == ARM::GPRRegisterClass && "Unknown regclass!");
if (RC == ARM::GPRRegisterClass) {
Opc = Addr[0].isFI() ? ARM::tSpill : ARM::tSTR;
@@ -191,7 +254,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
// FIXME: Thumb2
assert(RC == ARM::tGPRRegisterClass && "Unknown regclass!");
if (RC == ARM::tGPRRegisterClass) {
@@ -208,7 +270,6 @@ loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
DebugLoc DL = DebugLoc::getUnknownLoc();
unsigned Opc = 0;
// FIXME: Thumb2. Is GPRRegClass ok here?
if (RC == ARM::GPRRegisterClass) {
Opc = Addr[0].isFI() ? ARM::tRestore : ARM::tLDR;
}

View File

@@ -27,12 +27,31 @@ class Thumb2InstrInfo : public ARMBaseInstrInfo {
public:
explicit Thumb2InstrInfo(const ARMSubtarget &STI);
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is no such opcode.
unsigned getUnindexedOpcode(unsigned Opc) const;
// Return the opcode that implements 'Op', or 0 if there is no such opcode.
unsigned getOpcode(ARMII::Op Op) const;
// Return true if the block does not fall through.
bool BlockHasNoFallThrough(const MachineBasicBlock &MBB) const;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
const Thumb2RegisterInfo &getRegisterInfo() const { return RI; }
bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const;
@@ -48,11 +67,6 @@ public:
unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
@@ -80,13 +94,15 @@ public:
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
};
}

View File

@@ -1,3 +1,5 @@
; XFAIL: *
; fixme
; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep mov | count 3
; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep mvn | count 1