
Move the search for the appropriate AND instruction into
OptimizeCompareInstr. This necessitates passing CmpMask around,
so widen the virtual functions to accommodate.

No functionality changes.

llvm-svn: 114428
Gabor Greif 2010-09-21 12:01:15 +00:00
parent 2294629636
commit 99c07b1d95
4 changed files with 50 additions and 26 deletions
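
For context on the pattern being optimized: on ARM, a TST whose immediate
matches a preceding ANDri of the same register is redundant once the AND is
made flag-setting (ANDS). A standalone sketch, with illustrative registers
and values, of the zero-flag equivalence that justifies the fold:

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Zero flag after "ands r1, r0, #imm" (the AND made flag-setting).
bool zAfterAnds(uint32_t r0, uint32_t imm) { return (r0 & imm) == 0; }

// Zero flag after a separate "tst r0, #imm" (compares r0 & imm with 0).
bool zAfterTst(uint32_t r0, uint32_t imm) { return (r0 & imm) == 0; }

int main() {
  // and r1, r0, #255          ands r1, r0, #255
  // tst r0, #255         =>   (tst deleted)
  for (uint32_t r0 : {0u, 1u, 0x80u, 0x1234u})
    assert(zAfterAnds(r0, 255) == zAfterTst(r0, 255));
  return 0;
}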

include/llvm/Target/TargetInstrInfo.h

@@ -581,7 +581,7 @@ public:
   /// in SrcReg and the value it compares against in CmpValue. Return true if
   /// the comparison instruction can be analyzed.
   virtual bool AnalyzeCompare(const MachineInstr *MI,
-                              unsigned &SrcReg, int &CmpValue) const {
+                              unsigned &SrcReg, int &Mask, int &Value) const {
     return false;
   }
@@ -589,8 +589,8 @@ public:
   /// into something more efficient. E.g., on ARM most instructions can set the
   /// flags register, obviating the need for a separate CMP. Update the iterator
   /// *only* if a transformation took place.
-  virtual bool OptimizeCompareInstr(MachineInstr * /*CmpInstr*/,
-                                    unsigned /*SrcReg*/, int /*CmpValue*/,
+  virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr,
+                                    unsigned SrcReg, int Mask, int Value,
                                     MachineBasicBlock::iterator &) const {
     return false;
   }
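
A minimal stand-in model of the widened contract (MyInstr, MyTII, and the
opcodes are hypothetical names, not LLVM API): an unmasked compare reports
Mask = ~0 as a sentinel, while a TST-style compare reports its immediate as
the mask and zero as the value:

#include <cstdio>

// Hypothetical stand-ins for MachineInstr/TargetInstrInfo; not LLVM classes.
struct MyInstr { int Opcode; unsigned Reg; int Imm; };

enum { CMP_RI, TST_RI };

struct MyTII {
  // Mirrors the widened AnalyzeCompare shape: three outputs instead of two.
  bool analyzeCompare(const MyInstr &MI, unsigned &SrcReg,
                      int &Mask, int &Value) const {
    switch (MI.Opcode) {
    case CMP_RI: // cmp reg, #imm -- unmasked, so Mask is the ~0 sentinel
      SrcReg = MI.Reg; Mask = ~0; Value = MI.Imm;
      return true;
    case TST_RI: // tst reg, #imm -- compares (reg & imm) against 0
      SrcReg = MI.Reg; Mask = MI.Imm; Value = 0;
      return true;
    }
    return false; // unknown compare: not analyzable
  }
};

int main() {
  MyTII TII;
  unsigned Reg; int Mask, Value;
  MyInstr Tst = {TST_RI, 1, 255};
  if (TII.analyzeCompare(Tst, Reg, Mask, Value))
    std::printf("reg=r%u mask=%d value=%d\n", Reg, Mask, Value);
  return 0;
}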

lib/CodeGen/PeepholeOptimizer.cpp

@@ -238,13 +238,13 @@ bool PeepholeOptimizer::OptimizeCmpInstr(MachineInstr *MI,
   // If this instruction is a comparison against zero and isn't comparing a
   // physical register, we can try to optimize it.
   unsigned SrcReg;
-  int CmpValue;
-  if (!TII->AnalyzeCompare(MI, SrcReg, CmpValue) ||
+  int CmpMask, CmpValue;
+  if (!TII->AnalyzeCompare(MI, SrcReg, CmpMask, CmpValue) ||
       TargetRegisterInfo::isPhysicalRegister(SrcReg))
     return false;
 
   // Attempt to optimize the comparison instruction.
-  if (TII->OptimizeCompareInstr(MI, SrcReg, CmpValue, NextIter)) {
+  if (TII->OptimizeCompareInstr(MI, SrcReg, CmpMask, CmpValue, NextIter)) {
     ++NumEliminated;
     return true;
   }
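
The caller's handshake, sketched with a fake target hook (FakeTII is
illustrative, not the real pass): the generic code only threads the mask
through; interpreting it, including the ~0 sentinel, is left to the target:

#include <cstdio>

struct FakeTII {
  bool analyzeCompare(unsigned &SrcReg, int &Mask, int &Value) const {
    SrcReg = 1; Mask = 255; Value = 0; // pretend we analyzed "tst r1, #255"
    return true;
  }
  bool optimizeCompare(unsigned, int, int Value) const {
    return Value == 0; // only compares against zero can be folded away
  }
};

int main() {
  FakeTII TII;
  unsigned SrcReg; int Mask, Value;
  // Analyze first; only on success hand mask and value to the target.
  if (TII.analyzeCompare(SrcReg, Mask, Value) &&
      TII.optimizeCompare(SrcReg, Mask, Value))
    std::puts("compare eliminated");
  return 0;
}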

lib/Target/ARM/ARMBaseInstrInfo.cpp

@@ -1376,7 +1376,7 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
 }
 
 bool ARMBaseInstrInfo::
-AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpValue) const {
+AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpMask, int &CmpValue) const {
   switch (MI->getOpcode()) {
   default: break;
   case ARM::CMPri:
@@ -1384,23 +1384,29 @@ AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpValue) const {
   case ARM::t2CMPri:
   case ARM::t2CMPzri:
     SrcReg = MI->getOperand(0).getReg();
+    CmpMask = ~0;
     CmpValue = MI->getOperand(1).getImm();
     return true;
-  case ARM::TSTri: {
-    MachineBasicBlock::const_iterator MII(MI);
-    if (MI->getParent()->begin() == MII)
-      return false;
-    const MachineInstr *AND = llvm::prior(MII);
-    if (AND->getOpcode() != ARM::ANDri)
-      return false;
-    if (MI->getOperand(0).getReg() == AND->getOperand(1).getReg() &&
-        MI->getOperand(1).getImm() == AND->getOperand(2).getImm()) {
-      SrcReg = AND->getOperand(0).getReg();
-      CmpValue = 0;
-      return true;
-    }
-  }
-  break;
+  case ARM::TSTri:
+  case ARM::t2TSTri:
+    SrcReg = MI->getOperand(0).getReg();
+    CmpMask = MI->getOperand(1).getImm();
+    CmpValue = 0;
+    return true;
   }
 
   return false;
 }
+
+static bool isSuitableForMask(const MachineInstr &MI, unsigned SrcReg,
+                              int CmpMask) {
+  switch (MI.getOpcode()) {
+  case ARM::ANDri:
+  case ARM::t2ANDri:
+    if (SrcReg == MI.getOperand(1).getReg() &&
+        CmpMask == MI.getOperand(2).getImm())
+      return true;
+    break;
+  }
+
+  return false;
+}
@@ -1410,8 +1416,8 @@ AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpValue) const {
 /// comparison into one that sets the zero bit in the flags register. Update the
 /// iterator *only* if a transformation took place.
 bool ARMBaseInstrInfo::
-OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpValue,
-                     MachineBasicBlock::iterator &MII) const {
+OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
+                     int CmpValue, MachineBasicBlock::iterator &MII) const {
   if (CmpValue != 0)
     return false;
@@ -1423,6 +1429,24 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpValue,
   MachineInstr *MI = &*DI;
 
+  // Masked compares sometimes use the same register as the corresponding 'and'.
+  if (CmpMask != ~0) {
+    if (!isSuitableForMask(*MI, SrcReg, CmpMask)) {
+      MI = 0;
+      for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(SrcReg),
+           UE = MRI.use_end(); UI != UE; ++UI) {
+        if (UI->getParent() != CmpInstr->getParent()) continue;
+        MachineInstr &PotentialAND = *UI;
+        if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask))
+          continue;
+        SrcReg = PotentialAND.getOperand(0).getReg();
+        MI = &PotentialAND;
+        break;
+      }
+      if (!MI) return false;
+    }
+  }
+
   // Conservatively refuse to convert an instruction which isn't in the same BB
   // as the comparison.
   if (MI->getParent() != CmpInstr->getParent())
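
The relocated search, reduced to a standalone toy (Inst and findMaskedDef are
illustrative, not LLVM types): when the flag-defining instruction found by the
backward walk is not the matching AND, scan the uses of the compared register
for an AND carrying the same immediate and retarget the optimization at its
result, as isSuitableForMask and the use_iterator loop above do:

#include <cstdio>
#include <vector>

struct Inst { bool IsAndImm; unsigned Def, Src; int Imm; };

// Returns the AND's destination register, or 0 if no suitable AND exists.
unsigned findMaskedDef(const std::vector<Inst> &Block,
                       unsigned SrcReg, int CmpMask) {
  for (const Inst &I : Block)
    if (I.IsAndImm && I.Src == SrcReg && I.Imm == CmpMask)
      return I.Def; // e.g. "and r2, r1, #255" for "tst r1, #255"
  return 0;
}

int main() {
  std::vector<Inst> Block = {{true, 2, 1, 255}}; // and r2, r1, #255
  if (unsigned R = findMaskedDef(Block, 1, 255))
    std::printf("retarget compare at r%u\n", R);
  return 0;
}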

lib/Target/ARM/ARMBaseInstrInfo.h

@@ -326,12 +326,12 @@ public:
   /// in SrcReg and the value it compares against in CmpValue. Return true if
   /// the comparison instruction can be analyzed.
   virtual bool AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
-                              int &CmpValue) const;
+                              int &CmpMask, int &CmpValue) const;
 
   /// OptimizeCompareInstr - Convert the instruction to set the zero flag so
   /// that we can remove a "comparison with zero".
   virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
-                                    int CmpValue,
+                                    int CmpMask, int CmpValue,
                                     MachineBasicBlock::iterator &MII) const;
 
   virtual unsigned getNumMicroOps(const MachineInstr *MI,