llvm-mirror/lib/CodeGen/GlobalISel/CombinerHelper.cpp
Daniel Sanders f380a26222 [globalisel][combiner] Make the CombinerChangeObserver a MachineFunction::Delegate
Summary:
This allows us to register it as the MachineFunction's delegate and be
notified automatically about the erasure and creation of instructions.
However, we still need explicit notification for modifications such as
those caused by setReg() or replaceRegWith().
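
For illustration, the explicit-notification pattern is the
changingInstr()/changedInstr() bracket used by the helpers in this patch
(a minimal sketch taken from CombinerHelper::replaceRegOpWith below):

  // Tell the observer the instruction is about to change, perform the
  // change, then report that it has changed so work can be rescheduled.
  Observer.changingInstr(*FromRegOp.getParent());
  FromRegOp.setReg(ToReg);
  Observer.changedInstr(*FromRegOp.getParent());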

There is a catch with this, though: the notification for creation is
delivered before any operands have been added. While that is appropriate
for scheduling combiner work, it is unfortunate for debug output, since an
opcode by itself doesn't provide sufficient information on what happened.
As a result, the work list remembers the created instructions (when debug
output is requested) and emits a more complete dump later.
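
A rough sketch of that deferral, with hypothetical member and function
names rather than the actual GISelWorkList code:

  // Hypothetical sketch: remember created instructions (debug builds only)
  // and print them once their operands have been filled in.
  SmallVector<const MachineInstr *, 4> CreatedInstrs;

  void noteCreatedInstr(MachineInstr &MI) {
    LLVM_DEBUG(CreatedInstrs.push_back(&MI)); // Only the opcode exists here.
  }

  void dumpCreatedInstrs() {
    LLVM_DEBUG({
      for (const MachineInstr *MI : CreatedInstrs)
        dbgs() << "Created: " << *MI; // Operands are populated by now.
      CreatedInstrs.clear();
    });
  }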

Another nit is that the MachineFunction::Delegate provides const pointers,
which is inconvenient since we want to use it to schedule future
modifications. To resolve this, GISelWorkList now has an optional pointer to
the MachineFunction which describes the scope of the work it is permitted
to schedule. If a given MachineInstr* belongs to this function then it is
permitted to schedule work to be performed on that MachineInstr. An
alternative would be to remove the const from the
MachineFunction::Delegate interface; however, delegates are not permitted
to modify the MachineInstr's they receive.
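
A rough sketch of that scoping rule, with illustrative names (ScopedMF and
scheduleForVisit are placeholders, not the actual GISelWorkList members):

  // Hypothetical sketch: only schedule work on instructions that belong to
  // the MachineFunction this work list is scoped to. The const_cast is the
  // point of the scope check: we own this function and intend to modify it.
  void scheduleForVisit(const MachineInstr &MI) {
    if (ScopedMF && MI.getParent()->getParent() == ScopedMF)
      insert(const_cast<MachineInstr *>(&MI));
  }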

In addition to this, the observer has three interface changes.
* erasedInstr() is now erasingInstr() to indicate it is about to be erased
  but still exists at the moment.
* changingInstr() and changedInstr() have been added to report changes
  before and after they are made. This allows us to trace the changes
  in the debug output.
* As a convenience, changingAllUsesOfReg() and
  finishedChangingAllUsesOfReg() will report changingInstr() and
  changedInstr() for each use of a given register. This is primarily useful
  for changes caused by MachineRegisterInfo::replaceRegWith(); see the
  sketch after this list.
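
A minimal sketch of the resulting notification sequences, using only calls
that appear in the updated CombinerHelper (FromReg, ToReg, and DeadMI are
placeholders):

  // Replacing every use of a register: one changing/changed pair per use.
  Observer.changingAllUsesOfReg(MRI, FromReg);
  MRI.replaceRegWith(FromReg, ToReg);
  Observer.finishedChangingAllUsesOfReg();

  // Erasing an instruction: notify while it still exists.
  Observer.erasingInstr(*DeadMI);
  DeadMI->eraseFromParent();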

With this in place, both combine rules have been updated to report their
changes to the observer.

Finally, make some cosmetic changes to the debug output and make Combiner
and CombinerHelper ...

Reviewers: aditya_nandakumar, bogner, volkan, rtereshin, javed.absar

Reviewed By: aditya_nandakumar

Subscribers: mgorny, rovka, kristof.beyls, llvm-commits

Differential Revision: https://reviews.llvm.org/D52947

llvm-svn: 349167
2018-12-14 17:50:14 +00:00


//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#define DEBUG_TYPE "gi-combiner"
using namespace llvm;
CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
                               MachineIRBuilder &B)
    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer) {}

void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg,
                                    unsigned ToReg) const {
  Observer.changingAllUsesOfReg(MRI, FromReg);
  if (MRI.constrainRegAttrs(ToReg, FromReg))
    MRI.replaceRegWith(FromReg, ToReg);
  else
    Builder.buildCopy(ToReg, FromReg);
  Observer.finishedChangingAllUsesOfReg();
}

void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
                                      MachineOperand &FromRegOp,
                                      unsigned ToReg) const {
  assert(FromRegOp.getParent() && "Expected an operand in an MI");
  Observer.changingInstr(*FromRegOp.getParent());
  FromRegOp.setReg(ToReg);
  Observer.changedInstr(*FromRegOp.getParent());
}

bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
  if (MI.getOpcode() != TargetOpcode::COPY)
    return false;
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(SrcReg);
  // Simple Copy Propagation.
  // a(sx) = COPY b(sx) -> Replace all uses of a with b.
  if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy) {
    MI.eraseFromParent();
    replaceRegWith(MRI, DstReg, SrcReg);
    return true;
  }
  return false;
}

namespace {

struct PreferredTuple {
  LLT Ty;                // The result type of the extend.
  unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
  MachineInstr *MI;
};

/// Select a preference between two uses. CurrentUse is the current preference
/// while *ForCandidate are the attributes of the candidate under
/// consideration.
PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
                                  const LLT &TyForCandidate,
                                  unsigned OpcodeForCandidate,
                                  MachineInstr *MIForCandidate) {
  if (!CurrentUse.Ty.isValid()) {
    if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
        CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
    return CurrentUse;
  }

  // We permit the extend to hoist through basic blocks but this is only
  // sensible if the target has extending loads. If you end up lowering back
  // into a load and extend during the legalizer then the end result is
  // hoisting the extend up to the load.

  // Prefer defined extensions to undefined extensions as these are more
  // likely to reduce the number of instructions.
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
      CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
    return CurrentUse;
  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive. Choosing the sign extension folds the more expensive
  // operation into the load and leaves the cheaper zero extension as a
  // separate instruction.
  if (CurrentUse.Ty == TyForCandidate) {
    if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
      return CurrentUse;
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }

  // This is potentially target specific. We've chosen the largest type
  // because G_TRUNC is usually free. One potential catch with this is that
  // some targets have a reduced number of larger registers than smaller
  // registers and this choice potentially increases the live-range for the
  // larger value.
  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
  return CurrentUse;
}

/// Find a suitable place to insert some instructions and insert them. This
/// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHI's is to duplicate the
/// instructions for each predecessor. However, while that's ok for G_TRUNC
/// on most targets since it generally requires no code, other targets/cases
/// may want to try harder to find a dominating block.
static void InsertInsnsWithoutSideEffectsBeforeUse(
    MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
    std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator)>
        Inserter) {
  MachineInstr &UseMI = *UseMO.getParent();
  MachineBasicBlock *InsertBB = UseMI.getParent();

  // If the use is a PHI then we want the predecessor block instead.
  if (UseMI.isPHI()) {
    MachineOperand *PredBB = std::next(&UseMO);
    InsertBB = PredBB->getMBB();
  }

  // If the block is the same block as the def then we want to insert just
  // after the def instead of at the start of the block.
  if (InsertBB == DefMI.getParent()) {
    MachineBasicBlock::iterator InsertPt = &DefMI;
    Inserter(InsertBB, std::next(InsertPt));
    return;
  }

  // Otherwise we want the start of the BB
  Inserter(InsertBB, InsertBB->getFirstNonPHI());
}
} // end anonymous namespace

bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
  struct InsertionPoint {
    MachineOperand *UseMO;
    MachineBasicBlock *InsertIntoBB;
    MachineBasicBlock::iterator InsertBefore;
    InsertionPoint(MachineOperand *UseMO, MachineBasicBlock *InsertIntoBB,
                   MachineBasicBlock::iterator InsertBefore)
        : UseMO(UseMO), InsertIntoBB(InsertIntoBB),
          InsertBefore(InsertBefore) {}
  };

  // We match the loads and follow the uses to the extend instead of matching
  // the extends and following the def to the load. This is because the load
  // must remain in the same position for correctness (unless we also add code
  // to find a safe place to sink it) whereas the extend is freely movable.
  // It also prevents us from duplicating the load for the volatile case or
  // just for performance.

  if (MI.getOpcode() != TargetOpcode::G_LOAD &&
      MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
      MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
    return false;

  auto &LoadValue = MI.getOperand(0);
  assert(LoadValue.isReg() && "Result wasn't a register?");

  LLT LoadValueTy = MRI.getType(LoadValue.getReg());
  if (!LoadValueTy.isScalar())
    return false;
  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
  unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
                                 ? TargetOpcode::G_ANYEXT
                                 : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
                                       ? TargetOpcode::G_SEXT
                                       : TargetOpcode::G_ZEXT;
  PreferredTuple Preferred = {LLT(), PreferredOpcode, nullptr};
  for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
      Preferred = ChoosePreferredUse(Preferred,
                                     MRI.getType(UseMI.getOperand(0).getReg()),
                                     UseMI.getOpcode(), &UseMI);
    }
  }

  // There were no extends
  if (!Preferred.MI)
    return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");

  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);

  // Rewrite the load to the chosen extending load.
  unsigned ChosenDstReg = Preferred.MI->getOperand(0).getReg();
  Observer.changingInstr(MI);
  MI.setDesc(
      Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
                               ? TargetOpcode::G_SEXTLOAD
                               : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
                                     ? TargetOpcode::G_ZEXTLOAD
                                     : TargetOpcode::G_LOAD));

  // Rewrite all the uses to fix up the types.
  SmallVector<MachineInstr *, 1> ScheduleForErase;
  SmallVector<InsertionPoint, 4> ScheduleForInsert;
  for (auto &UseMO : MRI.use_operands(LoadValue.getReg())) {
    MachineInstr *UseMI = UseMO.getParent();

    // If the extend is compatible with the preferred extend then we should fix
    // up the type and extend so that it uses the preferred use.
    if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
        UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
      unsigned UseDstReg = UseMI->getOperand(0).getReg();
      MachineOperand &UseSrcMO = UseMI->getOperand(1);
      const LLT &UseDstTy = MRI.getType(UseDstReg);
      if (UseDstReg != ChosenDstReg) {
        if (Preferred.Ty == UseDstTy) {
          // If the use has the same type as the preferred use, then merge
          // the vregs and erase the extend. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ANYEXT %1(s8)
          //   ... = ... %3(s32)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          //   ... = ... %2(s32)
          replaceRegWith(MRI, UseDstReg, ChosenDstReg);
          ScheduleForErase.push_back(UseMO.getParent());
        } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
          // If the preferred size is smaller, then keep the extend but extend
          // from the result of the extending load. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s64) = G_ANYEXT %1(s8)
          //   ... = ... %3(s64)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          //   %3:_(s64) = G_ANYEXT %2:_(s32)
          //   ... = ... %3(s64)
          replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
        } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s64) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ZEXT %1(s8)
          //   ... = ... %3(s32)
          // rewrites to:
          //   %2:_(s64) = G_SEXTLOAD ...
          //   %4:_(s8) = G_TRUNC %2:_(s64)
          //   %3:_(s32) = G_ZEXT %4:_(s8)
          //   ... = ... %3(s32)
          InsertInsnsWithoutSideEffectsBeforeUse(
              Builder, MI, UseMO,
              [&](MachineBasicBlock *InsertIntoBB,
                  MachineBasicBlock::iterator InsertBefore) {
                ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB,
                                               InsertBefore);
              });
        }
        continue;
      }
      // The use is (one of) the uses of the preferred use we chose earlier.
      // We're going to update the load to def this value later so just erase
      // the old extend.
      ScheduleForErase.push_back(UseMO.getParent());
      continue;
    }

    // The use isn't an extend. Truncate back to the type we originally loaded.
    // This is free on many targets.
    InsertInsnsWithoutSideEffectsBeforeUse(
        Builder, MI, UseMO,
        [&](MachineBasicBlock *InsertIntoBB,
            MachineBasicBlock::iterator InsertBefore) {
          ScheduleForInsert.emplace_back(&UseMO, InsertIntoBB, InsertBefore);
        });
  }
  DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
  for (auto &InsertionInfo : ScheduleForInsert) {
    MachineOperand *UseMO = InsertionInfo.UseMO;
    MachineBasicBlock *InsertIntoBB = InsertionInfo.InsertIntoBB;
    MachineBasicBlock::iterator InsertBefore = InsertionInfo.InsertBefore;

    MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
    if (PreviouslyEmitted) {
      Observer.changingInstr(*UseMO->getParent());
      UseMO->setReg(PreviouslyEmitted->getOperand(0).getReg());
      Observer.changedInstr(*UseMO->getParent());
      continue;
    }

    Builder.setInsertPt(*InsertIntoBB, InsertBefore);
    unsigned NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
    MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
    EmittedInsns[InsertIntoBB] = NewMI;
    replaceRegOpWith(MRI, *UseMO, NewDstReg);
  }
  for (auto &EraseMI : ScheduleForErase) {
    Observer.erasingInstr(*EraseMI);
    EraseMI->eraseFromParent();
  }
  MI.getOperand(0).setReg(ChosenDstReg);
  Observer.changedInstr(MI);
  return true;
}

bool CombinerHelper::tryCombine(MachineInstr &MI) {
  if (tryCombineCopy(MI))
    return true;
  return tryCombineExtendingLoads(MI);
}