GlobalISel: Remove unsigned variant of SrcOp

Force using Register. One downside is that the generated register enums
require explicit conversion.

llvm-svn: 364194
parent 0738f328d7
commit ae5999e106
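A minimal sketch of the conversion downside mentioned in the message, assuming llvm::Register's implicit unsigned constructor; "MyTarget" and getStackPointerReg() are hypothetical stand-ins, not code from this commit:

    // TableGen-generated register enums are plain unsigned enumerators, so
    // once an API accepts only Register, call sites wrap them explicitly.
    #include "llvm/CodeGen/Register.h"

    namespace MyTarget {
    // Stand-in for a TableGen-generated register enum.
    enum { NoRegister = 0, SP = 1 };
    } // namespace MyTarget

    llvm::Register getStackPointerReg() {
      // An `unsigned` overload used to accept MyTarget::SP directly; with only
      // Register overloads left, the enumerator is wrapped at the boundary.
      return llvm::Register(MyTarget::SP);
    }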
@@ -145,7 +145,7 @@ public:
   bool needsCustom() const { return isCustom; }

-  unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
+  Register getLocReg() const { assert(isRegLoc()); return Loc; }
   unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
   unsigned getExtraInfo() const { return Loc; }
   MVT getLocVT() const { return LocVT; }

@@ -42,7 +42,7 @@ class CallLowering {
   virtual void anchor();
 public:
   struct ArgInfo {
-    unsigned Reg;
+    Register Reg;
     Type *Ty;
     ISD::ArgFlagsTy Flags;
     bool IsFixed;
@@ -77,19 +77,19 @@ public:
   /// direct SP manipulation, depending on the context. \p MPO
   /// should be initialized to an appropriate description of the
   /// address created.
-  virtual unsigned getStackAddress(uint64_t Size, int64_t Offset,
+  virtual Register getStackAddress(uint64_t Size, int64_t Offset,
                                    MachinePointerInfo &MPO) = 0;

   /// The specified value has been assigned to a physical register,
   /// handle the appropriate COPY (either to or from) and mark any
   /// relevant uses/defines as needed.
-  virtual void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+  virtual void assignValueToReg(Register ValVReg, Register PhysReg,
                                 CCValAssign &VA) = 0;

   /// The specified value has been assigned to a stack
   /// location. Load or store it there, with appropriate extension
   /// if necessary.
-  virtual void assignValueToAddress(unsigned ValVReg, unsigned Addr,
+  virtual void assignValueToAddress(Register ValVReg, Register Addr,
                                     uint64_t Size, MachinePointerInfo &MPO,
                                     CCValAssign &VA) = 0;

@@ -104,7 +104,7 @@ public:
     llvm_unreachable("Custom values not supported");
   }

-  unsigned extendRegister(unsigned ValReg, CCValAssign &VA);
+  Register extendRegister(Register ValReg, CCValAssign &VA);

   virtual bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
@@ -18,6 +18,7 @@
 #define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H

 #include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/Register.h"

 namespace llvm {

@@ -42,12 +43,12 @@ public:
   CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B);

   /// MachineRegisterInfo::replaceRegWith() and inform the observer of the changes
-  void replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg, unsigned ToReg) const;
+  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;

   /// Replace a single register operand with a new register and inform the
   /// observer of the changes.
   void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
-                        unsigned ToReg) const;
+                        Register ToReg) const;

   /// If \p MI is COPY, try to combine it.
   /// Returns true if MI changed.
@@ -200,7 +200,7 @@ private:
   /// the function.
   ///
   /// \return true if the materialization succeeded.
-  bool translate(const Constant &C, unsigned Reg);
+  bool translate(const Constant &C, Register Reg);

   /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
   /// emitted.
@@ -216,7 +216,7 @@ private:
   bool translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                         unsigned ID);

-  void getStackGuard(unsigned DstReg, MachineIRBuilder &MIRBuilder);
+  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);

   bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                   MachineIRBuilder &MIRBuilder);
@@ -241,9 +241,9 @@ private:
   // until it is refactored.
   /// Combines all component registers of \p V into a single scalar with size
   /// "max(Offsets) + last size".
-  unsigned packRegs(const Value &V, MachineIRBuilder &MIRBuilder);
+  Register packRegs(const Value &V, MachineIRBuilder &MIRBuilder);

-  void unpackRegs(const Value &V, unsigned Src, MachineIRBuilder &MIRBuilder);
+  void unpackRegs(const Value &V, Register Src, MachineIRBuilder &MIRBuilder);

   /// Returns true if the value should be split into multiple LLTs.
   /// If \p Offsets is given then the split type's offsets will be stored in it.
@@ -126,7 +126,6 @@ class SrcOp {

 public:
   enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate };
-  SrcOp(unsigned R) : Reg(R), Ty(SrcType::Ty_Reg) {}
   SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {}
   SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {}
   SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
@@ -158,7 +157,7 @@ public:
     llvm_unreachable("Unrecognised SrcOp::SrcType enum");
   }

-  unsigned getReg() const {
+  Register getReg() const {
     switch (Ty) {
     case SrcType::Ty_Predicate:
       llvm_unreachable("Not a register operand");
@@ -82,7 +82,7 @@ public:
   /// Get the register for the operand index.
   /// The operand at the index should be a register (asserted by
   /// MachineOperand).
-  unsigned getReg(unsigned Idx) const { return MI->getOperand(Idx).getReg(); }
+  Register getReg(unsigned Idx) const { return MI->getOperand(Idx).getReg(); }

   /// Add a new virtual register operand.
   const MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
@@ -77,7 +77,7 @@ public:

   /// Get or create the swifterror value virtual register in
   /// VRegDefMap for this basic block.
-  unsigned getOrCreateVReg(const MachineBasicBlock *, const Value *);
+  Register getOrCreateVReg(const MachineBasicBlock *, const Value *);

   /// Set the swifterror virtual register in the VRegDefMap for this
   /// basic block.
@@ -85,12 +85,12 @@ public:

   /// Get or create the swifterror value virtual register for a def of a
   /// swifterror by an instruction.
-  unsigned getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *,
+  Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *,
                                 const Value *);

   /// Get or create the swifterror value virtual register for a use of a
   /// swifterror by an instruction.
-  unsigned getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *,
+  Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *,
                                 const Value *);

   /// Create initial definitions of swifterror values in the entry block of the
@@ -195,7 +195,7 @@ bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
   return true;
 }

-unsigned CallLowering::ValueHandler::extendRegister(unsigned ValReg,
+Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                     CCValAssign &VA) {
   LLT LocTy{VA.getLocVT()};
   if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
@@ -22,8 +22,8 @@ CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
                                MachineIRBuilder &B)
     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer) {}

-void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg,
-                                    unsigned ToReg) const {
+void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
+                                    Register ToReg) const {
   Observer.changingAllUsesOfReg(MRI, FromReg);

   if (MRI.constrainRegAttrs(ToReg, FromReg))
@@ -36,7 +36,7 @@ void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg,

 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
                                       MachineOperand &FromRegOp,
-                                      unsigned ToReg) const {
+                                      Register ToReg) const {
   assert(FromRegOp.getParent() && "Expected an operand in an MI");
   Observer.changingInstr(*FromRegOp.getParent());

@@ -235,7 +235,7 @@ bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
                                                 PreferredTuple &Preferred) {
   // Rewrite the load to the chosen extending load.
-  unsigned ChosenDstReg = Preferred.MI->getOperand(0).getReg();
+  Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();

   // Inserter to insert a truncate back to the original type at a given point
   // with some basic CSE to limit truncate duplication to one per BB.
@@ -252,7 +252,7 @@ void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
     }

     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
-    unsigned NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
+    Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
     EmittedInsns[InsertIntoBB] = NewMI;
     replaceRegOpWith(MRI, UseMO, NewDstReg);
@@ -289,9 +289,9 @@ bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
   // Unless the value is a Constant => loadimm cst?
   // or inline constant each time?
   // Creation of a virtual register needs to have a size.
-  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
-  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
-  unsigned Res = getOrCreateVReg(U);
+  Register Op0 = getOrCreateVReg(*U.getOperand(0));
+  Register Op1 = getOrCreateVReg(*U.getOperand(1));
+  Register Res = getOrCreateVReg(U);
   uint16_t Flags = 0;
   if (isa<Instruction>(U)) {
     const Instruction &I = cast<Instruction>(U);
@@ -306,8 +306,8 @@ bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
   // -0.0 - X --> G_FNEG
   if (isa<Constant>(U.getOperand(0)) &&
       U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
-    unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
-    unsigned Res = getOrCreateVReg(U);
+    Register Op1 = getOrCreateVReg(*U.getOperand(1));
+    Register Res = getOrCreateVReg(U);
     uint16_t Flags = 0;
     if (isa<Instruction>(U)) {
       const Instruction &I = cast<Instruction>(U);
@@ -321,8 +321,8 @@ bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
 }

 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
-  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
-  unsigned Res = getOrCreateVReg(U);
+  Register Op0 = getOrCreateVReg(*U.getOperand(0));
+  Register Res = getOrCreateVReg(U);
   uint16_t Flags = 0;
   if (isa<Instruction>(U)) {
     const Instruction &I = cast<Instruction>(U);
@@ -335,9 +335,9 @@ bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
 bool IRTranslator::translateCompare(const User &U,
                                     MachineIRBuilder &MIRBuilder) {
   const CmpInst *CI = dyn_cast<CmpInst>(&U);
-  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
-  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
-  unsigned Res = getOrCreateVReg(U);
+  Register Op0 = getOrCreateVReg(*U.getOperand(0));
+  Register Op1 = getOrCreateVReg(*U.getOperand(1));
+  Register Res = getOrCreateVReg(U);
   CmpInst::Predicate Pred =
       CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                     cast<ConstantExpr>(U).getPredicate());
@@ -384,7 +384,7 @@ bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
   unsigned Succ = 0;
   if (!BrInst.isUnconditional()) {
     // We want a G_BRCOND to the true BB followed by an unconditional branch.
-    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
+    Register Tst = getOrCreateVReg(*BrInst.getCondition());
     const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
     MachineBasicBlock &TrueBB = getMBB(TrueTgt);
     MIRBuilder.buildBrCond(Tst, TrueBB);
@@ -526,7 +526,7 @@ bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
   const Value &SValue = *JTH.SValue;
   // Subtract the lowest switch case value from the value being switched on.
   const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
-  unsigned SwitchOpReg = getOrCreateVReg(SValue);
+  Register SwitchOpReg = getOrCreateVReg(SValue);
   auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
   auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

@@ -563,8 +563,8 @@ bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
 void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                   MachineBasicBlock *SwitchBB,
                                   MachineIRBuilder &MIB) {
-  unsigned CondLHS = getOrCreateVReg(*CB.CmpLHS);
-  unsigned Cond = 0;
+  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
+  Register Cond;
   DebugLoc OldDbgLoc = MIB.getDebugLoc();
   MIB.setDebugLoc(CB.DbgLoc);
   MIB.setMBB(*CB.ThisBB);
@@ -584,7 +584,7 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
   const LLT i1Ty = LLT::scalar(1);
   // Build the compare.
   if (!CB.CmpMHS) {
-    unsigned CondRHS = getOrCreateVReg(*CB.CmpRHS);
+    Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
     Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
   } else {
     assert(CB.PredInfo.Pred == CmpInst::ICMP_ULE &&
@@ -593,9 +593,9 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

-    unsigned CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
+    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
-      unsigned CondRHS = getOrCreateVReg(*CB.CmpRHS);
+      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
       Cond =
           MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, CmpOpReg, CondRHS).getReg(0);
     } else {
@@ -829,7 +829,7 @@ bool IRTranslator::translateIndirectBr(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
   const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

-  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
+  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
   MIRBuilder.buildBrIndirect(Tgt);

   // Link successors.
@@ -860,14 +860,14 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {

   ArrayRef<Register> Regs = getOrCreateVRegs(LI);
   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
-  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
+  Register Base = getOrCreateVReg(*LI.getPointerOperand());

   Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

   if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
     assert(Regs.size() == 1 && "swifterror should be single pointer");
-    unsigned VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
+    Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
                                                     LI.getPointerOperand());
     MIRBuilder.buildCopy(Regs[0], VReg);
     return true;
@@ -901,7 +901,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {

   ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
-  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
+  Register Base = getOrCreateVReg(*SI.getPointerOperand());

   Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
@@ -909,7 +909,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
   if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
     assert(Vals.size() == 1 && "swifterror should be single pointer");

-    unsigned VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
+    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                     SI.getPointerOperand());
     MIRBuilder.buildCopy(VReg, Vals[0]);
     return true;
@@ -991,7 +991,7 @@ bool IRTranslator::translateInsertValue(const User &U,

 bool IRTranslator::translateSelect(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
-  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
+  Register Tst = getOrCreateVReg(*U.getOperand(0));
   ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
   ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
   ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
@@ -1014,7 +1014,7 @@ bool IRTranslator::translateBitCast(const User &U,
   // If we're bitcasting to the source type, we can reuse the source vreg.
   if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
       getLLTForType(*U.getType(), *DL)) {
-    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
+    Register SrcReg = getOrCreateVReg(*U.getOperand(0));
     auto &Regs = *VMap.getVRegs(U);
     // If we already assigned a vreg for this bitcast, we can't change that.
     // Emit a copy to satisfy the users we already emitted.
@@ -1031,8 +1031,8 @@ bool IRTranslator::translateBitCast(const User &U,

 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                  MachineIRBuilder &MIRBuilder) {
-  unsigned Op = getOrCreateVReg(*U.getOperand(0));
-  unsigned Res = getOrCreateVReg(U);
+  Register Op = getOrCreateVReg(*U.getOperand(0));
+  Register Res = getOrCreateVReg(U);
   MIRBuilder.buildInstr(Opcode, {Res}, {Op});
   return true;
 }
@@ -1044,7 +1044,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
     return false;

   Value &Op0 = *U.getOperand(0);
-  unsigned BaseReg = getOrCreateVReg(Op0);
+  Register BaseReg = getOrCreateVReg(Op0);
   Type *PtrIRTy = Op0.getType();
   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
   Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
@@ -1069,7 +1069,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
       }

       if (Offset != 0) {
-        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
+        Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
         LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
         auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
         MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));
@@ -1078,16 +1078,16 @@ bool IRTranslator::translateGetElementPtr(const User &U,
         Offset = 0;
       }

-      unsigned IdxReg = getOrCreateVReg(*Idx);
+      Register IdxReg = getOrCreateVReg(*Idx);
       if (MRI->getType(IdxReg) != OffsetTy) {
-        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
+        Register NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
         MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
         IdxReg = NewIdxReg;
       }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
-      unsigned GepOffsetReg;
+      Register GepOffsetReg;
       if (ElementSize != 1) {
         GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
         auto ElementSizeMIB = MIRBuilder.buildConstant(
@@ -1096,7 +1096,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
       } else
         GepOffsetReg = IdxReg;

-      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
+      Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
       MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
       BaseReg = NewBaseReg;
     }
@@ -1163,7 +1163,7 @@ bool IRTranslator::translateMemfunc(const CallInst &CI,
                         CallLowering::ArgInfo(0, CI.getType()), Args);
 }

-void IRTranslator::getStackGuard(unsigned DstReg,
+void IRTranslator::getStackGuard(Register DstReg,
                                  MachineIRBuilder &MIRBuilder) {
   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
@@ -1373,7 +1373,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     } else if (const auto *CI = dyn_cast<Constant>(V)) {
       MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
     } else {
-      unsigned Reg = getOrCreateVReg(*V);
+      Register Reg = getOrCreateVReg(*V);
       // FIXME: This does not handle register-indirect values at offset 0. The
       // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first palce, but it seems
@@ -1397,10 +1397,10 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
   case Intrinsic::fmuladd: {
     const TargetMachine &TM = MF->getTarget();
     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
-    unsigned Dst = getOrCreateVReg(CI);
-    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
-    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
-    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
+    Register Dst = getOrCreateVReg(CI);
+    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
+    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
+    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
         TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
       // TODO: Revisit this to see if we should move this part of the
@@ -1422,7 +1422,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     return translateMemfunc(CI, MIRBuilder, ID);
   case Intrinsic::eh_typeid_for: {
     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
-    unsigned Reg = getOrCreateVReg(CI);
+    Register Reg = getOrCreateVReg(CI);
     unsigned TypeID = MF->getTypeIDFor(GV);
     MIRBuilder.buildConstant(Reg, TypeID);
     return true;
@@ -1444,7 +1444,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
     return true;
   case Intrinsic::stackprotector: {
     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
-    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
+    Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
     getStackGuard(GuardVal, MIRBuilder);

     AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
@@ -1461,8 +1461,8 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
   }
   case Intrinsic::stacksave: {
     // Save the stack pointer to the location provided by the intrinsic.
-    unsigned Reg = getOrCreateVReg(CI);
-    unsigned StackPtr = MF->getSubtarget()
+    Register Reg = getOrCreateVReg(CI);
+    Register StackPtr = MF->getSubtarget()
                             .getTargetLowering()
                             ->getStackPointerRegisterToSaveRestore();

@@ -1475,8 +1475,8 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
   }
   case Intrinsic::stackrestore: {
     // Restore the stack pointer from the location provided by the intrinsic.
-    unsigned Reg = getOrCreateVReg(*CI.getArgOperand(0));
-    unsigned StackPtr = MF->getSubtarget()
+    Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
+    Register StackPtr = MF->getSubtarget()
                             .getTargetLowering()
                             ->getStackPointerRegisterToSaveRestore();

@@ -1503,7 +1503,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
   }
   case Intrinsic::invariant_start: {
     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
-    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
+    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
     MIRBuilder.buildUndef(Undef);
     return true;
   }
@@ -1537,7 +1537,7 @@ bool IRTranslator::translateInlineAsm(const CallInst &CI,
   return true;
 }

-unsigned IRTranslator::packRegs(const Value &V,
+Register IRTranslator::packRegs(const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
   ArrayRef<Register> Regs = getOrCreateVRegs(V);
   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
@@ -1546,17 +1546,17 @@ unsigned IRTranslator::packRegs(const Value &V,
   if (Regs.size() == 1)
     return Regs[0];

-  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
+  Register Dst = MRI->createGenericVirtualRegister(BigTy);
   MIRBuilder.buildUndef(Dst);
   for (unsigned i = 0; i < Regs.size(); ++i) {
-    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
+    Register NewDst = MRI->createGenericVirtualRegister(BigTy);
     MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
     Dst = NewDst;
   }
   return Dst;
 }

-void IRTranslator::unpackRegs(const Value &V, unsigned Src,
+void IRTranslator::unpackRegs(const Value &V, Register Src,
                               MachineIRBuilder &MIRBuilder) {
   ArrayRef<Register> Regs = getOrCreateVRegs(V);
   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
@@ -1595,7 +1595,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
   for (auto &Arg: CI.arg_operands()) {
     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
       LLT Ty = getLLTForType(*Arg->getType(), *DL);
-      unsigned InVReg = MRI->createGenericVirtualRegister(Ty);
+      Register InVReg = MRI->createGenericVirtualRegister(Ty);
       MIRBuilder.buildCopy(InVReg, SwiftError.getOrCreateVRegUseAt(
                                        &CI, &MIRBuilder.getMBB(), Arg));
       Args.push_back(InVReg);
@@ -1687,7 +1687,7 @@ bool IRTranslator::translateInvoke(const User &U,
   MCSymbol *BeginSymbol = Context.createTempSymbol();
   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

-  unsigned Res = 0;
+  Register Res;
   if (!I.getType()->isVoidTy())
     Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
   SmallVector<Register, 8> Args;
@@ -1695,7 +1695,7 @@ bool IRTranslator::translateInvoke(const User &U,
   for (auto &Arg : I.arg_operands()) {
     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
       LLT Ty = getLLTForType(*Arg->getType(), *DL);
-      unsigned InVReg = MRI->createGenericVirtualRegister(Ty);
+      Register InVReg = MRI->createGenericVirtualRegister(Ty);
       MIRBuilder.buildCopy(InVReg, SwiftError.getOrCreateVRegUseAt(
                                        &I, &MIRBuilder.getMBB(), Arg));
       Args.push_back(InVReg);
@@ -1762,7 +1762,7 @@ bool IRTranslator::translateLandingPad(const User &U,
     .addSym(MF->addLandingPad(&MBB));

   LLT Ty = getLLTForType(*LP.getType(), *DL);
-  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
+  Register Undef = MRI->createGenericVirtualRegister(Ty);
   MIRBuilder.buildUndef(Undef);

   SmallVector<LLT, 2> Tys;
@@ -1771,7 +1771,7 @@ bool IRTranslator::translateLandingPad(const User &U,
   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

   // Mark exception register as live in.
-  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
+  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
   if (!ExceptionReg)
     return false;

@@ -1779,12 +1779,12 @@ bool IRTranslator::translateLandingPad(const User &U,
   ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

-  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
+  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
   if (!SelectorReg)
     return false;

   MBB.addLiveIn(SelectorReg);
-  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
+  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
   MIRBuilder.buildCopy(PtrVReg, SelectorReg);
   MIRBuilder.buildCast(ResRegs[1], PtrVReg);

@@ -1799,7 +1799,7 @@ bool IRTranslator::translateAlloca(const User &U,
     return true;

   if (AI.isStaticAlloca()) {
-    unsigned Res = getOrCreateVReg(AI);
+    Register Res = getOrCreateVReg(AI);
     int FI = getOrCreateFrameIndex(AI);
     MIRBuilder.buildFrameIndex(Res, FI);
     return true;
@@ -1814,29 +1814,29 @@ bool IRTranslator::translateAlloca(const User &U,
   unsigned Align =
       std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

-  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());
+  Register NumElts = getOrCreateVReg(*AI.getArraySize());

   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
   LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
   if (MRI->getType(NumElts) != IntPtrTy) {
-    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
+    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
     NumElts = ExtElts;
   }

-  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
-  unsigned TySize =
+  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
+  Register TySize =
       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
   MIRBuilder.buildMul(AllocSize, NumElts, TySize);

   LLT PtrTy = getLLTForType(*AI.getType(), *DL);
   auto &TLI = *MF->getSubtarget().getTargetLowering();
-  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
+  Register SPReg = TLI.getStackPointerRegisterToSaveRestore();

-  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
+  Register SPTmp = MRI->createGenericVirtualRegister(PtrTy);
   MIRBuilder.buildCopy(SPTmp, SPReg);

-  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
+  Register AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
   MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

   // Handle alignment. We have to realign if the allocation granule was smaller
@@ -1849,7 +1849,7 @@ bool IRTranslator::translateAlloca(const User &U,
     // Round the size of the allocation up to the stack alignment size
     // by add SA-1 to the size. This doesn't overflow because we're computing
     // an address inside an alloca.
-    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
+    Register AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
     MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
     AllocTmp = AlignedAlloc;
   }
@@ -1879,7 +1879,7 @@ bool IRTranslator::translateInsertElement(const User &U,
   // If it is a <1 x Ty> vector, use the scalar as it is
   // not a legal vector type in LLT.
   if (U.getType()->getVectorNumElements() == 1) {
-    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
+    Register Elt = getOrCreateVReg(*U.getOperand(1));
     auto &Regs = *VMap.getVRegs(U);
     if (Regs.empty()) {
       Regs.push_back(Elt);
@@ -1890,10 +1890,10 @@ bool IRTranslator::translateInsertElement(const User &U,
     return true;
   }

-  unsigned Res = getOrCreateVReg(U);
-  unsigned Val = getOrCreateVReg(*U.getOperand(0));
-  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
-  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
+  Register Res = getOrCreateVReg(U);
+  Register Val = getOrCreateVReg(*U.getOperand(0));
+  Register Elt = getOrCreateVReg(*U.getOperand(1));
+  Register Idx = getOrCreateVReg(*U.getOperand(2));
   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
   return true;
 }
@@ -1903,7 +1903,7 @@ bool IRTranslator::translateExtractElement(const User &U,
   // If it is a <1 x Ty> vector, use the scalar as it is
   // not a legal vector type in LLT.
   if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
-    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
+    Register Elt = getOrCreateVReg(*U.getOperand(0));
     auto &Regs = *VMap.getVRegs(U);
     if (Regs.empty()) {
       Regs.push_back(Elt);
@@ -1913,11 +1913,11 @@ bool IRTranslator::translateExtractElement(const User &U,
     }
     return true;
   }
-  unsigned Res = getOrCreateVReg(U);
-  unsigned Val = getOrCreateVReg(*U.getOperand(0));
+  Register Res = getOrCreateVReg(U);
+  Register Val = getOrCreateVReg(*U.getOperand(0));
   const auto &TLI = *MF->getSubtarget().getTargetLowering();
   unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
-  unsigned Idx = 0;
+  Register Idx;
   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
     if (CI->getBitWidth() != PreferredVecIdxWidth) {
       APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
@@ -1973,11 +1973,11 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
   Type *ValType = ResType->Type::getStructElementType(0);

   auto Res = getOrCreateVRegs(I);
-  unsigned OldValRes = Res[0];
-  unsigned SuccessRes = Res[1];
-  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
-  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
-  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());
+  Register OldValRes = Res[0];
+  Register SuccessRes = Res[1];
+  Register Addr = getOrCreateVReg(*I.getPointerOperand());
+  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
+  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

   MIRBuilder.buildAtomicCmpXchgWithSuccess(
       OldValRes, SuccessRes, Addr, Cmp, NewVal,
@@ -1999,9 +1999,9 @@ bool IRTranslator::translateAtomicRMW(const User &U,

   Type *ResType = I.getType();

-  unsigned Res = getOrCreateVReg(I);
-  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
-  unsigned Val = getOrCreateVReg(*I.getValOperand());
+  Register Res = getOrCreateVReg(I);
+  Register Addr = getOrCreateVReg(*I.getPointerOperand());
+  Register Val = getOrCreateVReg(*I.getValOperand());

   unsigned Opcode = 0;
   switch (I.getOperation()) {
@@ -2113,7 +2113,7 @@ bool IRTranslator::translate(const Instruction &Inst) {
   }
 }

-bool IRTranslator::translate(const Constant &C, unsigned Reg) {
+bool IRTranslator::translate(const Constant &C, Register Reg) {
   if (auto CI = dyn_cast<ConstantInt>(&C))
     EntryBuilder->buildConstant(Reg, *CI);
   else if (auto CF = dyn_cast<ConstantFP>(&C))
@@ -2126,7 +2126,7 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
     unsigned NullSize = DL->getTypeSizeInBits(C.getType());
     auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
     auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
-    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
+    Register ZeroReg = getOrCreateVReg(*ZeroVal);
     EntryBuilder->buildCast(Reg, ZeroReg);
   } else if (auto GV = dyn_cast<GlobalValue>(&C))
     EntryBuilder->buildGlobalValue(Reg, GV);
@@ -526,12 +526,12 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
     extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

-    unsigned CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
+    Register CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
     MIRBuilder.buildConstant(CarryIn, 0);

     for (int i = 0; i < NumParts; ++i) {
-      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
-      unsigned CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
+      Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
+      Register CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));

       MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
                             Src2Regs[i], CarryIn);
@@ -539,7 +539,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
       DstRegs.push_back(DstReg);
       CarryIn = CarryOut;
     }
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
     if(MRI.getType(DstReg).isVector())
       MIRBuilder.buildBuildVector(DstReg, DstRegs);
     else
@@ -559,12 +559,12 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
     extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

-    unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
-    unsigned BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
+    Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
+    Register BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
     MIRBuilder.buildInstr(TargetOpcode::G_USUBO, {DstReg, BorrowOut},
                           {Src1Regs[0], Src2Regs[0]});
     DstRegs.push_back(DstReg);
-    unsigned BorrowIn = BorrowOut;
+    Register BorrowIn = BorrowOut;
     for (int i = 1; i < NumParts; ++i) {
       DstReg = MRI.createGenericVirtualRegister(NarrowTy);
       BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
@@ -588,13 +588,13 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     return narrowScalarInsert(MI, TypeIdx, NarrowTy);
   case TargetOpcode::G_LOAD: {
     const auto &MMO = **MI.memoperands_begin();
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
     LLT DstTy = MRI.getType(DstReg);
     if (DstTy.isVector())
       return UnableToLegalize;

     if (8 * MMO.getSize() != DstTy.getSizeInBits()) {
-      unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
+      Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
       auto &MMO = **MI.memoperands_begin();
       MIRBuilder.buildLoad(TmpReg, MI.getOperand(1).getReg(), MMO);
       MIRBuilder.buildAnyExt(DstReg, TmpReg);
@@ -607,10 +607,10 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
   case TargetOpcode::G_ZEXTLOAD:
   case TargetOpcode::G_SEXTLOAD: {
     bool ZExt = MI.getOpcode() == TargetOpcode::G_ZEXTLOAD;
-    unsigned DstReg = MI.getOperand(0).getReg();
-    unsigned PtrReg = MI.getOperand(1).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
+    Register PtrReg = MI.getOperand(1).getReg();

-    unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
+    Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
     auto &MMO = **MI.memoperands_begin();
     if (MMO.getSizeInBits() == NarrowSize) {
       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
@@ -634,7 +634,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
   case TargetOpcode::G_STORE: {
     const auto &MMO = **MI.memoperands_begin();

-    unsigned SrcReg = MI.getOperand(0).getReg();
+    Register SrcReg = MI.getOperand(0).getReg();
     LLT SrcTy = MRI.getType(SrcReg);
     if (SrcTy.isVector())
       return UnableToLegalize;
@@ -646,7 +646,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
       return UnableToLegalize;

     if (8 * MMO.getSize() != SrcTy.getSizeInBits()) {
-      unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
+      Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
       auto &MMO = **MI.memoperands_begin();
       MIRBuilder.buildTrunc(TmpReg, SrcReg);
       MIRBuilder.buildStore(TmpReg, MI.getOperand(1).getReg(), MMO);
@@ -725,7 +725,7 @@ void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy,
 void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
                                      unsigned OpIdx, unsigned TruncOpcode) {
   MachineOperand &MO = MI.getOperand(OpIdx);
-  unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
+  Register DstExt = MRI.createGenericVirtualRegister(WideTy);
   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
   MIRBuilder.buildInstr(TruncOpcode, {MO.getReg()}, {DstExt});
   MO.setReg(DstExt);
@@ -734,7 +734,7 @@ void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
 void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy,
                                       unsigned OpIdx, unsigned ExtOpcode) {
   MachineOperand &MO = MI.getOperand(OpIdx);
-  unsigned DstTrunc = MRI.createGenericVirtualRegister(NarrowTy);
+  Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy);
   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
   MIRBuilder.buildInstr(ExtOpcode, {MO.getReg()}, {DstTrunc});
   MO.setReg(DstTrunc);
@@ -743,7 +743,7 @@ void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy,
 void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy,
                                             unsigned OpIdx) {
   MachineOperand &MO = MI.getOperand(OpIdx);
-  unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
+  Register DstExt = MRI.createGenericVirtualRegister(WideTy);
   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
   MIRBuilder.buildExtract(MO.getReg(), DstExt, 0);
   MO.setReg(DstExt);
@@ -773,8 +773,8 @@ void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy,
     return;
   }

-  unsigned MoreReg = MRI.createGenericVirtualRegister(MoreTy);
-  unsigned ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0);
+  Register MoreReg = MRI.createGenericVirtualRegister(MoreTy);
+  Register ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0);
   MIRBuilder.buildInsert(MoreReg, ImpDef, MO.getReg(), 0);
   MO.setReg(MoreReg);
 }
@@ -794,7 +794,7 @@ LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx,
   unsigned NumSrc = MI.getNumOperands() - 1;
   unsigned PartSize = DstTy.getSizeInBits() / NumSrc;

-  unsigned Src1 = MI.getOperand(1).getReg();
+  Register Src1 = MI.getOperand(1).getReg();
   Register ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();

   for (unsigned I = 2; I != NumOps; ++I) {
@@ -1002,7 +1002,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
       return Legalized;
     }

-    unsigned SrcReg = MI.getOperand(1).getReg();
+    Register SrcReg = MI.getOperand(1).getReg();

     // First ZEXT the input.
     auto MIBSrc = MIRBuilder.buildZExt(WideTy, SrcReg);
@@ -1035,11 +1035,11 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
   }
   case TargetOpcode::G_BSWAP: {
     Observer.changingInstr(MI);
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();

-    unsigned ShrReg = MRI.createGenericVirtualRegister(WideTy);
-    unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
-    unsigned ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy);
+    Register ShrReg = MRI.createGenericVirtualRegister(WideTy);
+    Register DstExt = MRI.createGenericVirtualRegister(WideTy);
+    Register ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy);
     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);

     MI.getOperand(0).setReg(DstExt);
@@ -1299,7 +1299,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
   }
   case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
     if (TypeIdx == 0) {
-      unsigned VecReg = MI.getOperand(1).getReg();
+      Register VecReg = MI.getOperand(1).getReg();
      LLT VecTy = MRI.getType(VecReg);
      Observer.changingInstr(MI);

@@ -1381,13 +1381,13 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     return UnableToLegalize;
   case TargetOpcode::G_SREM:
   case TargetOpcode::G_UREM: {
-    unsigned QuotReg = MRI.createGenericVirtualRegister(Ty);
+    Register QuotReg = MRI.createGenericVirtualRegister(Ty);
     MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
         .addDef(QuotReg)
         .addUse(MI.getOperand(1).getReg())
         .addUse(MI.getOperand(2).getReg());

-    unsigned ProdReg = MRI.createGenericVirtualRegister(Ty);
+    Register ProdReg = MRI.createGenericVirtualRegister(Ty);
     MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
     MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
                         ProdReg);
@@ -1398,10 +1398,10 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
   case TargetOpcode::G_UMULO: {
     // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the
     // result.
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned Overflow = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register Overflow = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();

     MIRBuilder.buildMul(Res, LHS, RHS);

@@ -1409,20 +1409,20 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
                           ? TargetOpcode::G_SMULH
                           : TargetOpcode::G_UMULH;

-    unsigned HiPart = MRI.createGenericVirtualRegister(Ty);
+    Register HiPart = MRI.createGenericVirtualRegister(Ty);
     MIRBuilder.buildInstr(Opcode)
         .addDef(HiPart)
         .addUse(LHS)
         .addUse(RHS);

-    unsigned Zero = MRI.createGenericVirtualRegister(Ty);
+    Register Zero = MRI.createGenericVirtualRegister(Ty);
     MIRBuilder.buildConstant(Zero, 0);

     // For *signed* multiply, overflow is detected by checking:
     // (hi != (lo >> bitwidth-1))
     if (Opcode == TargetOpcode::G_SMULH) {
-      unsigned Shifted = MRI.createGenericVirtualRegister(Ty);
-      unsigned ShiftAmt = MRI.createGenericVirtualRegister(Ty);
+      Register Shifted = MRI.createGenericVirtualRegister(Ty);
+      Register ShiftAmt = MRI.createGenericVirtualRegister(Ty);
       MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1);
       MIRBuilder.buildInstr(TargetOpcode::G_ASHR)
           .addDef(Shifted)
@@ -1440,7 +1440,7 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     // represent them.
     if (Ty.isVector())
       return UnableToLegalize;
-    unsigned Res = MI.getOperand(0).getReg();
+    Register Res = MI.getOperand(0).getReg();
     Type *ZeroTy;
     LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
     switch (Ty.getSizeInBits()) {
@@ -1462,8 +1462,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     ConstantFP &ZeroForNegation =
         *cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
     auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
-    unsigned SubByReg = MI.getOperand(1).getReg();
-    unsigned ZeroReg = Zero->getOperand(0).getReg();
+    Register SubByReg = MI.getOperand(1).getReg();
+    Register ZeroReg = Zero->getOperand(0).getReg();
     MIRBuilder.buildInstr(TargetOpcode::G_FSUB, {Res}, {ZeroReg, SubByReg},
                           MI.getFlags());
     MI.eraseFromParent();
@@ -1475,21 +1475,21 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     // end up with an infinite loop as G_FSUB is used to legalize G_FNEG.
     if (LI.getAction({G_FNEG, {Ty}}).Action == Lower)
       return UnableToLegalize;
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned LHS = MI.getOperand(1).getReg();
-    unsigned RHS = MI.getOperand(2).getReg();
-    unsigned Neg = MRI.createGenericVirtualRegister(Ty);
+    Register Res = MI.getOperand(0).getReg();
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+    Register Neg = MRI.createGenericVirtualRegister(Ty);
     MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS);
     MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Res}, {LHS, Neg}, MI.getFlags());
     MI.eraseFromParent();
     return Legalized;
   }
   case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
-    unsigned OldValRes = MI.getOperand(0).getReg();
-    unsigned SuccessRes = MI.getOperand(1).getReg();
-    unsigned Addr = MI.getOperand(2).getReg();
-    unsigned CmpVal = MI.getOperand(3).getReg();
-    unsigned NewVal = MI.getOperand(4).getReg();
+    Register OldValRes = MI.getOperand(0).getReg();
+    Register SuccessRes = MI.getOperand(1).getReg();
+    Register Addr = MI.getOperand(2).getReg();
+    Register CmpVal = MI.getOperand(3).getReg();
+    Register NewVal = MI.getOperand(4).getReg();
     MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal,
                                   **MI.memoperands_begin());
     MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal);
@@ -1500,8 +1500,8 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
   case TargetOpcode::G_SEXTLOAD:
   case TargetOpcode::G_ZEXTLOAD: {
     // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
-    unsigned DstReg = MI.getOperand(0).getReg();
-    unsigned PtrReg = MI.getOperand(1).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
+    Register PtrReg = MI.getOperand(1).getReg();
     LLT DstTy = MRI.getType(DstReg);
     auto &MMO = **MI.memoperands_begin();

@@ -1516,7 +1516,7 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     }

     if (DstTy.isScalar()) {
-      unsigned TmpReg =
+      Register TmpReg =
           MRI.createGenericVirtualRegister(LLT::scalar(MMO.getSizeInBits()));
       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
       switch (MI.getOpcode()) {
@@ -1545,10 +1545,10 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
   case TargetOpcode::G_CTPOP:
     return lowerBitCount(MI, TypeIdx, Ty);
   case G_UADDO: {
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned CarryOut = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register CarryOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();

     MIRBuilder.buildAdd(Res, LHS, RHS);
     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, RHS);
@@ -1557,14 +1557,14 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     return Legalized;
   }
   case G_UADDE: {
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned CarryOut = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
-    unsigned CarryIn = MI.getOperand(4).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register CarryOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();
+    Register CarryIn = MI.getOperand(4).getReg();

-    unsigned TmpRes = MRI.createGenericVirtualRegister(Ty);
-    unsigned ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);
+    Register TmpRes = MRI.createGenericVirtualRegister(Ty);
+    Register ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);

     MIRBuilder.buildAdd(TmpRes, LHS, RHS);
     MIRBuilder.buildZExt(ZExtCarryIn, CarryIn);
@@ -1575,10 +1575,10 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     return Legalized;
   }
   case G_USUBO: {
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned BorrowOut = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register BorrowOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();

     MIRBuilder.buildSub(Res, LHS, RHS);
     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, BorrowOut, LHS, RHS);
@@ -1587,16 +1587,16 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     return Legalized;
   }
   case G_USUBE: {
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned BorrowOut = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
-    unsigned BorrowIn = MI.getOperand(4).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register BorrowOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();
+    Register BorrowIn = MI.getOperand(4).getReg();

-    unsigned TmpRes = MRI.createGenericVirtualRegister(Ty);
-    unsigned ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty);
-    unsigned LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
-    unsigned LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
+    Register TmpRes = MRI.createGenericVirtualRegister(Ty);
+    Register ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty);
+    Register LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
+    Register LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));

     MIRBuilder.buildSub(TmpRes, LHS, RHS);
     MIRBuilder.buildZExt(ZExtBorrowIn, BorrowIn);
@@ -1620,7 +1620,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
   SmallVector<Register, 2> DstRegs;

   unsigned NarrowSize = NarrowTy.getSizeInBits();
-  unsigned DstReg = MI.getOperand(0).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
   unsigned Size = MRI.getType(DstReg).getSizeInBits();
   int NumParts = Size / NarrowSize;
   // FIXME: Don't know how to handle the situation where the small vectors
@@ -1629,7 +1629,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
     return UnableToLegalize;

   for (int i = 0; i < NumParts; ++i) {
-    unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
+    Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
     MIRBuilder.buildUndef(TmpReg);
     DstRegs.push_back(TmpReg);
   }
@@ -1664,7 +1664,7 @@ LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
     return UnableToLegalize;

   if (BitsForNumParts != Size) {
-    unsigned AccumDstReg = MRI.createGenericVirtualRegister(DstTy);
+    Register AccumDstReg = MRI.createGenericVirtualRegister(DstTy);
     MIRBuilder.buildUndef(AccumDstReg);

     // Handle the pieces which evenly divide into the requested type with
@@ -1672,15 +1672,15 @@ LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
     for (unsigned Offset = 0; Offset < BitsForNumParts; Offset += NarrowSize) {
       SmallVector<SrcOp, 4> SrcOps;
       for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
-        unsigned PartOpReg = MRI.createGenericVirtualRegister(NarrowTy);
+        Register PartOpReg = MRI.createGenericVirtualRegister(NarrowTy);
         MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset);
         SrcOps.push_back(PartOpReg);
       }

-      unsigned PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
+      Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
       MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);

-      unsigned PartInsertReg = MRI.createGenericVirtualRegister(DstTy);
+      Register PartInsertReg = MRI.createGenericVirtualRegister(DstTy);
       MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset);
       AccumDstReg = PartInsertReg;
     }
@@ -1688,13 +1688,13 @@ LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
     // Handle the remaining element sized leftover piece.
     SmallVector<SrcOp, 4> SrcOps;
     for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
-      unsigned PartOpReg = MRI.createGenericVirtualRegister(EltTy);
+      Register PartOpReg = MRI.createGenericVirtualRegister(EltTy);
       MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(),
                               BitsForNumParts);
       SrcOps.push_back(PartOpReg);
     }

-    unsigned PartDstReg = MRI.createGenericVirtualRegister(EltTy);
+    Register PartDstReg = MRI.createGenericVirtualRegister(EltTy);
     MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
     MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, BitsForNumParts);
     MI.eraseFromParent();
@@ -1713,7 +1713,7 @@ LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
   extractParts(MI.getOperand(3).getReg(), NarrowTy, NumParts, Src2Regs);

   for (int i = 0; i < NumParts; ++i) {
-    unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
+    Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);

     if (NumOps == 1)
       MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i]}, Flags);
@ -1758,7 +1758,7 @@ LegalizerHelper::fewerElementsVectorMultiEltType(
const unsigned NewNumElts =
NarrowTy0.isVector() ? NarrowTy0.getNumElements() : 1;

const unsigned DstReg = MI.getOperand(0).getReg();
const Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
LLT LeftoverTy0;

@ -1778,7 +1778,7 @@ LegalizerHelper::fewerElementsVectorMultiEltType(

for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
LLT LeftoverTy;
unsigned SrcReg = MI.getOperand(I).getReg();
Register SrcReg = MI.getOperand(I).getReg();
LLT SrcTyI = MRI.getType(SrcReg);
LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType());
LLT LeftoverTyI;
@ -1792,16 +1792,16 @@ LegalizerHelper::fewerElementsVectorMultiEltType(
if (I == 1) {
// For the first operand, create an instruction for each part and setup
// the result.
for (unsigned PartReg : PartRegs) {
unsigned PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0);
for (Register PartReg : PartRegs) {
Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0);
NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode())
.addDef(PartDstReg)
.addUse(PartReg));
DstRegs.push_back(PartDstReg);
}

for (unsigned LeftoverReg : LeftoverRegs) {
unsigned PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0);
for (Register LeftoverReg : LeftoverRegs) {
Register PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0);
NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode())
.addDef(PartDstReg)
.addUse(LeftoverReg));
@ -1840,8 +1840,8 @@ LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
if (TypeIdx != 0)
return UnableToLegalize;

unsigned DstReg = MI.getOperand(0).getReg();
unsigned SrcReg = MI.getOperand(1).getReg();
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(DstReg);
LLT SrcTy = MRI.getType(SrcReg);

@ -1865,7 +1865,7 @@ LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs);

for (unsigned I = 0; I < NumParts; ++I) {
unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
MachineInstr *NewInst = MIRBuilder.buildInstr(MI.getOpcode())
.addDef(DstReg)
.addUse(SrcRegs[I]);
@ -1886,8 +1886,8 @@ LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned Src0Reg = MI.getOperand(2).getReg();
Register DstReg = MI.getOperand(0).getReg();
Register Src0Reg = MI.getOperand(2).getReg();
LLT DstTy = MRI.getType(DstReg);
LLT SrcTy = MRI.getType(Src0Reg);

@ -1929,7 +1929,7 @@ LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx,
extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs);

for (unsigned I = 0; I < NumParts; ++I) {
unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
DstRegs.push_back(DstReg);

if (MI.getOpcode() == TargetOpcode::G_ICMP)
@ -2025,7 +2025,7 @@ LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
const unsigned DstReg = MI.getOperand(0).getReg();
const Register DstReg = MI.getOperand(0).getReg();
LLT PhiTy = MRI.getType(DstReg);
LLT LeftoverTy;

@ -2066,7 +2066,7 @@ LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
PartRegs.clear();
LeftoverRegs.clear();

unsigned SrcReg = MI.getOperand(I).getReg();
Register SrcReg = MI.getOperand(I).getReg();
MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());

@ -2266,8 +2266,8 @@ LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
const LLT HalfTy, const LLT AmtTy) {

unsigned InL = MRI.createGenericVirtualRegister(HalfTy);
unsigned InH = MRI.createGenericVirtualRegister(HalfTy);
Register InL = MRI.createGenericVirtualRegister(HalfTy);
Register InH = MRI.createGenericVirtualRegister(HalfTy);
MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());

if (Amt.isNullValue()) {
@ -2280,7 +2280,7 @@ LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
unsigned NVTBits = HalfTy.getSizeInBits();
unsigned VTBits = 2 * NVTBits;

SrcOp Lo(0), Hi(0);
SrcOp Lo(Register(0)), Hi(Register(0));
if (MI.getOpcode() == TargetOpcode::G_SHL) {
if (Amt.ugt(VTBits)) {
Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
@ -2361,12 +2361,12 @@ LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
return Legalized;
}

unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy.isVector())
return UnableToLegalize;

unsigned Amt = MI.getOperand(2).getReg();
Register Amt = MI.getOperand(2).getReg();
LLT ShiftAmtTy = MRI.getType(Amt);
const unsigned DstEltSize = DstTy.getScalarSizeInBits();
if (DstEltSize % 2 != 0)
@ -2390,8 +2390,8 @@ LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
// Handle the fully general expansion by an unknown amount.
auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize);

unsigned InL = MRI.createGenericVirtualRegister(HalfTy);
unsigned InH = MRI.createGenericVirtualRegister(HalfTy);
Register InL = MRI.createGenericVirtualRegister(HalfTy);
Register InH = MRI.createGenericVirtualRegister(HalfTy);
MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());

auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
@ -2565,7 +2565,7 @@ void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
unsigned DstParts = DstRegs.size();

unsigned DstIdx = 0; // Low bits of the result.
unsigned FactorSum =
Register FactorSum =
B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0);
DstRegs[DstIdx] = FactorSum;

@ -2592,7 +2592,7 @@ void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
Factors.push_back(CarrySumPrevDstIdx);
}

unsigned CarrySum = 0;
Register CarrySum;
// Add all factors and accumulate all carries into CarrySum.
if (DstIdx != DstParts - 1) {
MachineInstrBuilder Uaddo =
@ -2673,7 +2673,7 @@ LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx,
SmallVector<uint64_t, 2> Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);

unsigned OpReg = MI.getOperand(0).getReg();
Register OpReg = MI.getOperand(0).getReg();
uint64_t OpStart = MI.getOperand(2).getImm();
uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
for (int i = 0; i < NumParts; ++i) {
@ -2700,7 +2700,7 @@ LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx,
SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize);
}

unsigned SegReg = SrcRegs[i];
Register SegReg = SrcRegs[i];
if (ExtractOffset != 0 || SegSize != NarrowSize) {
// A genuine extract is needed.
SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
@ -2710,7 +2710,7 @@ LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx,
DstRegs.push_back(SegReg);
}

unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
if(MRI.getType(DstReg).isVector())
MIRBuilder.buildBuildVector(DstReg, DstRegs);
else
@ -2740,7 +2740,7 @@ LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx,
SmallVector<uint64_t, 2> Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);

unsigned OpReg = MI.getOperand(2).getReg();
Register OpReg = MI.getOperand(2).getReg();
uint64_t OpStart = MI.getOperand(3).getImm();
uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
for (int i = 0; i < NumParts; ++i) {
@ -2772,20 +2772,20 @@ LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx,
std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart);
}

unsigned SegReg = OpReg;
Register SegReg = OpReg;
if (ExtractOffset != 0 || SegSize != OpSize) {
// A genuine extract is needed.
SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset);
}

unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset);
DstRegs.push_back(DstReg);
}

assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered");
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
if(MRI.getType(DstReg).isVector())
MIRBuilder.buildBuildVector(DstReg, DstRegs);
else
@ -2797,7 +2797,7 @@ LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx,
LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);

assert(MI.getNumOperands() == 3 && TypeIdx == 0);
@ -2841,12 +2841,12 @@ LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
if (TypeIdx != 0)
return UnableToLegalize;

unsigned CondReg = MI.getOperand(1).getReg();
Register CondReg = MI.getOperand(1).getReg();
LLT CondTy = MRI.getType(CondReg);
if (CondTy.isVector()) // TODO: Handle vselect
return UnableToLegalize;

unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);

SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
@ -2900,7 +2900,7 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return Legalized;
}
case TargetOpcode::G_CTLZ: {
unsigned SrcReg = MI.getOperand(1).getReg();
Register SrcReg = MI.getOperand(1).getReg();
unsigned Len = Ty.getSizeInBits();
if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {Ty, Ty}})) {
// If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero.
@ -2926,7 +2926,7 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// return Len - popcount(x);
//
// Ref: "Hacker's Delight" by Henry Warren
unsigned Op = SrcReg;
Register Op = SrcReg;
unsigned NewLen = PowerOf2Ceil(Len);
for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) {
auto MIBShiftAmt = MIRBuilder.buildConstant(Ty, 1ULL << i);
@ -2950,7 +2950,7 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
return Legalized;
}
case TargetOpcode::G_CTTZ: {
unsigned SrcReg = MI.getOperand(1).getReg();
Register SrcReg = MI.getOperand(1).getReg();
unsigned Len = Ty.getSizeInBits();
if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {Ty, Ty}})) {
// If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with
@ -2998,8 +2998,8 @@ LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// representation.
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) {
unsigned Dst = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
const LLT S64 = LLT::scalar(64);
const LLT S32 = LLT::scalar(32);
const LLT S1 = LLT::scalar(1);
@ -3054,8 +3054,8 @@ LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) {

LegalizerHelper::LegalizeResult
LegalizerHelper::lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
unsigned Dst = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(Dst);
LLT SrcTy = MRI.getType(Src);

@ -3075,8 +3075,8 @@ LegalizerHelper::lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {

LegalizerHelper::LegalizeResult
LegalizerHelper::lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
unsigned Dst = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(Dst);
LLT SrcTy = MRI.getType(Src);

@ -3093,7 +3093,7 @@ LegalizerHelper::lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
// float r = cul2f((l + s) ^ s);
// return s ? -r : r;
// }
unsigned L = Src;
Register L = Src;
auto SignBit = MIRBuilder.buildConstant(S64, 63);
auto S = MIRBuilder.buildAShr(S64, L, SignBit);

@ -22,7 +22,7 @@

using namespace llvm;

unsigned SwiftErrorValueTracking::getOrCreateVReg(const MachineBasicBlock *MBB,
Register SwiftErrorValueTracking::getOrCreateVReg(const MachineBasicBlock *MBB,
const Value *Val) {
auto Key = std::make_pair(MBB, Val);
auto It = VRegDefMap.find(Key);
@ -46,7 +46,7 @@ void SwiftErrorValueTracking::setCurrentVReg(const MachineBasicBlock *MBB,
VRegDefMap[std::make_pair(MBB, Val)] = VReg;
}

unsigned SwiftErrorValueTracking::getOrCreateVRegDefAt(
Register SwiftErrorValueTracking::getOrCreateVRegDefAt(
const Instruction *I, const MachineBasicBlock *MBB, const Value *Val) {
auto Key = PointerIntPair<const Instruction *, 1, bool>(I, true);
auto It = VRegDefUses.find(Key);
@ -55,20 +55,20 @@ unsigned SwiftErrorValueTracking::getOrCreateVRegDefAt(

auto &DL = MF->getDataLayout();
const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
unsigned VReg = MF->getRegInfo().createVirtualRegister(RC);
Register VReg = MF->getRegInfo().createVirtualRegister(RC);
VRegDefUses[Key] = VReg;
setCurrentVReg(MBB, Val, VReg);
return VReg;
}

unsigned SwiftErrorValueTracking::getOrCreateVRegUseAt(
Register SwiftErrorValueTracking::getOrCreateVRegUseAt(
const Instruction *I, const MachineBasicBlock *MBB, const Value *Val) {
auto Key = PointerIntPair<const Instruction *, 1, bool>(I, false);
auto It = VRegDefUses.find(Key);
if (It != VRegDefUses.end())
return It->second;

unsigned VReg = getOrCreateVReg(MBB, Val);
Register VReg = getOrCreateVReg(MBB, Val);
VRegDefUses[Key] = VReg;
return VReg;
}
@ -129,7 +129,7 @@ bool SwiftErrorValueTracking::createEntriesInEntryBlock(DebugLoc DbgLoc) {
// least by the 'return' of the swifterror.
if (SwiftErrorArg && SwiftErrorArg == SwiftErrorVal)
continue;
unsigned VReg = MF->getRegInfo().createVirtualRegister(RC);
Register VReg = MF->getRegInfo().createVirtualRegister(RC);
// Assign Undef to Vreg. We construct MI directly to make sure it works
// with FastISel.
BuildMI(*MBB, MBB->getFirstNonPHI(), DbgLoc,
@ -177,7 +177,7 @@ void SwiftErrorValueTracking::propagateVRegs() {

// Check whether we have a single vreg def from all predecessors.
// Otherwise we need a phi.
SmallVector<std::pair<MachineBasicBlock *, unsigned>, 4> VRegs;
SmallVector<std::pair<MachineBasicBlock *, Register>, 4> VRegs;
SmallSet<const MachineBasicBlock *, 8> Visited;
for (auto *Pred : MBB->predecessors()) {
if (!Visited.insert(Pred).second)
@ -203,7 +203,7 @@ void SwiftErrorValueTracking::propagateVRegs() {
VRegs.size() >= 1 &&
std::find_if(
VRegs.begin(), VRegs.end(),
[&](const std::pair<const MachineBasicBlock *, unsigned> &V)
[&](const std::pair<const MachineBasicBlock *, Register> &V)
-> bool { return V.second != VRegs[0].second; }) !=
VRegs.end();

@ -227,7 +227,7 @@ void SwiftErrorValueTracking::propagateVRegs() {
assert(UpwardsUse);
assert(!VRegs.empty() &&
"No predecessors? Is the Calling Convention correct?");
unsigned DestReg = UUseVReg;
Register DestReg = UUseVReg;
BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc, TII->get(TargetOpcode::COPY),
DestReg)
.addReg(VRegs[0].second);

@ -57,18 +57,18 @@ struct IncomingArgHandler : public CallLowering::ValueHandler {
CCAssignFn *AssignFn)
: ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}

unsigned getStackAddress(uint64_t Size, int64_t Offset,
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
auto &MFI = MIRBuilder.getMF().getFrameInfo();
int FI = MFI.CreateFixedObject(Size, Offset, true);
MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
MIRBuilder.buildFrameIndex(AddrReg, FI);
StackUsed = std::max(StackUsed, Size + Offset);
return AddrReg;
}

void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
markPhysRegUsed(PhysReg);
switch (VA.getLocInfo()) {
@ -85,7 +85,7 @@ struct IncomingArgHandler : public CallLowering::ValueHandler {
}
}

void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
// FIXME: Get alignment
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
@ -133,31 +133,31 @@ struct OutgoingArgHandler : public CallLowering::ValueHandler {
: ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
AssignFnVarArg(AssignFnVarArg), StackSize(0) {}

unsigned getStackAddress(uint64_t Size, int64_t Offset,
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
LLT p0 = LLT::pointer(0, 64);
LLT s64 = LLT::scalar(64);
unsigned SPReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildCopy(SPReg, AArch64::SP);
Register SPReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildCopy(SPReg, Register(AArch64::SP));

unsigned OffsetReg = MRI.createGenericVirtualRegister(s64);
Register OffsetReg = MRI.createGenericVirtualRegister(s64);
MIRBuilder.buildConstant(OffsetReg, Offset);

unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;
}

void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
MIB.addUse(PhysReg, RegState::Implicit);
unsigned ExtReg = extendRegister(ValVReg, VA);
Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildCopy(PhysReg, ExtReg);
}

void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
if (VA.getLocInfo() == CCValAssign::LocInfo::AExt) {
Size = VA.getLocVT().getSizeInBits() / 8;
@ -263,7 +263,7 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
return false;
}

unsigned CurVReg = VRegs[i];
Register CurVReg = VRegs[i];
ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

@ -367,7 +367,7 @@ bool AArch64CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
bool Split = false;
LLT Ty = MRI.getType(VRegs[i]);
unsigned Dst = VRegs[i];
Register Dst = VRegs[i];

splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv(),
[&](unsigned Reg, uint64_t Offset) {
@ -436,7 +436,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 8> SplitArgs;
for (auto &OrigArg : OrigArgs) {
splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv,
[&](unsigned Reg, uint64_t Offset) {
[&](Register Reg, uint64_t Offset) {
MIRBuilder.buildExtract(Reg, OrigArg.Reg, Offset);
});
// AAPCS requires that we zero-extend i1 to 8 bits by the caller.
@ -512,7 +512,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,

if (SwiftErrorVReg) {
MIB.addDef(AArch64::X21, RegState::Implicit);
MIRBuilder.buildCopy(SwiftErrorVReg, AArch64::X21);
MIRBuilder.buildCopy(SwiftErrorVReg, Register(AArch64::X21));
}

CallSeqStart.addImm(Handler.StackSize).addImm(0);

@ -74,7 +74,7 @@ private:
// returned via 'Dst'.
MachineInstr *emitScalarToVector(unsigned EltSize,
const TargetRegisterClass *DstRC,
unsigned Scalar,
Register Scalar,
MachineIRBuilder &MIRBuilder) const;

/// Emit a lane insert into \p DstReg, or a new vector register if None is
@ -83,8 +83,8 @@ private:
/// The lane inserted into is defined by \p LaneIdx. The vector source
/// register is given by \p SrcReg. The register containing the element is
/// given by \p EltReg.
MachineInstr *emitLaneInsert(Optional<unsigned> DstReg, unsigned SrcReg,
unsigned EltReg, unsigned LaneIdx,
MachineInstr *emitLaneInsert(Optional<Register> DstReg, Register SrcReg,
Register EltReg, unsigned LaneIdx,
const RegisterBank &RB,
MachineIRBuilder &MIRBuilder) const;
bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
@ -113,12 +113,12 @@ private:
MachineIRBuilder &MIRBuilder) const;

// Emit a vector concat operation.
MachineInstr *emitVectorConcat(Optional<unsigned> Dst, unsigned Op1,
unsigned Op2,
MachineInstr *emitVectorConcat(Optional<Register> Dst, Register Op1,
Register Op2,
MachineIRBuilder &MIRBuilder) const;
MachineInstr *emitExtractVectorElt(Optional<unsigned> DstReg,
MachineInstr *emitExtractVectorElt(Optional<Register> DstReg,
const RegisterBank &DstRB, LLT ScalarTy,
unsigned VecReg, unsigned LaneIdx,
Register VecReg, unsigned LaneIdx,
MachineIRBuilder &MIRBuilder) const;

/// Helper function for selecting G_FCONSTANT. If the G_FCONSTANT can be
@ -128,7 +128,7 @@ private:
MachineRegisterInfo &MRI) const;

/// Emit a CSet for a compare.
MachineInstr *emitCSetForICMP(unsigned DefReg, unsigned Pred,
MachineInstr *emitCSetForICMP(Register DefReg, unsigned Pred,
MachineIRBuilder &MIRBuilder) const;

ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
@ -861,7 +861,7 @@ static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
bool AArch64InstructionSelector::selectCompareBranch(
MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

const unsigned CondReg = I.getOperand(0).getReg();
const Register CondReg = I.getOperand(0).getReg();
MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
MachineInstr *CCMI = MRI.getVRegDef(CondReg);
if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
@ -869,8 +869,8 @@ bool AArch64InstructionSelector::selectCompareBranch(
if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
return false;

unsigned LHS = CCMI->getOperand(2).getReg();
unsigned RHS = CCMI->getOperand(3).getReg();
Register LHS = CCMI->getOperand(2).getReg();
Register RHS = CCMI->getOperand(3).getReg();
if (!getConstantVRegVal(RHS, MRI))
std::swap(RHS, LHS);

@ -907,10 +907,10 @@ bool AArch64InstructionSelector::selectCompareBranch(
bool AArch64InstructionSelector::selectVectorSHL(
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_SHL);
unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
const LLT Ty = MRI.getType(DstReg);
unsigned Src1Reg = I.getOperand(1).getReg();
unsigned Src2Reg = I.getOperand(2).getReg();
Register Src1Reg = I.getOperand(1).getReg();
Register Src2Reg = I.getOperand(2).getReg();

if (!Ty.isVector())
return false;
@ -935,10 +935,10 @@ bool AArch64InstructionSelector::selectVectorSHL(
bool AArch64InstructionSelector::selectVectorASHR(
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_ASHR);
unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
const LLT Ty = MRI.getType(DstReg);
unsigned Src1Reg = I.getOperand(1).getReg();
unsigned Src2Reg = I.getOperand(2).getReg();
Register Src1Reg = I.getOperand(1).getReg();
Register Src2Reg = I.getOperand(2).getReg();

if (!Ty.isVector())
return false;
@ -980,9 +980,9 @@ bool AArch64InstructionSelector::selectVaStartAAPCS(
bool AArch64InstructionSelector::selectVaStartDarwin(
MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
unsigned ListReg = I.getOperand(0).getReg();
Register ListReg = I.getOperand(0).getReg();

unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
Register ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

auto MIB =
BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
@ -1036,7 +1036,7 @@ void AArch64InstructionSelector::materializeLargeCMVal(
constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
return DstReg;
};
unsigned DstReg = BuildMovK(MovZ.getReg(0),
Register DstReg = BuildMovK(MovZ.getReg(0),
AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
@ -1061,7 +1061,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
const unsigned DefReg = I.getOperand(0).getReg();
const Register DefReg = I.getOperand(0).getReg();
const LLT DefTy = MRI.getType(DefReg);

const TargetRegisterClass *DefRC = nullptr;
@ -1122,7 +1122,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
return false;
}

const unsigned CondReg = I.getOperand(0).getReg();
const Register CondReg = I.getOperand(0).getReg();
MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

// Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
@ -1167,7 +1167,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,

case TargetOpcode::G_BSWAP: {
// Handle vector types for G_BSWAP directly.
unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);

// We should only get vector types here; everything else is handled by the
@ -1212,7 +1212,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
const LLT s64 = LLT::scalar(64);
const LLT p0 = LLT::pointer(0, 64);

const unsigned DefReg = I.getOperand(0).getReg();
const Register DefReg = I.getOperand(0).getReg();
const LLT DefTy = MRI.getType(DefReg);
const unsigned DefSize = DefTy.getSizeInBits();
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
@ -1270,7 +1270,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
return true;

// Nope. Emit a copy and use a normal mov instead.
const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
const Register DefGPRReg = MRI.createVirtualRegister(&GPRRC);
MachineOperand &RegOp = I.getOperand(0);
RegOp.setReg(DefGPRReg);
MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
@ -1317,7 +1317,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
Register DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
.addReg(DstReg, 0, AArch64::sub_32);
@ -1349,7 +1349,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
Register SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
TII.get(AArch64::SUBREG_TO_REG))
.addDef(SrcReg)
@ -1427,7 +1427,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
}
unsigned MemSizeInBits = MemOp.getSize() * 8;

const unsigned PtrReg = I.getOperand(1).getReg();
const Register PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
// Sanity-check the pointer register.
@ -1437,7 +1437,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
"Load/Store pointer operand isn't a pointer");
#endif

const unsigned ValReg = I.getOperand(0).getReg();
const Register ValReg = I.getOperand(0).getReg();
const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

const unsigned NewOpc =
@ -1488,8 +1488,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
return false;
// If we have a ZEXTLOAD then change the load's type to be a narrower reg
//and zero_extend with SUBREG_TO_REG.
unsigned LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
unsigned DstReg = I.getOperand(0).getReg();
Register LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
Register DstReg = I.getOperand(0).getReg();
I.getOperand(0).setReg(LdReg);

MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
@ -1510,7 +1510,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
if (unsupportedBinOp(I, RBI, MRI, TRI))
return false;

const unsigned DefReg = I.getOperand(0).getReg();
const Register DefReg = I.getOperand(0).getReg();
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

if (RB.getID() != AArch64::GPRRegBankID) {
@ -1555,7 +1555,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,

const unsigned OpSize = Ty.getSizeInBits();

const unsigned DefReg = I.getOperand(0).getReg();
const Register DefReg = I.getOperand(0).getReg();
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
@ -1600,7 +1600,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
// this case, we want to increment when carry is set.
auto CsetMI = MIRBuilder
.buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()},
{AArch64::WZR, AArch64::WZR})
{Register(AArch64::WZR), Register(AArch64::WZR)})
.addImm(getInvertedCondCode(AArch64CC::HS));
constrainSelectedInstRegOperands(*CsetMI, TII, TRI, RBI);
I.eraseFromParent();
@ -1623,8 +1623,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

const unsigned DstReg = I.getOperand(0).getReg();
const unsigned SrcReg = I.getOperand(1).getReg();
const Register DstReg = I.getOperand(0).getReg();
const Register SrcReg = I.getOperand(1).getReg();

const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
@ -1681,8 +1681,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
}

case TargetOpcode::G_ANYEXT: {
const unsigned DstReg = I.getOperand(0).getReg();
const unsigned SrcReg = I.getOperand(1).getReg();
const Register DstReg = I.getOperand(0).getReg();
const Register SrcReg = I.getOperand(1).getReg();

const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
if (RBDst.getID() != AArch64::GPRRegBankID) {
@ -1713,7 +1713,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
// At this point G_ANYEXT is just like a plain COPY, but we need
// to explicitly form the 64-bit value if any.
if (DstSize > 32) {
unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
Register ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
.addDef(ExtSrc)
.addImm(0)
@ -1730,8 +1730,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
SrcTy = MRI.getType(I.getOperand(1).getReg());
const bool isSigned = Opcode == TargetOpcode::G_SEXT;
const unsigned DefReg = I.getOperand(0).getReg();
const unsigned SrcReg = I.getOperand(1).getReg();
const Register DefReg = I.getOperand(0).getReg();
const Register SrcReg = I.getOperand(1).getReg();
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

if (RB.getID() != AArch64::GPRRegBankID) {
@ -1749,7 +1749,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
return false;
}

const unsigned SrcXReg =
const Register SrcXReg =
MRI.createVirtualRegister(&AArch64::GPR64RegClass);
BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
.addDef(SrcXReg)
@ -1817,9 +1817,9 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
return false;
}

const unsigned CondReg = I.getOperand(1).getReg();
const unsigned TReg = I.getOperand(2).getReg();
const unsigned FReg = I.getOperand(3).getReg();
const Register CondReg = I.getOperand(1).getReg();
const Register TReg = I.getOperand(2).getReg();
const Register FReg = I.getOperand(3).getReg();

// If we have a floating-point result, then we should use a floating point
// select instead of an integer select.
@ -1829,7 +1829,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
if (IsFP && tryOptSelect(I))
return true;

unsigned CSelOpc = selectSelectOpc(I, MRI, RBI);
Register CSelOpc = selectSelectOpc(I, MRI, RBI);
MachineInstr &TstMI =
*BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
.addDef(AArch64::WZR)
@ -1859,7 +1859,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
}

unsigned CmpOpc = 0;
unsigned ZReg = 0;
Register ZReg;

// Check if this compare can be represented as a cmn, and perform any
// necessary transformations to do so.
@ -1930,8 +1930,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
if (CmpOpc != AArch64::FCMPSri && CmpOpc != AArch64::FCMPDri)
CmpMI = CmpMI.addUse(I.getOperand(3).getReg());

const unsigned DefReg = I.getOperand(0).getReg();
unsigned Def1Reg = DefReg;
const Register DefReg = I.getOperand(0).getReg();
Register Def1Reg = DefReg;
if (CC2 != AArch64CC::AL)
Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);

@ -1943,7 +1943,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
.addImm(getInvertedCondCode(CC1));

if (CC2 != AArch64CC::AL) {
unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
Register Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
MachineInstr &CSet2MI =
*BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
.addDef(Def2Reg)
@ -1974,7 +1974,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
case TargetOpcode::G_IMPLICIT_DEF: {
I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
const unsigned DstReg = I.getOperand(0).getReg();
const Register DstReg = I.getOperand(0).getReg();
const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
const TargetRegisterClass *DstRC =
getRegClassForTypeOnBank(DstTy, DstRB, RBI);
@ -2027,13 +2027,13 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
bool AArch64InstructionSelector::selectBrJT(MachineInstr &I,
MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_BRJT && "Expected G_BRJT");
unsigned JTAddr = I.getOperand(0).getReg();
Register JTAddr = I.getOperand(0).getReg();
unsigned JTI = I.getOperand(1).getIndex();
unsigned Index = I.getOperand(2).getReg();
Register Index = I.getOperand(2).getReg();
MachineIRBuilder MIB(I);

unsigned TargetReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
unsigned ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
Register TargetReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
Register ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
MIB.buildInstr(AArch64::JumpTableDest32, {TargetReg, ScratchReg},
{JTAddr, Index})
.addJumpTableIndex(JTI);
@ -2049,7 +2049,7 @@ bool AArch64InstructionSelector::selectJumpTable(
assert(I.getOpcode() == TargetOpcode::G_JUMP_TABLE && "Expected jump table");
assert(I.getOperand(1).isJTI() && "Jump table op should have a JTI!");

unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
unsigned JTI = I.getOperand(1).getIndex();
// We generate a MOVaddrJT which will get expanded to an ADRP + ADD later.
MachineIRBuilder MIB(I);
@ -2173,10 +2173,10 @@ bool AArch64InstructionSelector::selectIntrinsicRound(

bool AArch64InstructionSelector::selectVectorICmp(
MachineInstr &I, MachineRegisterInfo &MRI) const {
unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
unsigned SrcReg = I.getOperand(2).getReg();
unsigned Src2Reg = I.getOperand(3).getReg();
Register SrcReg = I.getOperand(2).getReg();
Register Src2Reg = I.getOperand(3).getReg();
LLT SrcTy = MRI.getType(SrcReg);

unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
@ -2344,7 +2344,7 @@ bool AArch64InstructionSelector::selectVectorICmp(
}

MachineInstr *AArch64InstructionSelector::emitScalarToVector(
unsigned EltSize, const TargetRegisterClass *DstRC, unsigned Scalar,
unsigned EltSize, const TargetRegisterClass *DstRC, Register Scalar,
MachineIRBuilder &MIRBuilder) const {
auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {});

@ -2387,14 +2387,14 @@ bool AArch64InstructionSelector::selectMergeValues(
return false;

auto *DstRC = &AArch64::GPR64RegClass;
unsigned SubToRegDef = MRI.createVirtualRegister(DstRC);
Register SubToRegDef = MRI.createVirtualRegister(DstRC);
MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII.get(TargetOpcode::SUBREG_TO_REG))
.addDef(SubToRegDef)
.addImm(0)
.addUse(I.getOperand(1).getReg())
.addImm(AArch64::sub_32);
unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC);
Register SubToRegDef2 = MRI.createVirtualRegister(DstRC);
// Need to anyext the second scalar before we can use bfm
MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII.get(TargetOpcode::SUBREG_TO_REG))
@ -2442,8 +2442,8 @@ static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
}

MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
Optional<unsigned> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
unsigned VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
Optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
unsigned CopyOpc = 0;
unsigned ExtractSubReg = 0;
@ -2470,7 +2470,7 @@ MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
}

// The register that we're going to copy into.
unsigned InsertReg = VecReg;
Register InsertReg = VecReg;
if (!DstReg)
DstReg = MRI.createVirtualRegister(DstRC);
// If the lane index is 0, we just use a subregister COPY.
@ -2505,9 +2505,9 @@ bool AArch64InstructionSelector::selectExtractElt(
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
"unexpected opcode!");
unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
const LLT NarrowTy = MRI.getType(DstReg);
const unsigned SrcReg = I.getOperand(1).getReg();
const Register SrcReg = I.getOperand(1).getReg();
const LLT WideTy = MRI.getType(SrcReg);
(void)WideTy;
assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() &&
@ -2544,7 +2544,7 @@ bool AArch64InstructionSelector::selectExtractElt(
bool AArch64InstructionSelector::selectSplitVectorUnmerge(
MachineInstr &I, MachineRegisterInfo &MRI) const {
unsigned NumElts = I.getNumOperands() - 1;
unsigned SrcReg = I.getOperand(NumElts).getReg();
Register SrcReg = I.getOperand(NumElts).getReg();
const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
const LLT SrcTy = MRI.getType(SrcReg);

@ -2561,7 +2561,7 @@ bool AArch64InstructionSelector::selectSplitVectorUnmerge(
const RegisterBank &DstRB =
*RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI);
for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) {
unsigned Dst = I.getOperand(OpIdx).getReg();
Register Dst = I.getOperand(OpIdx).getReg();
MachineInstr *Extract =
emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB);
if (!Extract)
@ -2589,7 +2589,7 @@ bool AArch64InstructionSelector::selectUnmergeValues(
// The last operand is the vector source register, and every other operand is
// a register to unpack into.
unsigned NumElts = I.getNumOperands() - 1;
unsigned SrcReg = I.getOperand(NumElts).getReg();
Register SrcReg = I.getOperand(NumElts).getReg();
const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
const LLT WideTy = MRI.getType(SrcReg);
(void)WideTy;
@ -2613,7 +2613,7 @@ bool AArch64InstructionSelector::selectUnmergeValues(
MachineBasicBlock &MBB = *I.getParent();

// Stores the registers we'll be copying from.
SmallVector<unsigned, 4> InsertRegs;
SmallVector<Register, 4> InsertRegs;

// We'll use the first register twice, so we only need NumElts-1 registers.
unsigned NumInsertRegs = NumElts - 1;
@ -2622,18 +2622,18 @@ bool AArch64InstructionSelector::selectUnmergeValues(
// directly. Otherwise, we need to do a bit of setup with some subregister
// inserts.
if (NarrowTy.getSizeInBits() * NumElts == 128) {
InsertRegs = SmallVector<unsigned, 4>(NumInsertRegs, SrcReg);
InsertRegs = SmallVector<Register, 4>(NumInsertRegs, SrcReg);
} else {
// No. We have to perform subregister inserts. For each insert, create an
// implicit def and a subregister insert, and save the register we create.
for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) {
unsigned ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
Register ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
MachineInstr &ImpDefMI =
*BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
ImpDefReg);

// Now, create the subregister insert from SrcReg.
unsigned InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
Register InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
MachineInstr &InsMI =
*BuildMI(MBB, I, I.getDebugLoc(),
TII.get(TargetOpcode::INSERT_SUBREG), InsertReg)
@ -2653,15 +2653,15 @@ bool AArch64InstructionSelector::selectUnmergeValues(
// create the copies.
//
// Perform the first copy separately as a subregister copy.
unsigned CopyTo = I.getOperand(0).getReg();
Register CopyTo = I.getOperand(0).getReg();
auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {})
.addReg(InsertRegs[0], 0, ExtractSubReg);
constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI);

// Now, perform the remaining copies as vector lane copies.
unsigned LaneIdx = 1;
for (unsigned InsReg : InsertRegs) {
unsigned CopyTo = I.getOperand(LaneIdx).getReg();
for (Register InsReg : InsertRegs) {
Register CopyTo = I.getOperand(LaneIdx).getReg();
MachineInstr &CopyInst =
*BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo)
.addUse(InsReg)
@ -2689,9 +2689,9 @@ bool AArch64InstructionSelector::selectConcatVectors(
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
"Unexpected opcode");
unsigned Dst = I.getOperand(0).getReg();
unsigned Op1 = I.getOperand(1).getReg();
unsigned Op2 = I.getOperand(2).getReg();
Register Dst = I.getOperand(0).getReg();
Register Op1 = I.getOperand(1).getReg();
Register Op2 = I.getOperand(2).getReg();
MachineIRBuilder MIRBuilder(I);
MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIRBuilder);
if (!ConcatMI)
@ -2807,7 +2807,7 @@ getInsertVecEltOpInfo(const RegisterBank &RB, unsigned EltSize) {
}

MachineInstr *AArch64InstructionSelector::emitVectorConcat(
Optional<unsigned> Dst, unsigned Op1, unsigned Op2,
Optional<Register> Dst, Register Op1, Register Op2,
MachineIRBuilder &MIRBuilder) const {
// We implement a vector concat by:
// 1. Use scalar_to_vector to insert the lower vector into the larger dest
@ -2900,14 +2900,14 @@ MachineInstr *AArch64InstructionSelector::emitFMovForFConstant(
}

MachineInstr *
AArch64InstructionSelector::emitCSetForICMP(unsigned DefReg, unsigned Pred,
AArch64InstructionSelector::emitCSetForICMP(Register DefReg, unsigned Pred,
MachineIRBuilder &MIRBuilder) const {
// CSINC increments the result when the predicate is false. Invert it.
const AArch64CC::CondCode InvCC = changeICMPPredToAArch64CC(
CmpInst::getInversePredicate((CmpInst::Predicate)Pred));
auto I =
MIRBuilder
.buildInstr(AArch64::CSINCWr, {DefReg}, {AArch64::WZR, AArch64::WZR})
.buildInstr(AArch64::CSINCWr, {DefReg}, {Register(AArch64::WZR), Register(AArch64::WZR)})
.addImm(InvCC);
constrainSelectedInstRegOperands(*I, TII, TRI, RBI);
return &*I;
@ -3011,7 +3011,7 @@ bool AArch64InstructionSelector::tryOptCMN(MachineInstr &I) const {
// cmn z, y

// Helper lambda to find the def.
auto FindDef = [&](unsigned VReg) {
auto FindDef = [&](Register VReg) {
MachineInstr *Def = MRI.getVRegDef(VReg);
while (Def) {
if (Def->getOpcode() != TargetOpcode::COPY)
@ -3091,7 +3091,7 @@ bool AArch64InstructionSelector::tryOptCMN(MachineInstr &I) const {
(MRI.getType(I.getOperand(2).getReg()).getSizeInBits() == 32);
auto ImmFns = selectArithImmed(I.getOperand(3));
unsigned Opc = OpcTable[Is32Bit][ImmFns.hasValue()];
unsigned ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR;
Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR;

auto CmpMI = MIRBuilder.buildInstr(Opc, {ZReg}, {I.getOperand(2).getReg()});

@ -3145,7 +3145,7 @@ bool AArch64InstructionSelector::tryOptVectorDup(MachineInstr &I) const {
if (!UndefMI)
return false;
// Match the scalar being splatted.
unsigned ScalarReg = InsMI->getOperand(2).getReg();
Register ScalarReg = InsMI->getOperand(2).getReg();
const RegisterBank *ScalarRB = RBI.getRegBank(ScalarReg, MRI, TRI);
// Match the index constant 0.
int64_t Index = 0;
@ -3206,9 +3206,9 @@ bool AArch64InstructionSelector::selectShuffleVector(
if (tryOptVectorShuffle(I))
return true;
const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
unsigned Src1Reg = I.getOperand(1).getReg();
Register Src1Reg = I.getOperand(1).getReg();
const LLT Src1Ty = MRI.getType(Src1Reg);
unsigned Src2Reg = I.getOperand(2).getReg();
Register Src2Reg = I.getOperand(2).getReg();
const LLT Src2Ty = MRI.getType(Src2Reg);

MachineBasicBlock &MBB = *I.getParent();
@ -3302,7 +3302,7 @@ bool AArch64InstructionSelector::selectShuffleVector(
}

MachineInstr *AArch64InstructionSelector::emitLaneInsert(
Optional<unsigned> DstReg, unsigned SrcReg, unsigned EltReg,
Optional<Register> DstReg, Register SrcReg, Register EltReg,
unsigned LaneIdx, const RegisterBank &RB,
MachineIRBuilder &MIRBuilder) const {
MachineInstr *InsElt = nullptr;
@ -3337,12 +3337,12 @@ bool AArch64InstructionSelector::selectInsertElt(
assert(I.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);

// Get information on the destination.
unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
const LLT DstTy = MRI.getType(DstReg);
unsigned VecSize = DstTy.getSizeInBits();

// Get information on the element we want to insert into the destination.
unsigned EltReg = I.getOperand(2).getReg();
Register EltReg = I.getOperand(2).getReg();
const LLT EltTy = MRI.getType(EltReg);
unsigned EltSize = EltTy.getSizeInBits();
if (EltSize < 16 || EltSize > 64)
@ -3350,14 +3350,14 @@ bool AArch64InstructionSelector::selectInsertElt(

// Find the definition of the index. Bail out if it's not defined by a
// G_CONSTANT.
unsigned IdxReg = I.getOperand(3).getReg();
Register IdxReg = I.getOperand(3).getReg();
auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI);
if (!VRegAndVal)
return false;
unsigned LaneIdx = VRegAndVal->Value;

// Perform the lane insert.
unsigned SrcReg = I.getOperand(1).getReg();
Register SrcReg = I.getOperand(1).getReg();
const RegisterBank &EltRB = *RBI.getRegBank(EltReg, MRI, TRI);
MachineIRBuilder MIRBuilder(I);

@ -3380,7 +3380,7 @@ bool AArch64InstructionSelector::selectInsertElt(
if (VecSize < 128) {
// If we had to widen to perform the insert, then we have to demote back to
// the original size to get the result we want.
unsigned DemoteVec = InsMI->getOperand(0).getReg();
Register DemoteVec = InsMI->getOperand(0).getReg();
const TargetRegisterClass *RC =
getMinClassForRegBank(*RBI.getRegBank(DemoteVec, MRI, TRI), VecSize);
if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
@ -3428,7 +3428,7 @@ bool AArch64InstructionSelector::selectBuildVector(
if (!ScalarToVec)
return false;

unsigned DstVec = ScalarToVec->getOperand(0).getReg();
Register DstVec = ScalarToVec->getOperand(0).getReg();
unsigned DstSize = DstTy.getSizeInBits();

// Keep track of the last MI we inserted. Later on, we might be able to save
@ -3464,8 +3464,8 @@ bool AArch64InstructionSelector::selectBuildVector(
return false;
}

unsigned Reg = MRI.createVirtualRegister(RC);
unsigned DstReg = I.getOperand(0).getReg();
Register Reg = MRI.createVirtualRegister(RC);
Register DstReg = I.getOperand(0).getReg();

MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
.addReg(DstVec, 0, SubReg);
@ -3531,17 +3531,17 @@ bool AArch64InstructionSelector::selectIntrinsicWithSideEffects(
MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(0xF000);
break;
case Intrinsic::aarch64_stlxr:
unsigned StatReg = I.getOperand(0).getReg();
Register StatReg = I.getOperand(0).getReg();
assert(RBI.getSizeInBits(StatReg, MRI, TRI) == 32 &&
"Status register must be 32 bits!");
unsigned SrcReg = I.getOperand(2).getReg();
Register SrcReg = I.getOperand(2).getReg();

if (RBI.getSizeInBits(SrcReg, MRI, TRI) != 64) {
LLVM_DEBUG(dbgs() << "Only support 64-bit sources right now.\n");
return false;
}

unsigned PtrReg = I.getOperand(3).getReg();
Register PtrReg = I.getOperand(3).getReg();
assert(MRI.getType(PtrReg).isPointer() && "Expected pointer operand");

// Expect only one memory operand.
@ -3573,8 +3573,8 @@ bool AArch64InstructionSelector::selectIntrinsic(
default:
break;
case Intrinsic::aarch64_crypto_sha1h:
unsigned DstReg = I.getOperand(0).getReg();
unsigned SrcReg = I.getOperand(2).getReg();
Register DstReg = I.getOperand(0).getReg();
Register SrcReg = I.getOperand(2).getReg();

// FIXME: Should this be an assert?
if (MRI.getType(DstReg).getSizeInBits() != 32 ||


MachineInstrBuilder MIB;

unsigned getStackAddress(uint64_t Size, int64_t Offset,
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
llvm_unreachable("not implemented");
}

void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
llvm_unreachable("not implemented");
}

void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
MIB.addUse(PhysReg);
MIRBuilder.buildCopy(PhysReg, ValVReg);
@ -111,7 +111,7 @@ bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
return true;
}

unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
Type *ParamTy,
uint64_t Offset) const {

@ -122,12 +122,12 @@ unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
LLT PtrType = getLLTForType(*PtrTy, DL);
unsigned DstReg = MRI.createGenericVirtualRegister(PtrType);
unsigned KernArgSegmentPtr =
Register DstReg = MRI.createGenericVirtualRegister(PtrType);
Register KernArgSegmentPtr =
MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
unsigned KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);
Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

unsigned OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
MIRBuilder.buildConstant(OffsetReg, Offset);

MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);
@ -156,7 +156,7 @@ void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
MIRBuilder.buildLoad(DstReg, PtrReg, *MMO);
}

static unsigned findFirstFreeSGPR(CCState &CCInfo) {
static Register findFirstFreeSGPR(CCState &CCInfo) {
unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
@ -215,27 +215,27 @@ bool AMDGPUCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,

// FIXME: How should these inputs interact with inreg / custom SGPR inputs?
if (Info->hasPrivateSegmentBuffer()) {
unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
Register PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
CCInfo.AllocateReg(PrivateSegmentBufferReg);
}

if (Info->hasDispatchPtr()) {
unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
Register DispatchPtrReg = Info->addDispatchPtr(*TRI);
// FIXME: Need to add reg as live-in
CCInfo.AllocateReg(DispatchPtrReg);
}

if (Info->hasQueuePtr()) {
unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
Register QueuePtrReg = Info->addQueuePtr(*TRI);
// FIXME: Need to add reg as live-in
CCInfo.AllocateReg(QueuePtrReg);
}

if (Info->hasKernargSegmentPtr()) {
unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
Register InputPtrReg = Info->addKernargSegmentPtr(*TRI);
const LLT P2 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
unsigned VReg = MRI.createGenericVirtualRegister(P2);
Register VReg = MRI.createGenericVirtualRegister(P2);
MRI.addLiveIn(InputPtrReg, VReg);
MIRBuilder.getMBB().addLiveIn(InputPtrReg);
MIRBuilder.buildCopy(VReg, InputPtrReg);
|
@ -22,7 +22,7 @@ namespace llvm {
class AMDGPUTargetLowering;

class AMDGPUCallLowering: public CallLowering {
unsigned lowerParameterPtr(MachineIRBuilder &MIRBuilder, Type *ParamTy,
Register lowerParameterPtr(MachineIRBuilder &MIRBuilder, Type *ParamTy,
uint64_t Offset) const;

void lowerParameter(MachineIRBuilder &MIRBuilder, Type *ParamTy,
|
@ -835,12 +835,12 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);

unsigned PtrLo32 = MRI.createGenericVirtualRegister(DstTy);
Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy);

// Extract low 32-bits of the pointer.
MIRBuilder.buildExtract(PtrLo32, Src, 0);

unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));

@ -856,15 +856,15 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
auto FlatNull =
MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));

unsigned ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);
Register ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);

unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));

unsigned BuildPtr = MRI.createGenericVirtualRegister(DstTy);
Register BuildPtr = MRI.createGenericVirtualRegister(DstTy);

// Coerce the type of the low half of the result so we can use merge_values.
unsigned SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
Register SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
.addDef(SrcAsInt)
.addUse(Src);
@ -883,7 +883,7 @@ bool AMDGPULegalizerInfo::legalizeFrint(
MachineIRBuilder &MIRBuilder) const {
MIRBuilder.setInstr(MI);

unsigned Src = MI.getOperand(1).getReg();
Register Src = MI.getOperand(1).getReg();
LLT Ty = MRI.getType(Src);
assert(Ty.isScalar() && Ty.getSizeInBits() == 64);

@ -913,7 +913,7 @@ bool AMDGPULegalizerInfo::legalizeFceil(
const LLT S1 = LLT::scalar(1);
const LLT S64 = LLT::scalar(64);

unsigned Src = MI.getOperand(1).getReg();
Register Src = MI.getOperand(1).getReg();
assert(MRI.getType(Src) == S64);

// result = trunc(src)
@ -959,12 +959,12 @@ bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
const LLT S32 = LLT::scalar(32);
const LLT S64 = LLT::scalar(64);

unsigned Src = MI.getOperand(1).getReg();
Register Src = MI.getOperand(1).getReg();
assert(MRI.getType(Src) == S64);

// TODO: Should this use extract since the low half is unused?
auto Unmerge = B.buildUnmerge({S32, S32}, Src);
unsigned Hi = Unmerge.getReg(1);
Register Hi = Unmerge.getReg(1);

// Extract the upper half, since this is where we will find the sign and
// exponent.
@ -1001,8 +1001,8 @@ bool AMDGPULegalizerInfo::legalizeITOFP(
MachineIRBuilder &B, bool Signed) const {
B.setInstr(MI);

unsigned Dst = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();

const LLT S64 = LLT::scalar(64);
const LLT S32 = LLT::scalar(32);
|
@ -828,14 +828,14 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
}
case AMDGPU::G_SEXT:
case AMDGPU::G_ZEXT: {
unsigned SrcReg = MI.getOperand(1).getReg();
Register SrcReg = MI.getOperand(1).getReg();
LLT SrcTy = MRI.getType(SrcReg);
if (SrcTy != LLT::scalar(1))
return;

MachineIRBuilder B(MI);
bool Signed = Opc == AMDGPU::G_SEXT;
unsigned DstReg = MI.getOperand(0).getReg();
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
const RegisterBank *SrcBank = getRegBank(SrcReg, MRI, *TRI);
if (SrcBank->getID() == AMDGPU::SCCRegBankID ||
|
@ -90,27 +90,27 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
: ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

unsigned getStackAddress(uint64_t Size, int64_t Offset,
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
"Unsupported size");

LLT p0 = LLT::pointer(0, 32);
LLT s32 = LLT::scalar(32);
unsigned SPReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildCopy(SPReg, ARM::SP);
Register SPReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildCopy(SPReg, Register(ARM::SP));

unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
Register OffsetReg = MRI.createGenericVirtualRegister(s32);
MIRBuilder.buildConstant(OffsetReg, Offset);

unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;
}

void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
@ -118,17 +118,17 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");

unsigned ExtReg = extendRegister(ValVReg, VA);
Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildCopy(PhysReg, ExtReg);
MIB.addUse(PhysReg, RegState::Implicit);
}

void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
"Unsupported size");

unsigned ExtReg = extendRegister(ValVReg, VA);
Register ExtReg = extendRegister(ValVReg, VA);
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
/* Alignment */ 1);
@ -298,7 +298,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {

bool isArgumentHandler() const override { return true; }

unsigned getStackAddress(uint64_t Size, int64_t Offset,
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
"Unsupported size");
@ -315,7 +315,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
return AddrReg;
}

void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
"Unsupported size");
@ -336,14 +336,14 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
}
}

void buildLoad(unsigned Val, unsigned Addr, uint64_t Size, unsigned Alignment,
void buildLoad(Register Val, Register Addr, uint64_t Size, unsigned Alignment,
MachinePointerInfo &MPO) {
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOLoad, Size, Alignment);
MIRBuilder.buildLoad(Val, Addr, *MMO);
}

void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
|
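
The ARM handler above now writes MIRBuilder.buildCopy(SPReg, Register(ARM::SP)) instead of passing the enum value directly, and the AArch64 and Mips changes wrap enum values the same way inside braced operand lists. A plausible minimal illustration of why the explicit wrap is needed, using simplified stand-in types rather than the real LLVM declarations: reaching the builder's operand type from a raw enum would chain two user-defined conversions, which C++ never applies implicitly.

// Sketch only: stand-ins for the real Register/SrcOp, which live in LLVM headers.
enum ARMReg : unsigned { SP = 13 };          // stands in for a generated register enum
struct Register {
  unsigned Val;
  Register(unsigned V = 0) : Val(V) {}       // implicit from unsigned
};
struct SrcOp {
  Register R;
  SrcOp(Register R) : R(R) {}                // implicit from Register
};
void buildCopy(SrcOp Dst, SrcOp Src) { (void)Dst; (void)Src; }
int main() {
  Register SPReg(1);
  // buildCopy(SPReg, SP);                   // ill-formed: enum -> Register -> SrcOp
  //                                         // needs two user-defined conversions
  buildCopy(SPReg, Register(SP));            // OK: wrap explicitly, then one conversion
}
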
@ -423,7 +423,7 @@ bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
auto *RetTy = Type::getInt32Ty(Ctx);

SmallVector<unsigned, 2> Results;
SmallVector<Register, 2> Results;
for (auto Libcall : Libcalls) {
auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
auto Status =
|
@ -93,7 +93,7 @@ private:
void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) override;

unsigned getStackAddress(const CCValAssign &VA,
Register getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) override;

void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
@ -134,7 +134,7 @@ void IncomingValueHandler::assignValueToReg(Register ValVReg,
const EVT &VT) {
const MipsSubtarget &STI =
static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
unsigned PhysReg = VA.getLocReg();
Register PhysReg = VA.getLocReg();
if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
const MipsSubtarget &STI =
static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
@ -173,7 +173,7 @@ void IncomingValueHandler::assignValueToReg(Register ValVReg,
}
}

unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) {
MachineFunction &MF = MIRBuilder.getMF();
unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
@ -188,7 +188,7 @@ unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
MIRBuilder.buildFrameIndex(AddrReg, FI);

return AddrReg;
@ -228,7 +228,7 @@ private:
void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) override;

unsigned getStackAddress(const CCValAssign &VA,
Register getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) override;

void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
@ -237,7 +237,7 @@ private:
ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
Register ArgsReg, const EVT &VT) override;

unsigned extendRegister(Register ValReg, const CCValAssign &VA);
Register extendRegister(Register ValReg, const CCValAssign &VA);

MachineInstrBuilder &MIB;
};
@ -274,13 +274,13 @@ void OutgoingValueHandler::assignValueToReg(Register ValVReg,
.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
*STI.getRegBankInfo());
} else {
unsigned ExtReg = extendRegister(ValVReg, VA);
Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildCopy(PhysReg, ExtReg);
MIB.addUse(PhysReg, RegState::Implicit);
}
}

unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) {
MachineFunction &MF = MIRBuilder.getMF();
const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
@ -288,7 +288,7 @@ unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
LLT p0 = LLT::pointer(0, 32);
LLT s32 = LLT::scalar(32);
Register SPReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildCopy(SPReg, Mips::SP);
MIRBuilder.buildCopy(SPReg, Register(Mips::SP));

Register OffsetReg = MRI.createGenericVirtualRegister(s32);
unsigned Offset = VA.getLocMemOffset();
@ -310,11 +310,11 @@ void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
const CCValAssign &VA) {
MachineMemOperand *MMO;
Register Addr = getStackAddress(VA, MMO);
unsigned ExtReg = extendRegister(ValVReg, VA);
Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

unsigned OutgoingValueHandler::extendRegister(Register ValReg,
Register OutgoingValueHandler::extendRegister(Register ValReg,
const CCValAssign &VA) {
LLT LocTy{VA.getLocVT()};
switch (VA.getLocInfo()) {
@ -530,7 +530,7 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
MIB.addDef(Mips::SP, RegState::Implicit);
if (IsCalleeGlobalPIC) {
unsigned CalleeReg =
Register CalleeReg =
MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
MachineInstr *CalleeGlobalValue =
MIRBuilder.buildGlobalValue(CalleeReg, Callee.getGlobal());
@ -583,8 +583,8 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,

if (IsCalleeGlobalPIC) {
MIRBuilder.buildCopy(
Mips::GP,
MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
Register(Mips::GP),
MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
MIB.addDef(Mips::GP, RegState::Implicit);
}
MIRBuilder.insertInstr(MIB);
|
@ -45,7 +45,7 @@ public:
private:
bool assign(Register VReg, const CCValAssign &VA, const EVT &VT);

virtual unsigned getStackAddress(const CCValAssign &VA,
virtual Register getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) = 0;

virtual void assignValueToReg(Register ValVReg, const CCValAssign &VA,
|
@ -38,7 +38,7 @@ public:

private:
bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
bool materialize32BitImm(unsigned DestReg, APInt Imm,
bool materialize32BitImm(Register DestReg, APInt Imm,
MachineIRBuilder &B) const;
bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;

@ -80,7 +80,7 @@ MipsInstructionSelector::MipsInstructionSelector(

bool MipsInstructionSelector::selectCopy(MachineInstr &I,
MachineRegisterInfo &MRI) const {
unsigned DstReg = I.getOperand(0).getReg();
Register DstReg = I.getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(DstReg))
return true;

@ -104,12 +104,12 @@ bool MipsInstructionSelector::selectCopy(MachineInstr &I,
return true;
}

bool MipsInstructionSelector::materialize32BitImm(unsigned DestReg, APInt Imm,
bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
MachineIRBuilder &B) const {
assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
// Ori zero extends immediate. Used for values with zeros in high 16 bits.
if (Imm.getHiBits(16).isNullValue()) {
MachineInstr *Inst = B.buildInstr(Mips::ORi, {DestReg}, {Mips::ZERO})
MachineInstr *Inst = B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
.addImm(Imm.getLoBits(16).getLimitedValue());
return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
}
@ -121,12 +121,12 @@ bool MipsInstructionSelector::materialize32BitImm(unsigned DestReg, APInt Imm,
}
// ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
if (Imm.isSignedIntN(16)) {
MachineInstr *Inst = B.buildInstr(Mips::ADDiu, {DestReg}, {Mips::ZERO})
MachineInstr *Inst = B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
.addImm(Imm.getLoBits(16).getLimitedValue());
return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
}
// Values that cannot be materialized with single immediate instruction.
unsigned LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
.addImm(Imm.getHiBits(16).getLimitedValue());
MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
@ -201,7 +201,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,

switch (I.getOpcode()) {
case G_UMULH: {
unsigned PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
MachineInstr *PseudoMULTu, *PseudoMove;

PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
@ -242,7 +242,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
break;
}
case G_PHI: {
const unsigned DestReg = I.getOperand(0).getReg();
const Register DestReg = I.getOperand(0).getReg();
const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();

@ -257,7 +257,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
case G_LOAD:
case G_ZEXTLOAD:
case G_SEXTLOAD: {
const unsigned DestReg = I.getOperand(0).getReg();
const Register DestReg = I.getOperand(0).getReg();
const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();
@ -281,7 +281,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
case G_UREM:
case G_SDIV:
case G_SREM: {
unsigned HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;

@ -328,7 +328,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

if (Size == 32) {
unsigned GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
MachineIRBuilder B(I);
if (!materialize32BitImm(GPRReg, APImm, B))
return false;
@ -339,8 +339,8 @@ bool MipsInstructionSelector::select(MachineInstr &I,
return false;
}
if (Size == 64) {
unsigned GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
unsigned GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
MachineIRBuilder B(I);
if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
return false;
@ -419,7 +419,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
return false;

if (GVal->hasLocalLinkage()) {
unsigned LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
LWGOT->getOperand(0).setReg(LWGOTDef);

MachineInstr *ADDiu =
@ -432,7 +432,7 @@ bool MipsInstructionSelector::select(MachineInstr &I,
return false;
}
} else {
unsigned LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);

MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
.addDef(LUiReg)
@ -455,8 +455,9 @@ bool MipsInstructionSelector::select(MachineInstr &I,
}
case G_ICMP: {
struct Instr {
unsigned Opcode, Def, LHS, RHS;
Instr(unsigned Opcode, unsigned Def, unsigned LHS, unsigned RHS)
unsigned Opcode;
Register Def, LHS, RHS;
Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
: Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};

bool hasImm() const {
@ -467,10 +468,10 @@ bool MipsInstructionSelector::select(MachineInstr &I,
};

SmallVector<struct Instr, 2> Instructions;
unsigned ICMPReg = I.getOperand(0).getReg();
unsigned Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
unsigned LHS = I.getOperand(2).getReg();
unsigned RHS = I.getOperand(3).getReg();
Register ICMPReg = I.getOperand(0).getReg();
Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
Register LHS = I.getOperand(2).getReg();
Register RHS = I.getOperand(3).getReg();
CmpInst::Predicate Cond =
static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());

@ -44,14 +44,14 @@ static const TargetRegisterClass &getGlobalBaseRegClass(MachineFunction &MF) {
return Mips::GPR32RegClass;
}

unsigned MipsFunctionInfo::getGlobalBaseReg() {
Register MipsFunctionInfo::getGlobalBaseReg() {
if (!GlobalBaseReg)
GlobalBaseReg =
MF.getRegInfo().createVirtualRegister(&getGlobalBaseRegClass(MF));
return GlobalBaseReg;
}

unsigned MipsFunctionInfo::getGlobalBaseRegForGlobalISel() {
Register MipsFunctionInfo::getGlobalBaseRegForGlobalISel() {
if (!GlobalBaseReg) {
getGlobalBaseReg();
initGlobalBaseReg();
|
@ -32,8 +32,8 @@ public:
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }

bool globalBaseRegSet() const;
unsigned getGlobalBaseReg();
unsigned getGlobalBaseRegForGlobalISel();
Register getGlobalBaseReg();
Register getGlobalBaseRegForGlobalISel();

// Insert instructions to initialize the global base register in the
// first MBB of the function.
|
@ -101,28 +101,28 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
DL(MIRBuilder.getMF().getDataLayout()),
STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {}

unsigned getStackAddress(uint64_t Size, int64_t Offset,
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0));
LLT SType = LLT::scalar(DL.getPointerSizeInBits(0));
unsigned SPReg = MRI.createGenericVirtualRegister(p0);
Register SPReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildCopy(SPReg, STI.getRegisterInfo()->getStackRegister());

unsigned OffsetReg = MRI.createGenericVirtualRegister(SType);
Register OffsetReg = MRI.createGenericVirtualRegister(SType);
MIRBuilder.buildConstant(OffsetReg, Offset);

unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;
}

void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
MIB.addUse(PhysReg, RegState::Implicit);

unsigned ExtReg;
Register ExtReg;
// If we are copying the value to a physical register with the
// size larger than the size of the value itself - build AnyExt
// to the size of the register first and only then do the copy.
@ -143,9 +143,9 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
MIRBuilder.buildCopy(PhysReg, ExtReg);
}

void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
unsigned ExtReg = extendRegister(ValVReg, VA);
Register ExtReg = extendRegister(ValVReg, VA);
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
/* Alignment */ 1);
@ -230,7 +230,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {

bool isArgumentHandler() const override { return true; }

unsigned getStackAddress(uint64_t Size, int64_t Offset,
Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
auto &MFI = MIRBuilder.getMF().getFrameInfo();
int FI = MFI.CreateFixedObject(Size, Offset, true);
@ -242,7 +242,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
return AddrReg;
}

void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
@ -250,7 +250,7 @@ struct IncomingValueHandler : public CallLowering::ValueHandler {
MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
}

void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
markPhysRegUsed(PhysReg);

@ -136,13 +136,13 @@ public:
Register getFrameRegister(const MachineFunction &MF) const override;
unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const;
unsigned getPtrSizedStackRegister(const MachineFunction &MF) const;
unsigned getStackRegister() const { return StackPtr; }
unsigned getBaseRegister() const { return BasePtr; }
Register getStackRegister() const { return StackPtr; }
Register getBaseRegister() const { return BasePtr; }
/// Returns physical register used as frame pointer.
/// This will always returns the frame pointer register, contrary to
/// getFrameRegister() which returns the "base pointer" in situations
/// involving a stack, frame and base pointer.
unsigned getFramePtr() const { return FramePtr; }
Register getFramePtr() const { return FramePtr; }
// FIXME: Move to FrameInfok
unsigned getSlotSize() const { return SlotSize; }
};
|
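
The X86 accessors above change only their return type; call sites such as MIRBuilder.buildCopy(SPReg, STI.getRegisterInfo()->getStackRegister()) earlier in this commit compile unchanged because they now receive a Register directly. A hedged sketch of why the swap is also painless for pre-existing callers, assuming Register's implicit two-way conversion with unsigned as in llvm/CodeGen/Register.h of this era (trimmed-down type, not the full class):

// Sketch only.
struct Register {
  unsigned Reg;
  constexpr Register(unsigned Val = 0) : Reg(Val) {}   // implicit from unsigned
  constexpr operator unsigned() const { return Reg; }  // implicit back to unsigned
};
Register getStackRegister() { return Register(4); }    // stand-in body and value
int main() {
  unsigned OldStyle = getStackRegister();              // pre-change callers still compile
  Register NewStyle = OldStyle;                        // and round-trip losslessly
  return NewStyle == OldStyle ? 0 : 1;
}
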
@ -111,7 +111,7 @@ static MachineFunction *getMFFromMMI(const Module *M,
return MF;
}

static void collectCopies(SmallVectorImpl<unsigned> &Copies,
static void collectCopies(SmallVectorImpl<Register> &Copies,
MachineFunction *MF) {
for (auto &MBB : *MF)
for (MachineInstr &MI : MBB) {
@ -128,7 +128,7 @@ TEST(PatternMatchInstr, MatchIntConstant) {
auto ModuleMMIPair = createDummyModule(Context, *TM, "");
MachineFunction *MF =
getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get());
SmallVector<unsigned, 4> Copies;
SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
MachineBasicBlock *EntryMBB = &*MF->begin();
MachineIRBuilder B(*MF);
@ -149,7 +149,7 @@ TEST(PatternMatchInstr, MatchBinaryOp) {
auto ModuleMMIPair = createDummyModule(Context, *TM, "");
MachineFunction *MF =
getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get());
SmallVector<unsigned, 4> Copies;
SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
MachineBasicBlock *EntryMBB = &*MF->begin();
MachineIRBuilder B(*MF);
@ -276,7 +276,7 @@ TEST(PatternMatchInstr, MatchFPUnaryOp) {
auto ModuleMMIPair = createDummyModule(Context, *TM, "");
MachineFunction *MF =
getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get());
SmallVector<unsigned, 4> Copies;
SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
MachineBasicBlock *EntryMBB = &*MF->begin();
MachineIRBuilder B(*MF);
@ -347,7 +347,7 @@ TEST(PatternMatchInstr, MatchExtendsTrunc) {
auto ModuleMMIPair = createDummyModule(Context, *TM, "");
MachineFunction *MF =
getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get());
SmallVector<unsigned, 4> Copies;
SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
MachineBasicBlock *EntryMBB = &*MF->begin();
MachineIRBuilder B(*MF);
@ -403,7 +403,7 @@ TEST(PatternMatchInstr, MatchSpecificType) {
auto ModuleMMIPair = createDummyModule(Context, *TM, "");
MachineFunction *MF =
getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get());
SmallVector<unsigned, 4> Copies;
SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
MachineBasicBlock *EntryMBB = &*MF->begin();
MachineIRBuilder B(*MF);
@ -450,7 +450,7 @@ TEST(PatternMatchInstr, MatchCombinators) {
auto ModuleMMIPair = createDummyModule(Context, *TM, "");
MachineFunction *MF =
getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get());
SmallVector<unsigned, 4> Copies;
SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
MachineBasicBlock *EntryMBB = &*MF->begin();
MachineIRBuilder B(*MF);
@ -493,7 +493,7 @@ TEST(PatternMatchInstr, MatchMiscellaneous) {
auto ModuleMMIPair = createDummyModule(Context, *TM, "");
MachineFunction *MF =
getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get());
SmallVector<unsigned, 4> Copies;
SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
MachineBasicBlock *EntryMBB = &*MF->begin();
MachineIRBuilder B(*MF);
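
In the unit tests, only the element type of the register containers changes (SmallVector<unsigned, 4> becomes SmallVector<Register, 4>, and collectCopies takes SmallVectorImpl<Register> &); the loop bodies stay untouched. That works because Register converts implicitly from the plain unsigned that accessors such as MachineOperand::getReg() still returned at this point in the history. A minimal sketch of the pattern, with stand-in types (std::vector in place of SmallVector):

#include <vector>

// Stand-in for llvm::Register; the real one lives in llvm/CodeGen/Register.h.
struct Register {
  unsigned Reg;
  Register(unsigned Val = 0) : Reg(Val) {}   // implicit from unsigned
  operator unsigned() const { return Reg; }  // implicit back to unsigned
};

unsigned getRegFromOperand() { return 5; }   // stands in for MachineOperand::getReg()

int main() {
  std::vector<Register> Copies;              // element type flips; call sites do not
  Copies.push_back(getRegFromOperand());     // unsigned -> Register happens implicitly
  return Copies[0] == 5u ? 0 : 1;            // and reads back as unsigned again
}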