Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-22 02:33:06 +01:00)
[FastISel] Remove kill tracking
This is a followup to D98145: As far as I know, tracking of kill flags in FastISel is just a compile-time optimization. However, I'm not actually seeing any compile-time regression when removing the tracking. This probably used to be more important in the past, before FastRA was switched to allocate instructions in reverse order, which means that it discovers kills as a matter of course.

As such, the kill tracking doesn't really seem to serve a purpose anymore, and just adds additional complexity and potential for errors. This patch removes it entirely. The primary changes are dropping the hasTrivialKill() method and removing the kill arguments from the fastEmit methods. The rest is mechanical fixup.

Differential Revision: https://reviews.llvm.org/D98294
This commit is contained in:
parent 2808d32c32
commit 5794a3620e
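To make the mechanical part of the change concrete, here is a minimal, self-contained sketch of the before/after shape of the fastEmit_* interface. The EmitterBefore/EmitterAfter types, opcodes, and register numbers are purely illustrative stand-ins, not LLVM's actual classes; only the signature shape mirrors the patch.

```cpp
// Illustrative sketch only: before the patch, every register operand was
// paired with an Op*IsKill flag that callers computed via hasTrivialKill();
// after the patch the flag is gone and last uses are left to the register
// allocator to rediscover.
#include <iostream>

struct EmitterBefore {
  unsigned fastEmit_rr(unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
    std::cout << "ADD %" << Op0 << (Op0IsKill ? "<kill>" : "") << ", %" << Op1
              << (Op1IsKill ? "<kill>" : "") << "\n";
    return 42; // result vreg, made up for the sketch
  }
};

struct EmitterAfter {
  unsigned fastEmit_rr(unsigned Op0, unsigned Op1) {
    std::cout << "ADD %" << Op0 << ", %" << Op1 << "\n";
    return 42;
  }
};

int main() {
  EmitterBefore{}.fastEmit_rr(1, /*Op0IsKill=*/true, 2, /*Op1IsKill=*/false);
  EmitterAfter{}.fastEmit_rr(1, 2); // call sites lose the kill bookkeeping
}
```

The diff below performs exactly this kind of parameter removal across the target-independent FastISel code and the backends that override its hooks.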
@@ -116,6 +116,13 @@ Changes to the Go bindings
 --------------------------
 
+Changes to the FastISel infrastructure
+--------------------------------------
+
+* FastISel no longer tracks killed registers, and instead leaves this to the
+  register allocator. This means that ``hasTrivialKill()`` is removed, as well
+  as the ``OpNIsKill`` parameters to the ``fastEmit_*()`` family of functions.
+
 Changes to the DAG infrastructure
 ---------------------------------
 
@@ -274,7 +274,7 @@ public:
 
   /// This is a wrapper around getRegForValue that also takes care of
   /// truncating or sign-extending the given getelementptr index value.
-  std::pair<Register, bool> getRegForGEPIndex(const Value *Idx);
+  Register getRegForGEPIndex(const Value *Idx);
 
   /// We're checking to see if we can fold \p LI into \p FoldInst. Note
   /// that we could have a sequence where multiple LLVM IR instructions are
@@ -347,27 +347,26 @@ protected:
 
   /// This method is called by target-independent code to request that an
   /// instruction with the given type, opcode, and register operand be emitted.
-  virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
-                              bool Op0IsKill);
+  virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0);
 
   /// This method is called by target-independent code to request that an
   /// instruction with the given type, opcode, and register operands be emitted.
   virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
-                               bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+                               unsigned Op1);
 
   /// This method is called by target-independent code to request that an
   /// instruction with the given type, opcode, and register and immediate
   /// operands be emitted.
   virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
-                               bool Op0IsKill, uint64_t Imm);
+                               uint64_t Imm);
 
   /// This method is a wrapper of fastEmit_ri.
   ///
   /// It first tries to emit an instruction with an immediate operand using
   /// fastEmit_ri. If that fails, it materializes the immediate into a register
   /// and try fastEmit_rr instead.
-  Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
-                        uint64_t Imm, MVT ImmType);
+  Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, uint64_t Imm,
+                        MVT ImmType);
 
   /// This method is called by target-independent code to request that an
   /// instruction with the given type, opcode, and immediate operand be emitted.
@@ -387,33 +386,31 @@ protected:
   /// Emit a MachineInstr with one register operand and a result register
   /// in the given register class.
   Register fastEmitInst_r(unsigned MachineInstOpcode,
-                          const TargetRegisterClass *RC, unsigned Op0,
-                          bool Op0IsKill);
+                          const TargetRegisterClass *RC, unsigned Op0);
 
   /// Emit a MachineInstr with two register operands and a result
   /// register in the given register class.
   Register fastEmitInst_rr(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
-                           bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+                           unsigned Op1);
 
   /// Emit a MachineInstr with three register operands and a result
   /// register in the given register class.
   Register fastEmitInst_rrr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC, unsigned Op0,
-                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
-                            unsigned Op2, bool Op2IsKill);
+                            unsigned Op1, unsigned Op2);
 
   /// Emit a MachineInstr with a register operand, an immediate, and a
   /// result register in the given register class.
   Register fastEmitInst_ri(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
-                           bool Op0IsKill, uint64_t Imm);
+                           uint64_t Imm);
 
   /// Emit a MachineInstr with one register operand and two immediate
   /// operands.
   Register fastEmitInst_rii(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC, unsigned Op0,
-                            bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
+                            uint64_t Imm1, uint64_t Imm2);
 
   /// Emit a MachineInstr with a floating point immediate, and a result
   /// register in the given register class.
@@ -425,8 +422,7 @@ protected:
   /// result register in the given register class.
   Register fastEmitInst_rri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC, unsigned Op0,
-                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
-                            uint64_t Imm);
+                            unsigned Op1, uint64_t Imm);
 
   /// Emit a MachineInstr with a single immediate operand, and a result
   /// register in the given register class.
@@ -435,12 +431,11 @@ protected:
 
   /// Emit a MachineInstr for an extract_subreg from a specified index of
   /// a superregister to a specified type.
-  Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
-                                      uint32_t Idx);
+  Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx);
 
   /// Emit MachineInstrs to compute the value of Op with all but the
   /// least significant bit set to zero.
-  Register fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
+  Register fastEmitZExtFromI1(MVT VT, unsigned Op0);
 
   /// Emit an unconditional branch to the given block, unless it is the
   /// immediate (fall-through) successor, and update the CFG.
@@ -490,12 +485,6 @@ protected:
   /// - \c Add has a constant operand.
   bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
 
-  /// Test whether the register associated with this value has exactly one use,
-  /// in which case that single use is killing. Note that multiple IR values
-  /// may map onto the same register, in which case this is not the same as
-  /// checking that an IR value has one use.
-  bool hasTrivialKill(const Value *V);
-
   /// Create a machine mem operand from the given instruction.
   MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;
 
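The fastEmit_ri_ wrapper documented in the header hunk above first tries the register+immediate form and, if the target rejects it, materializes the immediate into a register and retries with the register+register form. Below is a hedged, simplified model of that fallback pattern; every function name and the 8-bit immediate limit are assumptions made for illustration, not LLVM's real API (the real method also turns a multiply by a power of two into a shift and checks immediate-type legality).

```cpp
// Simplified model of the try-immediate-then-materialize fallback.
#include <cstdint>
#include <iostream>
#include <optional>

using Reg = unsigned;

// Pretend the target only accepts immediates that fit in 8 bits.
std::optional<Reg> tryEmitRegImm(Reg Op0, uint64_t Imm) {
  if (Imm <= 0xFF) {
    std::cout << "ri form: %" << Op0 << ", #" << Imm << "\n";
    return Reg{100};
  }
  return std::nullopt;
}

Reg materializeImm(uint64_t Imm) {
  std::cout << "materialize #" << Imm << " into %200\n";
  return Reg{200};
}

Reg emitRegReg(Reg Op0, Reg Op1) {
  std::cout << "rr form: %" << Op0 << ", %" << Op1 << "\n";
  return Reg{101};
}

Reg emitRegImmWithFallback(Reg Op0, uint64_t Imm) {
  if (auto R = tryEmitRegImm(Op0, Imm))
    return *R;                                 // immediate form accepted
  return emitRegReg(Op0, materializeImm(Imm)); // fall back to the rr form
}

int main() {
  emitRegImmWithFallback(1, 5);     // fits: uses the ri form
  emitRegImmWithFallback(1, 70000); // too big: materializes, then rr
}
```

Note that in the fallback path the old code also had to decide whether the materialized constant register could be marked killed; with this patch that bookkeeping disappears, as the FastISel.cpp hunks below show.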
@@ -238,42 +238,6 @@ void FastISel::flushLocalValueMap() {
   SavedInsertPt = FuncInfo.InsertPt;
 }
 
-bool FastISel::hasTrivialKill(const Value *V) {
-  // Don't consider constants or arguments to have trivial kills.
-  const Instruction *I = dyn_cast<Instruction>(V);
-  if (!I)
-    return false;
-
-  // No-op casts are trivially coalesced by fast-isel.
-  if (const auto *Cast = dyn_cast<CastInst>(I))
-    if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
-      return false;
-
-  // Even the value might have only one use in the LLVM IR, it is possible that
-  // FastISel might fold the use into another instruction and now there is more
-  // than one use at the Machine Instruction level.
-  Register Reg = lookUpRegForValue(V);
-  if (Reg && !MRI.use_empty(Reg))
-    return false;
-
-  // GEPs with all zero indices are trivially coalesced by fast-isel.
-  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
-    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
-      return false;
-
-  // Casts and extractvalues may be trivially coalesced by fast-isel.
-  if (I->getOpcode() == Instruction::BitCast ||
-      I->getOpcode() == Instruction::PtrToInt ||
-      I->getOpcode() == Instruction::IntToPtr ||
-      I->getOpcode() == Instruction::ExtractValue)
-    return false;
-
-  // Only instructions with a single use in the same basic block are considered
-  // to have trivial kills.
-  return I->hasOneUse() &&
-         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
-}
-
 Register FastISel::getRegForValue(const Value *V) {
   EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
   // Don't handle non-simple values in FastISel.
@@ -346,8 +310,8 @@ Register FastISel::materializeConstant(const Value *V, MVT VT) {
         Register IntegerReg =
             getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
         if (IntegerReg)
-          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
-                           /*Op0IsKill=*/false);
+          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
+                           IntegerReg);
       }
     }
   } else if (const auto *Op = dyn_cast<Operator>(V)) {
@@ -419,27 +383,22 @@ void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
   }
 }
 
-std::pair<Register, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
+Register FastISel::getRegForGEPIndex(const Value *Idx) {
   Register IdxN = getRegForValue(Idx);
   if (!IdxN)
     // Unhandled operand. Halt "fast" selection and bail.
-    return std::pair<Register, bool>(Register(), false);
-
-  bool IdxNIsKill = hasTrivialKill(Idx);
+    return Register();
 
   // If the index is smaller or larger than intptr_t, truncate or extend it.
   MVT PtrVT = TLI.getPointerTy(DL);
   EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
   if (IdxVT.bitsLT(PtrVT)) {
-    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
-                      IdxNIsKill);
-    IdxNIsKill = true;
+    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
   } else if (IdxVT.bitsGT(PtrVT)) {
     IdxN =
-        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
-    IdxNIsKill = true;
+        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
   }
-  return std::pair<Register, bool>(IdxN, IdxNIsKill);
+  return IdxN;
 }
 
 void FastISel::recomputeInsertPt() {
@ -517,11 +476,10 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
|
||||
Register Op1 = getRegForValue(I->getOperand(1));
|
||||
if (!Op1)
|
||||
return false;
|
||||
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
|
||||
|
||||
Register ResultReg =
|
||||
fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
|
||||
CI->getZExtValue(), VT.getSimpleVT());
|
||||
fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
|
||||
VT.getSimpleVT());
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
@ -533,7 +491,6 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
|
||||
Register Op0 = getRegForValue(I->getOperand(0));
|
||||
if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
|
||||
|
||||
// Check if the second operand is a constant and handle it appropriately.
|
||||
if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
|
||||
@ -553,8 +510,8 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
|
||||
ISDOpcode = ISD::AND;
|
||||
}
|
||||
|
||||
Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
|
||||
Op0IsKill, Imm, VT.getSimpleVT());
|
||||
Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
|
||||
VT.getSimpleVT());
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
@ -566,11 +523,10 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
|
||||
Register Op1 = getRegForValue(I->getOperand(1));
|
||||
if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
bool Op1IsKill = hasTrivialKill(I->getOperand(1));
|
||||
|
||||
// Now we have both operands in registers. Emit the instruction.
|
||||
Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
|
||||
ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
|
||||
ISDOpcode, Op0, Op1);
|
||||
if (!ResultReg)
|
||||
// Target-specific code wasn't able to find a machine opcode for
|
||||
// the given ISD opcode and type. Halt "fast" selection and bail.
|
||||
@ -591,8 +547,6 @@ bool FastISel::selectGetElementPtr(const User *I) {
|
||||
if (isa<VectorType>(I->getType()))
|
||||
return false;
|
||||
|
||||
bool NIsKill = hasTrivialKill(I->getOperand(0));
|
||||
|
||||
// Keep a running tab of the total offset to coalesce multiple N = N + Offset
|
||||
// into a single N = N + TotalOffset.
|
||||
uint64_t TotalOffs = 0;
|
||||
@ -608,10 +562,9 @@ bool FastISel::selectGetElementPtr(const User *I) {
|
||||
// N = N + Offset
|
||||
TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
|
||||
if (TotalOffs >= MaxOffs) {
|
||||
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
|
||||
N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
|
||||
if (!N) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
NIsKill = true;
|
||||
TotalOffs = 0;
|
||||
}
|
||||
}
|
||||
@ -626,43 +579,38 @@ bool FastISel::selectGetElementPtr(const User *I) {
|
||||
uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
|
||||
TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
|
||||
if (TotalOffs >= MaxOffs) {
|
||||
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
|
||||
N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
|
||||
if (!N) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
NIsKill = true;
|
||||
TotalOffs = 0;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (TotalOffs) {
|
||||
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
|
||||
N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
|
||||
if (!N) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
NIsKill = true;
|
||||
TotalOffs = 0;
|
||||
}
|
||||
|
||||
// N = N + Idx * ElementSize;
|
||||
uint64_t ElementSize = DL.getTypeAllocSize(Ty);
|
||||
std::pair<Register, bool> Pair = getRegForGEPIndex(Idx);
|
||||
Register IdxN = Pair.first;
|
||||
bool IdxNIsKill = Pair.second;
|
||||
Register IdxN = getRegForGEPIndex(Idx);
|
||||
if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
|
||||
if (ElementSize != 1) {
|
||||
IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
|
||||
IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
|
||||
if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
IdxNIsKill = true;
|
||||
}
|
||||
N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
|
||||
N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
|
||||
if (!N) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (TotalOffs) {
|
||||
N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
|
||||
N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
|
||||
if (!N) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
}
|
||||
@ -1425,10 +1373,8 @@ bool FastISel::selectCast(const User *I, unsigned Opcode) {
|
||||
// Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
|
||||
bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
|
||||
|
||||
Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
|
||||
Opcode, InputReg, InputRegIsKill);
|
||||
Opcode, InputReg);
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
@ -1459,7 +1405,6 @@ bool FastISel::selectBitCast(const User *I) {
|
||||
Register Op0 = getRegForValue(I->getOperand(0));
|
||||
if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
|
||||
return false;
|
||||
bool Op0IsKill = hasTrivialKill(I->getOperand(0));
|
||||
|
||||
// First, try to perform the bitcast by inserting a reg-reg copy.
|
||||
Register ResultReg;
|
||||
@ -1476,7 +1421,7 @@ bool FastISel::selectBitCast(const User *I) {
|
||||
|
||||
// If the reg-reg copy failed, select a BITCAST opcode.
|
||||
if (!ResultReg)
|
||||
ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
|
||||
ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
|
||||
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
@ -1652,12 +1597,11 @@ bool FastISel::selectFNeg(const User *I, const Value *In) {
|
||||
Register OpReg = getRegForValue(In);
|
||||
if (!OpReg)
|
||||
return false;
|
||||
bool OpRegIsKill = hasTrivialKill(In);
|
||||
|
||||
// If the target has ISD::FNEG, use it.
|
||||
EVT VT = TLI.getValueType(DL, I->getType());
|
||||
Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
|
||||
OpReg, OpRegIsKill);
|
||||
OpReg);
|
||||
if (ResultReg) {
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
@ -1672,18 +1616,18 @@ bool FastISel::selectFNeg(const User *I, const Value *In) {
|
||||
return false;
|
||||
|
||||
Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
|
||||
ISD::BITCAST, OpReg, OpRegIsKill);
|
||||
ISD::BITCAST, OpReg);
|
||||
if (!IntReg)
|
||||
return false;
|
||||
|
||||
Register IntResultReg = fastEmit_ri_(
|
||||
IntVT.getSimpleVT(), ISD::XOR, IntReg, /*Op0IsKill=*/true,
|
||||
IntVT.getSimpleVT(), ISD::XOR, IntReg,
|
||||
UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
|
||||
if (!IntResultReg)
|
||||
return false;
|
||||
|
||||
ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
|
||||
IntResultReg, /*Op0IsKill=*/true);
|
||||
IntResultReg);
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
@ -1883,14 +1827,12 @@ bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
|
||||
|
||||
unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
|
||||
|
||||
unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
|
||||
bool /*Op0IsKill*/) {
|
||||
unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
|
||||
bool /*Op0IsKill*/, unsigned /*Op1*/,
|
||||
bool /*Op1IsKill*/) {
|
||||
unsigned /*Op1*/) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1904,7 +1846,7 @@ unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
|
||||
}
|
||||
|
||||
unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
|
||||
bool /*Op0IsKill*/, uint64_t /*Imm*/) {
|
||||
uint64_t /*Imm*/) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1913,7 +1855,7 @@ unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
|
||||
/// If that fails, it materializes the immediate into a register and try
|
||||
/// fastEmit_rr instead.
|
||||
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
|
||||
bool Op0IsKill, uint64_t Imm, MVT ImmType) {
|
||||
uint64_t Imm, MVT ImmType) {
|
||||
// If this is a multiply by a power of two, emit this as a shift left.
|
||||
if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
|
||||
Opcode = ISD::SHL;
|
||||
@ -1931,11 +1873,10 @@ Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
|
||||
return 0;
|
||||
|
||||
// First check if immediate type is legal. If not, we can't use the ri form.
|
||||
Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
|
||||
Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
|
||||
if (ResultReg)
|
||||
return ResultReg;
|
||||
Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
|
||||
bool IsImmKill = true;
|
||||
if (!MaterialReg) {
|
||||
// This is a bit ugly/slow, but failing here means falling out of
|
||||
// fast-isel, which would be very slow.
|
||||
@ -1944,15 +1885,8 @@ Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
|
||||
MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
|
||||
if (!MaterialReg)
|
||||
return 0;
|
||||
// FIXME: If the materialized register here has no uses yet then this
|
||||
// will be the first use and we should be able to mark it as killed.
|
||||
// However, the local value area for materialising constant expressions
|
||||
// grows down, not up, which means that any constant expressions we generate
|
||||
// later which also use 'Imm' could be after this instruction and therefore
|
||||
// after this kill.
|
||||
IsImmKill = false;
|
||||
}
|
||||
return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
|
||||
return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
|
||||
}
|
||||
|
||||
Register FastISel::createResultReg(const TargetRegisterClass *RC) {
|
||||
@ -1986,8 +1920,7 @@ Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
|
||||
}
|
||||
|
||||
Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC, unsigned Op0,
|
||||
bool Op0IsKill) {
|
||||
const TargetRegisterClass *RC, unsigned Op0) {
|
||||
const MCInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
Register ResultReg = createResultReg(RC);
|
||||
@ -1995,10 +1928,10 @@ Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
|
||||
if (II.getNumDefs() >= 1)
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill));
|
||||
.addReg(Op0);
|
||||
else {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill));
|
||||
.addReg(Op0);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
|
||||
}
|
||||
@ -2008,8 +1941,7 @@ Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
|
||||
Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC, unsigned Op0,
|
||||
bool Op0IsKill, unsigned Op1,
|
||||
bool Op1IsKill) {
|
||||
unsigned Op1) {
|
||||
const MCInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
Register ResultReg = createResultReg(RC);
|
||||
@ -2018,12 +1950,12 @@ Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
|
||||
if (II.getNumDefs() >= 1)
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op1, getKillRegState(Op1IsKill));
|
||||
.addReg(Op0)
|
||||
.addReg(Op1);
|
||||
else {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op1, getKillRegState(Op1IsKill));
|
||||
.addReg(Op0)
|
||||
.addReg(Op1);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
|
||||
}
|
||||
@ -2032,9 +1964,7 @@ Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
|
||||
Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC, unsigned Op0,
|
||||
bool Op0IsKill, unsigned Op1,
|
||||
bool Op1IsKill, unsigned Op2,
|
||||
bool Op2IsKill) {
|
||||
unsigned Op1, unsigned Op2) {
|
||||
const MCInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
Register ResultReg = createResultReg(RC);
|
||||
@ -2044,14 +1974,14 @@ Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
|
||||
|
||||
if (II.getNumDefs() >= 1)
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op1, getKillRegState(Op1IsKill))
|
||||
.addReg(Op2, getKillRegState(Op2IsKill));
|
||||
.addReg(Op0)
|
||||
.addReg(Op1)
|
||||
.addReg(Op2);
|
||||
else {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op1, getKillRegState(Op1IsKill))
|
||||
.addReg(Op2, getKillRegState(Op2IsKill));
|
||||
.addReg(Op0)
|
||||
.addReg(Op1)
|
||||
.addReg(Op2);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
|
||||
}
|
||||
@ -2060,7 +1990,7 @@ Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
|
||||
|
||||
Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC, unsigned Op0,
|
||||
bool Op0IsKill, uint64_t Imm) {
|
||||
uint64_t Imm) {
|
||||
const MCInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
Register ResultReg = createResultReg(RC);
|
||||
@ -2068,11 +1998,11 @@ Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
|
||||
if (II.getNumDefs() >= 1)
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op0)
|
||||
.addImm(Imm);
|
||||
else {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op0)
|
||||
.addImm(Imm);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
|
||||
@ -2082,8 +2012,7 @@ Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
|
||||
Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC, unsigned Op0,
|
||||
bool Op0IsKill, uint64_t Imm1,
|
||||
uint64_t Imm2) {
|
||||
uint64_t Imm1, uint64_t Imm2) {
|
||||
const MCInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
Register ResultReg = createResultReg(RC);
|
||||
@ -2091,12 +2020,12 @@ Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
|
||||
|
||||
if (II.getNumDefs() >= 1)
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op0)
|
||||
.addImm(Imm1)
|
||||
.addImm(Imm2);
|
||||
else {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op0)
|
||||
.addImm(Imm1)
|
||||
.addImm(Imm2);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
@ -2126,8 +2055,7 @@ Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
|
||||
|
||||
Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC, unsigned Op0,
|
||||
bool Op0IsKill, unsigned Op1,
|
||||
bool Op1IsKill, uint64_t Imm) {
|
||||
unsigned Op1, uint64_t Imm) {
|
||||
const MCInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
Register ResultReg = createResultReg(RC);
|
||||
@ -2136,13 +2064,13 @@ Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
|
||||
|
||||
if (II.getNumDefs() >= 1)
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op1, getKillRegState(Op1IsKill))
|
||||
.addReg(Op0)
|
||||
.addReg(Op1)
|
||||
.addImm(Imm);
|
||||
else {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op1, getKillRegState(Op1IsKill))
|
||||
.addReg(Op0)
|
||||
.addReg(Op1)
|
||||
.addImm(Imm);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
|
||||
@ -2167,21 +2095,21 @@ Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
|
||||
}
|
||||
|
||||
Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
|
||||
bool Op0IsKill, uint32_t Idx) {
|
||||
uint32_t Idx) {
|
||||
Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
|
||||
assert(Register::isVirtualRegister(Op0) &&
|
||||
"Cannot yet extract from physregs");
|
||||
const TargetRegisterClass *RC = MRI.getRegClass(Op0);
|
||||
MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
|
||||
ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
|
||||
ResultReg).addReg(Op0, 0, Idx);
|
||||
return ResultReg;
|
||||
}
|
||||
|
||||
/// Emit MachineInstrs to compute the value of Op with all but the least
|
||||
/// significant bit set to zero.
|
||||
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
|
||||
return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
|
||||
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
|
||||
return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
|
||||
}
|
||||
|
||||
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
File diff suppressed because it is too large
@ -136,16 +136,13 @@ class ARMFastISel final : public FastISel {
|
||||
// Code from FastISel.cpp.
|
||||
|
||||
unsigned fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill);
|
||||
const TargetRegisterClass *RC, unsigned Op0);
|
||||
unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill);
|
||||
unsigned Op0, unsigned Op1);
|
||||
unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
uint64_t Imm);
|
||||
unsigned Op0, uint64_t Imm);
|
||||
unsigned fastEmitInst_i(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
uint64_t Imm);
|
||||
@ -299,7 +296,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
|
||||
|
||||
unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill) {
|
||||
unsigned Op0) {
|
||||
Register ResultReg = createResultReg(RC);
|
||||
const MCInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
@ -308,10 +305,10 @@ unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
Op0 = constrainOperandRegClass(II, Op0, 1);
|
||||
if (II.getNumDefs() >= 1) {
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
|
||||
ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
|
||||
ResultReg).addReg(Op0));
|
||||
} else {
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
|
||||
.addReg(Op0, Op0IsKill * RegState::Kill));
|
||||
.addReg(Op0));
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg)
|
||||
.addReg(II.ImplicitDefs[0]));
|
||||
@ -321,8 +318,7 @@ unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
|
||||
unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill) {
|
||||
unsigned Op0, unsigned Op1) {
|
||||
unsigned ResultReg = createResultReg(RC);
|
||||
const MCInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
@ -334,12 +330,12 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
if (II.getNumDefs() >= 1) {
|
||||
AddOptionalDefs(
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
|
||||
.addReg(Op0, Op0IsKill * RegState::Kill)
|
||||
.addReg(Op1, Op1IsKill * RegState::Kill));
|
||||
.addReg(Op0)
|
||||
.addReg(Op1));
|
||||
} else {
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
|
||||
.addReg(Op0, Op0IsKill * RegState::Kill)
|
||||
.addReg(Op1, Op1IsKill * RegState::Kill));
|
||||
.addReg(Op0)
|
||||
.addReg(Op1));
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg)
|
||||
.addReg(II.ImplicitDefs[0]));
|
||||
@ -349,8 +345,7 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
|
||||
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
uint64_t Imm) {
|
||||
unsigned Op0, uint64_t Imm) {
|
||||
unsigned ResultReg = createResultReg(RC);
|
||||
const MCInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
@ -360,11 +355,11 @@ unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
if (II.getNumDefs() >= 1) {
|
||||
AddOptionalDefs(
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
|
||||
.addReg(Op0, Op0IsKill * RegState::Kill)
|
||||
.addReg(Op0)
|
||||
.addImm(Imm));
|
||||
} else {
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
|
||||
.addReg(Op0, Op0IsKill * RegState::Kill)
|
||||
.addReg(Op0)
|
||||
.addImm(Imm));
|
||||
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg)
|
||||
@ -851,7 +846,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
|
||||
// get the reg+offset into a register.
|
||||
if (needsLowering) {
|
||||
Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
|
||||
/*Op0IsKill*/false, Addr.Offset, MVT::i32);
|
||||
Addr.Offset, MVT::i32);
|
||||
Addr.Offset = 0;
|
||||
}
|
||||
}
|
||||
@ -1967,8 +1962,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
|
||||
break;
|
||||
}
|
||||
case CCValAssign::BCvt: {
|
||||
unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
|
||||
/*TODO: Kill=*/false);
|
||||
unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg);
|
||||
assert(BC != 0 && "Failed to emit a bitcast!");
|
||||
Arg = BC;
|
||||
ArgVT = VA.getLocVT();
|
||||
|
@ -228,14 +228,13 @@ private:
|
||||
|
||||
unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill);
|
||||
unsigned Op0, unsigned Op1);
|
||||
|
||||
// for some reason, this default is not generated by tablegen
|
||||
// so we explicitly generate it here.
|
||||
unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill, uint64_t imm1,
|
||||
uint64_t imm2, unsigned Op3, bool Op3IsKill) {
|
||||
unsigned Op0, uint64_t imm1, uint64_t imm2,
|
||||
unsigned Op3) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2122,8 +2121,7 @@ void MipsFastISel::simplifyAddress(Address &Addr) {
|
||||
|
||||
unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill) {
|
||||
unsigned Op0, unsigned Op1) {
|
||||
// We treat the MUL instruction in a special way because it clobbers
|
||||
// the HI0 & LO0 registers. The TableGen definition of this instruction can
|
||||
// mark these registers only as implicitly defined. As a result, the
|
||||
@ -2136,15 +2134,14 @@ unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
|
||||
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
|
||||
.addReg(Op0, getKillRegState(Op0IsKill))
|
||||
.addReg(Op1, getKillRegState(Op1IsKill))
|
||||
.addReg(Op0)
|
||||
.addReg(Op1)
|
||||
.addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead)
|
||||
.addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead);
|
||||
return ResultReg;
|
||||
}
|
||||
|
||||
return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op0IsKill, Op1,
|
||||
Op1IsKill);
|
||||
return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op1);
|
||||
}
|
||||
|
||||
namespace llvm {
|
||||
|
@ -112,15 +112,12 @@ class PPCFastISel final : public FastISel {
|
||||
unsigned fastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override;
|
||||
unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
uint64_t Imm);
|
||||
unsigned Op0, uint64_t Imm);
|
||||
unsigned fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill);
|
||||
const TargetRegisterClass *RC, unsigned Op0);
|
||||
unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill);
|
||||
unsigned Op0, unsigned Op1);
|
||||
|
||||
bool fastLowerCall(CallLoweringInfo &CLI) override;
|
||||
|
||||
@ -2426,7 +2423,7 @@ unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
|
||||
// where those regs have another meaning.
|
||||
unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op0,
|
||||
uint64_t Imm) {
|
||||
if (MachineInstOpcode == PPC::ADDI)
|
||||
MRI.setRegClass(Op0, &PPC::GPRC_and_GPRC_NOR0RegClass);
|
||||
@ -2437,8 +2434,7 @@ unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
|
||||
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
|
||||
|
||||
return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC,
|
||||
Op0, Op0IsKill, Imm);
|
||||
return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC, Op0, Imm);
|
||||
}
|
||||
|
||||
// Override for instructions with one register operand to avoid use of
|
||||
@ -2446,12 +2442,12 @@ unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
|
||||
// we must be conservative.
|
||||
unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass* RC,
|
||||
unsigned Op0, bool Op0IsKill) {
|
||||
unsigned Op0) {
|
||||
const TargetRegisterClass *UseRC =
|
||||
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
|
||||
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
|
||||
|
||||
return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
|
||||
return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0);
|
||||
}
|
||||
|
||||
// Override for instructions with two register operands to avoid use
|
||||
@ -2459,14 +2455,12 @@ unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
|
||||
// so we must be conservative.
|
||||
unsigned PPCFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass* RC,
|
||||
unsigned Op0, bool Op0IsKill,
|
||||
unsigned Op1, bool Op1IsKill) {
|
||||
unsigned Op0, unsigned Op1) {
|
||||
const TargetRegisterClass *UseRC =
|
||||
(RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
|
||||
(RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));
|
||||
|
||||
return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
|
||||
Op1, Op1IsKill);
|
||||
return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op1);
|
||||
}
|
||||
|
||||
namespace llvm {
|
||||
|
@ -1167,7 +1167,7 @@ bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
|
||||
}
|
||||
|
||||
Register Reg = fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(),
|
||||
In, I->getOperand(0)->hasOneUse());
|
||||
In);
|
||||
if (!Reg)
|
||||
return false;
|
||||
MachineBasicBlock::iterator Iter = FuncInfo.InsertPt;
|
||||
|
@ -89,8 +89,7 @@ private:
|
||||
|
||||
bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
|
||||
MachineMemOperand *MMO = nullptr, bool Aligned = false);
|
||||
bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
|
||||
X86AddressMode &AM,
|
||||
bool X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
|
||||
MachineMemOperand *MMO = nullptr, bool Aligned = false);
|
||||
|
||||
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
|
||||
@ -176,9 +175,7 @@ private:
|
||||
|
||||
unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode,
|
||||
const TargetRegisterClass *RC, unsigned Op0,
|
||||
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
|
||||
unsigned Op2, bool Op2IsKill, unsigned Op3,
|
||||
bool Op3IsKill);
|
||||
unsigned Op1, unsigned Op2, unsigned Op3);
|
||||
};
|
||||
|
||||
} // end anonymous namespace.
|
||||
@ -487,8 +484,7 @@ bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM,
|
||||
/// type VT. The address is either pre-computed, consisted of a base ptr, Ptr
|
||||
/// and a displacement offset, or a GlobalAddress,
|
||||
/// i.e. V. Return true if it is possible.
|
||||
bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
|
||||
X86AddressMode &AM,
|
||||
bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
|
||||
MachineMemOperand *MMO, bool Aligned) {
|
||||
bool HasSSE1 = Subtarget->hasSSE1();
|
||||
bool HasSSE2 = Subtarget->hasSSE2();
|
||||
@ -508,7 +504,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
|
||||
Register AndResult = createResultReg(&X86::GR8RegClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(X86::AND8ri), AndResult)
|
||||
.addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
|
||||
.addReg(ValReg).addImm(1);
|
||||
ValReg = AndResult;
|
||||
LLVM_FALLTHROUGH; // handle i1 as i8.
|
||||
}
|
||||
@ -654,7 +650,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
|
||||
ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
|
||||
MachineInstrBuilder MIB =
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc);
|
||||
addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill));
|
||||
addFullAddress(MIB, AM).addReg(ValReg);
|
||||
if (MMO)
|
||||
MIB->addMemOperand(*FuncInfo.MF, MMO);
|
||||
|
||||
@ -702,8 +698,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
|
||||
if (ValReg == 0)
|
||||
return false;
|
||||
|
||||
bool ValKill = hasTrivialKill(Val);
|
||||
return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned);
|
||||
return X86FastEmitStore(VT, ValReg, AM, MMO, Aligned);
|
||||
}
|
||||
|
||||
/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
|
||||
@ -712,8 +707,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
|
||||
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
|
||||
unsigned Src, EVT SrcVT,
|
||||
unsigned &ResultReg) {
|
||||
unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
|
||||
Src, /*TODO: Kill=*/false);
|
||||
unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
|
||||
if (RR == 0)
|
||||
return false;
|
||||
|
||||
@ -945,7 +939,7 @@ redo_gep:
|
||||
(S == 1 || S == 2 || S == 4 || S == 8)) {
|
||||
// Scaled-index addressing.
|
||||
Scale = S;
|
||||
IndexReg = getRegForGEPIndex(Op).first;
|
||||
IndexReg = getRegForGEPIndex(Op);
|
||||
if (IndexReg == 0)
|
||||
return false;
|
||||
break;
|
||||
@ -1262,14 +1256,13 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
|
||||
if (Outs[0].Flags.isSExt())
|
||||
return false;
|
||||
// TODO
|
||||
SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*Op0IsKill=*/false);
|
||||
SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg);
|
||||
SrcVT = MVT::i8;
|
||||
}
|
||||
unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
|
||||
ISD::SIGN_EXTEND;
|
||||
// TODO
|
||||
SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg,
|
||||
/*Op0IsKill=*/false);
|
||||
SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg);
|
||||
}
|
||||
|
||||
// Make the copy.
|
||||
@ -1467,8 +1460,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
|
||||
ResultReg = createResultReg(&X86::GR32RegClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
|
||||
ResultReg);
|
||||
ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg,
|
||||
/*Op0IsKill=*/true, X86::sub_8bit);
|
||||
ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit);
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
break;
|
||||
@ -1558,7 +1550,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
|
||||
MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
|
||||
if (SrcVT == MVT::i1) {
|
||||
// Set the high bits to zero.
|
||||
ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
|
||||
ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
|
||||
SrcVT = MVT::i8;
|
||||
|
||||
if (ResultReg == 0)
|
||||
@ -1591,11 +1583,10 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8),
|
||||
Result32).addReg(ResultReg);
|
||||
|
||||
ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32,
|
||||
/*Op0IsKill=*/true, X86::sub_16bit);
|
||||
ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
|
||||
} else if (DstVT != MVT::i8) {
|
||||
ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
|
||||
ResultReg, /*Op0IsKill=*/true);
|
||||
ResultReg);
|
||||
if (ResultReg == 0)
|
||||
return false;
|
||||
}
|
||||
@ -1617,8 +1608,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
|
||||
MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
|
||||
if (SrcVT == MVT::i1) {
|
||||
// Set the high bits to zero.
|
||||
Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg,
|
||||
/*TODO: Kill=*/false);
|
||||
Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
|
||||
if (ZExtReg == 0)
|
||||
return false;
|
||||
|
||||
@ -1637,11 +1627,10 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8),
|
||||
Result32).addReg(ResultReg);
|
||||
|
||||
ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32,
|
||||
/*Op0IsKill=*/true, X86::sub_16bit);
|
||||
ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
|
||||
} else if (DstVT != MVT::i8) {
|
||||
ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND,
|
||||
ResultReg, /*Op0IsKill=*/true);
|
||||
ResultReg);
|
||||
if (ResultReg == 0)
|
||||
return false;
|
||||
}
|
||||
@ -1793,8 +1782,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), OpReg)
|
||||
.addReg(KOpReg);
|
||||
OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, /*Op0IsKill=*/true,
|
||||
X86::sub_8bit);
|
||||
OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit);
|
||||
}
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
|
||||
.addReg(OpReg)
|
||||
@ -2025,7 +2013,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
|
||||
|
||||
// Now reference the 8-bit subreg of the result.
|
||||
ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
|
||||
/*Op0IsKill=*/true, X86::sub_8bit);
|
||||
X86::sub_8bit);
|
||||
}
|
||||
// Copy the result out of the physreg if we haven't already.
|
||||
if (!ResultReg) {
|
||||
@ -2130,7 +2118,6 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
|
||||
Register CondReg = getRegForValue(Cond);
|
||||
if (CondReg == 0)
|
||||
return false;
|
||||
bool CondIsKill = hasTrivialKill(Cond);
|
||||
|
||||
// In case OpReg is a K register, COPY to a GPR
|
||||
if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
|
||||
@ -2138,12 +2125,11 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
|
||||
CondReg = createResultReg(&X86::GR32RegClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), CondReg)
|
||||
.addReg(KCondReg, getKillRegState(CondIsKill));
|
||||
CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true,
|
||||
X86::sub_8bit);
|
||||
.addReg(KCondReg);
|
||||
CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
|
||||
}
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
|
||||
.addReg(CondReg, getKillRegState(CondIsKill))
|
||||
.addReg(CondReg)
|
||||
.addImm(1);
|
||||
}
|
||||
|
||||
@ -2151,18 +2137,13 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
|
||||
const Value *RHS = I->getOperand(2);
|
||||
|
||||
Register RHSReg = getRegForValue(RHS);
|
||||
bool RHSIsKill = hasTrivialKill(RHS);
|
||||
|
||||
Register LHSReg = getRegForValue(LHS);
|
||||
bool LHSIsKill = hasTrivialKill(LHS);
|
||||
|
||||
if (!LHSReg || !RHSReg)
|
||||
return false;
|
||||
|
||||
const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
|
||||
unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8);
|
||||
Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill,
|
||||
LHSReg, LHSIsKill, CC);
|
||||
Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
@ -2211,17 +2192,9 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
|
||||
const Value *RHS = I->getOperand(2);
|
||||
|
||||
Register LHSReg = getRegForValue(LHS);
|
||||
bool LHSIsKill = hasTrivialKill(LHS);
|
||||
|
||||
Register RHSReg = getRegForValue(RHS);
|
||||
bool RHSIsKill = hasTrivialKill(RHS);
|
||||
|
||||
Register CmpLHSReg = getRegForValue(CmpLHS);
|
||||
bool CmpLHSIsKill = hasTrivialKill(CmpLHS);
|
||||
|
||||
Register CmpRHSReg = getRegForValue(CmpRHS);
|
||||
bool CmpRHSIsKill = hasTrivialKill(CmpRHS);
|
||||
|
||||
if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
|
||||
return false;
|
||||
|
||||
@ -2235,8 +2208,8 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
|
||||
|
||||
unsigned CmpOpcode =
|
||||
(RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr;
|
||||
Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpLHSIsKill,
|
||||
CmpRHSReg, CmpRHSIsKill, CC);
|
||||
Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpRHSReg,
|
||||
CC);
|
||||
|
||||
// Need an IMPLICIT_DEF for the input that is used to generate the upper
|
||||
// bits of the result register since its not based on any of the inputs.
|
||||
@ -2248,9 +2221,8 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
|
||||
// LHS in the input. The mask input comes from the compare.
|
||||
unsigned MovOpcode =
|
||||
(RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
|
||||
unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, RHSIsKill,
|
||||
CmpReg, true, ImplicitDefReg, true,
|
||||
LHSReg, LHSIsKill);
|
||||
unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, CmpReg,
|
||||
ImplicitDefReg, LHSReg);
|
||||
|
||||
ResultReg = createResultReg(RC);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
@ -2269,10 +2241,10 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
|
||||
unsigned BlendOpcode =
|
||||
(RetVT == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
|
||||
|
||||
Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpLHSIsKill,
|
||||
CmpRHSReg, CmpRHSIsKill, CC);
|
||||
Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill,
|
||||
LHSReg, LHSIsKill, CmpReg, true);
|
||||
Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpRHSReg,
|
||||
CC);
|
||||
Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, LHSReg,
|
||||
CmpReg);
|
||||
ResultReg = createResultReg(RC);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
|
||||
@ -2291,14 +2263,10 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
|
||||
}
|
||||
|
||||
const TargetRegisterClass *VR128 = &X86::VR128RegClass;
|
||||
Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
|
||||
CmpRHSReg, CmpRHSIsKill, CC);
|
||||
Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg,
|
||||
/*Op0IsKill=*/false, LHSReg, LHSIsKill);
|
||||
Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg,
|
||||
/*Op0IsKill=*/true, RHSReg, RHSIsKill);
|
||||
Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*Op0IsKill=*/true,
|
||||
AndReg, /*Op1IsKill=*/true);
|
||||
Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpRHSReg, CC);
|
||||
Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, LHSReg);
|
||||
Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, RHSReg);
|
||||
Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, AndReg);
|
||||
ResultReg = createResultReg(RC);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
|
||||
@ -2348,7 +2316,6 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
|
||||
Register CondReg = getRegForValue(Cond);
|
||||
if (CondReg == 0)
|
||||
return false;
|
||||
bool CondIsKill = hasTrivialKill(Cond);
|
||||
|
||||
// In case OpReg is a K register, COPY to a GPR
|
||||
if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
|
||||
@ -2356,12 +2323,11 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
|
||||
CondReg = createResultReg(&X86::GR32RegClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), CondReg)
|
||||
.addReg(KCondReg, getKillRegState(CondIsKill));
|
||||
CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true,
|
||||
X86::sub_8bit);
|
||||
.addReg(KCondReg);
|
||||
CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
|
||||
}
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
|
||||
.addReg(CondReg, getKillRegState(CondIsKill))
|
||||
.addReg(CondReg)
|
||||
.addImm(1);
|
||||
}
|
||||
|
||||
@ -2369,18 +2335,14 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
|
||||
const Value *RHS = I->getOperand(2);
|
||||
|
||||
Register LHSReg = getRegForValue(LHS);
|
||||
bool LHSIsKill = hasTrivialKill(LHS);
|
||||
|
||||
Register RHSReg = getRegForValue(RHS);
|
||||
bool RHSIsKill = hasTrivialKill(RHS);
|
||||
|
||||
if (!LHSReg || !RHSReg)
|
||||
return false;
|
||||
|
||||
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
|
||||
|
||||
Register ResultReg =
|
||||
fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
|
||||
fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
|
||||
updateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
@ -2404,12 +2366,11 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
Register OpReg = getRegForValue(Opnd);
if (OpReg == 0)
return false;
bool OpIsKill = hasTrivialKill(Opnd);
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(OpReg, getKillRegState(OpIsKill));
.addReg(OpReg);
updateValueMap(I, ResultReg);
return true;
}
@ -2479,8 +2440,7 @@ bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
Register ImplicitDefReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
Register ResultReg =
fastEmitInst_rr(Opcode, RC, ImplicitDefReg, true, OpReg, false);
Register ResultReg = fastEmitInst_rr(Opcode, RC, ImplicitDefReg, OpReg);
updateValueMap(I, ResultReg);
return true;
}
@ -2577,8 +2537,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
}

// Issue an extract_subreg.
Register ResultReg = fastEmitInst_extractsubreg(MVT::i8,
InputReg, false,
Register ResultReg = fastEmitInst_extractsubreg(MVT::i8, InputReg,
X86::sub_8bit);
if (!ResultReg)
return false;
@ -2614,7 +2573,7 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,

unsigned Reg;
bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
RV &= X86FastEmitStore(VT, Reg, /*ValIsKill=*/true, DestAM);
RV &= X86FastEmitStore(VT, Reg, DestAM);
assert(RV && "Failed to emit load or store??");

unsigned Size = VT.getSizeInBits()/8;
@ -2662,7 +2621,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// controlled by MXCSR.
unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr
: X86::VCVTPS2PHrr;
InputReg = fastEmitInst_ri(Opc, RC, InputReg, false, 4);
InputReg = fastEmitInst_ri(Opc, RC, InputReg, 4);

// Move the lower 32-bits of ResultReg to another register of class GR32.
Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr
@ -2673,20 +2632,19 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {

// The result value is in the lower 16-bits of ResultReg.
unsigned RegIdx = X86::sub_16bit;
ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx);
ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, RegIdx);
} else {
assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
// Explicitly zero-extend the input to 32-bit.
InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg,
/*Op0IsKill=*/false);
InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg);

// The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
InputReg, /*Op0IsKill=*/true);
InputReg);

unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr
: X86::VCVTPH2PSrr;
InputReg = fastEmitInst_r(Opc, RC, InputReg, /*Op0IsKill=*/true);
InputReg = fastEmitInst_r(Opc, RC, InputReg);

// The result value is in the lower 32-bits of ResultReg.
// Emit an explicit copy from register class VR128 to register class FR32.
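Several of the hunks above also touch fastEmitInst_extractsubreg, whose Op0IsKill parameter is dropped like the rest. Assuming its declaration in FastISel.h follows the same pattern as the other helpers, it would now read roughly as below (a sketch inferred from the call sites, not quoted from the patch):

    // Hypothetical shape of the updated helper declaration; return type and
    // parameter names are assumptions based on the call sites in this diff.
    Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx);

    // Typical call site after the change (mirrors the X86SelectTrunc hunk):
    Register ResultReg =
        fastEmitInst_extractsubreg(MVT::i8, InputReg, X86::sub_8bit);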
@ -2937,7 +2895,6 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
Register LHSReg = getRegForValue(LHS);
if (LHSReg == 0)
return false;
bool LHSIsKill = hasTrivialKill(LHS);

unsigned ResultReg = 0;
// Check if we have an immediate version.
@ -2954,21 +2911,17 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
bool IsDec = BaseOpc == ISD::SUB;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
.addReg(LHSReg, getKillRegState(LHSIsKill));
.addReg(LHSReg);
} else
ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
CI->getZExtValue());
ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, CI->getZExtValue());
}

unsigned RHSReg;
bool RHSIsKill;
if (!ResultReg) {
RHSReg = getRegForValue(RHS);
if (RHSReg == 0)
return false;
RHSIsKill = hasTrivialKill(RHS);
ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
RHSIsKill);
ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, RHSReg);
}

// FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
@ -2981,9 +2934,9 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// the X86::MUL*r instruction.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
.addReg(LHSReg, getKillRegState(LHSIsKill));
.addReg(LHSReg);
ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
TLI.getRegClassFor(VT), RHSReg);
} else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
static const uint16_t MULOpc[] =
{ X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
@ -2992,13 +2945,11 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// X86::IMUL8r instruction.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), X86::AL)
.addReg(LHSReg, getKillRegState(LHSIsKill));
ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
RHSIsKill);
.addReg(LHSReg);
ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg);
} else
ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
RHSReg, RHSIsKill);
TLI.getRegClassFor(VT), LHSReg, RHSReg);
}

if (!ResultReg)
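The COPY into X86::AL in the hunk above exists because the 8-bit multiply instructions take one factor implicitly in AL; once the kill flags are gone, only that physreg copy and the single explicit register operand remain. In sketch form (the surrounding overflow-intrinsic lowering is elided):

    // X86::MUL8r / X86::IMUL8r multiply AL by their one explicit operand,
    // so the LHS is copied into AL first; no kill flag is attached any more.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), X86::AL)
        .addReg(LHSReg);
    ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg);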
@ -3309,8 +3260,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (!isTypeLegal(PrevVal->getType(), VT))
return false;

ResultReg =
fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
ResultReg = fastEmit_ri(VT, VT, ISD::AND, ResultReg, 1);
} else {
if (!isTypeLegal(Val->getType(), VT) ||
(VT.isVector() && VT.getVectorElementType() == MVT::i1))
@ -3378,7 +3328,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
// Handle zero-extension from i1 to i8, which is common.
if (ArgVT == MVT::i1) {
// Set the high bits to zero.
ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg, /*TODO: Kill=*/false);
ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg);
ArgVT = MVT::i8;

if (ArgReg == 0)
@ -3408,8 +3358,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
break;
}
case CCValAssign::BCvt: {
ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
/*TODO: Kill=*/false);
ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg);
assert(ArgReg && "Failed to emit a bitcast!");
ArgVT = VA.getLocVT();
break;
@ -3462,8 +3411,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
return false;
} else {
bool ValIsKill = hasTrivialKill(ArgVal);
if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO))
if (!X86FastEmitStore(ArgVT, ArgReg, AM, MMO))
return false;
}
}
@ -3727,11 +3675,9 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
default: llvm_unreachable("Unexpected value type");
case MVT::i1:
case MVT::i8:
return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Op0IsKill=*/true,
X86::sub_8bit);
return fastEmitInst_extractsubreg(MVT::i8, SrcReg, X86::sub_8bit);
case MVT::i16:
return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Op0IsKill=*/true,
X86::sub_16bit);
return fastEmitInst_extractsubreg(MVT::i16, SrcReg, X86::sub_16bit);
case MVT::i32:
return SrcReg;
case MVT::i64: {
@ -4001,10 +3947,8 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,

unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill,
unsigned Op3, bool Op3IsKill) {
unsigned Op0, unsigned Op1,
unsigned Op2, unsigned Op3) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);

Register ResultReg = createResultReg(RC);
@ -4015,16 +3959,16 @@ unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,

if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addReg(Op2, getKillRegState(Op2IsKill))
.addReg(Op3, getKillRegState(Op3IsKill));
.addReg(Op0)
.addReg(Op1)
.addReg(Op2)
.addReg(Op3);
else {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, getKillRegState(Op0IsKill))
.addReg(Op1, getKillRegState(Op1IsKill))
.addReg(Op2, getKillRegState(Op2IsKill))
.addReg(Op3, getKillRegState(Op3IsKill));
.addReg(Op0)
.addReg(Op1)
.addReg(Op2)
.addReg(Op3);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
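fastEmitInst_rrrr is defined locally in X86FastISel (note the X86FastISel:: qualifier above) rather than in the generic FastISel interface, and its operand list shrinks from eight parameters to four. A hypothetical call site under the new signature; the opcode, register class, and register names here are placeholders, not taken from the patch:

    // Hedged usage sketch: emit a four-register-operand instruction without
    // attaching kill flags; the allocator will recompute liveness later.
    unsigned ResultReg =
        fastEmitInst_rrrr(MachineInstOpcode, &X86::VR128RegClass,
                          Op0Reg, Op1Reg, Op2Reg, Op3Reg);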

@ -6,7 +6,7 @@

; CHECK-SSA: [[QUOTREG:%[0-9]+]]:gpr32 = SDIVWr
; CHECK-SSA-NOT: [[QUOTREG]] =
; CHECK-SSA: {{%[0-9]+}}:gpr32 = MSUBWrrr killed [[QUOTREG]]
; CHECK-SSA: {{%[0-9]+}}:gpr32 = MSUBWrrr [[QUOTREG]]

; CHECK-SSA-LABEL: Machine code for function t2

@ -294,7 +294,7 @@ struct OperandsSignature {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
OS << LS;
if (Operands[i].isReg()) {
OS << "unsigned Op" << i << ", bool Op" << i << "IsKill";
OS << "unsigned Op" << i;
} else if (Operands[i].isImm()) {
OS << "uint64_t imm" << i;
} else if (Operands[i].isFP()) {
@ -316,7 +316,7 @@ struct OperandsSignature {

OS << LS;
if (Operands[i].isReg()) {
OS << "Op" << i << ", Op" << i << "IsKill";
OS << "Op" << i;
} else if (Operands[i].isImm()) {
OS << "imm" << i;
} else if (Operands[i].isFP()) {
@ -332,7 +332,7 @@ struct OperandsSignature {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
OS << LS;
if (Operands[i].isReg()) {
OS << "Op" << i << ", Op" << i << "IsKill";
OS << "Op" << i;
} else if (Operands[i].isImm()) {
OS << "imm" << i;
} else if (Operands[i].isFP()) {
@ -673,7 +673,7 @@ void FastISelMap::emitInstructionCode(raw_ostream &OS,
OS << ");\n";
} else {
OS << "extractsubreg(" << RetVTName
<< ", Op0, Op0IsKill, " << Memo.SubRegNo << ");\n";
<< ", Op0, " << Memo.SubRegNo << ");\n";
}

if (!PredicateCheck.empty()) {
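The final hunks are in the TableGen backend that generates the fastEmit_* glue (the OperandsSignature/FastISelMap code), so the IsKill parameters also disappear from everything it emits. Roughly, the generated helpers change shape as sketched below; this is illustrative output following the emitter's usual naming convention, not captured from an actual build:

    // Before: generated helpers threaded a kill flag per register operand.
    unsigned fastEmit_ISD_ADD_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                         unsigned Op1, bool Op1IsKill);

    // After: only the register operands remain.
    unsigned fastEmit_ISD_ADD_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1);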