From 5794a3620eca2afa027978f7ba470112df6497ef Mon Sep 17 00:00:00 2001
From: Nikita Popov
Date: Tue, 9 Mar 2021 21:04:03 +0100
Subject: [PATCH] [FastISel] Remove kill tracking

This is a followup to D98145: As far as I know, tracking of kill flags
in FastISel is just a compile-time optimization. However, I'm not
actually seeing any compile-time regression when removing the tracking.
This probably used to be more important in the past, before FastRA was
switched to allocate instructions in reverse order, which means that it
discovers kills as a matter of course.

As such, the kill tracking doesn't really seem to serve a purpose
anymore, and just adds additional complexity and potential for errors.
This patch removes it entirely. The primary changes are dropping the
hasTrivialKill() method and removing the kill arguments from the
fastEmit_*() methods. The rest is mechanical fixup.

Differential Revision: https://reviews.llvm.org/D98294
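For illustration, this is the shape of the caller-side change, modeled on
the selectBitCast() fixup in this patch (a representative sketch, not an
exhaustive list of affected signatures):

    // Before: callers computed kill flags and threaded them through emission.
    bool Op0IsKill = hasTrivialKill(I->getOperand(0));
    Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

    // After: no kill bookkeeping; the register allocator recomputes kills.
    Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);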
---
 docs/ReleaseNotes.rst                       |   7 +
 include/llvm/CodeGen/FastISel.h             |  39 +-
 lib/CodeGen/SelectionDAG/FastISel.cpp       | 194 ++-----
 lib/Target/AArch64/AArch64FastISel.cpp      | 525 +++++++-----------
 lib/Target/ARM/ARMFastISel.cpp              |  38 +-
 lib/Target/Mips/MipsFastISel.cpp            |  17 +-
 lib/Target/PowerPC/PPCFastISel.cpp          |  24 +-
 .../WebAssembly/WebAssemblyFastISel.cpp     |   2 +-
 lib/Target/X86/X86FastISel.cpp              | 200 +++----
 test/CodeGen/AArch64/arm64-fast-isel-rem.ll |   2 +-
 utils/TableGen/FastISelEmitter.cpp          |   8 +-
 11 files changed, 388 insertions(+), 668 deletions(-)

diff --git a/docs/ReleaseNotes.rst b/docs/ReleaseNotes.rst
index e751ed90db2..93be66836ab 100644
--- a/docs/ReleaseNotes.rst
+++ b/docs/ReleaseNotes.rst
@@ -116,6 +116,13 @@ Changes to the Go bindings
 --------------------------
 
+Changes to the FastISel infrastructure
+--------------------------------------
+
+* FastISel no longer tracks killed registers, and instead leaves this to the
+  register allocator. This means that ``hasTrivialKill()`` is removed, as well
+  as the ``OpNIsKill`` parameters to the ``fastEmit_*()`` family of functions.
+
 Changes to the DAG infrastructure
 ---------------------------------
 
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index 26bf4ab2618..9c7e688da6a 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -274,7 +274,7 @@ public:
 
   /// This is a wrapper around getRegForValue that also takes care of
   /// truncating or sign-extending the given getelementptr index value.
-  std::pair<Register, bool> getRegForGEPIndex(const Value *Idx);
+  Register getRegForGEPIndex(const Value *Idx);
 
   /// We're checking to see if we can fold \p LI into \p FoldInst. Note
   /// that we could have a sequence where multiple LLVM IR instructions are
@@ -347,27 +347,26 @@ protected:
 
   /// This method is called by target-independent code to request that an
   /// instruction with the given type, opcode, and register operand be emitted.
-  virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
-                              bool Op0IsKill);
+  virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0);
 
   /// This method is called by target-independent code to request that an
   /// instruction with the given type, opcode, and register operands be emitted.
   virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
-                               bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+                               unsigned Op1);
 
   /// This method is called by target-independent code to request that an
   /// instruction with the given type, opcode, and register and immediate
   /// operands be emitted.
   virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
-                               bool Op0IsKill, uint64_t Imm);
+                               uint64_t Imm);
 
   /// This method is a wrapper of fastEmit_ri.
   ///
   /// It first tries to emit an instruction with an immediate operand using
   /// fastEmit_ri. If that fails, it materializes the immediate into a register
   /// and try fastEmit_rr instead.
-  Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
-                        uint64_t Imm, MVT ImmType);
+  Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, uint64_t Imm,
+                        MVT ImmType);
 
   /// This method is called by target-independent code to request that an
   /// instruction with the given type, opcode, and immediate operand be emitted.
@@ -387,33 +386,31 @@ protected:
   /// Emit a MachineInstr with one register operand and a result register
   /// in the given register class.
   Register fastEmitInst_r(unsigned MachineInstOpcode,
-                          const TargetRegisterClass *RC, unsigned Op0,
-                          bool Op0IsKill);
+                          const TargetRegisterClass *RC, unsigned Op0);
 
   /// Emit a MachineInstr with two register operands and a result
   /// register in the given register class.
   Register fastEmitInst_rr(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
-                           bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+                           unsigned Op1);
 
   /// Emit a MachineInstr with three register operands and a result
   /// register in the given register class.
   Register fastEmitInst_rrr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC, unsigned Op0,
-                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
-                            unsigned Op2, bool Op2IsKill);
+                            unsigned Op1, unsigned Op2);
 
   /// Emit a MachineInstr with a register operand, an immediate, and a
   /// result register in the given register class.
   Register fastEmitInst_ri(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
-                           bool Op0IsKill, uint64_t Imm);
+                           uint64_t Imm);
 
   /// Emit a MachineInstr with one register operand and two immediate
   /// operands.
   Register fastEmitInst_rii(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC, unsigned Op0,
-                            bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
+                            uint64_t Imm1, uint64_t Imm2);
 
   /// Emit a MachineInstr with a floating point immediate, and a result
   /// register in the given register class.
@@ -425,8 +422,7 @@ protected:
   /// result register in the given register class.
   Register fastEmitInst_rri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC, unsigned Op0,
-                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
-                            uint64_t Imm);
+                            unsigned Op1, uint64_t Imm);
 
   /// Emit a MachineInstr with a single immediate operand, and a result
   /// register in the given register class.
@@ -435,12 +431,11 @@ protected:
 
   /// Emit a MachineInstr for an extract_subreg from a specified index of
   /// a superregister to a specified type.
-  Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
-                                      uint32_t Idx);
+  Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx);
 
   /// Emit MachineInstrs to compute the value of Op with all but the
   /// least significant bit set to zero.
-  Register fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
+  Register fastEmitZExtFromI1(MVT VT, unsigned Op0);
 
   /// Emit an unconditional branch to the given block, unless it is the
   /// immediate (fall-through) successor, and update the CFG.
@@ -490,12 +485,6 @@ protected:
   /// - \c Add has a constant operand.
   bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
 
-  /// Test whether the register associated with this value has exactly one use,
-  /// in which case that single use is killing. Note that multiple IR values
-  /// may map onto the same register, in which case this is not the same as
-  /// checking that an IR value has one use.
-  bool hasTrivialKill(const Value *V);
-
   /// Create a machine mem operand from the given instruction.
   MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;
 
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 0ff77d4ba1a..50faa63be59 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -238,42 +238,6 @@ void FastISel::flushLocalValueMap() {
   SavedInsertPt = FuncInfo.InsertPt;
 }
 
-bool FastISel::hasTrivialKill(const Value *V) {
-  // Don't consider constants or arguments to have trivial kills.
-  const Instruction *I = dyn_cast<Instruction>(V);
-  if (!I)
-    return false;
-
-  // No-op casts are trivially coalesced by fast-isel.
-  if (const auto *Cast = dyn_cast<CastInst>(I))
-    if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
-      return false;
-
-  // Even the value might have only one use in the LLVM IR, it is possible that
-  // FastISel might fold the use into another instruction and now there is more
-  // than one use at the Machine Instruction level.
-  Register Reg = lookUpRegForValue(V);
-  if (Reg && !MRI.use_empty(Reg))
-    return false;
-
-  // GEPs with all zero indices are trivially coalesced by fast-isel.
-  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
-    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
-      return false;
-
-  // Casts and extractvalues may be trivially coalesced by fast-isel.
-  if (I->getOpcode() == Instruction::BitCast ||
-      I->getOpcode() == Instruction::PtrToInt ||
-      I->getOpcode() == Instruction::IntToPtr ||
-      I->getOpcode() == Instruction::ExtractValue)
-    return false;
-
-  // Only instructions with a single use in the same basic block are considered
-  // to have trivial kills.
-  return I->hasOneUse() &&
-         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
-}
-
 Register FastISel::getRegForValue(const Value *V) {
   EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
   // Don't handle non-simple values in FastISel.
@@ -346,8 +310,8 @@ Register FastISel::materializeConstant(const Value *V, MVT VT) {
         Register IntegerReg =
             getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
         if (IntegerReg)
-          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
-                           /*Op0IsKill=*/false);
+          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
+                           IntegerReg);
       }
     }
   } else if (const auto *Op = dyn_cast<Operator>(V)) {
@@ -419,27 +383,22 @@ void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
   }
 }
 
-std::pair<Register, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
+Register FastISel::getRegForGEPIndex(const Value *Idx) {
   Register IdxN = getRegForValue(Idx);
   if (!IdxN)
     // Unhandled operand. Halt "fast" selection and bail.
-    return std::pair<Register, bool>(Register(), false);
-
-  bool IdxNIsKill = hasTrivialKill(Idx);
+    return Register();
 
   // If the index is smaller or larger than intptr_t, truncate or extend it.
   MVT PtrVT = TLI.getPointerTy(DL);
   EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
   if (IdxVT.bitsLT(PtrVT)) {
-    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
-                      IdxNIsKill);
-    IdxNIsKill = true;
+    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
   } else if (IdxVT.bitsGT(PtrVT)) {
     IdxN =
-        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
-    IdxNIsKill = true;
+        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
   }
-  return std::pair<Register, bool>(IdxN, IdxNIsKill);
+  return IdxN;
 }
 
 void FastISel::recomputeInsertPt() {
@@ -517,11 +476,10 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
     Register Op1 = getRegForValue(I->getOperand(1));
     if (!Op1)
       return false;
-    bool Op1IsKill = hasTrivialKill(I->getOperand(1));
 
     Register ResultReg =
-        fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
-                     CI->getZExtValue(), VT.getSimpleVT());
+        fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
+                     VT.getSimpleVT());
     if (!ResultReg)
       return false;
 
@@ -533,7 +491,6 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
   Register Op0 = getRegForValue(I->getOperand(0));
   if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
     return false;
-  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
 
   // Check if the second operand is a constant and handle it appropriately.
   if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
@@ -553,8 +510,8 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
       ISDOpcode = ISD::AND;
     }
 
-    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
-                                      Op0IsKill, Imm, VT.getSimpleVT());
+    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
+                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;
 
@@ -566,11 +523,10 @@ bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
   Register Op1 = getRegForValue(I->getOperand(1));
   if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
     return false;
-  bool Op1IsKill = hasTrivialKill(I->getOperand(1));
 
   // Now we have both operands in registers. Emit the instruction.
   Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
-                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
+                                   ISDOpcode, Op0, Op1);
   if (!ResultReg)
     // Target-specific code wasn't able to find a machine opcode for
     // the given ISD opcode and type. Halt "fast" selection and bail.
     return false;
@@ -591,8 +547,6 @@ bool FastISel::selectGetElementPtr(const User *I) {
   if (isa<VectorType>(I->getType()))
     return false;
 
-  bool NIsKill = hasTrivialKill(I->getOperand(0));
-
   // Keep a running tab of the total offset to coalesce multiple N = N + Offset
   // into a single N = N + TotalOffset.
   uint64_t TotalOffs = 0;
@@ -608,10 +562,9 @@ bool FastISel::selectGetElementPtr(const User *I) {
         // N = N + Offset
         TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
         if (TotalOffs >= MaxOffs) {
-          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
           if (!N) // Unhandled operand. Halt "fast" selection and bail.
             return false;
-          NIsKill = true;
           TotalOffs = 0;
         }
       }
@@ -626,43 +579,38 @@ bool FastISel::selectGetElementPtr(const User *I) {
         uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
         TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
         if (TotalOffs >= MaxOffs) {
-          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
           if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
-          NIsKill = true;
           TotalOffs = 0;
         }
         continue;
       }
       if (TotalOffs) {
-        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
         if (!N) // Unhandled operand. Halt "fast" selection and bail.
           return false;
-        NIsKill = true;
         TotalOffs = 0;
       }
 
       // N = N + Idx * ElementSize;
       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
-      std::pair<Register, bool> Pair = getRegForGEPIndex(Idx);
-      Register IdxN = Pair.first;
-      bool IdxNIsKill = Pair.second;
+      Register IdxN = getRegForGEPIndex(Idx);
       if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
         return false;
 
       if (ElementSize != 1) {
-        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
+        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
         if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
           return false;
-        IdxNIsKill = true;
       }
-      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
+      N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
       if (!N) // Unhandled operand. Halt "fast" selection and bail.
         return false;
     }
   }
   if (TotalOffs) {
-    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
+    N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
     if (!N) // Unhandled operand. Halt "fast" selection and bail.
       return false;
   }
@@ -1425,10 +1373,8 @@ bool FastISel::selectCast(const User *I, unsigned Opcode) {
     // Unhandled operand. Halt "fast" selection and bail.
     return false;
 
-  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
-
   Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
-                                  Opcode, InputReg, InputRegIsKill);
+                                  Opcode, InputReg);
   if (!ResultReg)
     return false;
 
@@ -1459,7 +1405,6 @@ bool FastISel::selectBitCast(const User *I) {
   Register Op0 = getRegForValue(I->getOperand(0));
   if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
     return false;
-  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
 
   // First, try to perform the bitcast by inserting a reg-reg copy.
   Register ResultReg;
@@ -1476,7 +1421,7 @@ bool FastISel::selectBitCast(const User *I) {
 
   // If the reg-reg copy failed, select a BITCAST opcode.
   if (!ResultReg)
-    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
+    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
 
   if (!ResultReg)
     return false;
@@ -1652,12 +1597,11 @@ bool FastISel::selectFNeg(const User *I, const Value *In) {
   Register OpReg = getRegForValue(In);
   if (!OpReg)
     return false;
-  bool OpRegIsKill = hasTrivialKill(In);
 
   // If the target has ISD::FNEG, use it.
   EVT VT = TLI.getValueType(DL, I->getType());
   Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
-                                  OpReg, OpRegIsKill);
+                                  OpReg);
   if (ResultReg) {
     updateValueMap(I, ResultReg);
     return true;
@@ -1672,18 +1616,18 @@ bool FastISel::selectFNeg(const User *I, const Value *In) {
     return false;
 
   Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
-                               ISD::BITCAST, OpReg, OpRegIsKill);
+                               ISD::BITCAST, OpReg);
   if (!IntReg)
     return false;
 
   Register IntResultReg = fastEmit_ri_(
-      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*Op0IsKill=*/true,
+      IntVT.getSimpleVT(), ISD::XOR, IntReg,
       UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
   if (!IntResultReg)
     return false;
 
   ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
-                         IntResultReg, /*Op0IsKill=*/true);
+                         IntResultReg);
   if (!ResultReg)
     return false;
 
@@ -1883,14 +1827,12 @@ bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
 
 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
 
-unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
-                              bool /*Op0IsKill*/) {
+unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
   return 0;
 }
 
 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
-                               bool /*Op0IsKill*/, unsigned /*Op1*/,
-                               bool /*Op1IsKill*/) {
+                               unsigned /*Op1*/) {
   return 0;
 }
 
@@ -1904,7 +1846,7 @@ unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
 }
 
 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
-                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
+                               uint64_t /*Imm*/) {
   return 0;
 }
 
@@ -1913,7 +1855,7 @@ unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
 /// If that fails, it materializes the immediate into a register and try
 /// fastEmit_rr instead.
 Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
-                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
+                                uint64_t Imm, MVT ImmType) {
   // If this is a multiply by a power of two, emit this as a shift left.
   if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
     Opcode = ISD::SHL;
@@ -1931,11 +1873,10 @@ Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
     return 0;
 
   // First check if immediate type is legal. If not, we can't use the ri form.
-  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
+  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
   if (ResultReg)
     return ResultReg;
   Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
-  bool IsImmKill = true;
   if (!MaterialReg) {
     // This is a bit ugly/slow, but failing here means falling out of
     // fast-isel, which would be very slow.
@@ -1944,15 +1885,8 @@ Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
     MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
     if (!MaterialReg)
       return 0;
-    // FIXME: If the materialized register here has no uses yet then this
-    // will be the first use and we should be able to mark it as killed.
-    // However, the local value area for materialising constant expressions
-    // grows down, not up, which means that any constant expressions we generate
-    // later which also use 'Imm' could be after this instruction and therefore
-    // after this kill.
-    IsImmKill = false;
   }
-  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
+  return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
 }
 
 Register FastISel::createResultReg(const TargetRegisterClass *RC) {
@@ -1986,8 +1920,7 @@ Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
 }
 
 Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
-                                  const TargetRegisterClass *RC, unsigned Op0,
-                                  bool Op0IsKill) {
+                                  const TargetRegisterClass *RC, unsigned Op0) {
   const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   Register ResultReg = createResultReg(RC);
@@ -1995,10 +1928,10 @@ Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-        .addReg(Op0, getKillRegState(Op0IsKill));
+        .addReg(Op0);
   else {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
-        .addReg(Op0, getKillRegState(Op0IsKill));
+        .addReg(Op0);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
@@ -2008,8 +1941,7 @@ Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
 
 Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
-                                   bool Op0IsKill, unsigned Op1,
-                                   bool Op1IsKill) {
+                                   unsigned Op1) {
   const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   Register ResultReg = createResultReg(RC);
@@ -2018,12 +1950,12 @@ Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-        .addReg(Op0, getKillRegState(Op0IsKill))
-        .addReg(Op1, getKillRegState(Op1IsKill));
+        .addReg(Op0)
+        .addReg(Op1);
   else {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
-        .addReg(Op0, getKillRegState(Op0IsKill))
-        .addReg(Op1, getKillRegState(Op1IsKill));
+        .addReg(Op0)
+        .addReg(Op1);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
@@ -2032,9 +1964,7 @@ Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
 
 Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC, unsigned Op0,
-                                    bool Op0IsKill, unsigned Op1,
-                                    bool Op1IsKill, unsigned Op2,
-                                    bool Op2IsKill) {
+                                    unsigned Op1, unsigned Op2) {
   const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   Register ResultReg = createResultReg(RC);
@@ -2044,14 +1974,14 @@ Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-        .addReg(Op0, getKillRegState(Op0IsKill))
-        .addReg(Op1, getKillRegState(Op1IsKill))
-        .addReg(Op2, getKillRegState(Op2IsKill));
+        .addReg(Op0)
+        .addReg(Op1)
+        .addReg(Op2);
   else {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
-        .addReg(Op0, getKillRegState(Op0IsKill))
-        .addReg(Op1, getKillRegState(Op1IsKill))
-        .addReg(Op2, getKillRegState(Op2IsKill));
+        .addReg(Op0)
+        .addReg(Op1)
+        .addReg(Op2);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
   }
@@ -2060,7 +1990,7 @@ Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
 
 Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
-                                   bool Op0IsKill, uint64_t Imm) {
+                                   uint64_t Imm) {
   const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   Register ResultReg = createResultReg(RC);
@@ -2068,11 +1998,11 @@ Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-        .addReg(Op0, getKillRegState(Op0IsKill))
+        .addReg(Op0)
         .addImm(Imm);
   else {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
-        .addReg(Op0, getKillRegState(Op0IsKill))
+        .addReg(Op0)
         .addImm(Imm);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
@@ -2082,8 +2012,7 @@ Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
 
 Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC, unsigned Op0,
-                                    bool Op0IsKill, uint64_t Imm1,
-                                    uint64_t Imm2) {
+                                    uint64_t Imm1, uint64_t Imm2) {
   const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   Register ResultReg = createResultReg(RC);
@@ -2091,12 +2020,12 @@ Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-        .addReg(Op0, getKillRegState(Op0IsKill))
+        .addReg(Op0)
         .addImm(Imm1)
         .addImm(Imm2);
   else {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
-        .addReg(Op0, getKillRegState(Op0IsKill))
+        .addReg(Op0)
         .addImm(Imm1)
         .addImm(Imm2);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -2126,8 +2055,7 @@ Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
 
 Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC, unsigned Op0,
-                                    bool Op0IsKill, unsigned Op1,
-                                    bool Op1IsKill, uint64_t Imm) {
+                                    unsigned Op1, uint64_t Imm) {
   const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   Register ResultReg = createResultReg(RC);
@@ -2136,13 +2064,13 @@ Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-        .addReg(Op0, getKillRegState(Op0IsKill))
-        .addReg(Op1, getKillRegState(Op1IsKill))
+        .addReg(Op0)
+        .addReg(Op1)
         .addImm(Imm);
   else {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
-        .addReg(Op0, getKillRegState(Op0IsKill))
-        .addReg(Op1, getKillRegState(Op1IsKill))
+        .addReg(Op0)
+        .addReg(Op1)
         .addImm(Imm);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
@@ -2167,21 +2095,21 @@ Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
 }
 
 Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
-                                              bool Op0IsKill, uint32_t Idx) {
+                                              uint32_t Idx) {
   Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
   assert(Register::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
   const TargetRegisterClass *RC = MRI.getRegClass(Op0);
   MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
-          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
+          ResultReg).addReg(Op0, 0, Idx);
   return ResultReg;
 }
 
 /// Emit MachineInstrs to compute the value of Op with all but the least
 /// significant bit set to zero.
-Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
-  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
+Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
+  return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
 }
 
 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index ecc68ccda03..393b7d8e5af 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -195,34 +195,32 @@ private:
                           const Value *Cond);
   bool optimizeIntExtLoad(const Instruction *I, MVT RetVT, MVT SrcVT);
   bool optimizeSelect(const SelectInst *SI);
-  std::pair<unsigned, bool> getRegForGEPIndex(const Value *Idx);
+  unsigned getRegForGEPIndex(const Value *Idx);
 
   // Emit helper routines.
   unsigned emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
                       const Value *RHS, bool SetFlags = false,
                       bool WantResult = true, bool IsZExt = false);
   unsigned emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
-                         bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
-                         bool SetFlags = false, bool WantResult = true);
+                         unsigned RHSReg, bool SetFlags = false,
+                         bool WantResult = true);
   unsigned emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
-                         bool LHSIsKill, uint64_t Imm, bool SetFlags = false,
+                         uint64_t Imm, bool SetFlags = false,
                          bool WantResult = true);
   unsigned emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
-                         bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
-                         AArch64_AM::ShiftExtendType ShiftType,
+                         unsigned RHSReg, AArch64_AM::ShiftExtendType ShiftType,
                          uint64_t ShiftImm, bool SetFlags = false,
                          bool WantResult = true);
   unsigned emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
-                         bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
-                         AArch64_AM::ShiftExtendType ExtType,
-                         uint64_t ShiftImm, bool SetFlags = false,
+                         unsigned RHSReg, AArch64_AM::ShiftExtendType ExtType,
+                         uint64_t ShiftImm, bool SetFlags = false,
                          bool WantResult = true);
 
   // Emit functions.
   bool emitCompareAndBranch(const BranchInst *BI);
   bool emitCmp(const Value *LHS, const Value *RHS, bool IsZExt);
   bool emitICmp(MVT RetVT, const Value *LHS, const Value *RHS, bool IsZExt);
-  bool emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
+  bool emitICmp_ri(MVT RetVT, unsigned LHSReg, uint64_t Imm);
   bool emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS);
   unsigned emitLoad(MVT VT, MVT ResultVT, Address Addr, bool WantZExt = true,
                     MachineMemOperand *MMO = nullptr);
@@ -235,42 +233,34 @@ private:
   unsigned emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
                    bool SetFlags = false, bool WantResult = true,
                    bool IsZExt = false);
-  unsigned emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill, int64_t Imm);
+  unsigned emitAdd_ri_(MVT VT, unsigned Op0, int64_t Imm);
   unsigned emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
                    bool SetFlags = false, bool WantResult = true,
                    bool IsZExt = false);
-  unsigned emitSubs_rr(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
-                       unsigned RHSReg, bool RHSIsKill, bool WantResult = true);
-  unsigned emitSubs_rs(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
-                       unsigned RHSReg, bool RHSIsKill,
+  unsigned emitSubs_rr(MVT RetVT, unsigned LHSReg, unsigned RHSReg,
+                       bool WantResult = true);
+  unsigned emitSubs_rs(MVT RetVT, unsigned LHSReg, unsigned RHSReg,
                        AArch64_AM::ShiftExtendType ShiftType, uint64_t ShiftImm,
                        bool WantResult = true);
   unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
                          const Value *RHS);
   unsigned emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
-                            bool LHSIsKill, uint64_t Imm);
+                            uint64_t Imm);
   unsigned emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT, unsigned LHSReg,
-                            bool LHSIsKill, unsigned RHSReg, bool RHSIsKill,
-                            uint64_t ShiftImm);
-  unsigned emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill, uint64_t Imm);
-  unsigned emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
-                      unsigned Op1, bool Op1IsKill);
-  unsigned emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
-                        unsigned Op1, bool Op1IsKill);
-  unsigned emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill,
-                        unsigned Op1, bool Op1IsKill);
-  unsigned emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
-                      unsigned Op1Reg, bool Op1IsKill);
-  unsigned emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
-                      uint64_t Imm, bool IsZExt = true);
-  unsigned emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
-                      unsigned Op1Reg, bool Op1IsKill);
-  unsigned emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
-                      uint64_t Imm, bool IsZExt = true);
-  unsigned emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill,
-                      unsigned Op1Reg, bool Op1IsKill);
-  unsigned emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, bool Op0IsKill,
-                      uint64_t Imm, bool IsZExt = false);
+                            unsigned RHSReg, uint64_t ShiftImm);
+  unsigned emitAnd_ri(MVT RetVT, unsigned LHSReg, uint64_t Imm);
+  unsigned emitMul_rr(MVT RetVT, unsigned Op0, unsigned Op1);
+  unsigned emitSMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1);
+  unsigned emitUMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1);
+  unsigned emitLSL_rr(MVT RetVT, unsigned Op0Reg, unsigned Op1Reg);
+  unsigned emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, uint64_t Imm,
+                      bool IsZExt = true);
+  unsigned emitLSR_rr(MVT RetVT, unsigned Op0Reg, unsigned Op1Reg);
+  unsigned emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, uint64_t Imm,
+                      bool IsZExt = true);
+  unsigned emitASR_rr(MVT RetVT, unsigned Op0Reg, unsigned Op1Reg);
+  unsigned emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0Reg, uint64_t Imm,
+                      bool IsZExt = false);
 
   unsigned materializeInt(const ConstantInt *CI, MVT VT);
   unsigned materializeFP(const ConstantFP *CFP, MVT VT);
@@ -554,7 +544,7 @@ unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP* CFP) {
   bool Is64Bit = (VT == MVT::f64);
   unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;
   unsigned Opc = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr;
-  return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg, /*IsKill=*/true);
+  return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg);
 }
 
 /// Check if the multiply is by a power-of-2 constant.
@@ -764,9 +754,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
         unsigned Reg = getRegForValue(LHS);
         if (!Reg)
           return false;
-        bool RegIsKill = hasTrivialKill(LHS);
-        Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, RegIsKill,
-                                         AArch64::sub_32);
+        Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
         Addr.setOffsetReg(Reg);
         return true;
       }
@@ -862,9 +850,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
         unsigned Reg = getRegForValue(LHS);
         if (!Reg)
           return false;
-        bool RegIsKill = hasTrivialKill(LHS);
-        Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, RegIsKill,
-                                         AArch64::sub_32);
+        Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
         Addr.setOffsetReg(Reg);
         return true;
       }
@@ -1064,26 +1050,22 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
       if (Addr.getExtendType() == AArch64_AM::SXTW ||
          Addr.getExtendType() == AArch64_AM::UXTW )
         ResultReg = emitAddSub_rx(/*UseAdd=*/true, MVT::i64, Addr.getReg(),
-                                  /*TODO:IsKill=*/false, Addr.getOffsetReg(),
-                                  /*TODO:IsKill=*/false, Addr.getExtendType(),
+                                  Addr.getOffsetReg(), Addr.getExtendType(),
                                   Addr.getShift());
       else
         ResultReg = emitAddSub_rs(/*UseAdd=*/true, MVT::i64, Addr.getReg(),
-                                  /*TODO:IsKill=*/false, Addr.getOffsetReg(),
-                                  /*TODO:IsKill=*/false, AArch64_AM::LSL,
+                                  Addr.getOffsetReg(), AArch64_AM::LSL,
                                   Addr.getShift());
     } else {
       if (Addr.getExtendType() == AArch64_AM::UXTW)
         ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(),
-                               /*Op0IsKill=*/false, Addr.getShift(),
-                               /*IsZExt=*/true);
+                               Addr.getShift(), /*IsZExt=*/true);
       else if (Addr.getExtendType() == AArch64_AM::SXTW)
         ResultReg = emitLSL_ri(MVT::i64, MVT::i32, Addr.getOffsetReg(),
-                               /*Op0IsKill=*/false, Addr.getShift(),
-                               /*IsZExt=*/false);
+                               Addr.getShift(), /*IsZExt=*/false);
       else
         ResultReg = emitLSL_ri(MVT::i64, MVT::i64, Addr.getOffsetReg(),
-                               /*Op0IsKill=*/false, Addr.getShift());
+                               Addr.getShift());
     }
     if (!ResultReg)
       return false;
@@ -1100,7 +1082,7 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
     unsigned ResultReg;
     if (Addr.getReg())
       // Try to fold the immediate into the add instruction.
-      ResultReg = emitAdd_ri_(MVT::i64, Addr.getReg(), /*IsKill=*/false, Offset);
+      ResultReg = emitAdd_ri_(MVT::i64, Addr.getReg(), Offset);
     else
       ResultReg = fastEmit_i(MVT::i64, MVT::i64, ISD::Constant, Offset);
 
@@ -1199,7 +1181,6 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
   unsigned LHSReg = getRegForValue(LHS);
   if (!LHSReg)
     return 0;
-  bool LHSIsKill = hasTrivialKill(LHS);
 
   if (NeedExtend)
     LHSReg = emitIntExt(SrcVT, LHSReg, RetVT, IsZExt);
@@ -1208,15 +1189,14 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
   if (const auto *C = dyn_cast<ConstantInt>(RHS)) {
    uint64_t Imm = IsZExt ? C->getZExtValue() : C->getSExtValue();
     if (C->isNegative())
-      ResultReg = emitAddSub_ri(!UseAdd, RetVT, LHSReg, LHSIsKill, -Imm,
-                                SetFlags, WantResult);
+      ResultReg = emitAddSub_ri(!UseAdd, RetVT, LHSReg, -Imm, SetFlags,
+                                WantResult);
     else
-      ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, LHSIsKill, Imm, SetFlags,
+      ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, Imm, SetFlags,
                                 WantResult);
   } else if (const auto *C = dyn_cast<Constant>(RHS))
     if (C->isNullValue())
-      ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, LHSIsKill, 0, SetFlags,
-                                WantResult);
+      ResultReg = emitAddSub_ri(UseAdd, RetVT, LHSReg, 0, SetFlags, WantResult);
 
   if (ResultReg)
     return ResultReg;
@@ -1230,17 +1210,14 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
       unsigned RHSReg = getRegForValue(SI->getOperand(0));
       if (!RHSReg)
         return 0;
-      bool RHSIsKill = hasTrivialKill(SI->getOperand(0));
-      return emitAddSub_rx(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg,
-                           RHSIsKill, ExtendType, C->getZExtValue(),
-                           SetFlags, WantResult);
+      return emitAddSub_rx(UseAdd, RetVT, LHSReg, RHSReg, ExtendType,
+                           C->getZExtValue(), SetFlags, WantResult);
     }
     unsigned RHSReg = getRegForValue(RHS);
     if (!RHSReg)
       return 0;
-    bool RHSIsKill = hasTrivialKill(RHS);
-    return emitAddSub_rx(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
-                         ExtendType, 0, SetFlags, WantResult);
+    return emitAddSub_rx(UseAdd, RetVT, LHSReg, RHSReg, ExtendType, 0,
+                         SetFlags, WantResult);
   }
 
   // Check if the mul can be folded into the instruction.
@@ -1258,10 +1235,8 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
       unsigned RHSReg = getRegForValue(MulLHS);
      if (!RHSReg)
        return 0;
-      bool RHSIsKill = hasTrivialKill(MulLHS);
-      ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg,
-                                RHSIsKill, AArch64_AM::LSL, ShiftVal, SetFlags,
-                                WantResult);
+      ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, AArch64_AM::LSL,
+                                ShiftVal, SetFlags, WantResult);
       if (ResultReg)
         return ResultReg;
     }
@@ -1283,10 +1258,8 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
         unsigned RHSReg = getRegForValue(SI->getOperand(0));
         if (!RHSReg)
           return 0;
-        bool RHSIsKill = hasTrivialKill(SI->getOperand(0));
-        ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg,
-                                  RHSIsKill, ShiftType, ShiftVal, SetFlags,
-                                  WantResult);
+        ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, ShiftType,
+                                  ShiftVal, SetFlags, WantResult);
         if (ResultReg)
           return ResultReg;
       }
@@ -1297,18 +1270,15 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
   unsigned RHSReg = getRegForValue(RHS);
   if (!RHSReg)
     return 0;
-  bool RHSIsKill = hasTrivialKill(RHS);
 
   if (NeedExtend)
     RHSReg = emitIntExt(SrcVT, RHSReg, RetVT, IsZExt);
 
-  return emitAddSub_rr(UseAdd, RetVT, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
-                       SetFlags, WantResult);
+  return emitAddSub_rr(UseAdd, RetVT, LHSReg, RHSReg, SetFlags, WantResult);
 }
 
 unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
-                                        bool LHSIsKill, unsigned RHSReg,
-                                        bool RHSIsKill, bool SetFlags,
+                                        unsigned RHSReg, bool SetFlags,
                                         bool WantResult) {
   assert(LHSReg && RHSReg && "Invalid register number.");
 
@@ -1339,14 +1309,14 @@ unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-      .addReg(LHSReg, getKillRegState(LHSIsKill))
-      .addReg(RHSReg, getKillRegState(RHSIsKill));
+      .addReg(LHSReg)
+      .addReg(RHSReg);
   return ResultReg;
 }
 
 unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
-                                        bool LHSIsKill, uint64_t Imm,
-                                        bool SetFlags, bool WantResult) {
+                                        uint64_t Imm, bool SetFlags,
+                                        bool WantResult) {
   assert(LHSReg && "Invalid register number.");
 
   if (RetVT != MVT::i32 && RetVT != MVT::i64)
@@ -1383,15 +1353,14 @@ unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
   const MCInstrDesc &II = TII.get(Opc);
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-      .addReg(LHSReg, getKillRegState(LHSIsKill))
+      .addReg(LHSReg)
       .addImm(Imm)
       .addImm(getShifterImm(AArch64_AM::LSL, ShiftImm));
   return ResultReg;
 }
 
 unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
-                                        bool LHSIsKill, unsigned RHSReg,
-                                        bool RHSIsKill,
+                                        unsigned RHSReg,
                                         AArch64_AM::ShiftExtendType ShiftType,
                                         uint64_t ShiftImm, bool SetFlags,
                                         bool WantResult) {
@@ -1426,15 +1395,14 @@ unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-      .addReg(LHSReg, getKillRegState(LHSIsKill))
-      .addReg(RHSReg, getKillRegState(RHSIsKill))
+      .addReg(LHSReg)
+      .addReg(RHSReg)
      .addImm(getShifterImm(ShiftType, ShiftImm));
   return ResultReg;
 }
 
 unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
-                                        bool LHSIsKill, unsigned RHSReg,
-                                        bool RHSIsKill,
+                                        unsigned RHSReg,
                                         AArch64_AM::ShiftExtendType ExtType,
                                         uint64_t ShiftImm, bool SetFlags,
                                         bool WantResult) {
@@ -1471,8 +1439,8 @@ unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
   LHSReg = constrainOperandRegClass(II, LHSReg, II.getNumDefs());
   RHSReg = constrainOperandRegClass(II, RHSReg, II.getNumDefs() + 1);
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
-      .addReg(LHSReg, getKillRegState(LHSIsKill))
-      .addReg(RHSReg, getKillRegState(RHSIsKill))
+      .addReg(LHSReg)
+      .addReg(RHSReg)
       .addImm(getArithExtendImm(ExtType, ShiftImm));
   return ResultReg;
 }
@@ -1505,9 +1473,8 @@ bool AArch64FastISel::emitICmp(MVT RetVT, const Value *LHS, const Value *RHS,
                  IsZExt) != 0;
 }
 
-bool AArch64FastISel::emitICmp_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
-                                  uint64_t Imm) {
-  return emitAddSub_ri(/*UseAdd=*/false, RetVT, LHSReg, LHSIsKill, Imm,
+bool AArch64FastISel::emitICmp_ri(MVT RetVT, unsigned LHSReg, uint64_t Imm) {
+  return emitAddSub_ri(/*UseAdd=*/false, RetVT, LHSReg, Imm,
                        /*SetFlags=*/true, /*WantResult=*/false) != 0;
 }
 
@@ -1525,24 +1492,22 @@ bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
   unsigned LHSReg = getRegForValue(LHS);
   if (!LHSReg)
     return false;
-  bool LHSIsKill = hasTrivialKill(LHS);
 
   if (UseImm) {
     unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDri : AArch64::FCMPSri;
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
-        .addReg(LHSReg, getKillRegState(LHSIsKill));
+        .addReg(LHSReg);
     return true;
   }
 
   unsigned RHSReg = getRegForValue(RHS);
   if (!RHSReg)
     return false;
-  bool RHSIsKill = hasTrivialKill(RHS);
 
   unsigned Opc = (RetVT == MVT::f64) ? AArch64::FCMPDrr : AArch64::FCMPSrr;
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
-      .addReg(LHSReg, getKillRegState(LHSIsKill))
-      .addReg(RHSReg, getKillRegState(RHSIsKill));
+      .addReg(LHSReg)
+      .addReg(RHSReg);
   return true;
 }
 
@@ -1557,13 +1522,12 @@ unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
 /// First try to emit an add with an immediate operand using emitAddSub_ri. If
 /// that fails, then try to materialize the immediate into a register and use
 /// emitAddSub_rr instead.
-unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill,
-                                      int64_t Imm) {
+unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, int64_t Imm) {
   unsigned ResultReg;
   if (Imm < 0)
-    ResultReg = emitAddSub_ri(false, VT, Op0, Op0IsKill, -Imm);
+    ResultReg = emitAddSub_ri(false, VT, Op0, -Imm);
   else
-    ResultReg = emitAddSub_ri(true, VT, Op0, Op0IsKill, Imm);
+    ResultReg = emitAddSub_ri(true, VT, Op0, Imm);
 
   if (ResultReg)
     return ResultReg;
@@ -1572,7 +1536,7 @@ unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, bool Op0IsKill,
   if (!CReg)
     return 0;
 
-  ResultReg = emitAddSub_rr(true, VT, Op0, Op0IsKill, CReg, true);
+  ResultReg = emitAddSub_rr(true, VT, Op0, CReg);
   return ResultReg;
 }
 
@@ -1583,20 +1547,17 @@ unsigned AArch64FastISel::emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
 }
 
 unsigned AArch64FastISel::emitSubs_rr(MVT RetVT, unsigned LHSReg,
-                                      bool LHSIsKill, unsigned RHSReg,
-                                      bool RHSIsKill, bool WantResult) {
-  return emitAddSub_rr(/*UseAdd=*/false, RetVT, LHSReg, LHSIsKill, RHSReg,
-                       RHSIsKill, /*SetFlags=*/true, WantResult);
+                                      unsigned RHSReg, bool WantResult) {
+  return emitAddSub_rr(/*UseAdd=*/false, RetVT, LHSReg, RHSReg,
+                       /*SetFlags=*/true, WantResult);
 }
 
 unsigned AArch64FastISel::emitSubs_rs(MVT RetVT, unsigned LHSReg,
-                                      bool LHSIsKill, unsigned RHSReg,
-                                      bool RHSIsKill,
+                                      unsigned RHSReg,
                                       AArch64_AM::ShiftExtendType ShiftType,
                                       uint64_t ShiftImm, bool WantResult) {
-  return emitAddSub_rs(/*UseAdd=*/false, RetVT, LHSReg, LHSIsKill, RHSReg,
-                       RHSIsKill, ShiftType, ShiftImm, /*SetFlags=*/true,
-                       WantResult);
+  return emitAddSub_rs(/*UseAdd=*/false, RetVT, LHSReg, RHSReg, ShiftType,
+                       ShiftImm, /*SetFlags=*/true, WantResult);
 }
 
 unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
@@ -1619,12 +1580,11 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
   unsigned LHSReg = getRegForValue(LHS);
   if (!LHSReg)
     return 0;
-  bool LHSIsKill = hasTrivialKill(LHS);
 
   unsigned ResultReg = 0;
   if (const auto *C = dyn_cast<ConstantInt>(RHS)) {
     uint64_t Imm = C->getZExtValue();
-    ResultReg = emitLogicalOp_ri(ISDOpc, RetVT, LHSReg, LHSIsKill, Imm);
+    ResultReg = emitLogicalOp_ri(ISDOpc, RetVT, LHSReg, Imm);
   }
   if (ResultReg)
     return ResultReg;
@@ -1645,9 +1605,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
       unsigned RHSReg = getRegForValue(MulLHS);
       if (!RHSReg)
         return 0;
-      bool RHSIsKill = hasTrivialKill(MulLHS);
-      ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, LHSIsKill, RHSReg,
-                                   RHSIsKill, ShiftVal);
+      ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);
       if (ResultReg)
         return ResultReg;
     }
@@ -1661,9 +1619,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
       unsigned RHSReg = getRegForValue(SI->getOperand(0));
      if (!RHSReg)
        return 0;
-      bool RHSIsKill = hasTrivialKill(SI->getOperand(0));
-      ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, LHSIsKill, RHSReg,
-                                   RHSIsKill, ShiftVal);
+      ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);
      if (ResultReg)
        return ResultReg;
    }
  }
@@ -1672,20 +1628,18 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
   unsigned RHSReg = getRegForValue(RHS);
   if (!RHSReg)
     return 0;
-  bool RHSIsKill = hasTrivialKill(RHS);
 
   MVT VT = std::max(MVT::i32, RetVT.SimpleTy);
-  ResultReg = fastEmit_rr(VT, VT, ISDOpc, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
+  ResultReg = fastEmit_rr(VT, VT, ISDOpc, LHSReg, RHSReg);
   if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
     uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
-    ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
+    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
   }
   return ResultReg;
 }
 
 unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
-                                           unsigned LHSReg, bool LHSIsKill,
-                                           uint64_t Imm) {
+                                           unsigned LHSReg, uint64_t Imm) {
   static_assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR),
                 "ISD nodes are not consecutive!");
   static const unsigned OpcTable[3][2] = {
@@ -1720,18 +1674,17 @@ unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
     return 0;
 
   unsigned ResultReg =
-      fastEmitInst_ri(Opc, RC, LHSReg, LHSIsKill,
+      fastEmitInst_ri(Opc, RC, LHSReg,
                       AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
   if (RetVT >= MVT::i8 && RetVT <= MVT::i16 && ISDOpc != ISD::AND) {
     uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
-    ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
+    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
   }
   return ResultReg;
 }
 
 unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
-                                           unsigned LHSReg, bool LHSIsKill,
-                                           unsigned RHSReg, bool RHSIsKill,
+                                           unsigned LHSReg, unsigned RHSReg,
                                            uint64_t ShiftImm) {
   static_assert((ISD::AND + 1 == ISD::OR) && (ISD::AND + 2 == ISD::XOR),
                 "ISD nodes are not consecutive!");
@@ -1763,18 +1716,18 @@ unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
     break;
   }
   unsigned ResultReg =
-      fastEmitInst_rri(Opc, RC, LHSReg, LHSIsKill, RHSReg, RHSIsKill,
+      fastEmitInst_rri(Opc, RC, LHSReg, RHSReg,
                        AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftImm));
   if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
     uint64_t Mask = (RetVT == MVT::i8) ? 0xff : 0xffff;
-    ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask);
+    ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
   }
   return ResultReg;
 }
 
-unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
-                                     uint64_t Imm) {
-  return emitLogicalOp_ri(ISD::AND, RetVT, LHSReg, LHSIsKill, Imm);
+unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg, uint64_t Imm) {
+  return emitLogicalOp_ri(ISD::AND, RetVT, LHSReg, Imm);
 }
 
 unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
@@ -1895,7 +1848,7 @@ unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
 
   // Loading an i1 requires special handling.
   if (VT == MVT::i1) {
-    unsigned ANDReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, 1);
+    unsigned ANDReg = emitAnd_ri(MVT::i32, ResultReg, 1);
     assert(ANDReg && "Unexpected AND instruction emission failure.");
     ResultReg = ANDReg;
   }
@@ -2049,7 +2002,6 @@ bool AArch64FastISel::selectLoad(const Instruction *I) {
      removeDeadCode(I, std::next(I));
    } else
      ResultReg = fastEmitInst_extractsubreg(MVT::i32, ResultReg,
-                                             /*IsKill=*/true,
                                             AArch64::sub_32);
  }
  updateValueMap(I, ResultReg);
@@ -2157,7 +2109,7 @@ bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
 
   // Storing an i1 requires special handling.
  if (VTIsi1 && SrcReg != AArch64::WZR) {
-    unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1);
+    unsigned ANDReg = emitAnd_ri(MVT::i32, SrcReg, 1);
     assert(ANDReg && "Unexpected AND instruction emission failure.");
     SrcReg = ANDReg;
   }
@@ -2390,11 +2342,9 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
   unsigned SrcReg = getRegForValue(LHS);
   if (!SrcReg)
     return false;
-  bool SrcIsKill = hasTrivialKill(LHS);
 
   if (BW == 64 && !Is64Bit)
-    SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill,
-                                        AArch64::sub_32);
+    SrcReg = fastEmitInst_extractsubreg(MVT::i32, SrcReg, AArch64::sub_32);
 
   if ((BW < 32) && !IsBitTest)
     SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*isZExt=*/true);
@@ -2403,7 +2353,7 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
   SrcReg = constrainOperandRegClass(II, SrcReg, II.getNumDefs());
   MachineInstrBuilder MIB =
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
-          .addReg(SrcReg, getKillRegState(SrcIsKill));
+          .addReg(SrcReg);
   if (IsBitTest)
     MIB.addImm(TestBit);
   MIB.addMBB(TBB);
@@ -2521,7 +2471,6 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
   unsigned CondReg = getRegForValue(BI->getCondition());
   if (CondReg == 0)
     return false;
-  bool CondRegIsKill = hasTrivialKill(BI->getCondition());
 
   // i1 conditions come as i32 values, test the lowest bit with tb(n)z.
   unsigned Opcode = AArch64::TBNZW;
@@ -2534,7 +2483,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
   unsigned ConstrainedCondReg
     = constrainOperandRegClass(II, CondReg, II.getNumDefs());
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
-      .addReg(ConstrainedCondReg, getKillRegState(CondRegIsKill))
+      .addReg(ConstrainedCondReg)
       .addImm(0)
       .addMBB(TBB);
 
@@ -2684,19 +2633,16 @@ bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
   unsigned Src1Reg = getRegForValue(Src1Val);
   if (!Src1Reg)
     return false;
-  bool Src1IsKill = hasTrivialKill(Src1Val);
 
   unsigned Src2Reg = getRegForValue(Src2Val);
   if (!Src2Reg)
     return false;
-  bool Src2IsKill = hasTrivialKill(Src2Val);
 
-  if (NeedExtraOp) {
-    Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, Src1IsKill, 1);
-    Src1IsKill = true;
-  }
+  if (NeedExtraOp)
+    Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, 1);
+
   unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
-                                       Src1IsKill, Src2Reg, Src2IsKill);
+                                       Src2Reg);
   updateValueMap(SI, ResultReg);
   return true;
 }
@@ -2768,9 +2714,6 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
     unsigned SrcReg = getRegForValue(FoldSelect);
     if (!SrcReg)
       return false;
-    unsigned UseReg = lookUpRegForValue(SI);
-    if (UseReg)
-      MRI.clearKillFlags(UseReg);
 
     updateValueMap(I, SrcReg);
     return true;
@@ -2799,7 +2742,6 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
     unsigned CondReg = getRegForValue(Cond);
    if (!CondReg)
      return false;
-    bool CondIsKill = hasTrivialKill(Cond);
 
     const MCInstrDesc &II = TII.get(AArch64::ANDSWri);
     CondReg = constrainOperandRegClass(II, CondReg, 1);
 
     // Emit a TST instruction (ANDS wzr, reg, #imm).
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, AArch64::WZR)
-        .addReg(CondReg, getKillRegState(CondIsKill))
+        .addReg(CondReg)
         .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
   }
 
   unsigned Src1Reg = getRegForValue(SI->getTrueValue());
-  bool Src1IsKill = hasTrivialKill(SI->getTrueValue());
-
   unsigned Src2Reg = getRegForValue(SI->getFalseValue());
-  bool Src2IsKill = hasTrivialKill(SI->getFalseValue());
 
   if (!Src1Reg || !Src2Reg)
     return false;
 
-  if (ExtraCC != AArch64CC::AL) {
-    Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src1IsKill, Src2Reg,
-                               Src2IsKill, ExtraCC);
-    Src2IsKill = true;
-  }
-  unsigned ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src1IsKill, Src2Reg,
-                                        Src2IsKill, CC);
+  if (ExtraCC != AArch64CC::AL)
+    Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, ExtraCC);
+
+  unsigned ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, CC);
   updateValueMap(I, ResultReg);
   return true;
 }
@@ -2911,7 +2847,6 @@ bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
   unsigned SrcReg = getRegForValue(I->getOperand(0));
   if (!SrcReg)
     return false;
-  bool SrcIsKill = hasTrivialKill(I->getOperand(0));
 
   EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType(), true);
 
@@ -2921,7 +2856,6 @@ bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
         emitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed);
     if (!SrcReg)
       return false;
-    SrcIsKill = true;
   }
 
   unsigned Opc;
@@ -2937,8 +2871,7 @@ bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
       Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
   }
 
-  unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg,
-                                      SrcIsKill);
+  unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg);
   updateValueMap(I, ResultReg);
   return true;
 }
@@ -3491,7 +3424,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
     while (Depth--) {
       DestReg = fastEmitInst_ri(AArch64::LDRXui, &AArch64::GPR64RegClass,
-                                SrcReg, /*IsKill=*/true, 0);
+                                SrcReg, 0);
       assert(DestReg && "Unexpected LDR instruction emission failure.");
       SrcReg = DestReg;
     }
@@ -3637,10 +3570,9 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     unsigned SrcReg = getRegForValue(II->getOperand(0));
     if (!SrcReg)
       return false;
-    bool SrcRegIsKill = hasTrivialKill(II->getOperand(0));
     unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
-        .addReg(SrcReg, getKillRegState(SrcRegIsKill));
+        .addReg(SrcReg);
     updateValueMap(II, ResultReg);
     return true;
   }
@@ -3663,9 +3595,8 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     unsigned Op0Reg = getRegForValue(II->getOperand(0));
     if (!Op0Reg)
       return false;
-    bool Op0IsKill = hasTrivialKill(II->getOperand(0));
 
-    unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg, Op0IsKill);
+    unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg);
     if (!ResultReg)
       return false;
 
@@ -3742,33 +3673,26 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
      unsigned LHSReg = getRegForValue(LHS);
      if (!LHSReg)
        return false;
-      bool LHSIsKill = hasTrivialKill(LHS);
 
      unsigned RHSReg = getRegForValue(RHS);
      if (!RHSReg)
        return false;
-      bool RHSIsKill = hasTrivialKill(RHS);
 
      if (VT == MVT::i32) {
-        MulReg = emitSMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
-        unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg,
-                                       /*IsKill=*/false, 32);
-        MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
-                                            AArch64::sub_32);
-        ShiftReg = fastEmitInst_extractsubreg(VT, ShiftReg, /*IsKill=*/true,
-                                              AArch64::sub_32);
-        emitSubs_rs(VT, ShiftReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
-                    AArch64_AM::ASR, 31, /*WantResult=*/false);
+        MulReg = emitSMULL_rr(MVT::i64, LHSReg, RHSReg);
+        unsigned ShiftReg = emitLSR_ri(MVT::i64, MVT::i64, MulReg, 32);
+        MulReg = fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32);
+        ShiftReg = fastEmitInst_extractsubreg(VT, ShiftReg, AArch64::sub_32);
+        emitSubs_rs(VT, ShiftReg, MulReg, AArch64_AM::ASR, 31,
+                    /*WantResult=*/false);
      } else {
        assert(VT == MVT::i64 && "Unexpected value type.");
        // LHSReg and RHSReg cannot be killed by this Mul, since they are
        // reused in the next instruction.
-        MulReg = emitMul_rr(VT, LHSReg, /*IsKill=*/false, RHSReg,
-                            /*IsKill=*/false);
-        unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, LHSIsKill,
-                                        RHSReg, RHSIsKill);
-        emitSubs_rs(VT, SMULHReg, /*IsKill=*/true, MulReg, /*IsKill=*/false,
-                    AArch64_AM::ASR, 63, /*WantResult=*/false);
+        MulReg = emitMul_rr(VT, LHSReg, RHSReg);
+        unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, RHSReg);
+        emitSubs_rs(VT, SMULHReg, MulReg, AArch64_AM::ASR, 63,
+                    /*WantResult=*/false);
      }
      break;
    }
@@ -3777,30 +3701,23 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
      unsigned LHSReg = getRegForValue(LHS);
      if (!LHSReg)
        return false;
-      bool LHSIsKill = hasTrivialKill(LHS);
 
      unsigned RHSReg = getRegForValue(RHS);
      if (!RHSReg)
        return false;
-      bool RHSIsKill = hasTrivialKill(RHS);
 
      if (VT == MVT::i32) {
-        MulReg = emitUMULL_rr(MVT::i64, LHSReg, LHSIsKill, RHSReg, RHSIsKill);
-        emitSubs_rs(MVT::i64, AArch64::XZR, /*IsKill=*/true, MulReg,
-                    /*IsKill=*/false, AArch64_AM::LSR, 32,
+        MulReg = emitUMULL_rr(MVT::i64, LHSReg, RHSReg);
+        emitSubs_rs(MVT::i64, AArch64::XZR, MulReg, AArch64_AM::LSR, 32,
                     /*WantResult=*/false);
-        MulReg = fastEmitInst_extractsubreg(VT, MulReg, /*IsKill=*/true,
-                                            AArch64::sub_32);
+        MulReg = fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32);
      } else {
        assert(VT == MVT::i64 && "Unexpected value type.");
        // LHSReg and RHSReg cannot be killed by this Mul, since they are
        // reused in the next instruction.
-        MulReg = emitMul_rr(VT, LHSReg, /*IsKill=*/false, RHSReg,
-                            /*IsKill=*/false);
-        unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, LHSIsKill,
-                                        RHSReg, RHSIsKill);
-        emitSubs_rr(VT, AArch64::XZR, /*IsKill=*/true, UMULHReg,
-                    /*IsKill=*/false, /*WantResult=*/false);
+        MulReg = emitMul_rr(VT, LHSReg, RHSReg);
+        unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, RHSReg);
+        emitSubs_rr(VT, AArch64::XZR, UMULHReg, /*WantResult=*/false);
      }
      break;
    }
@@ -3816,8 +3733,8 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
      return false;
 
    ResultReg2 = fastEmitInst_rri(AArch64::CSINCWr, &AArch64::GPR32RegClass,
-                                  AArch64::WZR, /*IsKill=*/true, AArch64::WZR,
-                                  /*IsKill=*/true, getInvertedCondCode(CC));
+                                  AArch64::WZR, AArch64::WZR,
+                                  getInvertedCondCode(CC));
    (void)ResultReg2;
    assert((ResultReg1 + 1) == ResultReg2 &&
           "Nonconsecutive result registers.");
@@ -3917,7 +3834,7 @@ bool AArch64FastISel::selectRet(const Instruction *I) {
      // "Callee" (i.e. value producer) zero extends pointers at function
      // boundary.
      if (Subtarget->isTargetILP32() && RV->getType()->isPointerTy())
-        SrcReg = emitAnd_ri(MVT::i64, SrcReg, false, 0xffffffff);
+        SrcReg = emitAnd_ri(MVT::i64, SrcReg, 0xffffffff);
 
      // Make the copy.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, @@ -3959,7 +3876,6 @@ bool AArch64FastISel::selectTrunc(const Instruction *I) { unsigned SrcReg = getRegForValue(Op); if (!SrcReg) return false; - bool SrcIsKill = hasTrivialKill(Op); // If we're truncating from i64 to a smaller non-legal type then generate an // AND. Otherwise, we know the high bits are undefined and a truncate only @@ -3984,16 +3900,16 @@ bool AArch64FastISel::selectTrunc(const Instruction *I) { break; } // Issue an extract_subreg to get the lower 32-bits. - unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, SrcIsKill, + unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg, AArch64::sub_32); // Create the AND instruction which performs the actual truncation. - ResultReg = emitAnd_ri(MVT::i32, Reg32, /*IsKill=*/true, Mask); + ResultReg = emitAnd_ri(MVT::i32, Reg32, Mask); assert(ResultReg && "Unexpected AND instruction emission failure."); } else { ResultReg = createResultReg(&AArch64::GPR32RegClass); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) - .addReg(SrcReg, getKillRegState(SrcIsKill)); + .addReg(SrcReg); } updateValueMap(I, ResultReg); @@ -4009,7 +3925,7 @@ unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) { DestVT = MVT::i32; if (IsZExt) { - unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, /*TODO:IsKill=*/false, 1); + unsigned ResultReg = emitAnd_ri(MVT::i32, SrcReg, 1); assert(ResultReg && "Unexpected AND instruction emission failure."); if (DestVT == MVT::i64) { // We're ZExt i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the @@ -4029,12 +3945,11 @@ unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) { return 0; } return fastEmitInst_rii(AArch64::SBFMWri, &AArch64::GPR32RegClass, SrcReg, - /*TODO:IsKill=*/false, 0, 0); + 0, 0); } } -unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill) { +unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, unsigned Op1) { unsigned Opc, ZReg; switch (RetVT.SimpleTy) { default: return 0; @@ -4049,32 +3964,27 @@ unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, const TargetRegisterClass *RC = (RetVT == MVT::i64) ? 
&AArch64::GPR64RegClass : &AArch64::GPR32RegClass; - return fastEmitInst_rrr(Opc, RC, Op0, Op0IsKill, Op1, Op1IsKill, - /*IsKill=*/ZReg, true); + return fastEmitInst_rrr(Opc, RC, Op0, Op1, ZReg); } -unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill) { +unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1) { if (RetVT != MVT::i64) return 0; return fastEmitInst_rrr(AArch64::SMADDLrrr, &AArch64::GPR64RegClass, - Op0, Op0IsKill, Op1, Op1IsKill, - AArch64::XZR, /*IsKill=*/true); + Op0, Op1, AArch64::XZR); } -unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill) { +unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1) { if (RetVT != MVT::i64) return 0; return fastEmitInst_rrr(AArch64::UMADDLrrr, &AArch64::GPR64RegClass, - Op0, Op0IsKill, Op1, Op1IsKill, - AArch64::XZR, /*IsKill=*/true); + Op0, Op1, AArch64::XZR); } -unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, - unsigned Op1Reg, bool Op1IsKill) { +unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, + unsigned Op1Reg) { unsigned Opc = 0; bool NeedTrunc = false; uint64_t Mask = 0; @@ -4088,20 +3998,17 @@ unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, const TargetRegisterClass *RC = (RetVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; - if (NeedTrunc) { - Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask); - Op1IsKill = true; - } - unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg, - Op1IsKill); if (NeedTrunc) - ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); + Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask); + + unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg); + if (NeedTrunc) + ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask); return ResultReg; } unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0, - bool Op0IsKill, uint64_t Shift, - bool IsZExt) { + uint64_t Shift, bool IsZExt) { assert(RetVT.SimpleTy >= SrcVT.SimpleTy && "Unexpected source/return type pair."); assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 || @@ -4123,7 +4030,7 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0, unsigned ResultReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) - .addReg(Op0, getKillRegState(Op0IsKill)); + .addReg(Op0); return ResultReg; } else return emitIntExt(SrcVT, Op0, RetVT, IsZExt); @@ -4171,16 +4078,15 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBREG_TO_REG), TmpReg) .addImm(0) - .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op0) .addImm(AArch64::sub_32); Op0 = TmpReg; - Op0IsKill = true; } - return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS); + return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS); } -unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, - unsigned Op1Reg, bool Op1IsKill) { +unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, + unsigned Op1Reg) { unsigned Opc = 0; bool NeedTrunc = false; uint64_t Mask = 0; @@ -4195,20 +4101,17 @@ unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, const TargetRegisterClass *RC = (RetVT == MVT::i64) ? 
&AArch64::GPR64RegClass : &AArch64::GPR32RegClass; if (NeedTrunc) { - Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Op0IsKill, Mask); - Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask); - Op0IsKill = Op1IsKill = true; + Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Mask); + Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask); } - unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg, - Op1IsKill); + unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg); if (NeedTrunc) - ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); + ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask); return ResultReg; } unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, - bool Op0IsKill, uint64_t Shift, - bool IsZExt) { + uint64_t Shift, bool IsZExt) { assert(RetVT.SimpleTy >= SrcVT.SimpleTy && "Unexpected source/return type pair."); assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 || @@ -4230,7 +4133,7 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, unsigned ResultReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) - .addReg(Op0, getKillRegState(Op0IsKill)); + .addReg(Op0); return ResultReg; } else return emitIntExt(SrcVT, Op0, RetVT, IsZExt); @@ -4274,7 +4177,6 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, Op0 = emitIntExt(SrcVT, Op0, RetVT, IsZExt); if (!Op0) return 0; - Op0IsKill = true; SrcVT = RetVT; SrcBits = SrcVT.getSizeInBits(); IsZExt = true; @@ -4292,16 +4194,15 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBREG_TO_REG), TmpReg) .addImm(0) - .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op0) .addImm(AArch64::sub_32); Op0 = TmpReg; - Op0IsKill = true; } - return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS); + return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS); } -unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, - unsigned Op1Reg, bool Op1IsKill) { +unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, + unsigned Op1Reg) { unsigned Opc = 0; bool NeedTrunc = false; uint64_t Mask = 0; @@ -4317,19 +4218,16 @@ unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg, bool Op0IsKill, (RetVT == MVT::i64) ? 
&AArch64::GPR64RegClass : &AArch64::GPR32RegClass; if (NeedTrunc) { Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*isZExt=*/false); - Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Op1IsKill, Mask); - Op0IsKill = Op1IsKill = true; + Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask); } - unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op0IsKill, Op1Reg, - Op1IsKill); + unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg); if (NeedTrunc) - ResultReg = emitAnd_ri(MVT::i32, ResultReg, /*IsKill=*/true, Mask); + ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask); return ResultReg; } unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, - bool Op0IsKill, uint64_t Shift, - bool IsZExt) { + uint64_t Shift, bool IsZExt) { assert(RetVT.SimpleTy >= SrcVT.SimpleTy && "Unexpected source/return type pair."); assert((SrcVT == MVT::i1 || SrcVT == MVT::i8 || SrcVT == MVT::i16 || @@ -4351,7 +4249,7 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, unsigned ResultReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) - .addReg(Op0, getKillRegState(Op0IsKill)); + .addReg(Op0); return ResultReg; } else return emitIntExt(SrcVT, Op0, RetVT, IsZExt); @@ -4401,12 +4299,11 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBREG_TO_REG), TmpReg) .addImm(0) - .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op0) .addImm(AArch64::sub_32); Op0 = TmpReg; - Op0IsKill = true; } - return fastEmitInst_rii(Opc, RC, Op0, Op0IsKill, ImmR, ImmS); + return fastEmitInst_rii(Opc, RC, Op0, ImmR, ImmS); } unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, @@ -4467,7 +4364,7 @@ unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, const TargetRegisterClass *RC = (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; - return fastEmitInst_rii(Opc, RC, SrcReg, /*TODO:IsKill=*/false, 0, Imm); + return fastEmitInst_rii(Opc, RC, SrcReg, 0, Imm); } static bool isZExtLoad(const MachineInstr *LI) { @@ -4590,7 +4487,6 @@ bool AArch64FastISel::selectIntExt(const Instruction *I) { unsigned SrcReg = getRegForValue(I->getOperand(0)); if (!SrcReg) return false; - bool SrcIsKill = hasTrivialKill(I->getOperand(0)); // Try to optimize already sign-/zero-extended values from function arguments. bool IsZExt = isa(I); @@ -4601,17 +4497,10 @@ bool AArch64FastISel::selectIntExt(const Instruction *I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBREG_TO_REG), ResultReg) .addImm(0) - .addReg(SrcReg, getKillRegState(SrcIsKill)) + .addReg(SrcReg) .addImm(AArch64::sub_32); SrcReg = ResultReg; } - // Conservatively clear all kill flags from all uses, because we are - // replacing a sign-/zero-extend instruction at IR level with a nop at MI - // level. The result of the instruction at IR level might have been - // trivially dead, which is now not longer true. 
- unsigned UseReg = lookUpRegForValue(I); - if (UseReg) - MRI.clearKillFlags(UseReg); updateValueMap(I, SrcReg); return true; @@ -4651,23 +4540,18 @@ bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) { unsigned Src0Reg = getRegForValue(I->getOperand(0)); if (!Src0Reg) return false; - bool Src0IsKill = hasTrivialKill(I->getOperand(0)); unsigned Src1Reg = getRegForValue(I->getOperand(1)); if (!Src1Reg) return false; - bool Src1IsKill = hasTrivialKill(I->getOperand(1)); const TargetRegisterClass *RC = (DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass; - unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, /*IsKill=*/false, - Src1Reg, /*IsKill=*/false); + unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, Src1Reg); assert(QuotReg && "Unexpected DIV instruction emission failure."); // The remainder is computed as numerator - (quotient * denominator) using the // MSUB instruction. - unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, /*IsKill=*/true, - Src1Reg, Src1IsKill, Src0Reg, - Src0IsKill); + unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, Src1Reg, Src0Reg); updateValueMap(I, ResultReg); return true; } @@ -4715,10 +4599,9 @@ bool AArch64FastISel::selectMul(const Instruction *I) { unsigned Src0Reg = getRegForValue(Src0); if (!Src0Reg) return false; - bool Src0IsKill = hasTrivialKill(Src0); unsigned ResultReg = - emitLSL_ri(VT, SrcVT, Src0Reg, Src0IsKill, ShiftVal, IsZExt); + emitLSL_ri(VT, SrcVT, Src0Reg, ShiftVal, IsZExt); if (ResultReg) { updateValueMap(I, ResultReg); @@ -4729,14 +4612,12 @@ bool AArch64FastISel::selectMul(const Instruction *I) { unsigned Src0Reg = getRegForValue(I->getOperand(0)); if (!Src0Reg) return false; - bool Src0IsKill = hasTrivialKill(I->getOperand(0)); unsigned Src1Reg = getRegForValue(I->getOperand(1)); if (!Src1Reg) return false; - bool Src1IsKill = hasTrivialKill(I->getOperand(1)); - unsigned ResultReg = emitMul_rr(VT, Src0Reg, Src0IsKill, Src1Reg, Src1IsKill); + unsigned ResultReg = emitMul_rr(VT, Src0Reg, Src1Reg); if (!ResultReg) return false; @@ -4782,18 +4663,17 @@ bool AArch64FastISel::selectShift(const Instruction *I) { unsigned Op0Reg = getRegForValue(Op0); if (!Op0Reg) return false; - bool Op0IsKill = hasTrivialKill(Op0); switch (I->getOpcode()) { default: llvm_unreachable("Unexpected instruction."); case Instruction::Shl: - ResultReg = emitLSL_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt); + ResultReg = emitLSL_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt); break; case Instruction::AShr: - ResultReg = emitASR_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt); + ResultReg = emitASR_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt); break; case Instruction::LShr: - ResultReg = emitLSR_ri(RetVT, SrcVT, Op0Reg, Op0IsKill, ShiftVal, IsZExt); + ResultReg = emitLSR_ri(RetVT, SrcVT, Op0Reg, ShiftVal, IsZExt); break; } if (!ResultReg) @@ -4806,24 +4686,22 @@ bool AArch64FastISel::selectShift(const Instruction *I) { unsigned Op0Reg = getRegForValue(I->getOperand(0)); if (!Op0Reg) return false; - bool Op0IsKill = hasTrivialKill(I->getOperand(0)); unsigned Op1Reg = getRegForValue(I->getOperand(1)); if (!Op1Reg) return false; - bool Op1IsKill = hasTrivialKill(I->getOperand(1)); unsigned ResultReg = 0; switch (I->getOpcode()) { default: llvm_unreachable("Unexpected instruction."); case Instruction::Shl: - ResultReg = emitLSL_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill); + ResultReg = emitLSL_rr(RetVT, Op0Reg, Op1Reg); break; case Instruction::AShr: - ResultReg = emitASR_rr(RetVT, 
Op0Reg, Op0IsKill, Op1Reg, Op1IsKill); + ResultReg = emitASR_rr(RetVT, Op0Reg, Op1Reg); break; case Instruction::LShr: - ResultReg = emitLSR_rr(RetVT, Op0Reg, Op0IsKill, Op1Reg, Op1IsKill); + ResultReg = emitLSR_rr(RetVT, Op0Reg, Op1Reg); break; } @@ -4865,9 +4743,8 @@ bool AArch64FastISel::selectBitCast(const Instruction *I) { unsigned Op0Reg = getRegForValue(I->getOperand(0)); if (!Op0Reg) return false; - bool Op0IsKill = hasTrivialKill(I->getOperand(0)); - unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg, Op0IsKill); + unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg); if (!ResultReg) return false; @@ -4930,10 +4807,9 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) { unsigned Src0Reg = getRegForValue(I->getOperand(0)); if (!Src0Reg) return false; - bool Src0IsKill = hasTrivialKill(I->getOperand(0)); if (cast(I)->isExact()) { - unsigned ResultReg = emitASR_ri(VT, VT, Src0Reg, Src0IsKill, Lg2); + unsigned ResultReg = emitASR_ri(VT, VT, Src0Reg, Lg2); if (!ResultReg) return false; updateValueMap(I, ResultReg); @@ -4941,12 +4817,12 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) { } int64_t Pow2MinusOne = (1ULL << Lg2) - 1; - unsigned AddReg = emitAdd_ri_(VT, Src0Reg, /*IsKill=*/false, Pow2MinusOne); + unsigned AddReg = emitAdd_ri_(VT, Src0Reg, Pow2MinusOne); if (!AddReg) return false; // (Src0 < 0) ? Pow2 - 1 : 0; - if (!emitICmp_ri(VT, Src0Reg, /*IsKill=*/false, 0)) + if (!emitICmp_ri(VT, Src0Reg, 0)) return false; unsigned SelectOpc; @@ -4958,9 +4834,8 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) { SelectOpc = AArch64::CSELWr; RC = &AArch64::GPR32RegClass; } - unsigned SelectReg = - fastEmitInst_rri(SelectOpc, RC, AddReg, /*IsKill=*/true, Src0Reg, - Src0IsKill, AArch64CC::LT); + unsigned SelectReg = fastEmitInst_rri(SelectOpc, RC, AddReg, Src0Reg, + AArch64CC::LT); if (!SelectReg) return false; @@ -4969,10 +4844,10 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) { unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; unsigned ResultReg; if (C.isNegative()) - ResultReg = emitAddSub_rs(/*UseAdd=*/false, VT, ZeroReg, /*IsKill=*/true, - SelectReg, /*IsKill=*/true, AArch64_AM::ASR, Lg2); + ResultReg = emitAddSub_rs(/*UseAdd=*/false, VT, ZeroReg, SelectReg, + AArch64_AM::ASR, Lg2); else - ResultReg = emitASR_ri(VT, VT, SelectReg, /*IsKill=*/true, Lg2); + ResultReg = emitASR_ri(VT, VT, SelectReg, Lg2); if (!ResultReg) return false; @@ -4984,23 +4859,20 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) { /// This is mostly a copy of the existing FastISel getRegForGEPIndex code. We /// have to duplicate it for AArch64, because otherwise we would fail during the /// sign-extend emission. -std::pair AArch64FastISel::getRegForGEPIndex(const Value *Idx) { +unsigned AArch64FastISel::getRegForGEPIndex(const Value *Idx) { unsigned IdxN = getRegForValue(Idx); if (IdxN == 0) // Unhandled operand. Halt "fast" selection and bail. - return std::pair(0, false); - - bool IdxNIsKill = hasTrivialKill(Idx); + return 0; // If the index is smaller or larger than intptr_t, truncate or extend it. 
MVT PtrVT = TLI.getPointerTy(DL); EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false); if (IdxVT.bitsLT(PtrVT)) { IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, /*isZExt=*/false); - IdxNIsKill = true; } else if (IdxVT.bitsGT(PtrVT)) llvm_unreachable("AArch64 FastISel doesn't support types larger than i64"); - return std::pair(IdxN, IdxNIsKill); + return IdxN; } /// This is mostly a copy of the existing FastISel GEP code, but we have to @@ -5014,7 +4886,6 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) { unsigned N = getRegForValue(I->getOperand(0)); if (!N) return false; - bool NIsKill = hasTrivialKill(I->getOperand(0)); // Keep a running tab of the total offset to coalesce multiple N = N + Offset // into a single N = N + TotalOffset. @@ -5041,18 +4912,15 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) { continue; } if (TotalOffs) { - N = emitAdd_ri_(VT, N, NIsKill, TotalOffs); + N = emitAdd_ri_(VT, N, TotalOffs); if (!N) return false; - NIsKill = true; TotalOffs = 0; } // N = N + Idx * ElementSize; uint64_t ElementSize = DL.getTypeAllocSize(Ty); - std::pair Pair = getRegForGEPIndex(Idx); - unsigned IdxN = Pair.first; - bool IdxNIsKill = Pair.second; + unsigned IdxN = getRegForGEPIndex(Idx); if (!IdxN) return false; @@ -5060,18 +4928,17 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) { unsigned C = fastEmit_i(VT, VT, ISD::Constant, ElementSize); if (!C) return false; - IdxN = emitMul_rr(VT, IdxN, IdxNIsKill, C, true); + IdxN = emitMul_rr(VT, IdxN, C); if (!IdxN) return false; - IdxNIsKill = true; } - N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill); + N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN); if (!N) return false; } } if (TotalOffs) { - N = emitAdd_ri_(VT, N, NIsKill, TotalOffs); + N = emitAdd_ri_(VT, N, TotalOffs); if (!N) return false; } diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index da1d9af8d5b..73cb3a21882 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -136,16 +136,13 @@ class ARMFastISel final : public FastISel { // Code from FastISel.cpp. 
unsigned fastEmitInst_r(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill); + const TargetRegisterClass *RC, unsigned Op0); unsigned fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill); + unsigned Op0, unsigned Op1); unsigned fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - uint64_t Imm); + unsigned Op0, uint64_t Imm); unsigned fastEmitInst_i(unsigned MachineInstOpcode, const TargetRegisterClass *RC, uint64_t Imm); @@ -299,7 +296,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill) { + unsigned Op0) { Register ResultReg = createResultReg(RC); const MCInstrDesc &II = TII.get(MachineInstOpcode); @@ -308,10 +305,10 @@ unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode, Op0 = constrainOperandRegClass(II, Op0, 1); if (II.getNumDefs() >= 1) { AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, - ResultReg).addReg(Op0, Op0IsKill * RegState::Kill)); + ResultReg).addReg(Op0)); } else { AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) - .addReg(Op0, Op0IsKill * RegState::Kill)); + .addReg(Op0)); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(II.ImplicitDefs[0])); @@ -321,8 +318,7 @@ unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode, unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill) { + unsigned Op0, unsigned Op1) { unsigned ResultReg = createResultReg(RC); const MCInstrDesc &II = TII.get(MachineInstOpcode); @@ -334,12 +330,12 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, if (II.getNumDefs() >= 1) { AddOptionalDefs( BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) - .addReg(Op0, Op0IsKill * RegState::Kill) - .addReg(Op1, Op1IsKill * RegState::Kill)); + .addReg(Op0) + .addReg(Op1)); } else { AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) - .addReg(Op0, Op0IsKill * RegState::Kill) - .addReg(Op1, Op1IsKill * RegState::Kill)); + .addReg(Op0) + .addReg(Op1)); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(II.ImplicitDefs[0])); @@ -349,8 +345,7 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - uint64_t Imm) { + unsigned Op0, uint64_t Imm) { unsigned ResultReg = createResultReg(RC); const MCInstrDesc &II = TII.get(MachineInstOpcode); @@ -360,11 +355,11 @@ unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode, if (II.getNumDefs() >= 1) { AddOptionalDefs( BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) - .addReg(Op0, Op0IsKill * RegState::Kill) + .addReg(Op0) .addImm(Imm)); } else { AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) - .addReg(Op0, Op0IsKill * RegState::Kill) + .addReg(Op0) .addImm(Imm)); AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) @@ -851,7 +846,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) { // get the reg+offset 
into a register. if (needsLowering) { Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, - /*Op0IsKill*/false, Addr.Offset, MVT::i32); + Addr.Offset, MVT::i32); Addr.Offset = 0; } } @@ -1967,8 +1962,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl &Args, break; } case CCValAssign::BCvt: { - unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, - /*TODO: Kill=*/false); + unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg); assert(BC != 0 && "Failed to emit a bitcast!"); Arg = BC; ArgVT = VA.getLocVT(); diff --git a/lib/Target/Mips/MipsFastISel.cpp b/lib/Target/Mips/MipsFastISel.cpp index 8a847eaf661..e963185eaea 100644 --- a/lib/Target/Mips/MipsFastISel.cpp +++ b/lib/Target/Mips/MipsFastISel.cpp @@ -228,14 +228,13 @@ private: unsigned fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill); + unsigned Op0, unsigned Op1); // for some reason, this default is not generated by tablegen // so we explicitly generate it here. unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, uint64_t imm1, - uint64_t imm2, unsigned Op3, bool Op3IsKill) { + unsigned Op0, uint64_t imm1, uint64_t imm2, + unsigned Op3) { return 0; } @@ -2122,8 +2121,7 @@ void MipsFastISel::simplifyAddress(Address &Addr) { unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill) { + unsigned Op0, unsigned Op1) { // We treat the MUL instruction in a special way because it clobbers // the HI0 & LO0 registers. The TableGen definition of this instruction can // mark these registers only as implicitly defined. As a result, the @@ -2136,15 +2134,14 @@ unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) - .addReg(Op0, getKillRegState(Op0IsKill)) - .addReg(Op1, getKillRegState(Op1IsKill)) + .addReg(Op0) + .addReg(Op1) .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead) .addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead); return ResultReg; } - return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op0IsKill, Op1, - Op1IsKill); + return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op1); } namespace llvm { diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp index c181816e31c..0cdf44e31ab 100644 --- a/lib/Target/PowerPC/PPCFastISel.cpp +++ b/lib/Target/PowerPC/PPCFastISel.cpp @@ -112,15 +112,12 @@ class PPCFastISel final : public FastISel { unsigned fastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm) override; unsigned fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - uint64_t Imm); + unsigned Op0, uint64_t Imm); unsigned fastEmitInst_r(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill); + const TargetRegisterClass *RC, unsigned Op0); unsigned fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill); + unsigned Op0, unsigned Op1); bool fastLowerCall(CallLoweringInfo &CLI) override; @@ -2426,7 +2423,7 @@ unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) { // where those regs have another 
meaning. unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, + unsigned Op0, uint64_t Imm) { if (MachineInstOpcode == PPC::ADDI) MRI.setRegClass(Op0, &PPC::GPRC_and_GPRC_NOR0RegClass); @@ -2437,8 +2434,7 @@ unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode, (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass : (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC)); - return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC, - Op0, Op0IsKill, Imm); + return FastISel::fastEmitInst_ri(MachineInstOpcode, UseRC, Op0, Imm); } // Override for instructions with one register operand to avoid use of @@ -2446,12 +2442,12 @@ unsigned PPCFastISel::fastEmitInst_ri(unsigned MachineInstOpcode, // we must be conservative. unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode, const TargetRegisterClass* RC, - unsigned Op0, bool Op0IsKill) { + unsigned Op0) { const TargetRegisterClass *UseRC = (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass : (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC)); - return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill); + return FastISel::fastEmitInst_r(MachineInstOpcode, UseRC, Op0); } // Override for instructions with two register operands to avoid use @@ -2459,14 +2455,12 @@ unsigned PPCFastISel::fastEmitInst_r(unsigned MachineInstOpcode, // so we must be conservative. unsigned PPCFastISel::fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass* RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill) { + unsigned Op0, unsigned Op1) { const TargetRegisterClass *UseRC = (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass : (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC)); - return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill, - Op1, Op1IsKill); + return FastISel::fastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op1); } namespace llvm { diff --git a/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/lib/Target/WebAssembly/WebAssemblyFastISel.cpp index 5b54ffdc251..b9b236a3f57 100644 --- a/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ b/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -1167,7 +1167,7 @@ bool WebAssemblyFastISel::selectBitCast(const Instruction *I) { } Register Reg = fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(), - In, I->getOperand(0)->hasOneUse()); + In); if (!Reg) return false; MachineBasicBlock::iterator Iter = FuncInfo.InsertPt; diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index a04c9d6c045..bd08af81e67 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -89,8 +89,7 @@ private: bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM, MachineMemOperand *MMO = nullptr, bool Aligned = false); - bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, - X86AddressMode &AM, + bool X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM, MachineMemOperand *MMO = nullptr, bool Aligned = false); bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT, @@ -176,9 +175,7 @@ private: unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, unsigned Op0, - bool Op0IsKill, unsigned Op1, bool Op1IsKill, - unsigned Op2, bool Op2IsKill, unsigned Op3, - bool Op3IsKill); + unsigned Op1, unsigned Op2, unsigned Op3); }; } // end anonymous namespace. 
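Every per-target hunk that follows applies the same mechanical rewrite: drop the hasTrivialKill() queries and the paired is-kill flag on each register operand. A minimal before/after sketch, lifted from the X86FastEmitCMoveSelect call site further down (a fragment for orientation, not a standalone compile unit):

    // Before: callers computed kill flags by hand and threaded them through.
    Register RHSReg = getRegForValue(RHS);
    bool RHSIsKill = hasTrivialKill(RHS);
    Register LHSReg = getRegForValue(LHS);
    bool LHSIsKill = hasTrivialKill(LHS);
    Register ResultReg =
        fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);

    // After: the flags are gone; liveness is left to the register allocator.
    Register RHSReg = getRegForValue(RHS);
    Register LHSReg = getRegForValue(LHS);
    Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);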
@@ -487,8 +484,7 @@ bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM, /// type VT. The address is either pre-computed, consisted of a base ptr, Ptr /// and a displacement offset, or a GlobalAddress, /// i.e. V. Return true if it is possible. -bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, - X86AddressMode &AM, +bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM, MachineMemOperand *MMO, bool Aligned) { bool HasSSE1 = Subtarget->hasSSE1(); bool HasSSE2 = Subtarget->hasSSE2(); @@ -508,7 +504,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, Register AndResult = createResultReg(&X86::GR8RegClass); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::AND8ri), AndResult) - .addReg(ValReg, getKillRegState(ValIsKill)).addImm(1); + .addReg(ValReg).addImm(1); ValReg = AndResult; LLVM_FALLTHROUGH; // handle i1 as i8. } @@ -654,7 +650,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1); MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, Desc); - addFullAddress(MIB, AM).addReg(ValReg, getKillRegState(ValIsKill)); + addFullAddress(MIB, AM).addReg(ValReg); if (MMO) MIB->addMemOperand(*FuncInfo.MF, MMO); @@ -702,8 +698,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, if (ValReg == 0) return false; - bool ValKill = hasTrivialKill(Val); - return X86FastEmitStore(VT, ValReg, ValKill, AM, MMO, Aligned); + return X86FastEmitStore(VT, ValReg, AM, MMO, Aligned); } /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of @@ -712,8 +707,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT, unsigned &ResultReg) { - unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, - Src, /*TODO: Kill=*/false); + unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src); if (RR == 0) return false; @@ -945,7 +939,7 @@ redo_gep: (S == 1 || S == 2 || S == 4 || S == 8)) { // Scaled-index addressing. Scale = S; - IndexReg = getRegForGEPIndex(Op).first; + IndexReg = getRegForGEPIndex(Op); if (IndexReg == 0) return false; break; @@ -1262,14 +1256,13 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { if (Outs[0].Flags.isSExt()) return false; // TODO - SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*Op0IsKill=*/false); + SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg); SrcVT = MVT::i8; } unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND; // TODO - SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg, - /*Op0IsKill=*/false); + SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg); } // Make the copy. @@ -1467,8 +1460,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { ResultReg = createResultReg(&X86::GR32RegClass); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0), ResultReg); - ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, - /*Op0IsKill=*/true, X86::sub_8bit); + ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit); if (!ResultReg) return false; break; @@ -1558,7 +1550,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) { MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType()); if (SrcVT == MVT::i1) { // Set the high bits to zero. 
- ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false); + ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg); SrcVT = MVT::i8; if (ResultReg == 0) @@ -1591,11 +1583,10 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8), Result32).addReg(ResultReg); - ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, - /*Op0IsKill=*/true, X86::sub_16bit); + ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit); } else if (DstVT != MVT::i8) { ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND, - ResultReg, /*Op0IsKill=*/true); + ResultReg); if (ResultReg == 0) return false; } @@ -1617,8 +1608,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) { MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType()); if (SrcVT == MVT::i1) { // Set the high bits to zero. - Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg, - /*TODO: Kill=*/false); + Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg); if (ZExtReg == 0) return false; @@ -1637,11 +1627,10 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8), Result32).addReg(ResultReg); - ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, - /*Op0IsKill=*/true, X86::sub_16bit); + ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit); } else if (DstVT != MVT::i8) { ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND, - ResultReg, /*Op0IsKill=*/true); + ResultReg); if (ResultReg == 0) return false; } @@ -1793,8 +1782,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), OpReg) .addReg(KOpReg); - OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, /*Op0IsKill=*/true, - X86::sub_8bit); + OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit); } BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) .addReg(OpReg) @@ -2025,7 +2013,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { // Now reference the 8-bit subreg of the result. ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg, - /*Op0IsKill=*/true, X86::sub_8bit); + X86::sub_8bit); } // Copy the result out of the physreg if we haven't already. 
if (!ResultReg) { @@ -2130,7 +2118,6 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) { Register CondReg = getRegForValue(Cond); if (CondReg == 0) return false; - bool CondIsKill = hasTrivialKill(Cond); // In case OpReg is a K register, COPY to a GPR if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { @@ -2138,12 +2125,11 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) { CondReg = createResultReg(&X86::GR32RegClass); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), CondReg) - .addReg(KCondReg, getKillRegState(CondIsKill)); - CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true, - X86::sub_8bit); + .addReg(KCondReg); + CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit); } BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) - .addReg(CondReg, getKillRegState(CondIsKill)) + .addReg(CondReg) .addImm(1); } @@ -2151,18 +2137,13 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) { const Value *RHS = I->getOperand(2); Register RHSReg = getRegForValue(RHS); - bool RHSIsKill = hasTrivialKill(RHS); - Register LHSReg = getRegForValue(LHS); - bool LHSIsKill = hasTrivialKill(LHS); - if (!LHSReg || !RHSReg) return false; const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo(); unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8); - Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, - LHSReg, LHSIsKill, CC); + Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC); updateValueMap(I, ResultReg); return true; } @@ -2211,17 +2192,9 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) { const Value *RHS = I->getOperand(2); Register LHSReg = getRegForValue(LHS); - bool LHSIsKill = hasTrivialKill(LHS); - Register RHSReg = getRegForValue(RHS); - bool RHSIsKill = hasTrivialKill(RHS); - Register CmpLHSReg = getRegForValue(CmpLHS); - bool CmpLHSIsKill = hasTrivialKill(CmpLHS); - Register CmpRHSReg = getRegForValue(CmpRHS); - bool CmpRHSIsKill = hasTrivialKill(CmpRHS); - if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg) return false; @@ -2235,8 +2208,8 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) { unsigned CmpOpcode = (RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr; - Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpLHSIsKill, - CmpRHSReg, CmpRHSIsKill, CC); + Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpRHSReg, + CC); // Need an IMPLICIT_DEF for the input that is used to generate the upper // bits of the result register since its not based on any of the inputs. @@ -2248,9 +2221,8 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) { // LHS in the input. The mask input comes from the compare. unsigned MovOpcode = (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk; - unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, RHSIsKill, - CmpReg, true, ImplicitDefReg, true, - LHSReg, LHSIsKill); + unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, CmpReg, + ImplicitDefReg, LHSReg); ResultReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, @@ -2269,10 +2241,10 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) { unsigned BlendOpcode = (RetVT == MVT::f32) ? 
X86::VBLENDVPSrr : X86::VBLENDVPDrr; - Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpLHSIsKill, - CmpRHSReg, CmpRHSIsKill, CC); - Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill, - LHSReg, LHSIsKill, CmpReg, true); + Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpRHSReg, + CC); + Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, LHSReg, + CmpReg); ResultReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg); @@ -2291,14 +2263,10 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) { } const TargetRegisterClass *VR128 = &X86::VR128RegClass; - Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill, - CmpRHSReg, CmpRHSIsKill, CC); - Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, - /*Op0IsKill=*/false, LHSReg, LHSIsKill); - Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, - /*Op0IsKill=*/true, RHSReg, RHSIsKill); - Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*Op0IsKill=*/true, - AndReg, /*Op1IsKill=*/true); + Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpRHSReg, CC); + Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, LHSReg); + Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, RHSReg); + Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, AndReg); ResultReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg); @@ -2348,7 +2316,6 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) { Register CondReg = getRegForValue(Cond); if (CondReg == 0) return false; - bool CondIsKill = hasTrivialKill(Cond); // In case OpReg is a K register, COPY to a GPR if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { @@ -2356,12 +2323,11 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) { CondReg = createResultReg(&X86::GR32RegClass); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), CondReg) - .addReg(KCondReg, getKillRegState(CondIsKill)); - CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true, - X86::sub_8bit); + .addReg(KCondReg); + CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit); } BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) - .addReg(CondReg, getKillRegState(CondIsKill)) + .addReg(CondReg) .addImm(1); } @@ -2369,18 +2335,14 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) { const Value *RHS = I->getOperand(2); Register LHSReg = getRegForValue(LHS); - bool LHSIsKill = hasTrivialKill(LHS); - Register RHSReg = getRegForValue(RHS); - bool RHSIsKill = hasTrivialKill(RHS); - if (!LHSReg || !RHSReg) return false; const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); Register ResultReg = - fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC); + fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC); updateValueMap(I, ResultReg); return true; } @@ -2404,12 +2366,11 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) { Register OpReg = getRegForValue(Opnd); if (OpReg == 0) return false; - bool OpIsKill = hasTrivialKill(Opnd); const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); Register ResultReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) - .addReg(OpReg, getKillRegState(OpIsKill)); + .addReg(OpReg); 
updateValueMap(I, ResultReg); return true; } @@ -2479,8 +2440,7 @@ bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) { Register ImplicitDefReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); - Register ResultReg = - fastEmitInst_rr(Opcode, RC, ImplicitDefReg, true, OpReg, false); + Register ResultReg = fastEmitInst_rr(Opcode, RC, ImplicitDefReg, OpReg); updateValueMap(I, ResultReg); return true; } @@ -2577,8 +2537,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) { } // Issue an extract_subreg. - Register ResultReg = fastEmitInst_extractsubreg(MVT::i8, - InputReg, false, + Register ResultReg = fastEmitInst_extractsubreg(MVT::i8, InputReg, X86::sub_8bit); if (!ResultReg) return false; @@ -2614,7 +2573,7 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM, unsigned Reg; bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg); - RV &= X86FastEmitStore(VT, Reg, /*ValIsKill=*/true, DestAM); + RV &= X86FastEmitStore(VT, Reg, DestAM); assert(RV && "Failed to emit load or store??"); unsigned Size = VT.getSizeInBits()/8; @@ -2662,7 +2621,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) { // controlled by MXCSR. unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr : X86::VCVTPS2PHrr; - InputReg = fastEmitInst_ri(Opc, RC, InputReg, false, 4); + InputReg = fastEmitInst_ri(Opc, RC, InputReg, 4); // Move the lower 32-bits of ResultReg to another register of class GR32. Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr @@ -2673,20 +2632,19 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) { // The result value is in the lower 16-bits of ResultReg. unsigned RegIdx = X86::sub_16bit; - ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx); + ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, RegIdx); } else { assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!"); // Explicitly zero-extend the input to 32-bit. - InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg, - /*Op0IsKill=*/false); + InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg); // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr. InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR, - InputReg, /*Op0IsKill=*/true); + InputReg); unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr : X86::VCVTPH2PSrr; - InputReg = fastEmitInst_r(Opc, RC, InputReg, /*Op0IsKill=*/true); + InputReg = fastEmitInst_r(Opc, RC, InputReg); // The result value is in the lower 32-bits of ResultReg. // Emit an explicit copy from register class VR128 to register class FR32. @@ -2937,7 +2895,6 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) { Register LHSReg = getRegForValue(LHS); if (LHSReg == 0) return false; - bool LHSIsKill = hasTrivialKill(LHS); unsigned ResultReg = 0; // Check if we have an immediate version. 
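The special cases in the next hunk exist because x86's one-operand multiplies have fixed register operands: MUL8r/IMUL8r read AL implicitly and leave the widened product in AX, so the LHS must first be moved into the fixed register (AL for i8) with an explicit COPY. The sequence after this patch, re-lined here for readability, is unchanged apart from losing the kill annotation:

    // i8 multiply: place LHS in the fixed physreg, then issue the
    // one-operand multiply; the product is defined implicitly in AX.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), X86::AL)
        .addReg(LHSReg);
    ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg);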
@@ -2954,21 +2911,17 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) { bool IsDec = BaseOpc == ISD::SUB; BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg) - .addReg(LHSReg, getKillRegState(LHSIsKill)); + .addReg(LHSReg); } else - ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill, - CI->getZExtValue()); + ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, CI->getZExtValue()); } unsigned RHSReg; - bool RHSIsKill; if (!ResultReg) { RHSReg = getRegForValue(RHS); if (RHSReg == 0) return false; - RHSIsKill = hasTrivialKill(RHS); - ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg, - RHSIsKill); + ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, RHSReg); } // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit @@ -2981,9 +2934,9 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) { // the X86::MUL*r instruction. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8]) - .addReg(LHSReg, getKillRegState(LHSIsKill)); + .addReg(LHSReg); ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8], - TLI.getRegClassFor(VT), RHSReg, RHSIsKill); + TLI.getRegClassFor(VT), RHSReg); } else if (BaseOpc == X86ISD::SMUL && !ResultReg) { static const uint16_t MULOpc[] = { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr }; @@ -2992,13 +2945,11 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) { // X86::IMUL8r instruction. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), X86::AL) - .addReg(LHSReg, getKillRegState(LHSIsKill)); - ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg, - RHSIsKill); + .addReg(LHSReg); + ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg); } else ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8], - TLI.getRegClassFor(VT), LHSReg, LHSIsKill, - RHSReg, RHSIsKill); + TLI.getRegClassFor(VT), LHSReg, RHSReg); } if (!ResultReg) @@ -3309,8 +3260,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) { if (!isTypeLegal(PrevVal->getType(), VT)) return false; - ResultReg = - fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1); + ResultReg = fastEmit_ri(VT, VT, ISD::AND, ResultReg, 1); } else { if (!isTypeLegal(Val->getType(), VT) || (VT.isVector() && VT.getVectorElementType() == MVT::i1)) @@ -3378,7 +3328,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) { // Handle zero-extension from i1 to i8, which is common. if (ArgVT == MVT::i1) { // Set the high bits to zero. 
- ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg, /*TODO: Kill=*/false); + ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg); ArgVT = MVT::i8; if (ArgReg == 0) @@ -3408,8 +3358,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) { break; } case CCValAssign::BCvt: { - ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg, - /*TODO: Kill=*/false); + ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg); assert(ArgReg && "Failed to emit a bitcast!"); ArgVT = VA.getLocVT(); break; @@ -3462,8 +3411,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) { if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO)) return false; } else { - bool ValIsKill = hasTrivialKill(ArgVal); - if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO)) + if (!X86FastEmitStore(ArgVT, ArgReg, AM, MMO)) return false; } } @@ -3727,11 +3675,9 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) { default: llvm_unreachable("Unexpected value type"); case MVT::i1: case MVT::i8: - return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Op0IsKill=*/true, - X86::sub_8bit); + return fastEmitInst_extractsubreg(MVT::i8, SrcReg, X86::sub_8bit); case MVT::i16: - return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Op0IsKill=*/true, - X86::sub_16bit); + return fastEmitInst_extractsubreg(MVT::i16, SrcReg, X86::sub_16bit); case MVT::i32: return SrcReg; case MVT::i64: { @@ -4001,10 +3947,8 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo, unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill, - unsigned Op2, bool Op2IsKill, - unsigned Op3, bool Op3IsKill) { + unsigned Op0, unsigned Op1, + unsigned Op2, unsigned Op3) { const MCInstrDesc &II = TII.get(MachineInstOpcode); Register ResultReg = createResultReg(RC); @@ -4015,16 +3959,16 @@ unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode, if (II.getNumDefs() >= 1) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) - .addReg(Op0, getKillRegState(Op0IsKill)) - .addReg(Op1, getKillRegState(Op1IsKill)) - .addReg(Op2, getKillRegState(Op2IsKill)) - .addReg(Op3, getKillRegState(Op3IsKill)); + .addReg(Op0) + .addReg(Op1) + .addReg(Op2) + .addReg(Op3); else { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) - .addReg(Op0, getKillRegState(Op0IsKill)) - .addReg(Op1, getKillRegState(Op1IsKill)) - .addReg(Op2, getKillRegState(Op2IsKill)) - .addReg(Op3, getKillRegState(Op3IsKill)); + .addReg(Op0) + .addReg(Op1) + .addReg(Op2) + .addReg(Op3); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); } diff --git a/test/CodeGen/AArch64/arm64-fast-isel-rem.ll b/test/CodeGen/AArch64/arm64-fast-isel-rem.ll index 1c245a3bb9a..82da08e8692 100644 --- a/test/CodeGen/AArch64/arm64-fast-isel-rem.ll +++ b/test/CodeGen/AArch64/arm64-fast-isel-rem.ll @@ -6,7 +6,7 @@ ; CHECK-SSA: [[QUOTREG:%[0-9]+]]:gpr32 = SDIVWr ; CHECK-SSA-NOT: [[QUOTREG]] = -; CHECK-SSA: {{%[0-9]+}}:gpr32 = MSUBWrrr killed [[QUOTREG]] +; CHECK-SSA: {{%[0-9]+}}:gpr32 = MSUBWrrr [[QUOTREG]] ; CHECK-SSA-LABEL: Machine code for function t2 diff --git a/utils/TableGen/FastISelEmitter.cpp b/utils/TableGen/FastISelEmitter.cpp index 2892c7d818d..2defcebe1b7 100644 --- a/utils/TableGen/FastISelEmitter.cpp +++ b/utils/TableGen/FastISelEmitter.cpp @@ -294,7 +294,7 @@ struct OperandsSignature { for (unsigned i = 0, e = Operands.size(); i != e; ++i) { OS << LS; if (Operands[i].isReg()) { - OS << 
"unsigned Op" << i << ", bool Op" << i << "IsKill"; + OS << "unsigned Op" << i; } else if (Operands[i].isImm()) { OS << "uint64_t imm" << i; } else if (Operands[i].isFP()) { @@ -316,7 +316,7 @@ struct OperandsSignature { OS << LS; if (Operands[i].isReg()) { - OS << "Op" << i << ", Op" << i << "IsKill"; + OS << "Op" << i; } else if (Operands[i].isImm()) { OS << "imm" << i; } else if (Operands[i].isFP()) { @@ -332,7 +332,7 @@ struct OperandsSignature { for (unsigned i = 0, e = Operands.size(); i != e; ++i) { OS << LS; if (Operands[i].isReg()) { - OS << "Op" << i << ", Op" << i << "IsKill"; + OS << "Op" << i; } else if (Operands[i].isImm()) { OS << "imm" << i; } else if (Operands[i].isFP()) { @@ -673,7 +673,7 @@ void FastISelMap::emitInstructionCode(raw_ostream &OS, OS << ");\n"; } else { OS << "extractsubreg(" << RetVTName - << ", Op0, Op0IsKill, " << Memo.SubRegNo << ");\n"; + << ", Op0, " << Memo.SubRegNo << ");\n"; } if (!PredicateCheck.empty()) {