//===- llvm/CodeGen/MachineBasicBlock.h -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Collect the sequence of machine instructions for a basic block.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H
#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Printable.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <string>
#include <vector>
namespace llvm {
class BasicBlock;
class MachineFunction;
class MCSymbol;
class ModuleSlotTracker;
class Pass;
class SlotIndexes;
class StringRef;
class raw_ostream;
class TargetRegisterClass;
class TargetRegisterInfo;
template <> struct ilist_traits<MachineInstr> {
private:
friend class MachineBasicBlock; // Set by the owning MachineBasicBlock.
MachineBasicBlock *Parent;
using instr_iterator =
simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator;
public:
void addNodeToList(MachineInstr *N);
void removeNodeFromList(MachineInstr *N);
void transferNodesFromList(ilist_traits &FromList, instr_iterator First,
instr_iterator Last);
void deleteNode(MachineInstr *MI);
};
class MachineBasicBlock
: public ilist_node_with_parent<MachineBasicBlock, MachineFunction> {
public:
/// Pair of physical register and lane mask.
/// This is not a simple std::pair typedef because the members should be
/// named clearly, as they both have an integer type.
struct RegisterMaskPair {
public:
MCPhysReg PhysReg;
LaneBitmask LaneMask;
RegisterMaskPair(MCPhysReg PhysReg, LaneBitmask LaneMask)
: PhysReg(PhysReg), LaneMask(LaneMask) {}
};
private:
using Instructions = ilist<MachineInstr, ilist_sentinel_tracking<true>>;
Instructions Insts;
const BasicBlock *BB;
int Number;
MachineFunction *xParent;
/// Keep track of the predecessor / successor basic blocks.
std::vector<MachineBasicBlock *> Predecessors;
std::vector<MachineBasicBlock *> Successors;
/// Keep track of the probabilities to the successors. This vector has the
/// same order as Successors, or it is empty if we don't use it (i.e., the
/// optimization is disabled).
std::vector<BranchProbability> Probs;
using probability_iterator = std::vector<BranchProbability>::iterator;
using const_probability_iterator =
std::vector<BranchProbability>::const_iterator;
Optional<uint64_t> IrrLoopHeaderWeight;
/// Keep track of the physical registers that are live in to the basic block.
using LiveInVector = std::vector<RegisterMaskPair>;
LiveInVector LiveIns;
/// Alignment of the basic block. Zero if the basic block does not need to be
/// aligned. The alignment is specified as log2(bytes).
unsigned Alignment = 0;
/// Indicate that this basic block is entered via an exception handler.
bool IsEHPad = false;
/// Indicate that this basic block is potentially the target of an indirect
/// branch.
bool AddressTaken = false;
/// Indicate that this basic block is the entry block of an EH scope, i.e.,
/// the block that used to have a catchpad or cleanuppad instruction in the
/// LLVM IR.
bool IsEHScopeEntry = false;
/// Indicate that this basic block is the entry block of an EH funclet.
bool IsEHFuncletEntry = false;
/// Indicate that this basic block is the entry block of a cleanup funclet.
bool IsCleanupFuncletEntry = false;
/// Since getSymbol is a relatively heavy-weight operation, the symbol
/// is only computed once and is cached.
mutable MCSymbol *CachedMCSymbol = nullptr;
// Intrusive list support
MachineBasicBlock() = default;
explicit MachineBasicBlock(MachineFunction &MF, const BasicBlock *BB);
~MachineBasicBlock();
// MachineBasicBlocks are allocated and owned by MachineFunction.
friend class MachineFunction;
public:
/// Return the LLVM basic block that this instance corresponded to originally.
/// Note that this may be NULL if this instance does not correspond directly
/// to an LLVM basic block.
const BasicBlock *getBasicBlock() const { return BB; }
/// Return the name of the corresponding LLVM basic block, or an empty string.
StringRef getName() const;
/// Return a formatted string to identify this block and its parent function.
std::string getFullName() const;
/// Test whether this block is potentially the target of an indirect branch.
bool hasAddressTaken() const { return AddressTaken; }
/// Set this block to reflect that it potentially is the target of an indirect
/// branch.
void setHasAddressTaken() { AddressTaken = true; }
/// Return the MachineFunction containing this basic block.
const MachineFunction *getParent() const { return xParent; }
MachineFunction *getParent() { return xParent; }
using instr_iterator = Instructions::iterator;
using const_instr_iterator = Instructions::const_iterator;
using reverse_instr_iterator = Instructions::reverse_iterator;
using const_reverse_instr_iterator = Instructions::const_reverse_iterator;
using iterator = MachineInstrBundleIterator<MachineInstr>;
using const_iterator = MachineInstrBundleIterator<const MachineInstr>;
using reverse_iterator = MachineInstrBundleIterator<MachineInstr, true>;
using const_reverse_iterator =
MachineInstrBundleIterator<const MachineInstr, true>;
unsigned size() const { return (unsigned)Insts.size(); }
bool empty() const { return Insts.empty(); }
MachineInstr &instr_front() { return Insts.front(); }
MachineInstr &instr_back() { return Insts.back(); }
const MachineInstr &instr_front() const { return Insts.front(); }
const MachineInstr &instr_back() const { return Insts.back(); }
MachineInstr &front() { return Insts.front(); }
MachineInstr &back() { return *--end(); }
const MachineInstr &front() const { return Insts.front(); }
const MachineInstr &back() const { return *--end(); }
instr_iterator instr_begin() { return Insts.begin(); }
const_instr_iterator instr_begin() const { return Insts.begin(); }
instr_iterator instr_end() { return Insts.end(); }
const_instr_iterator instr_end() const { return Insts.end(); }
reverse_instr_iterator instr_rbegin() { return Insts.rbegin(); }
const_reverse_instr_iterator instr_rbegin() const { return Insts.rbegin(); }
reverse_instr_iterator instr_rend () { return Insts.rend(); }
const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
using instr_range = iterator_range<instr_iterator>;
using const_instr_range = iterator_range<const_instr_iterator>;
instr_range instrs() { return instr_range(instr_begin(), instr_end()); }
const_instr_range instrs() const {
return const_instr_range(instr_begin(), instr_end());
}
iterator begin() { return instr_begin(); }
const_iterator begin() const { return instr_begin(); }
iterator end () { return instr_end(); }
const_iterator end () const { return instr_end(); }
reverse_iterator rbegin() {
return reverse_iterator::getAtBundleBegin(instr_rbegin());
}
const_reverse_iterator rbegin() const {
return const_reverse_iterator::getAtBundleBegin(instr_rbegin());
}
reverse_iterator rend() { return reverse_iterator(instr_rend()); }
const_reverse_iterator rend() const {
return const_reverse_iterator(instr_rend());
}
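// Usage sketch (assumes a hypothetical MachineBasicBlock &MBB): reverse
// iteration visits bundles from the bottom of the block upwards.
//
//   for (MachineBasicBlock::reverse_iterator RI = MBB.rbegin(), RE = MBB.rend();
//        RI != RE; ++RI) {
//     MachineInstr &MI = *RI;
//     // ... inspect MI here, from the bottom of the block upwards.
//   }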
/// Support for MachineInstr::getNextNode().
static Instructions MachineBasicBlock::*getSublistAccess(MachineInstr *) {
return &MachineBasicBlock::Insts;
}
inline iterator_range<iterator> terminators() {
return make_range(getFirstTerminator(), end());
}
inline iterator_range<const_iterator> terminators() const {
return make_range(getFirstTerminator(), end());
}
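// Usage sketch (assumes a hypothetical MachineBasicBlock &MBB): walk the
// terminator sequence at the end of the block.
//
//   for (MachineInstr &Term : MBB.terminators()) {
//     if (Term.isUnconditionalBranch())
//       /* the block ends in an explicit jump rather than a fallthrough */;
//   }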
/// Returns a range that iterates over the phis in the basic block.
inline iterator_range<iterator> phis() {
return make_range(begin(), getFirstNonPHI());
}
inline iterator_range<const_iterator> phis() const {
return const_cast<MachineBasicBlock *>(this)->phis();
}
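// Usage sketch (assumes a hypothetical MachineBasicBlock &MBB): visit only
// the PHI instructions at the top of the block.
//
//   for (MachineInstr &Phi : MBB.phis())
//     assert(Phi.isPHI() && "phis() yields only PHI instructions");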
// Machine-CFG iterators
using pred_iterator = std::vector<MachineBasicBlock *>::iterator;
using const_pred_iterator = std::vector<MachineBasicBlock *>::const_iterator;
using succ_iterator = std::vector<MachineBasicBlock *>::iterator;
using const_succ_iterator = std::vector<MachineBasicBlock *>::const_iterator;
using pred_reverse_iterator =
std::vector<MachineBasicBlock *>::reverse_iterator;
using const_pred_reverse_iterator =
std::vector<MachineBasicBlock *>::const_reverse_iterator;
using succ_reverse_iterator =
std::vector<MachineBasicBlock *>::reverse_iterator;
using const_succ_reverse_iterator =
std::vector<MachineBasicBlock *>::const_reverse_iterator;
pred_iterator pred_begin() { return Predecessors.begin(); }
const_pred_iterator pred_begin() const { return Predecessors.begin(); }
pred_iterator pred_end() { return Predecessors.end(); }
const_pred_iterator pred_end() const { return Predecessors.end(); }
pred_reverse_iterator pred_rbegin()
{ return Predecessors.rbegin();}
const_pred_reverse_iterator pred_rbegin() const
{ return Predecessors.rbegin();}
pred_reverse_iterator pred_rend()
{ return Predecessors.rend(); }
const_pred_reverse_iterator pred_rend() const
{ return Predecessors.rend(); }
unsigned pred_size() const {
return (unsigned)Predecessors.size();
}
bool pred_empty() const { return Predecessors.empty(); }
succ_iterator succ_begin() { return Successors.begin(); }
const_succ_iterator succ_begin() const { return Successors.begin(); }
succ_iterator succ_end() { return Successors.end(); }
const_succ_iterator succ_end() const { return Successors.end(); }
succ_reverse_iterator succ_rbegin()
{ return Successors.rbegin(); }
const_succ_reverse_iterator succ_rbegin() const
{ return Successors.rbegin(); }
succ_reverse_iterator succ_rend()
{ return Successors.rend(); }
const_succ_reverse_iterator succ_rend() const
{ return Successors.rend(); }
unsigned succ_size() const {
return (unsigned)Successors.size();
}
bool succ_empty() const { return Successors.empty(); }
inline iterator_range<pred_iterator> predecessors() {
return make_range(pred_begin(), pred_end());
}
inline iterator_range<const_pred_iterator> predecessors() const {
return make_range(pred_begin(), pred_end());
}
inline iterator_range<succ_iterator> successors() {
return make_range(succ_begin(), succ_end());
}
inline iterator_range<const_succ_iterator> successors() const {
return make_range(succ_begin(), succ_end());
}
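// Usage sketch (assumes a hypothetical MachineBasicBlock &MBB and a
// hypothetical visit() callback): walk the machine CFG neighbours.
//
//   for (MachineBasicBlock *Succ : MBB.successors())
//     visit(Succ);
//   for (MachineBasicBlock *Pred : MBB.predecessors())
//     visit(Pred);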
// LiveIn management methods.
/// Adds the specified register as a live in. Note that it is an error to add
/// the same register to the same set more than once unless the intention is
/// to call sortUniqueLiveIns after all registers are added.
void addLiveIn(MCPhysReg PhysReg,
LaneBitmask LaneMask = LaneBitmask::getAll()) {
LiveIns.push_back(RegisterMaskPair(PhysReg, LaneMask));
}
void addLiveIn(const RegisterMaskPair &RegMaskPair) {
LiveIns.push_back(RegMaskPair);
}
/// Sorts and uniques the LiveIns vector. It can be significantly faster to do
/// this than repeatedly calling isLiveIn before calling addLiveIn for every
/// LiveIn insertion.
void sortUniqueLiveIns();
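// Usage sketch (assumes a hypothetical MachineBasicBlock &MBB and register
// list Regs): push live-ins without duplicate checks, then sort and unique
// them once, as described above.
//
//   for (MCPhysReg Reg : Regs)
//     MBB.addLiveIn(Reg);
//   MBB.sortUniqueLiveIns();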
/// Clear live in list.
void clearLiveIns();
/// Add PhysReg as live in to this block, and ensure that there is a copy of
/// PhysReg to a virtual register of class RC. Return the virtual register
/// that is a copy of the live in PhysReg.
unsigned addLiveIn(MCPhysReg PhysReg, const TargetRegisterClass *RC);
/// Remove the specified register from the live in set.
void removeLiveIn(MCPhysReg Reg,
LaneBitmask LaneMask = LaneBitmask::getAll());
/// Return true if the specified register is in the live in set.
bool isLiveIn(MCPhysReg Reg,
LaneBitmask LaneMask = LaneBitmask::getAll()) const;
// Iteration support for live in sets. These sets are kept in sorted
// order by their register number.
using livein_iterator = LiveInVector::const_iterator;
#ifndef NDEBUG
/// Unlike livein_begin, this method does not check that the liveness
/// information is accurate. Still, for debug purposes it may be useful
/// to have iterators that won't assert if the liveness information
/// is not current.
livein_iterator livein_begin_dbg() const { return LiveIns.begin(); }
iterator_range<livein_iterator> liveins_dbg() const {
return make_range(livein_begin_dbg(), livein_end());
}
#endif
livein_iterator livein_begin() const;
livein_iterator livein_end() const { return LiveIns.end(); }
bool livein_empty() const { return LiveIns.empty(); }
iterator_range<livein_iterator> liveins() const {
return make_range(livein_begin(), livein_end());
}
/// Remove an entry from the live-in set and return an iterator to the next.
livein_iterator removeLiveIn(livein_iterator I);
/// Get the clobber mask for the start of this basic block. Funclets use this
/// to prevent register allocation across funclet transitions.
const uint32_t *getBeginClobberMask(const TargetRegisterInfo *TRI) const;
/// Get the clobber mask for the end of the basic block.
/// \see getBeginClobberMask()
const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;
/// Return alignment of the basic block. The alignment is specified as
/// log2(bytes).
unsigned getAlignment() const { return Alignment; }
/// Set alignment of the basic block. The alignment is specified as
/// log2(bytes).
void setAlignment(unsigned Align) { Alignment = Align; }
/// Returns true if the block is a landing pad. That is, this basic block is
/// entered via an exception handler.
bool isEHPad() const { return IsEHPad; }
/// Indicates the block is a landing pad. That is, this basic block is entered
/// via an exception handler.
void setIsEHPad(bool V = true) { IsEHPad = V; }
bool hasEHPadSuccessor() const;
/// Returns true if this is the entry block of an EH scope, i.e., the block
/// that used to have a catchpad or cleanuppad instruction in the LLVM IR.
bool isEHScopeEntry() const { return IsEHScopeEntry; }
/// Indicates if this is the entry block of an EH scope, i.e., the block
/// that used to have a catchpad or cleanuppad instruction in the LLVM IR.
void setIsEHScopeEntry(bool V = true) { IsEHScopeEntry = V; }
/// Returns true if this is the entry block of an EH funclet.
bool isEHFuncletEntry() const { return IsEHFuncletEntry; }
/// Indicates if this is the entry block of an EH funclet.
void setIsEHFuncletEntry(bool V = true) { IsEHFuncletEntry = V; }
/// Returns true if this is the entry block of a cleanup funclet.
bool isCleanupFuncletEntry() const { return IsCleanupFuncletEntry; }
/// Indicates if this is the entry block of a cleanup funclet.
void setIsCleanupFuncletEntry(bool V = true) { IsCleanupFuncletEntry = V; }
/// Returns true if it is legal to hoist instructions into this block.
bool isLegalToHoistInto() const;
// Code Layout methods.
/// Move 'this' block before or after the specified block. This only moves
/// the block, it does not modify the CFG or adjust potential fall-throughs at
/// the end of the block.
void moveBefore(MachineBasicBlock *NewAfter);
void moveAfter(MachineBasicBlock *NewBefore);
/// Update the terminator instructions in block to account for changes to the
/// layout. If the block previously used a fallthrough, it may now need a
/// branch, and if it previously used branching it may now be able to use a
/// fallthrough.
void updateTerminator();
// Machine-CFG mutators
/// Add Succ as a successor of this MachineBasicBlock. The Predecessors list
/// of Succ is automatically updated. The PROB parameter is stored in the
/// Probabilities list. The default probability is set as unknown. Mixing
/// known and unknown probabilities in the successor list is not allowed. When all
/// successors have unknown probabilities, 1 / N is returned as the
/// probability for each successor, where N is the number of successors.
///
/// Note that duplicate Machine CFG edges are not allowed.
void addSuccessor(MachineBasicBlock *Succ,
BranchProbability Prob = BranchProbability::getUnknown());
/// Add Succ as a successor of this MachineBasicBlock. The Predecessors list
/// of Succ is automatically updated. The probability is not provided because
/// BPI is not available (e.g. -O0 is used), in which case edge probabilities
/// won't be used. Using this interface can save some space.
void addSuccessorWithoutProb(MachineBasicBlock *Succ);
/// Set successor probability of a given iterator.
void setSuccProbability(succ_iterator I, BranchProbability Prob);
/// Normalize probabilities of all successors so that the sum of them becomes
/// one. This is usually done when the current update on this MBB is done, and
/// the sum of its successors' probabilities is not guaranteed to be one. The
/// user is responsible for the correct use of this function.
/// MBB::removeSuccessor() has an option to do this automatically.
void normalizeSuccProbs() {
BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
}
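// Usage sketch (assumes hypothetical blocks MBB, A and B): attach two
// successors with known probabilities and rescale them so they sum to one.
//
//   MBB.addSuccessor(A, BranchProbability::getBranchProbability(1, 4));
//   MBB.addSuccessor(B, BranchProbability::getBranchProbability(1, 4));
//   MBB.normalizeSuccProbs(); // both edges now carry probability 1/2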
/// Validate successors' probabilities and check if the sum of them is
/// approximately one. This only works in DEBUG mode.
void validateSuccProbs() const;
/// Remove successor from the successors list of this MachineBasicBlock. The
/// Predecessors list of Succ is automatically updated.
/// If NormalizeSuccProbs is true, then normalize successors' probabilities
/// after the successor is removed.
void removeSuccessor(MachineBasicBlock *Succ,
bool NormalizeSuccProbs = false);
/// Remove specified successor from the successors list of this
/// MachineBasicBlock. The Predecessors list of Succ is automatically updated.
/// If NormalizeSuccProbs is true, then normalize successors' probabilities
/// after the successor is removed.
/// Return the iterator to the element after the one removed.
succ_iterator removeSuccessor(succ_iterator I,
bool NormalizeSuccProbs = false);
/// Replace successor OLD with NEW and update probability info.
void replaceSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New);
/// Copy a successor (and any probability info) from the original block to
/// this block. Uses an iterator into the original block's successors.
///
/// This is useful when doing a partial clone of successors. Afterward, the
/// probabilities may need to be normalized.
void copySuccessor(MachineBasicBlock *Orig, succ_iterator I);
/// Split the old successor into old plus new and update the probability
/// info.
void splitSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New,
bool NormalizeSuccProbs = false);
/// Transfers all the successors from FromMBB to this machine basic block
/// (i.e., copies all the successors of FromMBB and removes all the
/// successors from FromMBB).
void transferSuccessors(MachineBasicBlock *FromMBB);
/// Transfers all the successors, as in transferSuccessors, and updates PHI
/// operands in the successor blocks that refer to FromMBB to refer to this
/// block.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB);
/// Return true if any of the successors have probabilities attached to them.
bool hasSuccessorProbabilities() const { return !Probs.empty(); }
/// Return true if the specified MBB is a predecessor of this block.
bool isPredecessor(const MachineBasicBlock *MBB) const;
/// Return true if the specified MBB is a successor of this block.
bool isSuccessor(const MachineBasicBlock *MBB) const;
/// Return true if the specified MBB will be emitted immediately after this
/// block, such that if this block exits by falling through, control will
/// transfer to the specified MBB. Note that MBB need not be a successor at
/// all, for example if this block ends with an unconditional branch to some
/// other block.
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
/// Return the fallthrough block if the block can implicitly
/// transfer control to the block after it by falling off the end of
/// it. This should return null if it can reach the block after
/// it, but it uses an explicit branch to do so (e.g., a table
/// jump). Non-null return is a conservative answer.
MachineBasicBlock *getFallThrough();
/// Return true if the block can implicitly transfer control to the
/// block after it by falling off the end of it. This should return
/// false if it can reach the block after it, but it uses an
/// explicit branch to do so (e.g., a table jump). True is a
/// conservative answer.
bool canFallThrough();
/// Returns a pointer to the first instruction in this block that is not a
/// PHINode instruction. When adding instructions to the beginning of the
/// basic block, they should be added before the returned value, not before
/// the first instruction, which might be a PHI.
/// Returns end() if there's no non-PHI instruction.
iterator getFirstNonPHI();
/// Return the first instruction in MBB after I that is not a PHI or a label.
/// This is the correct point to insert lowered copies at the beginning of a
/// basic block that must be before any debugging information.
iterator SkipPHIsAndLabels(iterator I);
/// Return the first instruction in MBB after I that is not a PHI, label or
/// debug. This is the correct point to insert copies at the beginning of a
/// basic block.
iterator SkipPHIsLabelsAndDebug(iterator I);
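// Usage sketch (assumes a hypothetical MachineBasicBlock &MBB and a
// MachineInstr *MI to be placed at the top of the block): skip PHIs and
// labels before inserting, as described above.
//
//   MachineBasicBlock::iterator InsertPt = MBB.SkipPHIsAndLabels(MBB.begin());
//   MBB.insert(InsertPt, MI);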
/// Returns an iterator to the first terminator instruction of this basic
/// block. If a terminator does not exist, it returns end().
iterator getFirstTerminator();
const_iterator getFirstTerminator() const {
return const_cast<MachineBasicBlock *>(this)->getFirstTerminator();
}
/// Same as getFirstTerminator, but ignores bundles and returns an
/// instr_iterator instead.
instr_iterator getFirstInstrTerminator();
/// Returns an iterator to the first non-debug instruction in the basic block,
/// or end().
iterator getFirstNonDebugInstr();
const_iterator getFirstNonDebugInstr() const {
return const_cast<MachineBasicBlock *>(this)->getFirstNonDebugInstr();
}
/// Returns an iterator to the last non-debug instruction in the basic block,
/// or end().
iterator getLastNonDebugInstr();
const_iterator getLastNonDebugInstr() const {
return const_cast<MachineBasicBlock *>(this)->getLastNonDebugInstr();
}
/// Convenience function that returns true if the block ends in a return
/// instruction.
bool isReturnBlock() const {
return !empty() && back().isReturn();
}
/// Convenience function that returns true if the block ends in an EH scope
/// return instruction.
bool isEHScopeReturnBlock() const {
return !empty() && back().isEHScopeReturn();
}
/// Split the critical edge from this block to the given successor block, and
/// return the newly created block, or null if splitting is not possible.
///
/// This function updates LiveVariables, MachineDominatorTree, and
/// MachineLoopInfo, as applicable.
MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass &P);
/// Check if the edge between this block and the given successor \p Succ can
/// be split. If this returns true, a subsequent call to SplitCriticalEdge is
/// guaranteed to return a valid basic block if no changes occurred in the
/// meantime.
bool canSplitCriticalEdge(const MachineBasicBlock *Succ) const;
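// A minimal usage sketch (illustrative only; 'P' is assumed to be the pass
// requesting the split): probe first so that a null result from
// SplitCriticalEdge does not have to be treated as an error.
//   if (MBB.canSplitCriticalEdge(Succ))
//     if (MachineBasicBlock *NewMBB = MBB.SplitCriticalEdge(Succ, P))
//       NewMBB->dump(); // The new block now sits on the former critical edge.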
void pop_front() { Insts.pop_front(); }
void pop_back() { Insts.pop_back(); }
void push_back(MachineInstr *MI) { Insts.push_back(MI); }
/// Insert MI into the instruction list before I, possibly inside a bundle.
///
/// If the insertion point is inside a bundle, MI will be added to the bundle;
/// otherwise MI will not be added to any bundle. That means this function
/// alone can't be used to prepend or append instructions to bundles. See
/// MIBundleBuilder::insert() for a more reliable way of doing that.
instr_iterator insert(instr_iterator I, MachineInstr *M);
/// Insert a range of instructions into the instruction list before I.
template<typename IT>
void insert(iterator I, IT S, IT E) {
assert((I == end() || I->getParent() == this) &&
"iterator points outside of basic block");
Insts.insert(I.getInstrIterator(), S, E);
}
/// Insert MI into the instruction list before I.
iterator insert(iterator I, MachineInstr *MI) {
assert((I == end() || I->getParent() == this) &&
"iterator points outside of basic block");
assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
"Cannot insert instruction with bundle flags");
return Insts.insert(I.getInstrIterator(), MI);
}
/// Insert MI into the instruction list after I.
iterator insertAfter(iterator I, MachineInstr *MI) {
assert((I == end() || I->getParent() == this) &&
"iterator points outside of basic block");
assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
"Cannot insert instruction with bundle flags");
return Insts.insertAfter(I.getInstrIterator(), MI);
}
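// A minimal usage sketch (illustrative only; 'NewMI' is assumed to be an
// already created, unbundled MachineInstr): place it just before the
// terminator sequence of the block.
//   MBB.insert(MBB.getFirstTerminator(), NewMI);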
/// Remove an instruction from the instruction list and delete it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle will still be bundled after removing the single instruction.
instr_iterator erase(instr_iterator I);
/// Remove an instruction from the instruction list and delete it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle will still be bundled after removing the single instruction.
instr_iterator erase_instr(MachineInstr *I) {
return erase(instr_iterator(I));
}
/// Remove a range of instructions from the instruction list and delete them.
iterator erase(iterator I, iterator E) {
return Insts.erase(I.getInstrIterator(), E.getInstrIterator());
}
/// Remove an instruction or bundle from the instruction list and delete it.
///
/// If I points to a bundle of instructions, they are all erased.
iterator erase(iterator I) {
return erase(I, std::next(I));
}
/// Remove an instruction from the instruction list and delete it.
///
/// If I is the head of a bundle of instructions, the whole bundle will be
/// erased.
iterator erase(MachineInstr *I) {
return erase(iterator(I));
}
/// Remove the unbundled instruction from the instruction list without
/// deleting it.
///
/// This function cannot be used to remove bundled instructions; use
/// remove_instr to remove individual instructions from a bundle.
MachineInstr *remove(MachineInstr *I) {
assert(!I->isBundled() && "Cannot remove bundled instructions");
return Insts.remove(instr_iterator(I));
}
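// A minimal usage sketch (illustrative only; assumes the block has at least
// one non-PHI instruction): erase() deletes an instruction, while remove()
// only unlinks it so it can be re-inserted elsewhere.
//   MachineInstr *MI = &*MBB.getFirstNonPHI();
//   MBB.remove(MI);         // MI stays alive, just detached from MBB.
//   OtherMBB.push_back(MI); // Re-attach it to another block.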
/// Remove the possibly bundled instruction from the instruction list
/// without deleting it.
///
/// If the instruction is part of a bundle, the other instructions in the
/// bundle will still be bundled after removing the single instruction.
MachineInstr *remove_instr(MachineInstr *I);
void clear() {
Insts.clear();
}
/// Take an instruction from MBB 'Other' at the position From, and insert it
/// into this MBB right before 'Where'.
///
/// If From points to a bundle of instructions, the whole bundle is moved.
void splice(iterator Where, MachineBasicBlock *Other, iterator From) {
// The range splice() doesn't allow noop moves, but this one does.
if (Where != From)
splice(Where, Other, From, std::next(From));
}
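// A minimal usage sketch (illustrative only; 'MI' is assumed to live in
// 'OtherMBB'): hoist a single instruction, or its whole bundle, into this
// block just before the terminators.
//   MBB.splice(MBB.getFirstTerminator(), &OtherMBB,
//              MachineBasicBlock::iterator(MI));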
/// Take a block of instructions from MBB 'Other' in the range [From, To),
/// and insert them into this MBB right before 'Where'.
///
/// The instruction at 'Where' must not be included in the range of
/// instructions to move.
void splice(iterator Where, MachineBasicBlock *Other,
iterator From, iterator To) {
Insts.splice(Where.getInstrIterator(), Other->Insts,
From.getInstrIterator(), To.getInstrIterator());
}
/// This method unlinks 'this' from the containing function, and returns it,
/// but does not delete it.
MachineBasicBlock *removeFromParent();
/// This method unlinks 'this' from the containing function and deletes it.
void eraseFromParent();
/// Given a machine basic block that branched to 'Old', change the code and
/// CFG so that it branches to 'New' instead.
void ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock *New);
/// Various pieces of code can cause excess edges in the CFG to be inserted.
/// If we have proven that MBB can only branch to DestA and DestB, remove any
/// other MBB successors from the CFG. DestA and DestB can be null. Besides
/// DestA and DestB, retain other edges leading to LandingPads (currently
/// there can be only one; we don't check or require that here). Note it is
/// possible that DestA and/or DestB are LandingPads.
bool CorrectExtraCFGEdges(MachineBasicBlock *DestA,
MachineBasicBlock *DestB,
bool IsCond);
/// Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE
/// and DBG_LABEL instructions. Return UnknownLoc if there is none.
DebugLoc findDebugLoc(instr_iterator MBBI);
DebugLoc findDebugLoc(iterator MBBI) {
return findDebugLoc(MBBI.getInstrIterator());
}
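// A minimal usage sketch (illustrative only; BuildMI, TII, InsertPt, DstReg
// and SrcReg come from elsewhere and are only assumed here): borrow the next
// valid DebugLoc so a newly inserted copy carries a plausible location.
//   DebugLoc DL = MBB.findDebugLoc(InsertPt);
//   BuildMI(MBB, InsertPt, DL, TII->get(TargetOpcode::COPY), DstReg)
//       .addReg(SrcReg);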
/// Find the previous valid DebugLoc preceding MBBI, skipping any DBG_VALUE
/// instructions. Return UnknownLoc if there is none.
DebugLoc findPrevDebugLoc(instr_iterator MBBI);
DebugLoc findPrevDebugLoc(iterator MBBI) {
return findPrevDebugLoc(MBBI.getInstrIterator());
}
/// Find and return the merged DebugLoc of the branch instructions of the
/// block. Return UnknownLoc if there is none.
DebugLoc findBranchDebugLoc();
/// Possible outcome of a register liveness query to computeRegisterLiveness()
enum LivenessQueryResult {
LQR_Live, ///< Register is known to be (at least partially) live.
LQR_Dead, ///< Register is known to be fully dead.
LQR_Unknown ///< Register liveness not decidable from local neighborhood.
};
/// Return whether (physical) register \p Reg has been defined and not
/// killed as of just before \p Before.
///
/// Search is localised to a neighborhood of \p Neighborhood instructions
/// before (searching for defs or kills) and \p Neighborhood instructions
/// after (searching just for defs) \p Before.
///
/// \p Reg must be a physical register.
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
unsigned Reg,
const_iterator Before,
unsigned Neighborhood = 10) const;
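// A minimal usage sketch (illustrative only; 'TRI' and the physical register
// 'ScratchReg' are assumed to come from the target): only clobber ScratchReg
// if the local scan proves it dead at the insertion point.
//   bool CanClobber =
//       MBB.computeRegisterLiveness(TRI, ScratchReg, InsertPt) ==
//       MachineBasicBlock::LQR_Dead;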
// Debugging methods.
void dump() const;
void print(raw_ostream &OS, const SlotIndexes * = nullptr,
bool IsStandalone = true) const;
void print(raw_ostream &OS, ModuleSlotTracker &MST,
const SlotIndexes * = nullptr, bool IsStandalone = true) const;
// Printing method used by LoopInfo.
void printAsOperand(raw_ostream &OS, bool PrintType = true) const;
/// MachineBasicBlocks are uniquely numbered at the function level, unless
/// they're not in a MachineFunction yet, in which case this will return -1.
int getNumber() const { return Number; }
void setNumber(int N) { Number = N; }
/// Return the MCSymbol for this basic block.
MCSymbol *getSymbol() const;
Optional<uint64_t> getIrrLoopHeaderWeight() const {
return IrrLoopHeaderWeight;
}
void setIrrLoopHeaderWeight(uint64_t Weight) {
IrrLoopHeaderWeight = Weight;
}
private:
/// Return the probability iterator corresponding to the successor iterator I.
probability_iterator getProbabilityIterator(succ_iterator I);
const_probability_iterator
getProbabilityIterator(const_succ_iterator I) const;
friend class MachineBranchProbabilityInfo;
friend class MIPrinter;
/// Return the probability of the edge from this block to MBB. This method
/// should NOT be called directly, but by using the getEdgeProbability method
/// of the MachineBranchProbabilityInfo class.
BranchProbability getSuccProbability(const_succ_iterator Succ) const;
// Methods used to maintain doubly linked list of blocks...
friend struct ilist_callback_traits<MachineBasicBlock>;
// Machine-CFG mutators
/// Add Pred as a predecessor of this MachineBasicBlock. Don't do this
/// unless you know what you're doing, because it doesn't update Pred's
/// successors list. Use Pred->addSuccessor instead.
void addPredecessor(MachineBasicBlock *Pred);
/// Remove Pred as a predecessor of this MachineBasicBlock. Don't do this
/// unless you know what you're doing, because it doesn't update Pred's
/// successors list. Use Pred->removeSuccessor instead.
void removePredecessor(MachineBasicBlock *Pred);
};
raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);
/// Prints a machine basic block reference.
///
/// The format is:
/// %bb.5 - a machine basic block with MBB.getNumber() == 5.
///
/// Usage: OS << printMBBReference(MBB) << '\n';
Printable printMBBReference(const MachineBasicBlock &MBB);
// This is useful when building IndexedMaps keyed on basic block pointers.
struct MBB2NumberFunctor {
using argument_type = const MachineBasicBlock *;
unsigned operator()(const MachineBasicBlock *MBB) const {
return MBB->getNumber();
}
};
//===--------------------------------------------------------------------===//
// GraphTraits specializations for machine basic block graphs (machine-CFGs)
//===--------------------------------------------------------------------===//
// Provide specializations of GraphTraits to be able to treat a
// MachineFunction as a graph of MachineBasicBlocks.
//
template <> struct GraphTraits<MachineBasicBlock *> {
using NodeRef = MachineBasicBlock *;
using ChildIteratorType = MachineBasicBlock::succ_iterator;
static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};
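// A minimal usage sketch (illustrative only; requires
// llvm/ADT/DepthFirstIterator.h): with these traits in place, the generic
// graph iterators can walk everything reachable from a block.
//   for (MachineBasicBlock *Reachable : depth_first(&MBB))
//     Reachable->dump(); // Visits MBB and all blocks reachable via successors.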
template <> struct GraphTraits<const MachineBasicBlock *> {
using NodeRef = const MachineBasicBlock *;
using ChildIteratorType = MachineBasicBlock::const_succ_iterator;
static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};
// Provide specializations of GraphTraits to be able to treat a
// MachineFunction as a graph of MachineBasicBlocks and to walk it
// in inverse order. Inverse order for a function is considered
// to be when traversing the predecessor edges of an MBB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<MachineBasicBlock*>> {
using NodeRef = MachineBasicBlock *;
using ChildIteratorType = MachineBasicBlock::pred_iterator;
static NodeRef getEntryNode(Inverse<MachineBasicBlock *> G) {
return G.Graph;
}
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
template <> struct GraphTraits<Inverse<const MachineBasicBlock*>> {
using NodeRef = const MachineBasicBlock *;
using ChildIteratorType = MachineBasicBlock::const_pred_iterator;
static NodeRef getEntryNode(Inverse<const MachineBasicBlock *> G) {
return G.Graph;
}
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
/// MachineInstrSpan provides an interface to get an iteration range
/// containing the instruction it was initialized with, along with all
/// those instructions inserted prior to or following that instruction
/// at some point after the MachineInstrSpan is constructed.
class MachineInstrSpan {
MachineBasicBlock &MBB;
MachineBasicBlock::iterator I, B, E;
public:
MachineInstrSpan(MachineBasicBlock::iterator I)
: MBB(*I->getParent()),
I(I),
B(I == MBB.begin() ? MBB.end() : std::prev(I)),
E(std::next(I)) {}
MachineBasicBlock::iterator begin() {
return B == MBB.end() ? MBB.begin() : std::next(B);
}
MachineBasicBlock::iterator end() { return E; }
bool empty() { return begin() == end(); }
MachineBasicBlock::iterator getInitial() { return I; }
};
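// A minimal usage sketch (illustrative only; 'I' is an iterator at the
// instruction being expanded, and expandPseudo is a hypothetical helper that
// inserts code around it): the span covers I together with everything the
// helper inserted around it.
//   MachineInstrSpan MIS(I);
//   expandPseudo(I);
//   for (MachineInstr &NewMI : make_range(MIS.begin(), MIS.end()))
//     NewMI.dump(); // Every instruction in the span, including I.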
/// Increment \p It until it points to a non-debug instruction or to \p End
/// and return the resulting iterator. This function should only be used with
/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
/// const_instr_iterator} and the respective reverse iterators.
template<typename IterT>
inline IterT skipDebugInstructionsForward(IterT It, IterT End) {
while (It != End && It->isDebugInstr())
It++;
return It;
}
/// Decrement \p It until it points to a non-debug instruction or to \p Begin
/// and return the resulting iterator. This function should only be used with
/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
/// const_instr_iterator} and the respective reverse iterators.
template<class IterT>
inline IterT skipDebugInstructionsBackward(IterT It, IterT Begin) {
while (It != Begin && It->isDebugInstr())
It--;
return It;
}
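// A minimal usage sketch (illustrative only): find the first "real"
// instruction of a block, skipping any leading DBG_VALUE/DBG_LABEL.
//   auto FirstReal =
//       skipDebugInstructionsForward(MBB.instr_begin(), MBB.instr_end());
//   bool OnlyDebug = (FirstReal == MBB.instr_end());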
} // end namespace llvm
#endif // LLVM_CODEGEN_MACHINEBASICBLOCK_H