1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-25 12:12:47 +01:00

Remove \brief commands from doxygen comments.

We've been running doxygen with the autobrief option for a couple of
years now. This makes the \brief markers in our comments
redundant. Since they are a visual distraction and we don't want to
encourage more \brief markers in new code either, this patch removes
them all.

Patch produced by

  for i in $(git grep -l '\\brief'); do perl -pi -e 's/\\brief //g' $i & done

Differential Revision: https://reviews.llvm.org/D46290

llvm-svn: 331272
This commit is contained in:
Adrian Prantl 2018-05-01 15:54:18 +00:00
parent 3cb2fbb565
commit 076a6683eb
781 changed files with 5005 additions and 5005 deletions

View File

@ -190,7 +190,7 @@ lto_module_create_from_memory_with_path(const void* mem, size_t length,
const char *path);
/**
* \brief Loads an object file in its own context.
* Loads an object file in its own context.
*
* Loads an object file in its own LLVMContext. This function call is
* thread-safe. However, modules created this way should not be merged into an
@ -205,7 +205,7 @@ lto_module_create_in_local_context(const void *mem, size_t length,
const char *path);
/**
* \brief Loads an object file in the codegen context.
* Loads an object file in the codegen context.
*
* Loads an object file into the same context as \c cg. The module is safe to
* add using \a lto_codegen_add_module().
@ -345,7 +345,7 @@ extern lto_code_gen_t
lto_codegen_create(void);
/**
* \brief Instantiate a code generator in its own context.
* Instantiate a code generator in its own context.
*
* Instantiates a code generator in its own context. Modules added via \a
* lto_codegen_add_module() must have all been created in the same context,
@ -539,7 +539,7 @@ lto_codegen_set_should_internalize(lto_code_gen_t cg,
lto_bool_t ShouldInternalize);
/**
* \brief Set whether to embed uselists in bitcode.
* Set whether to embed uselists in bitcode.
*
* Sets whether \a lto_codegen_write_merged_modules() should embed uselists in
* output bitcode. This should be turned on for all -save-temps output.

View File

@ -1215,7 +1215,7 @@ inline APFloat abs(APFloat X) {
return X;
}
/// \brief Returns the negated value of the argument.
/// Returns the negated value of the argument.
inline APFloat neg(APFloat X) {
X.changeSign();
return X;

File diff suppressed because it is too large Load Diff

View File

@ -72,7 +72,7 @@ public:
}
using APInt::toString;
/// \brief Get the correctly-extended \c int64_t value.
/// Get the correctly-extended \c int64_t value.
int64_t getExtValue() const {
assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
return isSigned() ? getSExtValue() : getZExtValue();
@ -279,13 +279,13 @@ public:
: APInt::getSignedMinValue(numBits), Unsigned);
}
/// \brief Determine if two APSInts have the same value, zero- or
/// Determine if two APSInts have the same value, zero- or
/// sign-extending as needed.
static bool isSameValue(const APSInt &I1, const APSInt &I2) {
return !compareValues(I1, I2);
}
/// \brief Compare underlying values of two numbers.
/// Compare underlying values of two numbers.
static int compareValues(const APSInt &I1, const APSInt &I2) {
if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
return I1.IsUnsigned ? I1.compare(I2) : I1.compareSigned(I2);

View File

@ -184,51 +184,51 @@ namespace llvm {
/// slice(n) - Chop off the first N elements of the array.
ArrayRef<T> slice(size_t N) const { return slice(N, size() - N); }
/// \brief Drop the first \p N elements of the array.
/// Drop the first \p N elements of the array.
ArrayRef<T> drop_front(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return slice(N, size() - N);
}
/// \brief Drop the last \p N elements of the array.
/// Drop the last \p N elements of the array.
ArrayRef<T> drop_back(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return slice(0, size() - N);
}
/// \brief Return a copy of *this with the first N elements satisfying the
/// Return a copy of *this with the first N elements satisfying the
/// given predicate removed.
template <class PredicateT> ArrayRef<T> drop_while(PredicateT Pred) const {
return ArrayRef<T>(find_if_not(*this, Pred), end());
}
/// \brief Return a copy of *this with the first N elements not satisfying
/// Return a copy of *this with the first N elements not satisfying
/// the given predicate removed.
template <class PredicateT> ArrayRef<T> drop_until(PredicateT Pred) const {
return ArrayRef<T>(find_if(*this, Pred), end());
}
/// \brief Return a copy of *this with only the first \p N elements.
/// Return a copy of *this with only the first \p N elements.
ArrayRef<T> take_front(size_t N = 1) const {
if (N >= size())
return *this;
return drop_back(size() - N);
}
/// \brief Return a copy of *this with only the last \p N elements.
/// Return a copy of *this with only the last \p N elements.
ArrayRef<T> take_back(size_t N = 1) const {
if (N >= size())
return *this;
return drop_front(size() - N);
}
/// \brief Return the first N elements of this Array that satisfy the given
/// Return the first N elements of this Array that satisfy the given
/// predicate.
template <class PredicateT> ArrayRef<T> take_while(PredicateT Pred) const {
return ArrayRef<T>(begin(), find_if_not(*this, Pred));
}
/// \brief Return the first N elements of this Array that don't satisfy the
/// Return the first N elements of this Array that don't satisfy the
/// given predicate.
template <class PredicateT> ArrayRef<T> take_until(PredicateT Pred) const {
return ArrayRef<T>(begin(), find_if(*this, Pred));
@ -358,7 +358,7 @@ namespace llvm {
return slice(N, this->size() - N);
}
/// \brief Drop the first \p N elements of the array.
/// Drop the first \p N elements of the array.
MutableArrayRef<T> drop_front(size_t N = 1) const {
assert(this->size() >= N && "Dropping more elements than exist");
return slice(N, this->size() - N);
@ -369,42 +369,42 @@ namespace llvm {
return slice(0, this->size() - N);
}
/// \brief Return a copy of *this with the first N elements satisfying the
/// Return a copy of *this with the first N elements satisfying the
/// given predicate removed.
template <class PredicateT>
MutableArrayRef<T> drop_while(PredicateT Pred) const {
return MutableArrayRef<T>(find_if_not(*this, Pred), end());
}
/// \brief Return a copy of *this with the first N elements not satisfying
/// Return a copy of *this with the first N elements not satisfying
/// the given predicate removed.
template <class PredicateT>
MutableArrayRef<T> drop_until(PredicateT Pred) const {
return MutableArrayRef<T>(find_if(*this, Pred), end());
}
/// \brief Return a copy of *this with only the first \p N elements.
/// Return a copy of *this with only the first \p N elements.
MutableArrayRef<T> take_front(size_t N = 1) const {
if (N >= this->size())
return *this;
return drop_back(this->size() - N);
}
/// \brief Return a copy of *this with only the last \p N elements.
/// Return a copy of *this with only the last \p N elements.
MutableArrayRef<T> take_back(size_t N = 1) const {
if (N >= this->size())
return *this;
return drop_front(this->size() - N);
}
/// \brief Return the first N elements of this Array that satisfy the given
/// Return the first N elements of this Array that satisfy the given
/// predicate.
template <class PredicateT>
MutableArrayRef<T> take_while(PredicateT Pred) const {
return MutableArrayRef<T>(begin(), find_if_not(*this, Pred));
}
/// \brief Return the first N elements of this Array that don't satisfy the
/// Return the first N elements of this Array that don't satisfy the
/// given predicate.
template <class PredicateT>
MutableArrayRef<T> take_until(PredicateT Pred) const {

View File

@ -779,7 +779,7 @@ public:
}
private:
/// \brief Perform a logical left shift of \p Count words by moving everything
/// Perform a logical left shift of \p Count words by moving everything
/// \p Count words to the right in memory.
///
/// While confusing, words are stored from least significant at Bits[0] to
@ -810,7 +810,7 @@ private:
clear_unused_bits();
}
/// \brief Perform a logical right shift of \p Count words by moving those
/// Perform a logical right shift of \p Count words by moving those
/// words to the left in memory. See wordShl for more information.
///
void wordShr(uint32_t Count) {

View File

@ -177,7 +177,7 @@ public:
return *this;
}
/// \brief Skips all children of the current node and traverses to next node
/// Skips all children of the current node and traverses to next node
///
/// Note: This function takes care of incrementing the iterator. If you
/// always increment and call this function, you risk walking off the end.

View File

@ -24,7 +24,7 @@ namespace llvm {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
/// \brief A base class for data structure classes wishing to make iterators
/// A base class for data structure classes wishing to make iterators
/// ("handles") pointing into themselves fail-fast. When building without
/// asserts, this class is empty and does nothing.
///
@ -39,15 +39,15 @@ class DebugEpochBase {
public:
DebugEpochBase() : Epoch(0) {}
/// \brief Calling incrementEpoch invalidates all handles pointing into the
/// Calling incrementEpoch invalidates all handles pointing into the
/// calling instance.
void incrementEpoch() { ++Epoch; }
/// \brief The destructor calls incrementEpoch to make use-after-free bugs
/// The destructor calls incrementEpoch to make use-after-free bugs
/// more likely to crash deterministically.
~DebugEpochBase() { incrementEpoch(); }
/// \brief A base class for iterator classes ("handles") that wish to poll for
/// A base class for iterator classes ("handles") that wish to poll for
/// iterator invalidating modifications in the underlying data structure.
/// When LLVM is built without asserts, this class is empty and does nothing.
///
@ -65,12 +65,12 @@ public:
explicit HandleBase(const DebugEpochBase *Parent)
: EpochAddress(&Parent->Epoch), EpochAtCreation(Parent->Epoch) {}
/// \brief Returns true if the DebugEpochBase this Handle is linked to has
/// Returns true if the DebugEpochBase this Handle is linked to has
/// not called incrementEpoch on itself since the creation of this
/// HandleBase instance.
bool isHandleInSync() const { return *EpochAddress == EpochAtCreation; }
/// \brief Returns a pointer to the epoch word stored in the data structure
/// Returns a pointer to the epoch word stored in the data structure
/// this handle points into. Can be used to check if two iterators point
/// into the same data structure.
const void *getEpochAddress() const { return EpochAddress; }

View File

@ -57,7 +57,7 @@
namespace llvm {
/// \brief An opaque object representing a hash code.
/// An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hashtables or other hashing-based data structures.
@ -73,14 +73,14 @@ class hash_code {
size_t value;
public:
/// \brief Default construct a hash_code.
/// Default construct a hash_code.
/// Note that this leaves the value uninitialized.
hash_code() = default;
/// \brief Form a hash code directly from a numerical value.
/// Form a hash code directly from a numerical value.
hash_code(size_t value) : value(value) {}
/// \brief Convert the hash code to its numerical value for use.
/// Convert the hash code to its numerical value for use.
/*explicit*/ operator size_t() const { return value; }
friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
@ -90,11 +90,11 @@ public:
return lhs.value != rhs.value;
}
/// \brief Allow a hash_code to be directly run through hash_value.
/// Allow a hash_code to be directly run through hash_value.
friend size_t hash_value(const hash_code &code) { return code.value; }
};
/// \brief Compute a hash_code for any integer value.
/// Compute a hash_code for any integer value.
///
/// Note that this function is intended to compute the same hash_code for
/// a particular value without regard to the pre-promotion type. This is in
@ -105,21 +105,21 @@ template <typename T>
typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
hash_value(T value);
/// \brief Compute a hash_code for a pointer's address.
/// Compute a hash_code for a pointer's address.
///
/// N.B.: This hashes the *address*. Not the value and not the type.
template <typename T> hash_code hash_value(const T *ptr);
/// \brief Compute a hash_code for a pair of objects.
/// Compute a hash_code for a pair of objects.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);
/// \brief Compute a hash_code for a standard string.
/// Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);
/// \brief Override the execution seed with a fixed value.
/// Override the execution seed with a fixed value.
///
/// This hashing library uses a per-execution seed designed to change on each
/// run with high probability in order to ensure that the hash codes are not
@ -164,7 +164,7 @@ static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
static const uint64_t k3 = 0xc949d7c7509e6557ULL;
/// \brief Bitwise right rotate.
/// Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
@ -254,13 +254,13 @@ inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
return k2 ^ seed;
}
/// \brief The intermediate state used during hashing.
/// The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
uint64_t h0, h1, h2, h3, h4, h5, h6;
/// \brief Create a new hash_state structure and initialize it based on the
/// Create a new hash_state structure and initialize it based on the
/// seed and the first 64-byte chunk.
/// This effectively performs the initial mix.
static hash_state create(const char *s, uint64_t seed) {
@ -272,7 +272,7 @@ struct hash_state {
return state;
}
/// \brief Mix 32-bytes from the input sequence into the 16-bytes of 'a'
/// Mix 32-bytes from the input sequence into the 16-bytes of 'a'
/// and 'b', including whatever is already in 'a' and 'b'.
static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
a += fetch64(s);
@ -284,7 +284,7 @@ struct hash_state {
a += c;
}
/// \brief Mix in a 64-byte buffer of data.
/// Mix in a 64-byte buffer of data.
/// We mix all 64 bytes even when the chunk length is smaller, but we
/// record the actual length.
void mix(const char *s) {
@ -302,7 +302,7 @@ struct hash_state {
std::swap(h2, h0);
}
/// \brief Compute the final 64-bit hash code value based on the current
/// Compute the final 64-bit hash code value based on the current
/// state and the length of bytes hashed.
uint64_t finalize(size_t length) {
return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
@ -311,7 +311,7 @@ struct hash_state {
};
/// \brief A global, fixed seed-override variable.
/// A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_seed
/// function. See that function for details. Do not, under any circumstances,
@ -332,7 +332,7 @@ inline size_t get_execution_seed() {
}
/// \brief Trait to indicate whether a type's bits can be hashed directly.
/// Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
@ -359,14 +359,14 @@ template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
(sizeof(T) + sizeof(U)) ==
sizeof(std::pair<T, U>))> {};
/// \brief Helper to get the hashable data representation for a type.
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
typename std::enable_if<is_hashable_data<T>::value, T>::type
get_hashable_data(const T &value) {
return value;
}
/// \brief Helper to get the hashable data representation for a type.
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
@ -376,7 +376,7 @@ get_hashable_data(const T &value) {
return hash_value(value);
}
/// \brief Helper to store data from a value into a buffer and advance the
/// Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
@ -395,7 +395,7 @@ bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
return true;
}
/// \brief Implement the combining of integral values into a hash_code.
/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is
/// integral. Rather than computing a hash_code for each object and then
@ -435,7 +435,7 @@ hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
return state.finalize(length);
}
/// \brief Implement the combining of integral values into a hash_code.
/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is integral
/// and when the input iterator is actually a pointer. Rather than computing
@ -470,7 +470,7 @@ hash_combine_range_impl(ValueT *first, ValueT *last) {
} // namespace hashing
/// \brief Compute a hash_code for a sequence of values.
/// Compute a hash_code for a sequence of values.
///
/// This hashes a sequence of values. It produces the same hash_code as
/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
@ -486,7 +486,7 @@ hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
namespace hashing {
namespace detail {
/// \brief Helper class to manage the recursive combining of hash_combine
/// Helper class to manage the recursive combining of hash_combine
/// arguments.
///
/// This class exists to manage the state and various calls involved in the
@ -499,14 +499,14 @@ struct hash_combine_recursive_helper {
const size_t seed;
public:
/// \brief Construct a recursive hash combining helper.
/// Construct a recursive hash combining helper.
///
/// This sets up the state for a recursive hash combine, including getting
/// the seed and buffer setup.
hash_combine_recursive_helper()
: seed(get_execution_seed()) {}
/// \brief Combine one chunk of data into the current in-flight hash.
/// Combine one chunk of data into the current in-flight hash.
///
/// This merges one chunk of data into the hash. First it tries to buffer
/// the data. If the buffer is full, it hashes the buffer into its
@ -547,7 +547,7 @@ public:
return buffer_ptr;
}
/// \brief Recursive, variadic combining method.
/// Recursive, variadic combining method.
///
/// This function recurses through each argument, combining that argument
/// into a single hash.
@ -560,7 +560,7 @@ public:
return combine(length, buffer_ptr, buffer_end, args...);
}
/// \brief Base case for recursive, variadic combining.
/// Base case for recursive, variadic combining.
///
/// The base case when combining arguments recursively is reached when all
/// arguments have been handled. It flushes the remaining buffer and
@ -588,7 +588,7 @@ public:
} // namespace detail
} // namespace hashing
/// \brief Combine values into a single hash_code.
/// Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
@ -610,7 +610,7 @@ template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
namespace hashing {
namespace detail {
/// \brief Helper to hash the value of a single integer.
/// Helper to hash the value of a single integer.
///
/// Overloads for smaller integer types are not provided to ensure consistent
/// behavior in the presence of integral promotions. Essentially,

View File

@ -157,14 +157,14 @@ public:
(Vector.begin() + Pos->second);
}
/// \brief Remove the last element from the vector.
/// Remove the last element from the vector.
void pop_back() {
typename MapType::iterator Pos = Map.find(Vector.back().first);
Map.erase(Pos);
Vector.pop_back();
}
/// \brief Remove the element given by Iterator.
/// Remove the element given by Iterator.
///
/// Returns an iterator to the element following the one which was removed,
/// which may be end().
@ -187,7 +187,7 @@ public:
return Next;
}
/// \brief Remove all elements with the key value Key.
/// Remove all elements with the key value Key.
///
/// Returns the number of elements removed.
size_type erase(const KeyT &Key) {
@ -198,7 +198,7 @@ public:
return 1;
}
/// \brief Remove the elements that match the predicate.
/// Remove the elements that match the predicate.
///
/// Erase all elements that match \c Pred in a single pass. Takes linear
/// time.
@ -227,7 +227,7 @@ void MapVector<KeyT, ValueT, MapType, VectorType>::remove_if(Function Pred) {
Vector.erase(O, Vector.end());
}
/// \brief A MapVector that performs no allocations if smaller than a certain
/// A MapVector that performs no allocations if smaller than a certain
/// size.
template <typename KeyT, typename ValueT, unsigned N>
struct SmallMapVector

View File

@ -17,7 +17,7 @@
#define LLVM_ADT_NONE_H
namespace llvm {
/// \brief A simple null object to allow implicit construction of Optional<T>
/// A simple null object to allow implicit construction of Optional<T>
/// and similar types without having to spell out the specialization's name.
// (constant value 1 in an attempt to workaround MSVC build issue... )
enum class NoneType { None = 1 };

View File

@ -65,7 +65,7 @@ protected:
}
};
/// \brief Store a vector of values using a specific number of bits for each
/// Store a vector of values using a specific number of bits for each
/// value. Both signed and unsigned types can be used, e.g
/// @code
/// PackedVector<signed, 2> vec;

View File

@ -33,7 +33,7 @@
namespace llvm {
/// \brief Enumerate the SCCs of a directed graph in reverse topological order
/// Enumerate the SCCs of a directed graph in reverse topological order
/// of the SCC DAG.
///
/// This is implemented using Tarjan's DFS algorithm using an internal stack to
@ -104,7 +104,7 @@ public:
}
static scc_iterator end(const GraphT &) { return scc_iterator(); }
/// \brief Direct loop termination test which is more efficient than
/// Direct loop termination test which is more efficient than
/// comparison with \c end().
bool isAtEnd() const {
assert(!CurrentSCC.empty() || VisitStack.empty());
@ -125,7 +125,7 @@ public:
return CurrentSCC;
}
/// \brief Test if the current SCC has a loop.
/// Test if the current SCC has a loop.
///
/// If the SCC has more than one node, this is trivially true. If not, it may
/// still contain a loop if the node has an edge back to itself.
@ -222,12 +222,12 @@ bool scc_iterator<GraphT, GT>::hasLoop() const {
return false;
}
/// \brief Construct the begin iterator for a deduced graph type T.
/// Construct the begin iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_begin(const T &G) {
return scc_iterator<T>::begin(G);
}
/// \brief Construct the end iterator for a deduced graph type T.
/// Construct the end iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_end(const T &G) {
return scc_iterator<T>::end(G);
}

View File

@ -711,7 +711,7 @@ detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
// Extra additions to <utility>
//===----------------------------------------------------------------------===//
/// \brief Function object to check whether the first component of a std::pair
/// Function object to check whether the first component of a std::pair
/// compares less than the first component of another std::pair.
struct less_first {
template <typename T> bool operator()(const T &lhs, const T &rhs) const {
@ -719,7 +719,7 @@ struct less_first {
}
};
/// \brief Function object to check whether the second component of a std::pair
/// Function object to check whether the second component of a std::pair
/// compares less than the second component of another std::pair.
struct less_second {
template <typename T> bool operator()(const T &lhs, const T &rhs) const {
@ -729,14 +729,14 @@ struct less_second {
// A subset of N3658. More stuff can be added as-needed.
/// \brief Represents a compile-time sequence of integers.
/// Represents a compile-time sequence of integers.
template <class T, T... I> struct integer_sequence {
using value_type = T;
static constexpr size_t size() { return sizeof...(I); }
};
/// \brief Alias for the common case of a sequence of size_ts.
/// Alias for the common case of a sequence of size_ts.
template <size_t... I>
struct index_sequence : integer_sequence<std::size_t, I...> {};
@ -745,7 +745,7 @@ struct build_index_impl : build_index_impl<N - 1, N - 1, I...> {};
template <std::size_t... I>
struct build_index_impl<0, I...> : index_sequence<I...> {};
/// \brief Creates a compile-time integer sequence for a parameter pack.
/// Creates a compile-time integer sequence for a parameter pack.
template <class... Ts>
struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
@ -754,7 +754,7 @@ struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
template <int N> struct rank : rank<N - 1> {};
template <> struct rank<0> {};
/// \brief traits class for checking whether type T is one of any of the given
/// traits class for checking whether type T is one of any of the given
/// types in the variadic list.
template <typename T, typename... Ts> struct is_one_of {
static const bool value = false;
@ -766,7 +766,7 @@ struct is_one_of<T, U, Ts...> {
std::is_same<T, U>::value || is_one_of<T, Ts...>::value;
};
/// \brief traits class for checking whether type T is a base class for all
/// traits class for checking whether type T is a base class for all
/// the given types in the variadic list.
template <typename T, typename... Ts> struct are_base_of {
static const bool value = true;
@ -1005,7 +1005,7 @@ auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range)) {
return std::lower_bound(adl_begin(Range), adl_end(Range), I);
}
/// \brief Given a range of type R, iterate the entire range and return a
/// Given a range of type R, iterate the entire range and return a
/// SmallVector with elements of the vector. This is useful, for example,
/// when you want to iterate a range and then sort the results.
template <unsigned Size, typename R>
@ -1032,7 +1032,7 @@ void erase_if(Container &C, UnaryPredicate P) {
// Implement make_unique according to N3656.
/// \brief Constructs a `new T()` with the given args and returns a
/// Constructs a `new T()` with the given args and returns a
/// `unique_ptr<T>` which owns the object.
///
/// Example:
@ -1045,7 +1045,7 @@ make_unique(Args &&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/// \brief Constructs a `new T[n]` with the given args and returns a
/// Constructs a `new T[n]` with the given args and returns a
/// `unique_ptr<T[]>` which owns the object.
///
/// \param n size of the new array.

View File

@ -31,7 +31,7 @@
namespace llvm {
/// \brief A vector that has set insertion semantics.
/// A vector that has set insertion semantics.
///
/// This adapter class provides a way to keep a set of things that also has the
/// property of a deterministic iteration order. The order of iteration is the
@ -52,10 +52,10 @@ public:
using const_reverse_iterator = typename vector_type::const_reverse_iterator;
using size_type = typename vector_type::size_type;
/// \brief Construct an empty SetVector
/// Construct an empty SetVector
SetVector() = default;
/// \brief Initialize a SetVector with a range of elements
/// Initialize a SetVector with a range of elements
template<typename It>
SetVector(It Start, It End) {
insert(Start, End);
@ -69,75 +69,75 @@ public:
return std::move(vector_);
}
/// \brief Determine if the SetVector is empty or not.
/// Determine if the SetVector is empty or not.
bool empty() const {
return vector_.empty();
}
/// \brief Determine the number of elements in the SetVector.
/// Determine the number of elements in the SetVector.
size_type size() const {
return vector_.size();
}
/// \brief Get an iterator to the beginning of the SetVector.
/// Get an iterator to the beginning of the SetVector.
iterator begin() {
return vector_.begin();
}
/// \brief Get a const_iterator to the beginning of the SetVector.
/// Get a const_iterator to the beginning of the SetVector.
const_iterator begin() const {
return vector_.begin();
}
/// \brief Get an iterator to the end of the SetVector.
/// Get an iterator to the end of the SetVector.
iterator end() {
return vector_.end();
}
/// \brief Get a const_iterator to the end of the SetVector.
/// Get a const_iterator to the end of the SetVector.
const_iterator end() const {
return vector_.end();
}
/// \brief Get an reverse_iterator to the end of the SetVector.
/// Get an reverse_iterator to the end of the SetVector.
reverse_iterator rbegin() {
return vector_.rbegin();
}
/// \brief Get a const_reverse_iterator to the end of the SetVector.
/// Get a const_reverse_iterator to the end of the SetVector.
const_reverse_iterator rbegin() const {
return vector_.rbegin();
}
/// \brief Get a reverse_iterator to the beginning of the SetVector.
/// Get a reverse_iterator to the beginning of the SetVector.
reverse_iterator rend() {
return vector_.rend();
}
/// \brief Get a const_reverse_iterator to the beginning of the SetVector.
/// Get a const_reverse_iterator to the beginning of the SetVector.
const_reverse_iterator rend() const {
return vector_.rend();
}
/// \brief Return the first element of the SetVector.
/// Return the first element of the SetVector.
const T &front() const {
assert(!empty() && "Cannot call front() on empty SetVector!");
return vector_.front();
}
/// \brief Return the last element of the SetVector.
/// Return the last element of the SetVector.
const T &back() const {
assert(!empty() && "Cannot call back() on empty SetVector!");
return vector_.back();
}
/// \brief Index into the SetVector.
/// Index into the SetVector.
const_reference operator[](size_type n) const {
assert(n < vector_.size() && "SetVector access out of range!");
return vector_[n];
}
/// \brief Insert a new element into the SetVector.
/// Insert a new element into the SetVector.
/// \returns true if the element was inserted into the SetVector.
bool insert(const value_type &X) {
bool result = set_.insert(X).second;
@ -146,7 +146,7 @@ public:
return result;
}
/// \brief Insert a range of elements into the SetVector.
/// Insert a range of elements into the SetVector.
template<typename It>
void insert(It Start, It End) {
for (; Start != End; ++Start)
@ -154,7 +154,7 @@ public:
vector_.push_back(*Start);
}
/// \brief Remove an item from the set vector.
/// Remove an item from the set vector.
bool remove(const value_type& X) {
if (set_.erase(X)) {
typename vector_type::iterator I = find(vector_, X);
@ -183,7 +183,7 @@ public:
return vector_.erase(NI);
}
/// \brief Remove items from the set vector based on a predicate function.
/// Remove items from the set vector based on a predicate function.
///
/// This is intended to be equivalent to the following code, if we could
/// write it:
@ -206,19 +206,19 @@ public:
return true;
}
/// Count the number of elements matching \p key in the SetVector.
/// Because elements are unique, the result is 1 when the key is present
/// and 0 when it is not.
size_type count(const key_type &key) const { return set_.count(key); }
/// Remove every element from the SetVector, clearing both the
/// insertion-order vector and the uniquing set. The two members are
/// independent, so the clear order is immaterial.
void clear() {
  vector_.clear();
  set_.clear();
}
/// \brief Remove the last element of the SetVector.
/// Remove the last element of the SetVector.
void pop_back() {
assert(!empty() && "Cannot remove an element from an empty SetVector!");
set_.erase(back());
@ -239,7 +239,7 @@ public:
return vector_ != that.vector_;
}
/// \brief Compute This := This u S, return whether 'This' changed.
/// Compute This := This u S, return whether 'This' changed.
/// TODO: We should be able to use set_union from SetOperations.h, but
/// SetVector interface is inconsistent with DenseSet.
template <class STy>
@ -254,7 +254,7 @@ public:
return Changed;
}
/// \brief Compute This := This - B
/// Compute This := This - B
/// TODO: We should be able to use set_subtract from SetOperations.h, but
/// SetVector interface is inconsistent with DenseSet.
template <class STy>
@ -265,7 +265,7 @@ public:
}
private:
/// \brief A wrapper predicate designed for use with std::remove_if.
/// A wrapper predicate designed for use with std::remove_if.
///
/// This predicate wraps a predicate suitable for use with std::remove_if to
/// call set_.erase(x) on each element which is slated for removal.
@ -292,7 +292,7 @@ private:
vector_type vector_; ///< The vector.
};
/// \brief A SetVector that performs no allocations if smaller than
/// A SetVector that performs no allocations if smaller than
/// a certain size.
template <typename T, unsigned N>
class SmallSetVector
@ -300,7 +300,7 @@ class SmallSetVector
public:
SmallSetVector() = default;
/// \brief Initialize a SmallSetVector with a range of elements
/// Initialize a SmallSetVector with a range of elements
template<typename It>
SmallSetVector(It Start, It End) {
this->insert(Start, End);

View File

@ -335,7 +335,7 @@ struct RoundUpToPowerOfTwo {
enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
};
/// \brief A templated base class for \c SmallPtrSet which provides the
/// A templated base class for \c SmallPtrSet which provides the
/// typesafe interface that is common across all small sizes.
///
/// This is particularly useful for passing around between interface boundaries

View File

@ -169,19 +169,19 @@ protected:
#define STATISTIC(VARNAME, DESC) \
static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC, {0}, {false}}
/// \brief Enable the collection and printing of statistics.
/// Enable the collection and printing of statistics.
void EnableStatistics(bool PrintOnExit = true);
/// \brief Check if statistics are enabled.
/// Check if statistics are enabled.
bool AreStatisticsEnabled();
/// \brief Return a file stream to print our output on.
/// Return a file stream to print our output on.
std::unique_ptr<raw_fd_ostream> CreateInfoOutputFile();
/// \brief Print statistics to the file returned by CreateInfoOutputFile().
/// Print statistics to the file returned by CreateInfoOutputFile().
void PrintStatistics();
/// \brief Print statistics to the given output stream.
/// Print statistics to the given output stream.
void PrintStatistics(raw_ostream &OS);
/// Print statistics in JSON format. This does include all global timers (\see
@ -190,7 +190,7 @@ void PrintStatistics(raw_ostream &OS);
/// PrintStatisticsJSON().
void PrintStatisticsJSON(raw_ostream &OS);
/// \brief Get the statistics. This can be used to look up the value of
/// Get the statistics. This can be used to look up the value of
/// statistics without needing to parse JSON.
///
/// This function does not prevent statistics being updated by other threads
@ -199,7 +199,7 @@ void PrintStatisticsJSON(raw_ostream &OS);
/// completes.
const std::vector<std::pair<StringRef, unsigned>> GetStatistics();
/// \brief Reset the statistics. This can be used to zero and de-register the
/// Reset the statistics. This can be used to zero and de-register the
/// statistics in order to measure a compilation.
///
/// When this function begins to call destructors prior to returning, all

View File

@ -157,7 +157,7 @@ inline std::string fromHex(StringRef Input) {
return Output;
}
/// \brief Convert the string \p S to an integer of the specified type using
/// Convert the string \p S to an integer of the specified type using
/// the radix \p Base. If \p Base is 0, auto-detects the radix.
/// Returns true if the number was successfully converted, false otherwise.
template <typename N> bool to_integer(StringRef S, N &Num, unsigned Base = 0) {

View File

@ -201,7 +201,7 @@ namespace llvm {
LLVM_NODISCARD
int compare_numeric(StringRef RHS) const;
/// \brief Determine the edit distance between this string and another
/// Determine the edit distance between this string and another
/// string.
///
/// \param Other the string to compare this string against.
@ -912,7 +912,7 @@ namespace llvm {
/// @}
/// \brief Compute a hash_code for a StringRef.
/// Compute a hash_code for a StringRef.
LLVM_NODISCARD
hash_code hash_value(StringRef S);

View File

@ -20,7 +20,7 @@
namespace llvm {
/// \brief A switch()-like statement whose cases are string literals.
/// A switch()-like statement whose cases are string literals.
///
/// The StringSwitch class is a simple form of a switch() statement that
/// determines whether the given string matches one of the given string
@ -41,10 +41,10 @@ namespace llvm {
/// \endcode
template<typename T, typename R = T>
class StringSwitch {
/// \brief The string we are matching.
/// The string we are matching.
const StringRef Str;
/// \brief The pointer to the result of this switch statement, once known,
/// The pointer to the result of this switch statement, once known,
/// null before that.
Optional<T> Result;

View File

@ -72,16 +72,16 @@ public:
return Vector[ID - 1];
}
/// \brief Return an iterator to the start of the vector.
/// Return an iterator to the start of the vector.
iterator begin() { return Vector.begin(); }
/// \brief Return an iterator to the start of the vector.
/// Return an iterator to the start of the vector.
const_iterator begin() const { return Vector.begin(); }
/// \brief Return an iterator to the end of the vector.
/// Return an iterator to the end of the vector.
iterator end() { return Vector.end(); }
/// \brief Return an iterator to the end of the vector.
/// Return an iterator to the end of the vector.
const_iterator end() const { return Vector.end(); }
/// size - Returns the number of entries in the vector.

View File

@ -53,7 +53,7 @@ namespace llvm {
#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30
#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31
/// \brief Class which can simulate a type-safe variadic function.
/// Class which can simulate a type-safe variadic function.
///
/// The VariadicFunction class template makes it easy to define
/// type-safe variadic functions where all arguments have the same

View File

@ -22,7 +22,7 @@
namespace llvm {
/// \brief Determine the edit distance between two sequences.
/// Determine the edit distance between two sequences.
///
/// \param FromArray the first sequence to compare.
///

View File

@ -356,26 +356,26 @@ public:
using base_list_type::sort;
/// \brief Get the previous node, or \c nullptr for the list head.
/// Get the previous node, or \c nullptr for the list head.
pointer getPrevNode(reference N) const {
auto I = N.getIterator();
if (I == begin())
return nullptr;
return &*std::prev(I);
}
/// \brief Get the previous node, or \c nullptr for the list head.
/// Get the previous node, or \c nullptr for the list head.
const_pointer getPrevNode(const_reference N) const {
return getPrevNode(const_cast<reference >(N));
}
/// \brief Get the next node, or \c nullptr for the list tail.
/// Get the next node, or \c nullptr for the list tail.
pointer getNextNode(reference N) const {
auto Next = std::next(N.getIterator());
if (Next == end())
return nullptr;
return &*Next;
}
/// \brief Get the next node, or \c nullptr for the list tail.
/// Get the next node, or \c nullptr for the list tail.
const_pointer getNextNode(const_reference N) const {
return getNextNode(const_cast<reference >(N));
}

View File

@ -271,7 +271,7 @@ private:
public:
/// @name Adjacent Node Accessors
/// @{
/// \brief Get the previous node, or \c nullptr for the list head.
/// Get the previous node, or \c nullptr for the list head.
NodeTy *getPrevNode() {
// Should be separated to a reused function, but then we couldn't use auto
// (and would need the type of the list).
@ -280,12 +280,12 @@ public:
return List.getPrevNode(*static_cast<NodeTy *>(this));
}
/// \brief Get the previous node, or \c nullptr for the list head.
/// Get the previous node, or \c nullptr for the list head.
const NodeTy *getPrevNode() const {
return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
}
/// \brief Get the next node, or \c nullptr for the list tail.
/// Get the next node, or \c nullptr for the list tail.
NodeTy *getNextNode() {
// Should be separated to a reused function, but then we couldn't use auto
// (and would need the type of the list).
@ -294,7 +294,7 @@ public:
return List.getNextNode(*static_cast<NodeTy *>(this));
}
/// \brief Get the next node, or \c nullptr for the list tail.
/// Get the next node, or \c nullptr for the list tail.
const NodeTy *getNextNode() const {
return const_cast<ilist_node_with_parent *>(this)->getNextNode();
}

View File

@ -19,7 +19,7 @@
namespace llvm {
/// \brief CRTP base class which implements the entire standard iterator facade
/// CRTP base class which implements the entire standard iterator facade
/// in terms of a minimal subset of the interface.
///
/// Use this when it is reasonable to implement most of the iterator
@ -183,7 +183,7 @@ public:
}
};
/// \brief CRTP base class for adapting an iterator to a different type.
/// CRTP base class for adapting an iterator to a different type.
///
/// This class can be used through CRTP to adapt one iterator into another.
/// Typically this is done through providing in the derived class a custom \c
@ -274,7 +274,7 @@ public:
ReferenceT operator*() const { return *I; }
};
/// \brief An iterator type that allows iterating over the pointees via some
/// An iterator type that allows iterating over the pointees via some
/// other iterator.
///
/// The typical usage of this is to expose a type that iterates over Ts, but

View File

@ -24,7 +24,7 @@
namespace llvm {
/// \brief A range adaptor for a pair of iterators.
/// A range adaptor for a pair of iterators.
///
/// This just wraps two iterators into a range-compatible interface. Nothing
/// fancy at all.
@ -47,7 +47,7 @@ public:
IteratorT end() const { return end_iterator; }
};
/// \brief Convenience function for iterating over sub-ranges.
/// Convenience function for iterating over sub-ranges.
///
/// This provides a bit of syntactic sugar to make using sub-ranges
/// in for loops a bit easier. Analogous to std::make_pair().

View File

@ -659,7 +659,7 @@ public:
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
/// \brief Return information about whether a particular call site modifies
/// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction ordering queries inside the BasicBlock containing \p I.
@ -669,7 +669,7 @@ public:
const MemoryLocation &MemLoc, DominatorTree *DT,
OrderedBasicBlock *OBB = nullptr);
/// \brief A convenience wrapper to synthesize a memory location.
/// A convenience wrapper to synthesize a memory location.
ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
uint64_t Size, DominatorTree *DT,
OrderedBasicBlock *OBB = nullptr) {

View File

@ -56,7 +56,7 @@ public:
}
~AAEvaluator();
/// \brief Run the pass over the function.
/// Run the pass over the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
private:

View File

@ -32,7 +32,7 @@ class Function;
class raw_ostream;
class Value;
/// \brief A cache of \@llvm.assume calls within a function.
/// A cache of \@llvm.assume calls within a function.
///
/// This cache provides fast lookup of assumptions within a function by caching
/// them and amortizing the cost of scanning for them across all queries. Passes
@ -40,12 +40,12 @@ class Value;
/// register any new \@llvm.assume calls that they create. Deletions of
/// \@llvm.assume calls do not require special handling.
class AssumptionCache {
/// \brief The function for which this cache is handling assumptions.
/// The function for which this cache is handling assumptions.
///
/// We track this to lazily populate our assumptions.
Function &F;
/// \brief Vector of weak value handles to calls of the \@llvm.assume
/// Vector of weak value handles to calls of the \@llvm.assume
/// intrinsic.
SmallVector<WeakTrackingVH, 4> AssumeHandles;
@ -64,7 +64,7 @@ class AssumptionCache {
friend AffectedValueCallbackVH;
/// \brief A map of values about which an assumption might be providing
/// A map of values about which an assumption might be providing
/// information to the relevant set of assumptions.
using AffectedValuesMap =
DenseMap<AffectedValueCallbackVH, SmallVector<WeakTrackingVH, 1>,
@ -77,17 +77,17 @@ class AssumptionCache {
/// Copy affected values in the cache for OV to be affected values for NV.
void copyAffectedValuesInCache(Value *OV, Value *NV);
/// \brief Flag tracking whether we have scanned the function yet.
/// Flag tracking whether we have scanned the function yet.
///
/// We want to be as lazy about this as possible, and so we scan the function
/// at the last moment.
bool Scanned = false;
/// \brief Scan the function for assumptions and add them to the cache.
/// Scan the function for assumptions and add them to the cache.
void scanFunction();
public:
/// \brief Construct an AssumptionCache from a function by scanning all of
/// Construct an AssumptionCache from a function by scanning all of
/// its instructions.
AssumptionCache(Function &F) : F(F) {}
@ -98,17 +98,17 @@ public:
return false;
}
/// \brief Add an \@llvm.assume intrinsic to this function's cache.
/// Add an \@llvm.assume intrinsic to this function's cache.
///
/// The call passed in must be an instruction within this function and must
/// not already be in the cache.
void registerAssumption(CallInst *CI);
/// \brief Update the cache of values being affected by this assumption (i.e.
/// Update the cache of values being affected by this assumption (i.e.
/// the values about which this assumption provides information).
void updateAffectedValues(CallInst *CI);
/// \brief Clear the cache of \@llvm.assume intrinsics for a function.
/// Clear the cache of \@llvm.assume intrinsics for a function.
///
/// It will be re-scanned the next time it is requested.
void clear() {
@ -117,7 +117,7 @@ public:
Scanned = false;
}
/// \brief Access the list of assumption handles currently tracked for this
/// Access the list of assumption handles currently tracked for this
/// function.
///
/// Note that these produce weak handles that may be null. The caller must
@ -131,7 +131,7 @@ public:
return AssumeHandles;
}
/// \brief Access the list of assumptions which affect this value.
/// Access the list of assumptions which affect this value.
MutableArrayRef<WeakTrackingVH> assumptionsFor(const Value *V) {
if (!Scanned)
scanFunction();
@ -144,7 +144,7 @@ public:
}
};
/// \brief A function analysis which provides an \c AssumptionCache.
/// A function analysis which provides an \c AssumptionCache.
///
/// This analysis is intended for use with the new pass manager and will vend
/// assumption caches for a given function.
@ -161,7 +161,7 @@ public:
}
};
/// \brief Printer pass for the \c AssumptionAnalysis results.
/// Printer pass for the \c AssumptionAnalysis results.
class AssumptionPrinterPass : public PassInfoMixin<AssumptionPrinterPass> {
raw_ostream &OS;
@ -171,7 +171,7 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief An immutable pass that tracks lazily created \c AssumptionCache
/// An immutable pass that tracks lazily created \c AssumptionCache
/// objects.
///
/// This is essentially a workaround for the legacy pass manager's weaknesses
@ -203,7 +203,7 @@ class AssumptionCacheTracker : public ImmutablePass {
FunctionCallsMap AssumptionCaches;
public:
/// \brief Get the cached assumptions for a function.
/// Get the cached assumptions for a function.
///
/// If no assumptions are cached, this will scan the function. Otherwise, the
/// existing cache will be returned.

View File

@ -173,7 +173,7 @@ private:
const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
uint64_t ObjectAccessSize);
/// \brief A Heuristic for aliasGEP that searches for a constant offset
/// A Heuristic for aliasGEP that searches for a constant offset
/// between the variables.
///
/// GetLinearExpression has some limitations, as generally zext(%x + 1)

View File

@ -65,17 +65,17 @@ public:
/// floating points.
BlockFrequency getBlockFreq(const BasicBlock *BB) const;
/// \brief Returns the estimated profile count of \p BB.
/// Returns the estimated profile count of \p BB.
/// This computes the relative block frequency of \p BB and multiplies it by
/// the enclosing function's count (if available) and returns the value.
Optional<uint64_t> getBlockProfileCount(const BasicBlock *BB) const;
/// \brief Returns the estimated profile count of \p Freq.
/// Returns the estimated profile count of \p Freq.
/// This uses the frequency \p Freq and multiplies it by
/// the enclosing function's count (if available) and returns the value.
Optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;
/// \brief Returns true if \p BB is an irreducible loop header
/// Returns true if \p BB is an irreducible loop header
/// block. Otherwise false.
bool isIrrLoopHeader(const BasicBlock *BB);
@ -105,7 +105,7 @@ public:
void print(raw_ostream &OS) const;
};
/// \brief Analysis pass which computes \c BlockFrequencyInfo.
/// Analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyAnalysis
: public AnalysisInfoMixin<BlockFrequencyAnalysis> {
friend AnalysisInfoMixin<BlockFrequencyAnalysis>;
@ -113,14 +113,14 @@ class BlockFrequencyAnalysis
static AnalysisKey Key;
public:
/// \brief Provide the result type for this analysis pass.
/// Provide the result type for this analysis pass.
using Result = BlockFrequencyInfo;
/// \brief Run the analysis pass over a function and produce BFI.
/// Run the analysis pass over a function and produce BFI.
Result run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c BlockFrequencyInfo results.
/// Printer pass for the \c BlockFrequencyInfo results.
class BlockFrequencyPrinterPass
: public PassInfoMixin<BlockFrequencyPrinterPass> {
raw_ostream &OS;
@ -131,7 +131,7 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Legacy analysis pass which computes \c BlockFrequencyInfo.
/// Legacy analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyInfoWrapperPass : public FunctionPass {
BlockFrequencyInfo BFI;

View File

@ -66,7 +66,7 @@ struct IrreducibleGraph;
// This is part of a workaround for a GCC 4.7 crash on lambdas.
template <class BT> struct BlockEdgesAdder;
/// \brief Mass of a block.
/// Mass of a block.
///
/// This class implements a sort of fixed-point fraction always between 0.0 and
/// 1.0. getMass() == std::numeric_limits<uint64_t>::max() indicates a value of
@ -100,7 +100,7 @@ public:
bool operator!() const { return isEmpty(); }
/// \brief Add another mass.
/// Add another mass.
///
/// Adds another mass, saturating at \a isFull() rather than overflowing.
BlockMass &operator+=(BlockMass X) {
@ -109,7 +109,7 @@ public:
return *this;
}
/// \brief Subtract another mass.
/// Subtract another mass.
///
/// Subtracts another mass, saturating at \a isEmpty() rather than
/// undeflowing.
@ -131,7 +131,7 @@ public:
bool operator<(BlockMass X) const { return Mass < X.Mass; }
bool operator>(BlockMass X) const { return Mass > X.Mass; }
/// \brief Convert to scaled number.
/// Convert to scaled number.
///
/// Convert to \a ScaledNumber. \a isFull() gives 1.0, while \a isEmpty()
/// gives slightly above 0.0.
@ -164,7 +164,7 @@ template <> struct isPodLike<bfi_detail::BlockMass> {
static const bool value = true;
};
/// \brief Base class for BlockFrequencyInfoImpl
/// Base class for BlockFrequencyInfoImpl
///
/// BlockFrequencyInfoImplBase has supporting data structures and some
/// algorithms for BlockFrequencyInfoImplBase. Only algorithms that depend on
@ -177,7 +177,7 @@ public:
using Scaled64 = ScaledNumber<uint64_t>;
using BlockMass = bfi_detail::BlockMass;
/// \brief Representative of a block.
/// Representative of a block.
///
/// This is a simple wrapper around an index into the reverse-post-order
/// traversal of the blocks.
@ -206,13 +206,13 @@ public:
}
};
/// \brief Stats about a block itself.
/// Stats about a block itself.
struct FrequencyData {
Scaled64 Scaled;
uint64_t Integer;
};
/// \brief Data about a loop.
/// Data about a loop.
///
/// Contains the data necessary to represent a loop as a pseudo-node once it's
/// packaged.
@ -270,7 +270,7 @@ public:
}
};
/// \brief Index of loop information.
/// Index of loop information.
struct WorkingData {
BlockNode Node; ///< This node.
LoopData *Loop = nullptr; ///< The loop this block is inside.
@ -293,7 +293,7 @@ public:
return Loop->Parent->Parent;
}
/// \brief Resolve a node to its representative.
/// Resolve a node to its representative.
///
/// Get the node currently representing Node, which could be a containing
/// loop.
@ -320,7 +320,7 @@ public:
return L;
}
/// \brief Get the appropriate mass for a node.
/// Get the appropriate mass for a node.
///
/// Get appropriate mass for Node. If Node is a loop-header (whose loop
/// has been packaged), returns the mass of its pseudo-node. If it's a
@ -333,19 +333,19 @@ public:
return Loop->Parent->Mass;
}
/// \brief Has ContainingLoop been packaged up?
/// Has ContainingLoop been packaged up?
bool isPackaged() const { return getResolvedNode() != Node; }
/// \brief Has Loop been packaged up?
/// Has Loop been packaged up?
bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
/// \brief Has Loop been packaged up twice?
/// Has Loop been packaged up twice?
bool isADoublePackage() const {
return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
}
};
/// \brief Unscaled probability weight.
/// Unscaled probability weight.
///
/// Probability weight for an edge in the graph (including the
/// successor/target node).
@ -369,7 +369,7 @@ public:
: Type(Type), TargetNode(TargetNode), Amount(Amount) {}
};
/// \brief Distribution of unscaled probability weight.
/// Distribution of unscaled probability weight.
///
/// Distribution of unscaled probability weight to a set of successors.
///
@ -398,7 +398,7 @@ public:
add(Node, Amount, Weight::Backedge);
}
/// \brief Normalize the distribution.
/// Normalize the distribution.
///
/// Combines multiple edges to the same \a Weight::TargetNode and scales
/// down so that \a Total fits into 32-bits.
@ -413,26 +413,26 @@ public:
void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
};
/// \brief Data about each block. This is used downstream.
/// Data about each block. This is used downstream.
std::vector<FrequencyData> Freqs;
/// \brief Whether each block is an irreducible loop header.
/// Whether each block is an irreducible loop header.
/// This is used downstream.
SparseBitVector<> IsIrrLoopHeader;
/// \brief Loop data: see initializeLoops().
/// Loop data: see initializeLoops().
std::vector<WorkingData> Working;
/// \brief Indexed information about loops.
/// Indexed information about loops.
std::list<LoopData> Loops;
/// \brief Virtual destructor.
/// Virtual destructor.
///
/// Need a virtual destructor to mask the compiler warning about
/// getBlockName().
virtual ~BlockFrequencyInfoImplBase() = default;
/// \brief Add all edges out of a packaged loop to the distribution.
/// Add all edges out of a packaged loop to the distribution.
///
/// Adds all edges from LocalLoopHead to Dist. Calls addToDist() to add each
/// successor edge.
@ -441,7 +441,7 @@ public:
bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
Distribution &Dist);
/// \brief Add an edge to the distribution.
/// Add an edge to the distribution.
///
/// Adds an edge to Succ to Dist. If \c LoopHead.isValid(), then whether the
/// edge is local/exit/backedge is in the context of LoopHead. Otherwise,
@ -457,7 +457,7 @@ public:
return *Working[Head.Index].Loop;
}
/// \brief Analyze irreducible SCCs.
/// Analyze irreducible SCCs.
///
/// Separate irreducible SCCs from \c G, which is an explict graph of \c
/// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
@ -468,7 +468,7 @@ public:
analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
std::list<LoopData>::iterator Insert);
/// \brief Update a loop after packaging irreducible SCCs inside of it.
/// Update a loop after packaging irreducible SCCs inside of it.
///
/// Update \c OuterLoop. Before finding irreducible control flow, it was
/// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
@ -476,7 +476,7 @@ public:
/// up need to be removed from \a OuterLoop::Nodes.
void updateLoopWithIrreducible(LoopData &OuterLoop);
/// \brief Distribute mass according to a distribution.
/// Distribute mass according to a distribution.
///
/// Distributes the mass in Source according to Dist. If LoopHead.isValid(),
/// backedges and exits are stored in its entry in Loops.
@ -485,7 +485,7 @@ public:
void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
Distribution &Dist);
/// \brief Compute the loop scale for a loop.
/// Compute the loop scale for a loop.
void computeLoopScale(LoopData &Loop);
/// Adjust the mass of all headers in an irreducible loop.
@ -500,19 +500,19 @@ public:
void distributeIrrLoopHeaderMass(Distribution &Dist);
/// \brief Package up a loop.
/// Package up a loop.
void packageLoop(LoopData &Loop);
/// \brief Unwrap loops.
/// Unwrap loops.
void unwrapLoops();
/// \brief Finalize frequency metrics.
/// Finalize frequency metrics.
///
/// Calculates final frequencies and cleans up no-longer-needed data
/// structures.
void finalizeMetrics();
/// \brief Clear all memory.
/// Clear all memory.
void clear();
virtual std::string getBlockName(const BlockNode &Node) const;
@ -560,7 +560,7 @@ template <> struct TypeMap<MachineBasicBlock> {
using LoopInfoT = MachineLoopInfo;
};
/// \brief Get the name of a MachineBasicBlock.
/// Get the name of a MachineBasicBlock.
///
/// Get the name of a MachineBasicBlock. It's templated so that including from
/// CodeGen is unnecessary (that would be a layering issue).
@ -574,13 +574,13 @@ template <class BlockT> std::string getBlockName(const BlockT *BB) {
return (MachineName + "[" + BB->getName() + "]").str();
return MachineName.str();
}
/// \brief Get the name of a BasicBlock.
/// Get the name of a BasicBlock.
template <> inline std::string getBlockName(const BasicBlock *BB) {
assert(BB && "Unexpected nullptr");
return BB->getName().str();
}
/// \brief Graph of irreducible control flow.
/// Graph of irreducible control flow.
///
/// This graph is used for determining the SCCs in a loop (or top-level
/// function) that has irreducible control flow.
@ -619,7 +619,7 @@ struct IrreducibleGraph {
std::vector<IrrNode> Nodes;
SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;
/// \brief Construct an explicit graph containing irreducible control flow.
/// Construct an explicit graph containing irreducible control flow.
///
/// Construct an explicit graph of the control flow in \c OuterLoop (or the
/// top-level function, if \c OuterLoop is \c nullptr). Uses \c
@ -687,7 +687,7 @@ void IrreducibleGraph::addEdges(const BlockNode &Node,
} // end namespace bfi_detail
/// \brief Shared implementation for block frequency analysis.
/// Shared implementation for block frequency analysis.
///
/// This is a shared implementation of BlockFrequencyInfo and
/// MachineBlockFrequencyInfo, and calculates the relative frequencies of
@ -878,12 +878,12 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
return RPOT[Node.Index];
}
/// \brief Run (and save) a post-order traversal.
/// Run (and save) a post-order traversal.
///
/// Saves a reverse post-order traversal of all the nodes in \a F.
void initializeRPOT();
/// \brief Initialize loop data.
/// Initialize loop data.
///
/// Build up \a Loops using \a LoopInfo. \a LoopInfo gives us a mapping from
/// each block to the deepest loop it's in, but we need the inverse. For each
@ -892,7 +892,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// the loop that are not in sub-loops.
void initializeLoops();
/// \brief Propagate to a block's successors.
/// Propagate to a block's successors.
///
/// In the context of distributing mass through \c OuterLoop, divide the mass
/// currently assigned to \c Node between its successors.
@ -900,7 +900,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// \return \c true unless there's an irreducible backedge.
bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
/// \brief Compute mass in a particular loop.
/// Compute mass in a particular loop.
///
/// Assign mass to \c Loop's header, and then for each block in \c Loop in
/// reverse post-order, distribute mass to its successors. Only visits nodes
@ -910,7 +910,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// \return \c true unless there's an irreducible backedge.
bool computeMassInLoop(LoopData &Loop);
/// \brief Try to compute mass in the top-level function.
/// Try to compute mass in the top-level function.
///
/// Assign mass to the entry block, and then for each block in reverse
/// post-order, distribute mass to its successors. Skips nodes that have
@ -920,7 +920,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// \return \c true unless there's an irreducible backedge.
bool tryToComputeMassInFunction();
/// \brief Compute mass in (and package up) irreducible SCCs.
/// Compute mass in (and package up) irreducible SCCs.
///
/// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
/// of \c Insert), and call \a computeMassInLoop() on each of them.
@ -935,7 +935,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
void computeIrreducibleMass(LoopData *OuterLoop,
std::list<LoopData>::iterator Insert);
/// \brief Compute mass in all loops.
/// Compute mass in all loops.
///
/// For each loop bottom-up, call \a computeMassInLoop().
///
@ -946,7 +946,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// \post \a computeMassInLoop() has returned \c true for every loop.
void computeMassInLoops();
/// \brief Compute mass in the top-level function.
/// Compute mass in the top-level function.
///
/// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
/// compute mass in the top-level function.
@ -994,7 +994,7 @@ public:
const BranchProbabilityInfoT &getBPI() const { return *BPI; }
/// \brief Print the frequencies for the current function.
/// Print the frequencies for the current function.
///
/// Prints the frequencies for the blocks in the current function.
///

View File

@ -38,7 +38,7 @@ class raw_ostream;
class TargetLibraryInfo;
class Value;
/// \brief Analysis providing branch probability information.
/// Analysis providing branch probability information.
///
/// This is a function analysis which provides information on the relative
/// probabilities of each "edge" in the function's CFG where such an edge is
@ -79,7 +79,7 @@ public:
void print(raw_ostream &OS) const;
/// \brief Get an edge's probability, relative to other out-edges of the Src.
/// Get an edge's probability, relative to other out-edges of the Src.
///
/// This routine provides access to the fractional probability between zero
/// (0%) and one (100%) of this edge executing, relative to other edges
@ -88,7 +88,7 @@ public:
BranchProbability getEdgeProbability(const BasicBlock *Src,
unsigned IndexInSuccessors) const;
/// \brief Get the probability of going from Src to Dst.
/// Get the probability of going from Src to Dst.
///
/// It returns the sum of all probabilities for edges from Src to Dst.
BranchProbability getEdgeProbability(const BasicBlock *Src,
@ -97,19 +97,19 @@ public:
BranchProbability getEdgeProbability(const BasicBlock *Src,
succ_const_iterator Dst) const;
/// \brief Test if an edge is hot relative to other out-edges of the Src.
/// Test if an edge is hot relative to other out-edges of the Src.
///
/// Check whether this edge out of the source block is 'hot'. We define hot
/// as having a relative probability >= 80%.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
/// \brief Retrieve the hot successor of a block if one exists.
/// Retrieve the hot successor of a block if one exists.
///
/// Given a basic block, look through its successors and if one exists for
/// which \see isEdgeHot would return true, return that successor block.
const BasicBlock *getHotSucc(const BasicBlock *BB) const;
/// \brief Print an edge's probability.
/// Print an edge's probability.
///
/// Retrieves an edge's probability similarly to \see getEdgeProbability, but
/// then prints that probability to the provided stream. That stream is then
@ -117,7 +117,7 @@ public:
raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
const BasicBlock *Dst) const;
/// \brief Set the raw edge probability for the given edge.
/// Set the raw edge probability for the given edge.
///
/// This allows a pass to explicitly set the edge probability for an edge. It
/// can be used when updating the CFG to update and preserve the branch
@ -179,13 +179,13 @@ private:
DenseMap<Edge, BranchProbability> Probs;
/// \brief Track the last function we run over for printing.
/// Track the last function we run over for printing.
const Function *LastF;
/// \brief Track the set of blocks directly succeeded by a returning block.
/// Track the set of blocks directly succeeded by a returning block.
SmallPtrSet<const BasicBlock *, 16> PostDominatedByUnreachable;
/// \brief Track the set of blocks that always lead to a cold call.
/// Track the set of blocks that always lead to a cold call.
SmallPtrSet<const BasicBlock *, 16> PostDominatedByColdCall;
void updatePostDominatedByUnreachable(const BasicBlock *BB);
@ -201,7 +201,7 @@ private:
bool calcInvokeHeuristics(const BasicBlock *BB);
};
/// \brief Analysis pass which computes \c BranchProbabilityInfo.
/// Analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityAnalysis
: public AnalysisInfoMixin<BranchProbabilityAnalysis> {
friend AnalysisInfoMixin<BranchProbabilityAnalysis>;
@ -209,14 +209,14 @@ class BranchProbabilityAnalysis
static AnalysisKey Key;
public:
/// \brief Provide the result type for this analysis pass.
/// Provide the result type for this analysis pass.
using Result = BranchProbabilityInfo;
/// \brief Run the analysis pass over a function and produce BPI.
/// Run the analysis pass over a function and produce BPI.
BranchProbabilityInfo run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c BranchProbabilityAnalysis results.
/// Printer pass for the \c BranchProbabilityAnalysis results.
class BranchProbabilityPrinterPass
: public PassInfoMixin<BranchProbabilityPrinterPass> {
raw_ostream &OS;
@ -227,7 +227,7 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Legacy analysis pass which computes \c BranchProbabilityInfo.
/// Legacy analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityInfoWrapperPass : public FunctionPass {
BranchProbabilityInfo BPI;

View File

@ -49,7 +49,7 @@ unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);
bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
bool AllowIdenticalEdges = false);
/// \brief Determine whether instruction 'To' is reachable from 'From',
/// Determine whether instruction 'To' is reachable from 'From',
/// returning true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
@ -68,7 +68,7 @@ bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
/// \brief Determine whether block 'To' is reachable from 'From', returning
/// Determine whether block 'To' is reachable from 'From', returning
/// true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
@ -78,7 +78,7 @@ bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
/// \brief Determine whether there is at least one path from a block in
/// Determine whether there is at least one path from a block in
/// 'Worklist' to 'StopBB', returning true if uncertain.
///
/// Determine whether there is a path from at least one block in Worklist to
@ -90,7 +90,7 @@ bool isPotentiallyReachableFromMany(SmallVectorImpl<BasicBlock *> &Worklist,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
/// \brief Return true if the control flow in \p RPOTraversal is irreducible.
/// Return true if the control flow in \p RPOTraversal is irreducible.
///
/// This is a generic implementation to detect CFG irreducibility based on loop
/// info analysis. It can be used for any kind of CFG (Loop, MachineLoop,

View File

@ -56,7 +56,7 @@ public:
/// Evict the given function from cache
void evict(const Function *Fn);
/// \brief Get the alias summary for the given function
/// Get the alias summary for the given function
/// Return nullptr if the summary is not found or not available
const cflaa::AliasSummary *getAliasSummary(const Function &);
@ -64,19 +64,19 @@ public:
AliasResult alias(const MemoryLocation &, const MemoryLocation &);
private:
/// \brief Ensures that the given function is available in the cache.
/// Ensures that the given function is available in the cache.
/// Returns the appropriate entry from the cache.
const Optional<FunctionInfo> &ensureCached(const Function &);
/// \brief Inserts the given Function into the cache.
/// Inserts the given Function into the cache.
void scan(const Function &);
/// \brief Build summary for a given function
/// Build summary for a given function
FunctionInfo buildInfoFrom(const Function &);
const TargetLibraryInfo &TLI;
/// \brief Cached mapping of Functions to their StratifiedSets.
/// Cached mapping of Functions to their StratifiedSets.
/// If a function's sets are currently being built, it is marked
/// in the cache as an Optional without a value. This way, if we
/// have any kind of recursion, it is discernable from a function

View File

@ -55,16 +55,16 @@ public:
return false;
}
/// \brief Inserts the given Function into the cache.
/// Inserts the given Function into the cache.
void scan(Function *Fn);
void evict(Function *Fn);
/// \brief Ensures that the given function is available in the cache.
/// Ensures that the given function is available in the cache.
/// Returns the appropriate entry from the cache.
const Optional<FunctionInfo> &ensureCached(Function *Fn);
/// \brief Get the alias summary for the given function
/// Get the alias summary for the given function
/// Return nullptr if the summary is not found or not available
const cflaa::AliasSummary *getAliasSummary(Function &Fn);
@ -92,7 +92,7 @@ public:
private:
const TargetLibraryInfo &TLI;
/// \brief Cached mapping of Functions to their StratifiedSets.
/// Cached mapping of Functions to their StratifiedSets.
/// If a function's sets are currently being built, it is marked
/// in the cache as an Optional without a value. This way, if we
/// have any kind of recursion, it is discernable from a function

View File

@ -119,7 +119,7 @@ extern template class AllAnalysesOn<LazyCallGraph::SCC>;
extern template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
/// \brief The CGSCC analysis manager.
/// The CGSCC analysis manager.
///
/// See the documentation for the AnalysisManager template for detail
/// documentation. This type serves as a convenient way to refer to this
@ -140,7 +140,7 @@ PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
extern template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
LazyCallGraph &, CGSCCUpdateResult &>;
/// \brief The CGSCC pass manager.
/// The CGSCC pass manager.
///
/// See the documentation for the PassManager template for details. It runs
/// a sequence of SCC passes over each SCC that the manager is run over. This
@ -175,10 +175,10 @@ public:
explicit Result(CGSCCAnalysisManager &InnerAM, LazyCallGraph &G)
: InnerAM(&InnerAM), G(&G) {}
/// \brief Accessor for the analysis manager.
/// Accessor for the analysis manager.
CGSCCAnalysisManager &getManager() { return *InnerAM; }
/// \brief Handler for invalidation of the Module.
/// Handler for invalidation of the Module.
///
/// If the proxy analysis itself is preserved, then we assume that the set of
/// SCCs in the Module hasn't changed. Thus any pointers to SCCs in the
@ -302,7 +302,7 @@ struct CGSCCUpdateResult {
&InlinedInternalEdges;
};
/// \brief The core module pass which does a post-order walk of the SCCs and
/// The core module pass which does a post-order walk of the SCCs and
/// runs a CGSCC pass over each one.
///
/// Designed to allow composition of a CGSCCPass(Manager) and
@ -338,7 +338,7 @@ public:
return *this;
}
/// \brief Runs the CGSCC pass across every SCC in the module.
/// Runs the CGSCC pass across every SCC in the module.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
// Setup the CGSCC analysis manager from its proxy.
CGSCCAnalysisManager &CGAM =
@ -494,7 +494,7 @@ private:
CGSCCPassT Pass;
};
/// \brief A function to deduce a function pass type and wrap it in the
/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename CGSCCPassT>
ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>
@ -517,7 +517,7 @@ public:
public:
explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
/// \brief Accessor for the analysis manager.
/// Accessor for the analysis manager.
FunctionAnalysisManager &getManager() { return *FAM; }
bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
@ -552,7 +552,7 @@ LazyCallGraph::SCC &updateCGAndAnalysisManagerForFunctionPass(
LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N,
CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR);
/// \brief Adaptor that maps from a SCC to its functions.
/// Adaptor that maps from a SCC to its functions.
///
/// Designed to allow composition of a FunctionPass(Manager) and
/// a CGSCCPassManager. Note that if this pass is constructed with a pointer
@ -585,7 +585,7 @@ public:
return *this;
}
/// \brief Runs the function pass across every function in the module.
/// Runs the function pass across every function in the module.
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
LazyCallGraph &CG, CGSCCUpdateResult &UR) {
// Setup the function analysis manager from its proxy.
@ -652,7 +652,7 @@ private:
FunctionPassT Pass;
};
/// \brief A function to deduce a function pass type and wrap it in the
/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename FunctionPassT>
CGSCCToFunctionPassAdaptor<FunctionPassT>
@ -824,7 +824,7 @@ private:
int MaxIterations;
};
/// \brief A function to deduce a function pass type and wrap it in the
/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename PassT>
DevirtSCCRepeatedPass<PassT> createDevirtSCCRepeatedPass(PassT Pass,

View File

@ -66,7 +66,7 @@ class CallGraphNode;
class Module;
class raw_ostream;
/// \brief The basic data container for the call graph of a \c Module of IR.
/// The basic data container for the call graph of a \c Module of IR.
///
/// This class exposes both the interface to the call graph for a module of IR.
///
@ -77,25 +77,25 @@ class CallGraph {
using FunctionMapTy =
std::map<const Function *, std::unique_ptr<CallGraphNode>>;
/// \brief A map from \c Function* to \c CallGraphNode*.
/// A map from \c Function* to \c CallGraphNode*.
FunctionMapTy FunctionMap;
/// \brief This node has edges to all external functions and those internal
/// This node has edges to all external functions and those internal
/// functions that have their address taken.
CallGraphNode *ExternalCallingNode;
/// \brief This node has edges to it from all functions making indirect calls
/// This node has edges to it from all functions making indirect calls
/// or calling an external function.
std::unique_ptr<CallGraphNode> CallsExternalNode;
/// \brief Replace the function represented by this node by another.
/// Replace the function represented by this node by another.
///
/// This does not rescan the body of the function, so it is suitable when
/// splicing the body of one function to another while also updating all
/// callers from the old function to the new.
void spliceFunction(const Function *From, const Function *To);
/// \brief Add a function to the call graph, and link the node to all of the
/// Add a function to the call graph, and link the node to all of the
/// functions that it calls.
void addToCallGraph(Function *F);
@ -110,7 +110,7 @@ public:
using iterator = FunctionMapTy::iterator;
using const_iterator = FunctionMapTy::const_iterator;
/// \brief Returns the module the call graph corresponds to.
/// Returns the module the call graph corresponds to.
Module &getModule() const { return M; }
inline iterator begin() { return FunctionMap.begin(); }
@ -118,21 +118,21 @@ public:
inline const_iterator begin() const { return FunctionMap.begin(); }
inline const_iterator end() const { return FunctionMap.end(); }
/// \brief Returns the call graph node for the provided function.
/// Returns the call graph node for the provided function.
inline const CallGraphNode *operator[](const Function *F) const {
const_iterator I = FunctionMap.find(F);
assert(I != FunctionMap.end() && "Function not in callgraph!");
return I->second.get();
}
/// \brief Returns the call graph node for the provided function.
/// Returns the call graph node for the provided function.
inline CallGraphNode *operator[](const Function *F) {
const_iterator I = FunctionMap.find(F);
assert(I != FunctionMap.end() && "Function not in callgraph!");
return I->second.get();
}
/// \brief Returns the \c CallGraphNode which is used to represent
/// Returns the \c CallGraphNode which is used to represent
/// undetermined calls into the callgraph.
CallGraphNode *getExternalCallingNode() const { return ExternalCallingNode; }
@ -145,7 +145,7 @@ public:
// modified.
//
/// \brief Unlink the function from this module, returning it.
/// Unlink the function from this module, returning it.
///
/// Because this removes the function from the module, the call graph node is
/// destroyed. This is only valid if the function does not call any other
@ -153,25 +153,25 @@ public:
/// this is to dropAllReferences before calling this.
Function *removeFunctionFromModule(CallGraphNode *CGN);
/// \brief Similar to operator[], but this will insert a new CallGraphNode for
/// Similar to operator[], but this will insert a new CallGraphNode for
/// \c F if one does not already exist.
CallGraphNode *getOrInsertFunction(const Function *F);
};
/// \brief A node in the call graph for a module.
/// A node in the call graph for a module.
///
/// Typically represents a function in the call graph. There are also special
/// "null" nodes used to represent theoretical entries in the call graph.
class CallGraphNode {
public:
/// \brief A pair of the calling instruction (a call or invoke)
/// A pair of the calling instruction (a call or invoke)
/// and the call graph node being called.
using CallRecord = std::pair<WeakTrackingVH, CallGraphNode *>;
public:
using CalledFunctionsVector = std::vector<CallRecord>;
/// \brief Creates a node for the specified function.
/// Creates a node for the specified function.
inline CallGraphNode(Function *F) : F(F) {}
CallGraphNode(const CallGraphNode &) = delete;
@ -184,7 +184,7 @@ public:
using iterator = std::vector<CallRecord>::iterator;
using const_iterator = std::vector<CallRecord>::const_iterator;
/// \brief Returns the function that this call graph node represents.
/// Returns the function that this call graph node represents.
Function *getFunction() const { return F; }
inline iterator begin() { return CalledFunctions.begin(); }
@ -194,17 +194,17 @@ public:
inline bool empty() const { return CalledFunctions.empty(); }
inline unsigned size() const { return (unsigned)CalledFunctions.size(); }
/// \brief Returns the number of other CallGraphNodes in this CallGraph that
/// Returns the number of other CallGraphNodes in this CallGraph that
/// reference this node in their callee list.
unsigned getNumReferences() const { return NumReferences; }
/// \brief Returns the i'th called function.
/// Returns the i'th called function.
CallGraphNode *operator[](unsigned i) const {
assert(i < CalledFunctions.size() && "Invalid index");
return CalledFunctions[i].second;
}
/// \brief Print out this call graph node.
/// Print out this call graph node.
void dump() const;
void print(raw_ostream &OS) const;
@ -213,7 +213,7 @@ public:
// modified
//
/// \brief Removes all edges from this CallGraphNode to any functions it
/// Removes all edges from this CallGraphNode to any functions it
/// calls.
void removeAllCalledFunctions() {
while (!CalledFunctions.empty()) {
@ -222,14 +222,14 @@ public:
}
}
/// \brief Moves all the callee information from N to this node.
/// Moves all the callee information from N to this node.
void stealCalledFunctionsFrom(CallGraphNode *N) {
assert(CalledFunctions.empty() &&
"Cannot steal callsite information if I already have some");
std::swap(CalledFunctions, N->CalledFunctions);
}
/// \brief Adds a function to the list of functions called by this one.
/// Adds a function to the list of functions called by this one.
void addCalledFunction(CallSite CS, CallGraphNode *M) {
assert(!CS.getInstruction() || !CS.getCalledFunction() ||
!CS.getCalledFunction()->isIntrinsic() ||
@ -244,23 +244,23 @@ public:
CalledFunctions.pop_back();
}
/// \brief Removes the edge in the node for the specified call site.
/// Removes the edge in the node for the specified call site.
///
/// Note that this method takes linear time, so it should be used sparingly.
void removeCallEdgeFor(CallSite CS);
/// \brief Removes all call edges from this node to the specified callee
/// Removes all call edges from this node to the specified callee
/// function.
///
/// This takes more time to execute than removeCallEdgeTo, so it should not
/// be used unless necessary.
void removeAnyCallEdgeTo(CallGraphNode *Callee);
/// \brief Removes one edge associated with a null callsite from this node to
/// Removes one edge associated with a null callsite from this node to
/// the specified callee function.
void removeOneAbstractEdgeTo(CallGraphNode *Callee);
/// \brief Replaces the edge in the node for the specified call site with a
/// Replaces the edge in the node for the specified call site with a
/// new one.
///
/// Note that this method takes linear time, so it should be used sparingly.
@ -273,18 +273,18 @@ private:
std::vector<CallRecord> CalledFunctions;
/// \brief The number of times that this CallGraphNode occurs in the
/// The number of times that this CallGraphNode occurs in the
/// CalledFunctions array of this or other CallGraphNodes.
unsigned NumReferences = 0;
void DropRef() { --NumReferences; }
void AddRef() { ++NumReferences; }
/// \brief A special function that should only be used by the CallGraph class.
/// A special function that should only be used by the CallGraph class.
void allReferencesDropped() { NumReferences = 0; }
};
/// \brief An analysis pass to compute the \c CallGraph for a \c Module.
/// An analysis pass to compute the \c CallGraph for a \c Module.
///
/// This class implements the concept of an analysis pass used by the \c
/// ModuleAnalysisManager to run an analysis over a module and cache the
@ -295,16 +295,16 @@ class CallGraphAnalysis : public AnalysisInfoMixin<CallGraphAnalysis> {
static AnalysisKey Key;
public:
/// \brief A formulaic type to inform clients of the result type.
/// A formulaic type to inform clients of the result type.
using Result = CallGraph;
/// \brief Compute the \c CallGraph for the module \c M.
/// Compute the \c CallGraph for the module \c M.
///
/// The real work here is done in the \c CallGraph constructor.
CallGraph run(Module &M, ModuleAnalysisManager &) { return CallGraph(M); }
};
/// \brief Printer pass for the \c CallGraphAnalysis results.
/// Printer pass for the \c CallGraphAnalysis results.
class CallGraphPrinterPass : public PassInfoMixin<CallGraphPrinterPass> {
raw_ostream &OS;
@ -314,7 +314,7 @@ public:
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
/// \brief The \c ModulePass which wraps up a \c CallGraph and the logic to
/// The \c ModulePass which wraps up a \c CallGraph and the logic to
/// build it.
///
/// This class exposes both the interface to the call graph container and the
@ -330,7 +330,7 @@ public:
CallGraphWrapperPass();
~CallGraphWrapperPass() override;
/// \brief The internal \c CallGraph around which the rest of this interface
/// The internal \c CallGraph around which the rest of this interface
/// is wrapped.
const CallGraph &getCallGraph() const { return *G; }
CallGraph &getCallGraph() { return *G; }
@ -338,7 +338,7 @@ public:
using iterator = CallGraph::iterator;
using const_iterator = CallGraph::const_iterator;
/// \brief Returns the module the call graph corresponds to.
/// Returns the module the call graph corresponds to.
Module &getModule() const { return G->getModule(); }
inline iterator begin() { return G->begin(); }
@ -346,15 +346,15 @@ public:
inline const_iterator begin() const { return G->begin(); }
inline const_iterator end() const { return G->end(); }
/// \brief Returns the call graph node for the provided function.
/// Returns the call graph node for the provided function.
inline const CallGraphNode *operator[](const Function *F) const {
return (*G)[F];
}
/// \brief Returns the call graph node for the provided function.
/// Returns the call graph node for the provided function.
inline CallGraphNode *operator[](const Function *F) { return (*G)[F]; }
/// \brief Returns the \c CallGraphNode which is used to represent
/// Returns the \c CallGraphNode which is used to represent
/// undetermined calls into the callgraph.
CallGraphNode *getExternalCallingNode() const {
return G->getExternalCallingNode();
@ -369,7 +369,7 @@ public:
// modified.
//
/// \brief Unlink the function from this module, returning it.
/// Unlink the function from this module, returning it.
///
/// Because this removes the function from the module, the call graph node is
/// destroyed. This is only valid if the function does not call any other
@ -379,7 +379,7 @@ public:
return G->removeFunctionFromModule(CGN);
}
/// \brief Similar to operator[], but this will insert a new CallGraphNode for
/// Similar to operator[], but this will insert a new CallGraphNode for
/// \c F if one does not already exist.
CallGraphNode *getOrInsertFunction(const Function *F) {
return G->getOrInsertFunction(F);

View File

@ -29,7 +29,7 @@ class DataLayout;
class TargetTransformInfo;
class Value;
/// \brief Check whether a call will lower to something small.
/// Check whether a call will lower to something small.
///
/// This tests checks whether this callsite will lower to something
/// significantly cheaper than a traditional call, often a single
@ -37,64 +37,64 @@ class Value;
/// return true, so will this function.
bool callIsSmall(ImmutableCallSite CS);
/// \brief Utility to calculate the size and a few similar metrics for a set
/// Utility to calculate the size and a few similar metrics for a set
/// of basic blocks.
struct CodeMetrics {
/// \brief True if this function contains a call to setjmp or other functions
/// True if this function contains a call to setjmp or other functions
/// with attribute "returns twice" without having the attribute itself.
bool exposesReturnsTwice = false;
/// \brief True if this function calls itself.
/// True if this function calls itself.
bool isRecursive = false;
/// \brief True if this function cannot be duplicated.
/// True if this function cannot be duplicated.
///
/// True if this function contains one or more indirect branches, or it contains
/// one or more 'noduplicate' instructions.
bool notDuplicatable = false;
/// \brief True if this function contains a call to a convergent function.
/// True if this function contains a call to a convergent function.
bool convergent = false;
/// \brief True if this function calls alloca (in the C sense).
/// True if this function calls alloca (in the C sense).
bool usesDynamicAlloca = false;
/// \brief Number of instructions in the analyzed blocks.
/// Number of instructions in the analyzed blocks.
unsigned NumInsts = false;
/// \brief Number of analyzed blocks.
/// Number of analyzed blocks.
unsigned NumBlocks = false;
/// \brief Keeps track of basic block code size estimates.
/// Keeps track of basic block code size estimates.
DenseMap<const BasicBlock *, unsigned> NumBBInsts;
/// \brief Keep track of the number of calls to 'big' functions.
/// Keep track of the number of calls to 'big' functions.
unsigned NumCalls = false;
/// \brief The number of calls to internal functions with a single caller.
/// The number of calls to internal functions with a single caller.
///
/// These are likely targets for future inlining, likely exposed by
/// interleaved devirtualization.
unsigned NumInlineCandidates = 0;
/// \brief How many instructions produce vector values.
/// How many instructions produce vector values.
///
/// The inliner is more aggressive with inlining vector kernels.
unsigned NumVectorInsts = 0;
/// \brief How many 'ret' instructions the blocks contain.
/// How many 'ret' instructions the blocks contain.
unsigned NumRets = 0;
/// \brief Add information about a block to the current state.
/// Add information about a block to the current state.
void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
const SmallPtrSetImpl<const Value*> &EphValues);
/// \brief Collect a loop's ephemeral values (those used only by an assume
/// Collect a loop's ephemeral values (those used only by an assume
/// or similar intrinsics in the loop).
static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues);
/// \brief Collect a function's ephemeral values (those used only by an
/// Collect a function's ephemeral values (those used only by an
/// assume or similar intrinsics in the function).
static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues);

View File

@ -73,19 +73,19 @@ ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS,
Constant *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// \brief Attempt to constant fold a binary operation with the specified
/// Attempt to constant fold a binary operation with the specified
/// operands. If it fails, it returns a constant expression of the specified
/// operands.
Constant *ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
Constant *RHS, const DataLayout &DL);
/// \brief Attempt to constant fold a select instruction with the specified
/// Attempt to constant fold a select instruction with the specified
/// operands. The constant result is returned if successful; if not, null is
/// returned.
Constant *ConstantFoldSelectInstruction(Constant *Cond, Constant *V1,
Constant *V2);
/// \brief Attempt to constant fold a cast with the specified operand. If it
/// Attempt to constant fold a cast with the specified operand. If it
/// fails, it returns a constant expression of the specified operand.
Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
const DataLayout &DL);
@ -96,25 +96,25 @@ Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
ArrayRef<unsigned> Idxs);
/// \brief Attempt to constant fold an extractvalue instruction with the
/// Attempt to constant fold an extractvalue instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
ArrayRef<unsigned> Idxs);
/// \brief Attempt to constant fold an insertelement instruction with the
/// Attempt to constant fold an insertelement instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldInsertElementInstruction(Constant *Val,
Constant *Elt,
Constant *Idx);
/// \brief Attempt to constant fold an extractelement instruction with the
/// Attempt to constant fold an extractelement instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
/// \brief Attempt to constant fold a shufflevector instruction with the
/// Attempt to constant fold a shufflevector instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
@ -153,7 +153,7 @@ Constant *ConstantFoldCall(ImmutableCallSite CS, Function *F,
Constant *ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
const DataLayout &DL);
/// \brief Check whether the given call has no side-effects.
/// Check whether the given call has no side-effects.
/// Specifically checks for math routines which sometimes set errno.
bool isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI);
}

View File

@ -20,7 +20,7 @@
namespace llvm {
/// \brief Default traits class for extracting a graph from an analysis pass.
/// Default traits class for extracting a graph from an analysis pass.
///
/// This assumes that 'GraphT' is 'AnalysisT *' and so just passes it through.
template <typename AnalysisT, typename GraphT = AnalysisT *>

View File

@ -96,15 +96,15 @@ class DemandedBitsAnalysis : public AnalysisInfoMixin<DemandedBitsAnalysis> {
static AnalysisKey Key;
public:
/// \brief Provide the result type for this analysis pass.
/// Provide the result type for this analysis pass.
using Result = DemandedBits;
/// \brief Run the analysis pass over a function and produce demanded bits
/// Run the analysis pass over a function and produce demanded bits
/// information.
DemandedBits run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for DemandedBits
/// Printer pass for DemandedBits
class DemandedBitsPrinterPass : public PassInfoMixin<DemandedBitsPrinterPass> {
raw_ostream &OS;

View File

@ -914,7 +914,7 @@ template <typename T> class ArrayRef;
SmallVectorImpl<Subscript> &Pair);
}; // class DependenceInfo
/// \brief AnalysisPass to compute dependence information in a function
/// AnalysisPass to compute dependence information in a function
class DependenceAnalysis : public AnalysisInfoMixin<DependenceAnalysis> {
public:
typedef DependenceInfo Result;
@ -925,7 +925,7 @@ template <typename T> class ArrayRef;
friend struct AnalysisInfoMixin<DependenceAnalysis>;
}; // class DependenceAnalysis
/// \brief Legacy pass manager pass to access dependence information
/// Legacy pass manager pass to access dependence information
class DependenceAnalysisWrapperPass : public FunctionPass {
public:
static char ID; // Class identification, replacement for typeinfo

View File

@ -180,7 +180,7 @@ extern template class DominanceFrontierBase<BasicBlock, false>;
extern template class DominanceFrontierBase<BasicBlock, true>;
extern template class ForwardDominanceFrontierBase<BasicBlock>;
/// \brief Analysis pass which computes a \c DominanceFrontier.
/// Analysis pass which computes a \c DominanceFrontier.
class DominanceFrontierAnalysis
: public AnalysisInfoMixin<DominanceFrontierAnalysis> {
friend AnalysisInfoMixin<DominanceFrontierAnalysis>;
@ -188,14 +188,14 @@ class DominanceFrontierAnalysis
static AnalysisKey Key;
public:
/// \brief Provide the result type for this analysis pass.
/// Provide the result type for this analysis pass.
using Result = DominanceFrontier;
/// \brief Run the analysis pass over a function and produce a dominator tree.
/// Run the analysis pass over a function and produce a dominator tree.
DominanceFrontier run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c DominanceFrontier.
/// Printer pass for the \c DominanceFrontier.
class DominanceFrontierPrinterPass
: public PassInfoMixin<DominanceFrontierPrinterPass> {
raw_ostream &OS;

View File

@ -35,7 +35,7 @@ enum class EHPersonality {
Rust
};
/// \brief See if the given exception handling personality function is one
/// See if the given exception handling personality function is one
/// that we understand. If so, return a description of it; otherwise return
/// Unknown.
EHPersonality classifyEHPersonality(const Value *Pers);
@ -44,7 +44,7 @@ StringRef getEHPersonalityName(EHPersonality Pers);
EHPersonality getDefaultEHPersonality(const Triple &T);
/// \brief Returns true if this personality function catches asynchronous
/// Returns true if this personality function catches asynchronous
/// exceptions.
inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
// The two SEH personality functions can catch asynch exceptions. We assume
@ -59,7 +59,7 @@ inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
llvm_unreachable("invalid enum");
}
/// \brief Returns true if this is a personality function that invokes
/// Returns true if this is a personality function that invokes
/// handler funclets (which must return to it).
inline bool isFuncletEHPersonality(EHPersonality Pers) {
switch (Pers) {
@ -74,7 +74,7 @@ inline bool isFuncletEHPersonality(EHPersonality Pers) {
llvm_unreachable("invalid enum");
}
/// \brief Return true if this personality may be safely removed if there
/// Return true if this personality may be safely removed if there
/// are no invoke instructions remaining in the current function.
inline bool isNoOpWithoutInvoke(EHPersonality Pers) {
switch (Pers) {
@ -91,7 +91,7 @@ bool canSimplifyInvokeNoUnwind(const Function *F);
typedef TinyPtrVector<BasicBlock *> ColorVector;
/// \brief If an EH funclet personality is in use (see isFuncletEHPersonality),
/// If an EH funclet personality is in use (see isFuncletEHPersonality),
/// this will recompute which blocks are in which funclet. It is possible that
/// some blocks are in multiple funclets. Consider this analysis to be
/// expensive.

View File

@ -48,7 +48,7 @@ private:
public:
ICallPromotionAnalysis();
/// \brief Returns reference to array of InstrProfValueData for the given
/// Returns reference to array of InstrProfValueData for the given
/// instruction \p I.
///
/// The \p NumVals, \p TotalCount and \p NumCandidates

View File

@ -52,7 +52,7 @@ const int NoreturnPenalty = 10000;
const unsigned TotalAllocaSizeRecursiveCaller = 1024;
}
/// \brief Represents the cost of inlining a function.
/// Represents the cost of inlining a function.
///
/// This supports special values for functions which should "always" or
/// "never" be inlined. Otherwise, the cost represents a unitless amount;
@ -68,10 +68,10 @@ class InlineCost {
NeverInlineCost = INT_MAX
};
/// \brief The estimated cost of inlining this callsite.
/// The estimated cost of inlining this callsite.
const int Cost;
/// \brief The adjusted threshold against which this cost was computed.
/// The adjusted threshold against which this cost was computed.
const int Threshold;
// Trivial constructor, interesting logic in the factory functions below.
@ -90,7 +90,7 @@ public:
return InlineCost(NeverInlineCost, 0);
}
/// \brief Test whether the inline cost is low enough for inlining.
/// Test whether the inline cost is low enough for inlining.
explicit operator bool() const {
return Cost < Threshold;
}
@ -99,20 +99,20 @@ public:
bool isNever() const { return Cost == NeverInlineCost; }
bool isVariable() const { return !isAlways() && !isNever(); }
/// \brief Get the inline cost estimate.
/// Get the inline cost estimate.
/// It is an error to call this on an "always" or "never" InlineCost.
int getCost() const {
assert(isVariable() && "Invalid access of InlineCost");
return Cost;
}
/// \brief Get the threshold against which the cost was computed
/// Get the threshold against which the cost was computed
int getThreshold() const {
assert(isVariable() && "Invalid access of InlineCost");
return Threshold;
}
/// \brief Get the cost delta from the threshold for inlining.
/// Get the cost delta from the threshold for inlining.
/// Only valid if the cost is of the variable kind. Returns a negative
/// value if the cost is too high to inline.
int getCostDelta() const { return Threshold - getCost(); }
@ -178,7 +178,7 @@ InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
/// and the call/return instruction.
int getCallsiteCost(CallSite CS, const DataLayout &DL);
/// \brief Get an InlineCost object representing the cost of inlining this
/// Get an InlineCost object representing the cost of inlining this
/// callsite.
///
/// Note that a default threshold is passed into this function. This threshold
@ -195,7 +195,7 @@ InlineCost getInlineCost(
Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE = nullptr);
/// \brief Get an InlineCost with the callee explicitly specified.
/// Get an InlineCost with the callee explicitly specified.
/// This allows you to calculate the cost of inlining a function via a
/// pointer. This behaves exactly as the version with no explicit callee
/// parameter in all other respects.
@ -207,7 +207,7 @@ getInlineCost(CallSite CS, Function *Callee, const InlineParams &Params,
Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE);
/// \brief Minimal filter to detect invalid constructs for inlining.
/// Minimal filter to detect invalid constructs for inlining.
bool isInlineViable(Function &Callee);
}

View File

@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
/// \brief Compute iterated dominance frontiers using a linear time algorithm.
/// Compute iterated dominance frontiers using a linear time algorithm.
///
/// The algorithm used here is based on:
///
@ -32,7 +32,7 @@
namespace llvm {
/// \brief Determine the iterated dominance frontier, given a set of defining
/// Determine the iterated dominance frontier, given a set of defining
/// blocks, and optionally, a set of live-in blocks.
///
/// In turn, the results can be used to place phi nodes.
@ -48,7 +48,7 @@ class IDFCalculator {
IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT)
: DT(DT), useLiveIn(false) {}
/// \brief Give the IDF calculator the set of blocks in which the value is
/// Give the IDF calculator the set of blocks in which the value is
/// defined. This is equivalent to the set of starting blocks it should be
/// calculating the IDF for (though later gets pruned based on liveness).
///
@ -57,7 +57,7 @@ class IDFCalculator {
DefBlocks = &Blocks;
}
/// \brief Give the IDF calculator the set of blocks in which the value is
/// Give the IDF calculator the set of blocks in which the value is
/// live on entry to the block. This is used to prune the IDF calculation to
/// not include blocks where any phi insertion would be dead.
///
@ -68,14 +68,14 @@ class IDFCalculator {
useLiveIn = true;
}
/// \brief Reset the live-in block set to be empty, and tell the IDF
/// Reset the live-in block set to be empty, and tell the IDF
/// calculator to not use liveness anymore.
void resetLiveInBlocks() {
LiveInBlocks = nullptr;
useLiveIn = false;
}
/// \brief Calculate iterated dominance frontiers
/// Calculate iterated dominance frontiers
///
/// This uses the linear-time phi algorithm based on DJ-graphs mentioned in
/// the file-level comment. It performs DF->IDF pruning using the live-in

View File

@ -75,7 +75,7 @@ private:
const LoopInfoT *LI;
};
/// \brief This is an alternative analysis pass to
/// This is an alternative analysis pass to
/// BlockFrequencyInfoWrapperPass. The difference is that with this pass the
/// block frequencies are not computed when the analysis pass is executed but
/// rather when the BFI result is explicitly requested by the analysis client.
@ -109,10 +109,10 @@ public:
LazyBlockFrequencyInfoPass();
/// \brief Compute and return the block frequencies.
/// Compute and return the block frequencies.
BlockFrequencyInfo &getBFI() { return LBFI.getCalculated(); }
/// \brief Compute and return the block frequencies.
/// Compute and return the block frequencies.
const BlockFrequencyInfo &getBFI() const { return LBFI.getCalculated(); }
void getAnalysisUsage(AnalysisUsage &AU) const override;
@ -126,7 +126,7 @@ public:
void print(raw_ostream &OS, const Module *M) const override;
};
/// \brief Helper for client passes to initialize dependent passes for LBFI.
/// Helper for client passes to initialize dependent passes for LBFI.
void initializeLazyBFIPassPass(PassRegistry &Registry);
}
#endif

View File

@ -26,7 +26,7 @@ class Function;
class LoopInfo;
class TargetLibraryInfo;
/// \brief This is an alternative analysis pass to
/// This is an alternative analysis pass to
/// BranchProbabilityInfoWrapperPass. The difference is that with this pass the
/// branch probabilities are not computed when the analysis pass is executed but
/// rather when the BPI results is explicitly requested by the analysis client.
@ -89,10 +89,10 @@ public:
LazyBranchProbabilityInfoPass();
/// \brief Compute and return the branch probabilities.
/// Compute and return the branch probabilities.
BranchProbabilityInfo &getBPI() { return LBPI->getCalculated(); }
/// \brief Compute and return the branch probabilities.
/// Compute and return the branch probabilities.
const BranchProbabilityInfo &getBPI() const { return LBPI->getCalculated(); }
void getAnalysisUsage(AnalysisUsage &AU) const override;
@ -106,10 +106,10 @@ public:
void print(raw_ostream &OS, const Module *M) const override;
};
/// \brief Helper for client passes to initialize dependent passes for LBPI.
/// Helper for client passes to initialize dependent passes for LBPI.
void initializeLazyBPIPassPass(PassRegistry &Registry);
/// \brief Simple trait class that provides a mapping between BPI passes and the
/// Simple trait class that provides a mapping between BPI passes and the
/// corresponding BPInfo.
template <typename PassT> struct BPIPassTrait {
static PassT &getBPI(PassT *P) { return *P; }

View File

@ -128,7 +128,7 @@ public:
FunctionAnalysisManager::Invalidator &Inv);
};
/// \brief Analysis to compute lazy value information.
/// Analysis to compute lazy value information.
class LazyValueAnalysis : public AnalysisInfoMixin<LazyValueAnalysis> {
public:
typedef LazyValueInfo Result;

View File

@ -38,25 +38,25 @@ class SCEVUnionPredicate;
class LoopAccessInfo;
class OptimizationRemarkEmitter;
/// \brief Collection of parameters shared beetween the Loop Vectorizer and the
/// Collection of parameters shared between the Loop Vectorizer and the
/// Loop Access Analysis.
struct VectorizerParams {
/// \brief Maximum SIMD width.
/// Maximum SIMD width.
static const unsigned MaxVectorWidth;
/// \brief VF as overridden by the user.
/// VF as overridden by the user.
static unsigned VectorizationFactor;
/// \brief Interleave factor as overridden by the user.
/// Interleave factor as overridden by the user.
static unsigned VectorizationInterleave;
/// \brief True if force-vector-interleave was specified by the user.
/// True if force-vector-interleave was specified by the user.
static bool isInterleaveForced();
/// \\brief When performing memory disambiguation checks at runtime do not
/// When performing memory disambiguation checks at runtime do not
/// make more than this number of comparisons.
static unsigned RuntimeMemoryCheckThreshold;
};
/// \brief Checks memory dependences among accesses to the same underlying
/// Checks memory dependences among accesses to the same underlying
/// object to determine whether there vectorization is legal or not (and at
/// which vectorization factor).
///
@ -94,12 +94,12 @@ class MemoryDepChecker {
public:
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
/// \brief Set of potential dependent memory accesses.
/// Set of potential dependent memory accesses.
typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
/// \brief Dependece between memory access instructions.
/// Dependence between memory access instructions.
struct Dependence {
/// \brief The type of the dependence.
/// The type of the dependence.
enum DepType {
// No dependence.
NoDep,
@ -127,36 +127,36 @@ public:
BackwardVectorizableButPreventsForwarding
};
/// \brief String version of the types.
/// String version of the types.
static const char *DepName[];
/// \brief Index of the source of the dependence in the InstMap vector.
/// Index of the source of the dependence in the InstMap vector.
unsigned Source;
/// \brief Index of the destination of the dependence in the InstMap vector.
/// Index of the destination of the dependence in the InstMap vector.
unsigned Destination;
/// \brief The type of the dependence.
/// The type of the dependence.
DepType Type;
Dependence(unsigned Source, unsigned Destination, DepType Type)
: Source(Source), Destination(Destination), Type(Type) {}
/// \brief Return the source instruction of the dependence.
/// Return the source instruction of the dependence.
Instruction *getSource(const LoopAccessInfo &LAI) const;
/// \brief Return the destination instruction of the dependence.
/// Return the destination instruction of the dependence.
Instruction *getDestination(const LoopAccessInfo &LAI) const;
/// \brief Dependence types that don't prevent vectorization.
/// Dependence types that don't prevent vectorization.
static bool isSafeForVectorization(DepType Type);
/// \brief Lexically forward dependence.
/// Lexically forward dependence.
bool isForward() const;
/// \brief Lexically backward dependence.
/// Lexically backward dependence.
bool isBackward() const;
/// \brief May be a lexically backward dependence type (includes Unknown).
/// May be a lexically backward dependence type (includes Unknown).
bool isPossiblyBackward() const;
/// \brief Print the dependence. \p Instr is used to map the instruction
/// Print the dependence. \p Instr is used to map the instruction
/// indices to instructions.
void print(raw_ostream &OS, unsigned Depth,
const SmallVectorImpl<Instruction *> &Instrs) const;
@ -167,7 +167,7 @@ public:
ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
RecordDependences(true) {}
/// \brief Register the location (instructions are given increasing numbers)
/// Register the location (instructions are given increasing numbers)
/// of a write access.
void addAccess(StoreInst *SI) {
Value *Ptr = SI->getPointerOperand();
@ -176,7 +176,7 @@ public:
++AccessIdx;
}
/// \brief Register the location (instructions are given increasing numbers)
/// Register the location (instructions are given increasing numbers)
/// of a write access.
void addAccess(LoadInst *LI) {
Value *Ptr = LI->getPointerOperand();
@ -185,29 +185,29 @@ public:
++AccessIdx;
}
/// \brief Check whether the dependencies between the accesses are safe.
/// Check whether the dependencies between the accesses are safe.
///
/// Only checks sets with elements in \p CheckDeps.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
const ValueToValueMap &Strides);
/// \brief No memory dependence was encountered that would inhibit
/// No memory dependence was encountered that would inhibit
/// vectorization.
bool isSafeForVectorization() const { return SafeForVectorization; }
/// \brief The maximum number of bytes of a vector register we can vectorize
/// The maximum number of bytes of a vector register we can vectorize
/// the accesses safely with.
uint64_t getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }
/// \brief Return the number of elements that are safe to operate on
/// Return the number of elements that are safe to operate on
/// simultaneously, multiplied by the size of the element in bits.
uint64_t getMaxSafeRegisterWidth() const { return MaxSafeRegisterWidth; }
/// \brief In same cases when the dependency check fails we can still
/// In some cases when the dependency check fails we can still
/// vectorize the loop with a dynamic array access check.
bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }
/// \brief Returns the memory dependences. If null is returned we exceeded
/// Returns the memory dependences. If null is returned we exceeded
/// the MaxDependences threshold and this information is not
/// available.
const SmallVectorImpl<Dependence> *getDependences() const {
@ -216,13 +216,13 @@ public:
void clearDependences() { Dependences.clear(); }
/// \brief The vector of memory access instructions. The indices are used as
/// The vector of memory access instructions. The indices are used as
/// instruction identifiers in the Dependence class.
const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
return InstMap;
}
/// \brief Generate a mapping between the memory instructions and their
/// Generate a mapping between the memory instructions and their
/// indices according to program order.
DenseMap<Instruction *, unsigned> generateInstructionOrderMap() const {
DenseMap<Instruction *, unsigned> OrderMap;
@ -233,7 +233,7 @@ public:
return OrderMap;
}
/// \brief Find the set of instructions that read or write via \p Ptr.
/// Find the set of instructions that read or write via \p Ptr.
SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
bool isWrite) const;
@ -247,42 +247,42 @@ private:
PredicatedScalarEvolution &PSE;
const Loop *InnermostLoop;
/// \brief Maps access locations (ptr, read/write) to program order.
/// Maps access locations (ptr, read/write) to program order.
DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;
/// \brief Memory access instructions in program order.
/// Memory access instructions in program order.
SmallVector<Instruction *, 16> InstMap;
/// \brief The program order index to be used for the next instruction.
/// The program order index to be used for the next instruction.
unsigned AccessIdx;
// We can access this many bytes in parallel safely.
uint64_t MaxSafeDepDistBytes;
/// \brief Number of elements (from consecutive iterations) that are safe to
/// Number of elements (from consecutive iterations) that are safe to
/// operate on simultaneously, multiplied by the size of the element in bits.
/// The size of the element is taken from the memory access that is most
/// restrictive.
uint64_t MaxSafeRegisterWidth;
/// \brief If we see a non-constant dependence distance we can still try to
/// If we see a non-constant dependence distance we can still try to
/// vectorize this loop with runtime checks.
bool ShouldRetryWithRuntimeCheck;
/// \brief No memory dependence was encountered that would inhibit
/// No memory dependence was encountered that would inhibit
/// vectorization.
bool SafeForVectorization;
//// \brief True if Dependences reflects the dependences in the
//// True if Dependences reflects the dependences in the
//// loop. If false we exceeded MaxDependences and
//// Dependences is invalid.
bool RecordDependences;
/// \brief Memory dependences collected during the analysis. Only valid if
/// Memory dependences collected during the analysis. Only valid if
/// RecordDependences is true.
SmallVector<Dependence, 8> Dependences;
/// \brief Check whether there is a plausible dependence between the two
/// Check whether there is a plausible dependence between the two
/// accesses.
///
/// Access \p A must happen before \p B in program order. The two indices
@ -298,7 +298,7 @@ private:
const MemAccessInfo &B, unsigned BIdx,
const ValueToValueMap &Strides);
/// \brief Check whether the data dependence could prevent store-load
/// Check whether the data dependence could prevent store-load
/// forwarding.
///
/// \return false if we shouldn't vectorize at all or avoid larger
@ -306,7 +306,7 @@ private:
bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);
};
/// \brief Holds information about the memory runtime legality checks to verify
/// Holds information about the memory runtime legality checks to verify
/// that a group of pointers do not overlap.
class RuntimePointerChecking {
public:
@ -355,13 +355,13 @@ public:
unsigned ASId, const ValueToValueMap &Strides,
PredicatedScalarEvolution &PSE);
/// \brief No run-time memory checking is necessary.
/// No run-time memory checking is necessary.
bool empty() const { return Pointers.empty(); }
/// A grouping of pointers. A single memcheck is required between
/// two groups.
struct CheckingPtrGroup {
/// \brief Create a new pointer checking group containing a single
/// Create a new pointer checking group containing a single
/// pointer, with index \p Index in RtCheck.
CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
: RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
@ -369,7 +369,7 @@ public:
Members.push_back(Index);
}
/// \brief Tries to add the pointer recorded in RtCheck at index
/// Tries to add the pointer recorded in RtCheck at index
/// \p Index to this pointer checking group. We can only add a pointer
/// to a checking group if we will still be able to get
/// the upper and lower bounds of the check. Returns true in case
@ -390,7 +390,7 @@ public:
SmallVector<unsigned, 2> Members;
};
/// \brief A memcheck which made up of a pair of grouped pointers.
/// A memcheck which made up of a pair of grouped pointers.
///
/// These *have* to be const for now, since checks are generated from
/// CheckingPtrGroups in LAI::addRuntimeChecks which is a const member
@ -399,24 +399,24 @@ public:
typedef std::pair<const CheckingPtrGroup *, const CheckingPtrGroup *>
PointerCheck;
/// \brief Generate the checks and store it. This also performs the grouping
/// Generate the checks and store it. This also performs the grouping
/// of pointers to reduce the number of memchecks necessary.
void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
bool UseDependencies);
/// \brief Returns the checks that generateChecks created.
/// Returns the checks that generateChecks created.
const SmallVector<PointerCheck, 4> &getChecks() const { return Checks; }
/// \brief Decide if we need to add a check between two groups of pointers,
/// Decide if we need to add a check between two groups of pointers,
/// according to needsChecking.
bool needsChecking(const CheckingPtrGroup &M,
const CheckingPtrGroup &N) const;
/// \brief Returns the number of run-time checks required according to
/// Returns the number of run-time checks required according to
/// needsChecking.
unsigned getNumberOfChecks() const { return Checks.size(); }
/// \brief Print the list run-time memory checks necessary.
/// Print the list run-time memory checks necessary.
void print(raw_ostream &OS, unsigned Depth = 0) const;
/// Print \p Checks.
@ -432,7 +432,7 @@ public:
/// Holds a partitioning of pointers into "check groups".
SmallVector<CheckingPtrGroup, 2> CheckingGroups;
/// \brief Check if pointers are in the same partition
/// Check if pointers are in the same partition
///
/// \p PtrToPartition contains the partition number for pointers (-1 if the
/// pointer belongs to multiple partitions).
@ -440,17 +440,17 @@ public:
arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
unsigned PtrIdx1, unsigned PtrIdx2);
/// \brief Decide whether we need to issue a run-time check for pointer at
/// Decide whether we need to issue a run-time check for pointer at
/// index \p I and \p J to prove their independence.
bool needsChecking(unsigned I, unsigned J) const;
/// \brief Return PointerInfo for pointer at index \p PtrIdx.
/// Return PointerInfo for pointer at index \p PtrIdx.
const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
return Pointers[PtrIdx];
}
private:
/// \brief Groups pointers such that a single memcheck is required
/// Groups pointers such that a single memcheck is required
/// between two different groups. This will clear the CheckingGroups vector
/// and re-compute it. We will only group dependencies if \p UseDependencies
/// is true, otherwise we will create a separate group for each pointer.
@ -464,12 +464,12 @@ private:
/// Holds a pointer to the ScalarEvolution analysis.
ScalarEvolution *SE;
/// \brief Set of run-time checks required to establish independence of
/// Set of run-time checks required to establish independence of
/// otherwise may-aliasing pointers in the loop.
SmallVector<PointerCheck, 4> Checks;
};
/// \brief Drive the analysis of memory accesses in the loop
/// Drive the analysis of memory accesses in the loop
///
/// This class is responsible for analyzing the memory accesses of a loop. It
/// collects the accesses and then its main helper the AccessAnalysis class
@ -503,7 +503,7 @@ public:
return PtrRtChecking.get();
}
/// \brief Number of memchecks required to prove independence of otherwise
/// Number of memchecks required to prove independence of otherwise
/// may-alias pointers.
unsigned getNumRuntimePointerChecks() const {
return PtrRtChecking->getNumberOfChecks();
@ -521,7 +521,7 @@ public:
unsigned getNumStores() const { return NumStores; }
unsigned getNumLoads() const { return NumLoads;}
/// \brief Add code that checks at runtime if the accessed arrays overlap.
/// Add code that checks at runtime if the accessed arrays overlap.
///
/// Returns a pair of instructions where the first element is the first
/// instruction generated in possibly a sequence of instructions and the
@ -529,7 +529,7 @@ public:
std::pair<Instruction *, Instruction *>
addRuntimeChecks(Instruction *Loc) const;
/// \brief Generete the instructions for the checks in \p PointerChecks.
/// Generate the instructions for the checks in \p PointerChecks.
///
/// Returns a pair of instructions where the first element is the first
/// instruction generated in possibly a sequence of instructions and the
@ -539,32 +539,32 @@ public:
const SmallVectorImpl<RuntimePointerChecking::PointerCheck>
&PointerChecks) const;
/// \brief The diagnostics report generated for the analysis. E.g. why we
/// The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }
/// \brief the Memory Dependence Checker which can determine the
/// the Memory Dependence Checker which can determine the
/// loop-independent and loop-carried dependences between memory accesses.
const MemoryDepChecker &getDepChecker() const { return *DepChecker; }
/// \brief Return the list of instructions that use \p Ptr to read or write
/// Return the list of instructions that use \p Ptr to read or write
/// memory.
SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
bool isWrite) const {
return DepChecker->getInstructionsForAccess(Ptr, isWrite);
}
/// \brief If an access has a symbolic strides, this maps the pointer value to
/// If an access has a symbolic strides, this maps the pointer value to
/// the stride symbol.
const ValueToValueMap &getSymbolicStrides() const { return SymbolicStrides; }
/// \brief Pointer has a symbolic stride.
/// Pointer has a symbolic stride.
bool hasStride(Value *V) const { return StrideSet.count(V); }
/// \brief Print the information about the memory accesses in the loop.
/// Print the information about the memory accesses in the loop.
void print(raw_ostream &OS, unsigned Depth = 0) const;
/// \brief Checks existence of store to invariant address inside loop.
/// Checks existence of store to invariant address inside loop.
/// If the loop has any store to invariant address, then it returns true,
/// else returns false.
bool hasStoreToLoopInvariantAddress() const {
@ -579,15 +579,15 @@ public:
const PredicatedScalarEvolution &getPSE() const { return *PSE; }
private:
/// \brief Analyze the loop.
/// Analyze the loop.
void analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
const TargetLibraryInfo *TLI, DominatorTree *DT);
/// \brief Check if the structure of the loop allows it to be analyzed by this
/// Check if the structure of the loop allows it to be analyzed by this
/// pass.
bool canAnalyzeLoop();
/// \brief Save the analysis remark.
/// Save the analysis remark.
///
/// LAA does not directly emits the remarks. Instead it stores it which the
/// client can retrieve and presents as its own analysis
@ -595,7 +595,7 @@ private:
OptimizationRemarkAnalysis &recordAnalysis(StringRef RemarkName,
Instruction *Instr = nullptr);
/// \brief Collect memory access with loop invariant strides.
/// Collect memory access with loop invariant strides.
///
/// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
/// invariant.
@ -607,7 +607,7 @@ private:
/// at runtime. Using std::unique_ptr to make using move ctor simpler.
std::unique_ptr<RuntimePointerChecking> PtrRtChecking;
/// \brief the Memory Dependence Checker which can determine the
/// the Memory Dependence Checker which can determine the
/// loop-independent and loop-carried dependences between memory accesses.
std::unique_ptr<MemoryDepChecker> DepChecker;
@ -618,28 +618,28 @@ private:
uint64_t MaxSafeDepDistBytes;
/// \brief Cache the result of analyzeLoop.
/// Cache the result of analyzeLoop.
bool CanVecMem;
/// \brief Indicator for storing to uniform addresses.
/// Indicator for storing to uniform addresses.
/// If a loop has write to a loop invariant address then it should be true.
bool StoreToLoopInvariantAddress;
/// \brief The diagnostics report generated for the analysis. E.g. why we
/// The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
std::unique_ptr<OptimizationRemarkAnalysis> Report;
/// \brief If an access has a symbolic strides, this maps the pointer value to
/// If an access has a symbolic strides, this maps the pointer value to
/// the stride symbol.
ValueToValueMap SymbolicStrides;
/// \brief Set of symbolic strides values.
/// Set of symbolic strides values.
SmallPtrSet<Value *, 8> StrideSet;
};
Value *stripIntegerCast(Value *V);
/// \brief Return the SCEV corresponding to a pointer with the symbolic stride
/// Return the SCEV corresponding to a pointer with the symbolic stride
/// replaced with constant one, assuming the SCEV predicate associated with
/// \p PSE is true.
///
@ -653,7 +653,7 @@ const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
const ValueToValueMap &PtrToStride,
Value *Ptr, Value *OrigPtr = nullptr);
/// \brief If the pointer has a constant stride return it in units of its
/// If the pointer has a constant stride return it in units of its
/// element size. Otherwise return zero.
///
/// Ensure that it does not wrap in the address space, assuming the predicate
@ -667,7 +667,7 @@ int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
const ValueToValueMap &StridesMap = ValueToValueMap(),
bool Assume = false, bool ShouldCheckWrap = true);
/// \brief Attempt to sort the pointers in \p VL and return the sorted indices
/// Attempt to sort the pointers in \p VL and return the sorted indices
/// in \p SortedIndices, if reordering is required.
///
/// Returns 'true' if sorting is legal, otherwise returns 'false'.
@ -681,12 +681,12 @@ bool sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
ScalarEvolution &SE,
SmallVectorImpl<unsigned> &SortedIndices);
/// \brief Returns true if the memory operations \p A and \p B are consecutive.
/// Returns true if the memory operations \p A and \p B are consecutive.
/// This is a simple API that does not depend on the analysis pass.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
ScalarEvolution &SE, bool CheckType = true);
/// \brief This analysis provides dependence information for the memory accesses
/// This analysis provides dependence information for the memory accesses
/// of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by
@ -705,7 +705,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override;
/// \brief Query the result of the loop access information for the loop \p L.
/// Query the result of the loop access information for the loop \p L.
///
/// If there is no cached result available run the analysis.
const LoopAccessInfo &getInfo(Loop *L);
@ -715,11 +715,11 @@ public:
LoopAccessInfoMap.clear();
}
/// \brief Print the result of the analysis when invoked with -analyze.
/// Print the result of the analysis when invoked with -analyze.
void print(raw_ostream &OS, const Module *M = nullptr) const override;
private:
/// \brief The cache.
/// The cache.
DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;
// The used analysis passes.
@ -730,7 +730,7 @@ private:
LoopInfo *LI;
};
/// \brief This analysis provides dependence information for the memory
/// This analysis provides dependence information for the memory
/// accesses of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by

View File

@ -69,7 +69,7 @@ extern cl::opt<bool> EnableMSSALoopDependency;
extern template class AllAnalysesOn<Loop>;
extern template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
/// \brief The loop analysis manager.
/// The loop analysis manager.
///
/// See the documentation for the AnalysisManager template for detail
/// documentation. This typedef serves as a convenient way to refer to this

View File

@ -444,7 +444,7 @@ extern template class LoopBase<BasicBlock, Loop>;
/// in the CFG are necessarily loops.
class Loop : public LoopBase<BasicBlock, Loop> {
public:
/// \brief A range representing the start and end location of a loop.
/// A range representing the start and end location of a loop.
class LocRange {
DebugLoc Start;
DebugLoc End;
@ -458,7 +458,7 @@ public:
const DebugLoc &getStart() const { return Start; }
const DebugLoc &getEnd() const { return End; }
/// \brief Check for null.
/// Check for null.
///
explicit operator bool() const { return Start && End; }
};
@ -935,7 +935,7 @@ template <> struct GraphTraits<Loop *> {
static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
/// \brief Analysis pass that exposes the \c LoopInfo for a function.
/// Analysis pass that exposes the \c LoopInfo for a function.
class LoopAnalysis : public AnalysisInfoMixin<LoopAnalysis> {
friend AnalysisInfoMixin<LoopAnalysis>;
static AnalysisKey Key;
@ -946,7 +946,7 @@ public:
LoopInfo run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c LoopAnalysis results.
/// Printer pass for the \c LoopAnalysis results.
class LoopPrinterPass : public PassInfoMixin<LoopPrinterPass> {
raw_ostream &OS;
@ -955,12 +955,12 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Verifier pass for the \c LoopAnalysis results.
/// Verifier pass for the \c LoopAnalysis results.
struct LoopVerifierPass : public PassInfoMixin<LoopVerifierPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief The legacy pass manager's analysis pass to compute loop information.
/// The legacy pass manager's analysis pass to compute loop information.
class LoopInfoWrapperPass : public FunctionPass {
LoopInfo LI;
@ -974,7 +974,7 @@ public:
LoopInfo &getLoopInfo() { return LI; }
const LoopInfo &getLoopInfo() const { return LI; }
/// \brief Calculate the natural loop information for a given function.
/// Calculate the natural loop information for a given function.
bool runOnFunction(Function &F) override;
void verifyAnalysis() const override;

View File

@ -57,7 +57,7 @@ public:
using Base::visit;
private:
/// \brief A cache of pointer bases and constant-folded offsets corresponding
/// A cache of pointer bases and constant-folded offsets corresponding
/// to GEP (or derived from GEP) instructions.
///
/// In order to find the base pointer one needs to perform non-trivial
@ -65,11 +65,11 @@ private:
/// results saved.
DenseMap<Value *, SimplifiedAddress> SimplifiedAddresses;
/// \brief SCEV expression corresponding to number of currently simulated
/// SCEV expression corresponding to number of currently simulated
/// iteration.
const SCEV *IterationNumber;
/// \brief A Value->Constant map for keeping values that we managed to
/// A Value->Constant map for keeping values that we managed to
/// constant-fold on the given iteration.
///
/// While we walk the loop instructions, we build up and maintain a mapping

View File

@ -53,33 +53,33 @@ class Type;
class UndefValue;
class Value;
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a function that returns a
/// Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
@ -170,7 +170,7 @@ struct ObjectSizeOpts {
bool NullIsUnknownSize = false;
};
/// \brief Compute the size of the object pointed by Ptr. Returns true and the
/// Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise. In this context, by
/// object we mean the region of memory starting at Ptr to the end of the
/// underlying object pointed to by Ptr.
@ -189,7 +189,7 @@ ConstantInt *lowerObjectSizeCall(IntrinsicInst *ObjectSize,
using SizeOffsetType = std::pair<APInt, APInt>;
/// \brief Evaluate the size and offset of an object pointed to by a Value*
/// Evaluate the size and offset of an object pointed to by a Value*
/// statically. Fails if size or offset are not known at compile time.
class ObjectSizeOffsetVisitor
: public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
@ -248,7 +248,7 @@ private:
using SizeOffsetEvalType = std::pair<Value *, Value *>;
/// \brief Evaluate the size and offset of an object pointed to by a Value*.
/// Evaluate the size and offset of an object pointed to by a Value*.
/// May create code to compute the result at run-time.
class ObjectSizeOffsetEvaluator
: public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {

View File

@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This file exposes an interface to building/using memory SSA to
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// Memory SSA class builds an SSA form that links together memory access
@ -130,7 +130,7 @@ using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
memoryaccess_def_iterator_base<const MemoryAccess>;
// \brief The base for all memory accesses. All memory accesses in a block are
// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
: public DerivedUser,
@ -159,11 +159,11 @@ public:
void print(raw_ostream &OS) const;
void dump() const;
/// \brief The user iterators for a memory access
/// The user iterators for a memory access
using iterator = user_iterator;
using const_iterator = const_user_iterator;
/// \brief This iterator walks over all of the defs in a given
/// This iterator walks over all of the defs in a given
/// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
/// MemoryUse/MemoryDef, this walks the defining access.
memoryaccess_def_iterator defs_begin();
@ -171,7 +171,7 @@ public:
memoryaccess_def_iterator defs_end();
const_memoryaccess_def_iterator defs_end() const;
/// \brief Get the iterators for the all access list and the defs only list
/// Get the iterators for the all access list and the defs only list
/// We default to the all access list.
AllAccessType::self_iterator getIterator() {
return this->AllAccessType::getIterator();
@ -205,11 +205,11 @@ protected:
friend class MemoryUse;
friend class MemoryUseOrDef;
/// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
/// Used by MemorySSA to change the block of a MemoryAccess when it is
/// moved.
void setBlock(BasicBlock *BB) { Block = BB; }
/// \brief Used for debugging and tracking things about MemoryAccesses.
/// Used for debugging and tracking things about MemoryAccesses.
/// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
inline unsigned getID() const;
@ -235,7 +235,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
return OS;
}
/// \brief Class that has the common methods + fields of memory uses/defs. It's
/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
@ -248,10 +248,10 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
/// \brief Get the instruction that this MemoryUse represents.
/// Get the instruction that this MemoryUse represents.
Instruction *getMemoryInst() const { return MemoryInstruction; }
/// \brief Get the access that produces the memory state used by this Use.
/// Get the access that produces the memory state used by this Use.
MemoryAccess *getDefiningAccess() const { return getOperand(0); }
static bool classof(const Value *MA) {
@ -270,7 +270,7 @@ public:
return OptimizedAccessAlias;
}
/// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
/// Reset the ID of what this MemoryUse was optimized to, causing it to
/// be rewalked by the walker if necessary.
/// This really should only be called by tests.
inline void resetOptimized();
@ -313,7 +313,7 @@ struct OperandTraits<MemoryUseOrDef>
: public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
/// \brief Represents read-only accesses to memory
/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
@ -364,7 +364,7 @@ template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
/// \brief Represents a read-write access to memory, whether it is a must-alias,
/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
@ -424,7 +424,7 @@ template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
/// \brief Represents phi nodes for memory accesses.
/// Represents phi nodes for memory accesses.
///
/// These have the same semantic as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
@ -504,10 +504,10 @@ public:
const_op_range incoming_values() const { return operands(); }
/// \brief Return the number of incoming edges
/// Return the number of incoming edges
unsigned getNumIncomingValues() const { return getNumOperands(); }
/// \brief Return incoming value number x
/// Return incoming value number x
MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
void setIncomingValue(unsigned I, MemoryAccess *V) {
assert(V && "PHI node got a null value!");
@ -517,17 +517,17 @@ public:
static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
/// \brief Return incoming basic block number @p i.
/// Return incoming basic block number @p i.
BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
/// \brief Return incoming basic block corresponding
/// Return incoming basic block corresponding
/// to an operand of the PHI.
BasicBlock *getIncomingBlock(const Use &U) const {
assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
return getIncomingBlock(unsigned(&U - op_begin()));
}
/// \brief Return incoming basic block corresponding
/// Return incoming basic block corresponding
/// to value use iterator.
BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
return getIncomingBlock(I.getUse());
@ -538,7 +538,7 @@ public:
block_begin()[I] = BB;
}
/// \brief Add an incoming value to the end of the PHI list
/// Add an incoming value to the end of the PHI list
void addIncoming(MemoryAccess *V, BasicBlock *BB) {
if (getNumOperands() == ReservedSpace)
growOperands(); // Get more space!
@ -548,7 +548,7 @@ public:
setIncomingBlock(getNumOperands() - 1, BB);
}
/// \brief Return the first index of the specified basic
/// Return the first index of the specified basic
/// block in the value list for this PHI. Returns -1 if no instance.
int getBasicBlockIndex(const BasicBlock *BB) const {
for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
@ -574,7 +574,7 @@ public:
protected:
friend class MemorySSA;
/// \brief this is more complicated than the generic
/// This is more complicated than the generic
/// User::allocHungoffUses, because we have to allocate Uses for the incoming
/// values and pointers to the incoming blocks, all in one allocation.
void allocHungoffUses(unsigned N) {
@ -586,7 +586,7 @@ private:
const unsigned ID;
unsigned ReservedSpace;
/// \brief This grows the operand list in response to a push_back style of
/// This grows the operand list in response to a push_back style of
/// operation. This grows the number of ops by 1.5 times.
void growOperands() {
unsigned E = getNumOperands();
@ -635,7 +635,7 @@ inline void MemoryUseOrDef::resetOptimized() {
template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
/// \brief Encapsulates MemorySSA, including all data associated with memory
/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
@ -644,7 +644,7 @@ public:
MemorySSAWalker *getWalker();
/// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
/// Given a memory Mod/Ref'ing instruction, get the MemorySSA
/// access associated with it. If passed a basic block gets the memory phi
/// node that exists for that block, if there is one. Otherwise, this will get
/// a MemoryUseOrDef.
@ -654,7 +654,7 @@ public:
void dump() const;
void print(raw_ostream &) const;
/// \brief Return true if \p MA represents the live on entry value
/// Return true if \p MA represents the live on entry value
///
/// Loads and stores from pointer arguments and other global values may be
/// defined by memory operations that do not occur in the current function, so
@ -678,14 +678,14 @@ public:
using DefsList =
simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
/// \brief Return the list of MemoryAccess's for a given basic block.
/// Return the list of MemoryAccess's for a given basic block.
///
/// This list is not modifiable by the user.
const AccessList *getBlockAccesses(const BasicBlock *BB) const {
return getWritableBlockAccesses(BB);
}
/// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
/// Return the list of MemoryDef's and MemoryPhi's for a given basic
/// block.
///
/// This list is not modifiable by the user.
@ -693,19 +693,19 @@ public:
return getWritableBlockDefs(BB);
}
/// \brief Given two memory accesses in the same basic block, determine
/// Given two memory accesses in the same basic block, determine
/// whether MemoryAccess \p A dominates MemoryAccess \p B.
bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
/// \brief Given two memory accesses in potentially different blocks,
/// Given two memory accesses in potentially different blocks,
/// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
/// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
/// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
/// dominates Use \p B.
bool dominates(const MemoryAccess *A, const Use &B) const;
/// \brief Verify that MemorySSA is self consistent (IE definitions dominate
/// Verify that MemorySSA is self consistent (IE definitions dominate
/// all uses, uses appear in the right places). This is used by unit tests.
void verifyMemorySSA() const;
@ -859,7 +859,7 @@ public:
Result run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for \c MemorySSA.
/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
raw_ostream &OS;
@ -869,12 +869,12 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Verifier pass for \c MemorySSA.
/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Legacy analysis pass which computes \c MemorySSA.
/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
MemorySSAWrapperPass();
@ -895,7 +895,7 @@ private:
std::unique_ptr<MemorySSA> MSSA;
};
/// \brief This is the generic walker interface for walkers of MemorySSA.
/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
@ -913,7 +913,7 @@ public:
using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
/// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
/// Given a memory Mod/Ref/ModRef'ing instruction, calling this
/// will give you the nearest dominating MemoryAccess that Mod's the location
/// the instruction accesses (by skipping any def which AA can prove does not
/// alias the location(s) accessed by the instruction given).
@ -945,7 +945,7 @@ public:
/// but takes a MemoryAccess instead of an Instruction.
virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
/// \brief Given a potentially clobbering memory access and a new location,
/// Given a potentially clobbering memory access and a new location,
/// calling this will give you the nearest dominating clobbering MemoryAccess
/// (by skipping non-aliasing def links).
///
@ -959,7 +959,7 @@ public:
virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
const MemoryLocation &) = 0;
/// \brief Given a memory access, invalidate anything this walker knows about
/// Given a memory access, invalidate anything this walker knows about
/// that access.
/// This API is used by walkers that store information to perform basic cache
/// invalidation. This will be called by MemorySSA at appropriate times for
@ -974,7 +974,7 @@ protected:
MemorySSA *MSSA;
};
/// \brief A MemorySSAWalker that does no alias queries, or anything else. It
/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
@ -990,7 +990,7 @@ public:
using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
/// \brief Iterator base class used to implement const and non-const iterators
/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
@ -1063,7 +1063,7 @@ inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
return const_memoryaccess_def_iterator();
}
/// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
using NodeRef = MemoryAccess *;
@ -1083,7 +1083,7 @@ template <> struct GraphTraits<Inverse<MemoryAccess *>> {
static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};
/// \brief Provide an iterator that walks defs, giving both the memory access,
/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///

View File

@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
//
// \file
// \brief An automatic updater for MemorySSA that handles arbitrary insertion,
// An automatic updater for MemorySSA that handles arbitrary insertion,
// deletion, and moves. It performs phi insertion where necessary, and
// automatically updates the MemorySSA IR to be correct.
// While updating loads or removing instructions is often easy enough to not
@ -96,7 +96,7 @@ public:
// the edge cases right, and the above calls already operate in near-optimal
// time bounds.
/// \brief Create a MemoryAccess in MemorySSA at a specified point in a block,
/// Create a MemoryAccess in MemorySSA at a specified point in a block,
/// with a specified clobbering definition.
///
/// Returns the new MemoryAccess.
@ -113,7 +113,7 @@ public:
const BasicBlock *BB,
MemorySSA::InsertionPlace Point);
/// \brief Create a MemoryAccess in MemorySSA before or after an existing
/// Create a MemoryAccess in MemorySSA before or after an existing
/// MemoryAccess.
///
/// Returns the new MemoryAccess.
@ -130,7 +130,7 @@ public:
MemoryAccess *Definition,
MemoryAccess *InsertPt);
/// \brief Remove a MemoryAccess from MemorySSA, including updating all
/// Remove a MemoryAccess from MemorySSA, including updating all
/// definitions and uses.
/// This should be called when a memory instruction that has a MemoryAccess
/// associated with it is erased from the program. For example, if a store or

View File

@ -30,7 +30,7 @@ class Instruction;
class DominatorTree;
class Loop;
/// \brief Captures loop safety information.
/// Captures loop safety information.
/// It keeps information about whether the loop or its header may throw an
/// exception or otherwise exit abnormally on any iteration of the loop which
/// might actually execute at runtime. The primary way to consume this information is via
@ -46,7 +46,7 @@ struct LoopSafetyInfo {
LoopSafetyInfo() = default;
};
/// \brief Computes safety information for a loop checks loop body & header for
/// Computes safety information for a loop: checks the loop body & header for
/// the possibility of a thrown exception; it takes LoopSafetyInfo and a loop as
/// arguments. Updates the safety information in the LoopSafetyInfo argument.
/// Note: This is defined to clear and reinitialize an already initialized

View File

@ -29,7 +29,7 @@
namespace llvm {
namespace objcarc {
/// \brief This is a simple alias analysis implementation that uses knowledge
/// This is a simple alias analysis implementation that uses knowledge
/// of ARC constructs to answer queries.
///
/// TODO: This class could be generalized to know about other ObjC-specific

View File

@ -43,10 +43,10 @@ class raw_ostream;
namespace llvm {
namespace objcarc {
/// \brief A handy option to enable/disable all ARC Optimizations.
/// A handy option to enable/disable all ARC Optimizations.
extern bool EnableARCOpts;
/// \brief Test if the given module looks interesting to run ARC optimization
/// Test if the given module looks interesting to run ARC optimization
/// on.
inline bool ModuleHasARC(const Module &M) {
return
@ -71,7 +71,7 @@ inline bool ModuleHasARC(const Module &M) {
M.getNamedValue("clang.arc.use");
}
/// \brief This is a wrapper around getUnderlyingObject which also knows how to
/// This is a wrapper around getUnderlyingObject which also knows how to
/// look through objc_retain and objc_autorelease calls, which we know to return
/// their argument verbatim.
inline const Value *GetUnderlyingObjCPtr(const Value *V,
@ -129,7 +129,7 @@ inline Value *GetRCIdentityRoot(Value *V) {
return const_cast<Value *>(GetRCIdentityRoot((const Value *)V));
}
/// \brief Assuming the given instruction is one of the special calls such as
/// Assuming the given instruction is one of the special calls such as
/// objc_retain or objc_release, return the RCIdentity root of the argument of
/// the call.
inline Value *GetArgRCIdentityRoot(Value *Inst) {
@ -146,7 +146,7 @@ inline bool IsNoopInstruction(const Instruction *I) {
cast<GetElementPtrInst>(I)->hasAllZeroIndices());
}
/// \brief Test whether the given value is possible a retainable object pointer.
/// Test whether the given value is possibly a retainable object pointer.
inline bool IsPotentialRetainableObjPtr(const Value *Op) {
// Pointers to static or stack storage are not valid retainable object
// pointers.
@ -191,7 +191,7 @@ inline bool IsPotentialRetainableObjPtr(const Value *Op,
return true;
}
/// \brief Helper for GetARCInstKind. Determines what kind of construct CS
/// Helper for GetARCInstKind. Determines what kind of construct CS
/// is.
inline ARCInstKind GetCallSiteClass(ImmutableCallSite CS) {
for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
@ -202,7 +202,7 @@ inline ARCInstKind GetCallSiteClass(ImmutableCallSite CS) {
return CS.onlyReadsMemory() ? ARCInstKind::None : ARCInstKind::Call;
}
/// \brief Return true if this value refers to a distinct and identifiable
/// Return true if this value refers to a distinct and identifiable
/// object.
///
/// This is similar to AliasAnalysis's isIdentifiedObject, except that it uses

View File

@ -18,7 +18,7 @@ namespace objcarc {
/// \enum ARCInstKind
///
/// \brief Equivalence classes of instructions in the ARC Model.
/// Equivalence classes of instructions in the ARC Model.
///
/// Since we do not have "instructions" to represent ARC concepts in LLVM IR,
/// we instead operate on equivalence classes of instructions.
@ -57,32 +57,32 @@ enum class ARCInstKind {
raw_ostream &operator<<(raw_ostream &OS, const ARCInstKind Class);
/// \brief Test if the given class is a kind of user.
/// Test if the given class is a kind of user.
bool IsUser(ARCInstKind Class);
/// \brief Test if the given class is objc_retain or equivalent.
/// Test if the given class is objc_retain or equivalent.
bool IsRetain(ARCInstKind Class);
/// \brief Test if the given class is objc_autorelease or equivalent.
/// Test if the given class is objc_autorelease or equivalent.
bool IsAutorelease(ARCInstKind Class);
/// \brief Test if the given class represents instructions which return their
/// Test if the given class represents instructions which return their
/// argument verbatim.
bool IsForwarding(ARCInstKind Class);
/// \brief Test if the given class represents instructions which do nothing if
/// Test if the given class represents instructions which do nothing if
/// passed a null pointer.
bool IsNoopOnNull(ARCInstKind Class);
/// \brief Test if the given class represents instructions which are always safe
/// Test if the given class represents instructions which are always safe
/// to mark with the "tail" keyword.
bool IsAlwaysTail(ARCInstKind Class);
/// \brief Test if the given class represents instructions which are never safe
/// Test if the given class represents instructions which are never safe
/// to mark with the "tail" keyword.
bool IsNeverTail(ARCInstKind Class);
/// \brief Test if the given class represents instructions which are always safe
/// Test if the given class represents instructions which are always safe
/// to mark with the nounwind attribute.
bool IsNoThrow(ARCInstKind Class);
@ -90,11 +90,11 @@ bool IsNoThrow(ARCInstKind Class);
/// autoreleasepool pop.
bool CanInterruptRV(ARCInstKind Class);
/// \brief Determine if F is one of the special known Functions. If it isn't,
/// Determine if F is one of the special known Functions. If it isn't,
/// return ARCInstKind::CallOrUser.
ARCInstKind GetFunctionClass(const Function *F);
/// \brief Determine which objc runtime call instruction class V belongs to.
/// Determine which objc runtime call instruction class V belongs to.
///
/// This is similar to GetARCInstKind except that it only detects objc
/// runtime calls. This allows it to be faster.

View File

@ -40,7 +40,7 @@ public:
OptimizationRemarkEmitter(const Function *F, BlockFrequencyInfo *BFI)
: F(F), BFI(BFI) {}
/// \brief This variant can be used to generate ORE on demand (without the
/// This variant can be used to generate ORE on demand (without the
/// analysis pass).
///
/// Note that this ctor has a very different cost depending on whether
@ -66,11 +66,11 @@ public:
bool invalidate(Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv);
/// \brief Output the remark via the diagnostic handler and to the
/// Output the remark via the diagnostic handler and to the
/// optimization record file.
void emit(DiagnosticInfoOptimizationBase &OptDiag);
/// \brief Take a lambda that returns a remark which will be emitted. Second
/// Take a lambda that returns a remark which will be emitted. Second
/// argument is only used to restrict this to functions.
template <typename T>
void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
@ -85,7 +85,7 @@ public:
}
}
/// \brief Whether we allow for extra compile-time budget to perform more
/// Whether we allow for extra compile-time budget to perform more
/// analysis to produce fewer false positives.
///
/// This is useful when reporting missed optimizations. In this case we can
@ -112,7 +112,7 @@ private:
/// Similar but use value from \p OptDiag and update hotness there.
void computeHotness(DiagnosticInfoIROptimization &OptDiag);
/// \brief Only allow verbose messages if we know we're filtering by hotness
/// Only allow verbose messages if we know we're filtering by hotness
/// (BFI is only set in this case).
bool shouldEmitVerbose() { return BFI != nullptr; }
@ -120,7 +120,7 @@ private:
void operator=(const OptimizationRemarkEmitter &) = delete;
};
/// \brief Add a small namespace to avoid name clashes with the classes used in
/// Add a small namespace to avoid name clashes with the classes used in
/// the streaming interface. We want these to be short for better
/// write/readability.
namespace ore {
@ -158,10 +158,10 @@ class OptimizationRemarkEmitterAnalysis
static AnalysisKey Key;
public:
/// \brief Provide the result typedef for this analysis pass.
/// Provide the result typedef for this analysis pass.
typedef OptimizationRemarkEmitter Result;
/// \brief Run the analysis pass over a function and produce BFI.
/// Run the analysis pass over a function and produce BFI.
Result run(Function &F, FunctionAnalysisManager &AM);
};
}

View File

@ -33,28 +33,28 @@ class BasicBlock;
class OrderedBasicBlock {
private:
/// \brief Map a instruction to its position in a BasicBlock.
/// Map an instruction to its position in a BasicBlock.
SmallDenseMap<const Instruction *, unsigned, 32> NumberedInsts;
/// \brief Keep track of last instruction inserted into \p NumberedInsts.
/// Keep track of last instruction inserted into \p NumberedInsts.
/// It speeds up queries for uncached instructions by providing a start point
/// for new queries in OrderedBasicBlock::comesBefore.
BasicBlock::const_iterator LastInstFound;
/// \brief The position/number to tag the next instruction to be found.
/// The position/number to tag the next instruction to be found.
unsigned NextInstPos;
/// \brief The source BasicBlock to map.
/// The source BasicBlock to map.
const BasicBlock *BB;
/// \brief Given no cached results, find if \p A comes before \p B in \p BB.
/// Given no cached results, find if \p A comes before \p B in \p BB.
/// Cache and number each instruction while walking \p BB.
bool comesBefore(const Instruction *A, const Instruction *B);
public:
OrderedBasicBlock(const BasicBlock *BasicB);
/// \brief Find out whether \p A dominates \p B, meaning whether \p A
/// Find out whether \p A dominates \p B, meaning whether \p A
/// comes before \p B in \p BB. This is a simplification that considers
/// cached instruction positions and ignores other basic blocks, being
/// only relevant to compare relative instructions positions inside \p BB.

View File

@ -35,7 +35,7 @@ public:
FunctionAnalysisManager::Invalidator &);
};
/// \brief Analysis pass which computes a \c PostDominatorTree.
/// Analysis pass which computes a \c PostDominatorTree.
class PostDominatorTreeAnalysis
: public AnalysisInfoMixin<PostDominatorTreeAnalysis> {
friend AnalysisInfoMixin<PostDominatorTreeAnalysis>;
@ -43,15 +43,15 @@ class PostDominatorTreeAnalysis
static AnalysisKey Key;
public:
/// \brief Provide the result type for this analysis pass.
/// Provide the result type for this analysis pass.
using Result = PostDominatorTree;
/// \brief Run the analysis pass over a function and produce a post dominator
/// Run the analysis pass over a function and produce a post dominator
/// tree.
PostDominatorTree run(Function &F, FunctionAnalysisManager &);
};
/// \brief Printer pass for the \c PostDominatorTree.
/// Printer pass for the \c PostDominatorTree.
class PostDominatorTreePrinterPass
: public PassInfoMixin<PostDominatorTreePrinterPass> {
raw_ostream &OS;

View File

@ -31,7 +31,7 @@ class BasicBlock;
class BlockFrequencyInfo;
class CallSite;
class ProfileSummary;
/// \brief Analysis providing profile information.
/// Analysis providing profile information.
///
/// This is an immutable analysis pass that provides ability to query global
/// (program-level) profile information. The main APIs are isHotCount and
@ -59,16 +59,16 @@ public:
ProfileSummaryInfo(ProfileSummaryInfo &&Arg)
: M(Arg.M), Summary(std::move(Arg.Summary)) {}
/// \brief Returns true if profile summary is available.
/// Returns true if profile summary is available.
bool hasProfileSummary() { return computeSummary(); }
/// \brief Returns true if module \c M has sample profile.
/// Returns true if module \c M has sample profile.
bool hasSampleProfile() {
return hasProfileSummary() &&
Summary->getKind() == ProfileSummary::PSK_Sample;
}
/// \brief Returns true if module \c M has instrumentation profile.
/// Returns true if module \c M has instrumentation profile.
bool hasInstrumentationProfile() {
return hasProfileSummary() &&
Summary->getKind() == ProfileSummary::PSK_Instr;
@ -90,31 +90,31 @@ public:
BlockFrequencyInfo *BFI);
/// Returns true if the working set size of the code is considered huge.
bool hasHugeWorkingSetSize();
/// \brief Returns true if \p F has hot function entry.
/// Returns true if \p F has hot function entry.
bool isFunctionEntryHot(const Function *F);
/// Returns true if \p F contains hot code.
bool isFunctionHotInCallGraph(const Function *F, BlockFrequencyInfo &BFI);
/// \brief Returns true if \p F has cold function entry.
/// Returns true if \p F has cold function entry.
bool isFunctionEntryCold(const Function *F);
/// Returns true if \p F contains only cold code.
bool isFunctionColdInCallGraph(const Function *F, BlockFrequencyInfo &BFI);
/// \brief Returns true if \p F is a hot function.
/// Returns true if \p F is a hot function.
bool isHotCount(uint64_t C);
/// \brief Returns true if count \p C is considered cold.
/// Returns true if count \p C is considered cold.
bool isColdCount(uint64_t C);
/// \brief Returns true if BasicBlock \p B is considered hot.
/// Returns true if BasicBlock \p B is considered hot.
bool isHotBB(const BasicBlock *B, BlockFrequencyInfo *BFI);
/// \brief Returns true if BasicBlock \p B is considered cold.
/// Returns true if BasicBlock \p B is considered cold.
bool isColdBB(const BasicBlock *B, BlockFrequencyInfo *BFI);
/// \brief Returns true if CallSite \p CS is considered hot.
/// Returns true if CallSite \p CS is considered hot.
bool isHotCallSite(const CallSite &CS, BlockFrequencyInfo *BFI);
/// \brief Returns true if Callsite \p CS is considered cold.
/// Returns true if Callsite \p CS is considered cold.
bool isColdCallSite(const CallSite &CS, BlockFrequencyInfo *BFI);
/// \brief Returns HotCountThreshold if set.
/// Returns HotCountThreshold if set.
uint64_t getHotCountThreshold() {
return HotCountThreshold ? HotCountThreshold.getValue() : 0;
}
/// \brief Returns ColdCountThreshold if set.
/// Returns ColdCountThreshold if set.
uint64_t getColdCountThreshold() {
return ColdCountThreshold ? ColdCountThreshold.getValue() : 0;
}
@ -152,7 +152,7 @@ private:
static AnalysisKey Key;
};
/// \brief Printer pass that uses \c ProfileSummaryAnalysis.
/// Printer pass that uses \c ProfileSummaryAnalysis.
class ProfileSummaryPrinterPass
: public PassInfoMixin<ProfileSummaryPrinterPass> {
raw_ostream &OS;

View File

@ -47,7 +47,7 @@ namespace llvm {
namespace detail {
/// \brief Implementation of non-dependent functionality for \c PtrUseVisitor.
/// Implementation of non-dependent functionality for \c PtrUseVisitor.
///
/// See \c PtrUseVisitor for the public interface and detailed comments about
/// usage. This class is just a helper base class which is not templated and
@ -55,7 +55,7 @@ namespace detail {
/// PtrUseVisitor.
class PtrUseVisitorBase {
public:
/// \brief This class provides information about the result of a visit.
/// This class provides information about the result of a visit.
///
/// After walking all the users (recursively) of a pointer, the basic
/// infrastructure records some commonly useful information such as escape
@ -64,7 +64,7 @@ public:
public:
PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}
/// \brief Reset the pointer info, clearing all state.
/// Reset the pointer info, clearing all state.
void reset() {
AbortedInfo.setPointer(nullptr);
AbortedInfo.setInt(false);
@ -72,37 +72,37 @@ public:
EscapedInfo.setInt(false);
}
/// \brief Did we abort the visit early?
/// Did we abort the visit early?
bool isAborted() const { return AbortedInfo.getInt(); }
/// \brief Is the pointer escaped at some point?
/// Is the pointer escaped at some point?
bool isEscaped() const { return EscapedInfo.getInt(); }
/// \brief Get the instruction causing the visit to abort.
/// Get the instruction causing the visit to abort.
/// \returns a pointer to the instruction causing the abort if one is
/// available; otherwise returns null.
Instruction *getAbortingInst() const { return AbortedInfo.getPointer(); }
/// \brief Get the instruction causing the pointer to escape.
/// Get the instruction causing the pointer to escape.
/// \returns a pointer to the instruction which escapes the pointer if one
/// is available; otherwise returns null.
Instruction *getEscapingInst() const { return EscapedInfo.getPointer(); }
/// \brief Mark the visit as aborted. Intended for use in a void return.
/// Mark the visit as aborted. Intended for use in a void return.
/// \param I The instruction which caused the visit to abort, if available.
void setAborted(Instruction *I = nullptr) {
AbortedInfo.setInt(true);
AbortedInfo.setPointer(I);
}
/// \brief Mark the pointer as escaped. Intended for use in a void return.
/// Mark the pointer as escaped. Intended for use in a void return.
/// \param I The instruction which escapes the pointer, if available.
void setEscaped(Instruction *I = nullptr) {
EscapedInfo.setInt(true);
EscapedInfo.setPointer(I);
}
/// \brief Mark the pointer as escaped, and the visit as aborted. Intended
/// Mark the pointer as escaped, and the visit as aborted. Intended
/// for use in a void return.
/// \param I The instruction which both escapes the pointer and aborts the
/// visit, if available.
@ -121,10 +121,10 @@ protected:
/// \name Visitation infrastructure
/// @{
/// \brief The info collected about the pointer being visited thus far.
/// The info collected about the pointer being visited thus far.
PtrInfo PI;
/// \brief A struct of the data needed to visit a particular use.
/// A struct of the data needed to visit a particular use.
///
/// This is used to maintain a worklist of to-visit uses. This is used to
/// make the visit be iterative rather than recursive.
@ -135,10 +135,10 @@ protected:
APInt Offset;
};
/// \brief The worklist of to-visit uses.
/// The worklist of to-visit uses.
SmallVector<UseToVisit, 8> Worklist;
/// \brief A set of visited uses to break cycles in unreachable code.
/// A set of visited uses to break cycles in unreachable code.
SmallPtrSet<Use *, 8> VisitedUses;
/// @}
@ -147,14 +147,14 @@ protected:
/// This state is reset for each instruction visited.
/// @{
/// \brief The use currently being visited.
/// The use currently being visited.
Use *U;
/// \brief True if we have a known constant offset for the use currently
/// True if we have a known constant offset for the use currently
/// being visited.
bool IsOffsetKnown;
/// \brief The constant offset of the use if that is known.
/// The constant offset of the use if that is known.
APInt Offset;
/// @}
@ -163,13 +163,13 @@ protected:
/// class, we can't create instances directly of this class.
PtrUseVisitorBase(const DataLayout &DL) : DL(DL) {}
/// \brief Enqueue the users of this instruction in the visit worklist.
/// Enqueue the users of this instruction in the visit worklist.
///
/// This will visit the users with the same offset of the current visit
/// (including an unknown offset if that is the current state).
void enqueueUsers(Instruction &I);
/// \brief Walk the operands of a GEP and adjust the offset as appropriate.
/// Walk the operands of a GEP and adjust the offset as appropriate.
///
/// This routine does the heavy lifting of the pointer walk by computing
/// offsets and looking through GEPs.
@ -178,7 +178,7 @@ protected:
} // end namespace detail
/// \brief A base class for visitors over the uses of a pointer value.
/// A base class for visitors over the uses of a pointer value.
///
/// Once constructed, a user can call \c visit on a pointer value, and this
/// will walk its uses and visit each instruction using an InstVisitor. It also
@ -216,7 +216,7 @@ public:
"Must pass the derived type to this template!");
}
/// \brief Recursively visit the uses of the given pointer.
/// Recursively visit the uses of the given pointer.
/// \returns An info struct about the pointer. See \c PtrInfo for details.
PtrInfo visitPtr(Instruction &I) {
// This must be a pointer type. Get an integer type suitable to hold

View File

@ -726,7 +726,7 @@ class RegionInfoBase {
BBtoRegionMap BBtoRegion;
protected:
/// \brief Update refences to a RegionInfoT held by the RegionT managed here
/// Update references to a RegionInfoT held by the RegionT managed here
///
/// This is a post-move helper. Regions hold references to the owning
/// RegionInfo object. After a move these need to be fixed.
@ -740,7 +740,7 @@ protected:
}
private:
/// \brief Wipe this region tree's state without releasing any resources.
/// Wipe this region tree's state without releasing any resources.
///
/// This is essentially a post-move helper only. It leaves the object in an
/// assignable and destroyable state, but otherwise invalid.
@ -968,7 +968,7 @@ public:
//@}
};
/// \brief Analysis pass that exposes the \c RegionInfo for a function.
/// Analysis pass that exposes the \c RegionInfo for a function.
class RegionInfoAnalysis : public AnalysisInfoMixin<RegionInfoAnalysis> {
friend AnalysisInfoMixin<RegionInfoAnalysis>;
@ -980,7 +980,7 @@ public:
RegionInfo run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c RegionInfo.
/// Printer pass for the \c RegionInfo.
class RegionInfoPrinterPass : public PassInfoMixin<RegionInfoPrinterPass> {
raw_ostream &OS;
@ -990,7 +990,7 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Verifier pass for the \c RegionInfo.
/// Verifier pass for the \c RegionInfo.
struct RegionInfoVerifierPass : PassInfoMixin<RegionInfoVerifierPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

View File

@ -49,7 +49,7 @@ class Type;
class User;
class Value;
/// \brief Information about a load/store intrinsic defined by the target.
/// Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
/// This is the pointer that the intrinsic is loading from or storing to.
/// If this is non-null, then analysis/optimization passes can assume that
@ -73,18 +73,18 @@ struct MemIntrinsicInfo {
}
};
/// \brief This pass provides access to the codegen interfaces that are needed
/// This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
/// \brief Construct a TTI object using a type implementing the \c Concept
/// Construct a TTI object using a type implementing the \c Concept
/// API below.
///
/// This is used by targets to construct a TTI wrapping their target-specific
/// implementation that encodes appropriate costs for their target.
template <typename T> TargetTransformInfo(T Impl);
/// \brief Construct a baseline TTI object using a minimal implementation of
/// Construct a baseline TTI object using a minimal implementation of
/// the \c Concept API below.
///
/// The TTI implementation will reflect the information in the DataLayout
@ -99,7 +99,7 @@ public:
// out-of-line.
~TargetTransformInfo();
/// \brief Handle the invalidation of this information.
/// Handle the invalidation of this information.
///
/// When used as a result of \c TargetIRAnalysis this method will be called
/// when the function this was computed for changes. When it returns false,
@ -114,7 +114,7 @@ public:
/// \name Generic Target Information
/// @{
/// \brief The kind of cost model.
/// The kind of cost model.
///
/// There are several different cost models that can be customized by the
/// target. The normalization of each cost model may be target specific.
@ -124,7 +124,7 @@ public:
TCK_CodeSize ///< Instruction code size.
};
/// \brief Query the cost of a specified instruction.
/// Query the cost of a specified instruction.
///
/// Clients should use this interface to query the cost of an existing
/// instruction. The instruction must have a valid parent (basic block).
@ -145,7 +145,7 @@ public:
llvm_unreachable("Unknown instruction cost kind");
}
/// \brief Underlying constants for 'cost' values in this interface.
/// Underlying constants for 'cost' values in this interface.
///
/// Many APIs in this interface return a cost. This enum defines the
/// fundamental values that should be used to interpret (and produce) those
@ -169,7 +169,7 @@ public:
TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
};
/// \brief Estimate the cost of a specific operation when lowered.
/// Estimate the cost of a specific operation when lowered.
///
/// Note that this is designed to work on an arbitrary synthetic opcode, and
/// thus work for hypothetical queries before an instruction has even been
@ -185,7 +185,7 @@ public:
/// comments for a detailed explanation of the cost values.
int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy = nullptr) const;
/// \brief Estimate the cost of a GEP operation when lowered.
/// Estimate the cost of a GEP operation when lowered.
///
/// The contract for this function is the same as \c getOperationCost except
/// that it supports an interface that provides extra information specific to
@ -193,14 +193,14 @@ public:
int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) const;
/// \brief Estimate the cost of a EXT operation when lowered.
/// Estimate the cost of a EXT operation when lowered.
///
/// The contract for this function is the same as \c getOperationCost except
/// that it supports an interface that provides extra information specific to
/// the EXT operation.
int getExtCost(const Instruction *I, const Value *Src) const;
/// \brief Estimate the cost of a function call when lowered.
/// Estimate the cost of a function call when lowered.
///
/// The contract for this is the same as \c getOperationCost except that it
/// supports an interface that provides extra information specific to call
@ -211,13 +211,13 @@ public:
/// The latter is only interesting for varargs function types.
int getCallCost(FunctionType *FTy, int NumArgs = -1) const;
/// \brief Estimate the cost of calling a specific function when lowered.
/// Estimate the cost of calling a specific function when lowered.
///
/// This overload adds the ability to reason about the particular function
/// being called in the event it is a library call with special lowering.
int getCallCost(const Function *F, int NumArgs = -1) const;
/// \brief Estimate the cost of calling a specific function when lowered.
/// Estimate the cost of calling a specific function when lowered.
///
/// This overload allows specifying a set of candidate argument values.
int getCallCost(const Function *F, ArrayRef<const Value *> Arguments) const;
@ -230,13 +230,13 @@ public:
/// individual classes of instructions would be better.
unsigned getInliningThresholdMultiplier() const;
/// \brief Estimate the cost of an intrinsic when lowered.
/// Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) const;
/// \brief Estimate the cost of an intrinsic when lowered.
/// Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
@ -248,7 +248,7 @@ public:
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
unsigned &JTSize) const;
/// \brief Estimate the cost of a given IR user when lowered.
/// Estimate the cost of a given IR user when lowered.
///
/// This can estimate the cost of either a ConstantExpr or Instruction when
/// lowered. It has two primary advantages over the \c getOperationCost and
@ -271,7 +271,7 @@ public:
/// comments for a detailed explanation of the cost values.
int getUserCost(const User *U, ArrayRef<const Value *> Operands) const;
/// \brief This is a helper function which calls the two-argument getUserCost
/// This is a helper function which calls the two-argument getUserCost
/// with \p Operands which are the current operands U has.
int getUserCost(const User *U) const {
SmallVector<const Value *, 4> Operands(U->value_op_begin(),
@ -279,14 +279,14 @@ public:
return getUserCost(U, Operands);
}
/// \brief Return true if branch divergence exists.
/// Return true if branch divergence exists.
///
/// Branch divergence has a significantly negative impact on GPU performance
/// when threads in the same wavefront take different paths due to conditional
/// branches.
bool hasBranchDivergence() const;
/// \brief Returns whether V is a source of divergence.
/// Returns whether V is a source of divergence.
///
/// This function provides the target-dependent information for
/// the target-independent DivergenceAnalysis. DivergenceAnalysis first
@ -294,7 +294,7 @@ public:
/// starting with the sources of divergence.
bool isSourceOfDivergence(const Value *V) const;
// \brief Returns true for the target specific
// Returns true for the target specific
// set of operations which produce uniform result
// even taking non-uniform arguments
bool isAlwaysUniform(const Value *V) const;
@ -317,7 +317,7 @@ public:
/// optimize away.
unsigned getFlatAddressSpace() const;
/// \brief Test whether calls to a function lower to actual program function
/// Test whether calls to a function lower to actual program function
/// calls.
///
/// The idea is to test whether the program is likely to require a 'call'
@ -424,7 +424,7 @@ public:
bool UnrollRemainder;
};
/// \brief Get target-customized preferences for the generic loop unrolling
/// Get target-customized preferences for the generic loop unrolling
/// transformation. The caller will initialize UP with the current
/// target-independent defaults.
void getUnrollingPreferences(Loop *L, ScalarEvolution &,
@ -435,7 +435,7 @@ public:
/// \name Scalar Target Information
/// @{
/// \brief Flags indicating the kind of support for population count.
/// Flags indicating the kind of support for population count.
///
/// Compared to the SW implementation, HW support is supposed to
/// significantly boost the performance when the population is dense, and it
@ -445,18 +445,18 @@ public:
/// considered as "Slow".
enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };
/// \brief Return true if the specified immediate is legal add immediate, that
/// Return true if the specified immediate is legal add immediate, that
/// is the target has add instructions which can add a register with the
/// immediate without having to materialize the immediate into a register.
bool isLegalAddImmediate(int64_t Imm) const;
/// \brief Return true if the specified immediate is legal icmp immediate,
/// Return true if the specified immediate is legal icmp immediate,
/// that is the target has icmp instructions which can compare a register
/// against the immediate without having to materialize the immediate into a
/// register.
bool isLegalICmpImmediate(int64_t Imm) const;
/// \brief Return true if the addressing mode represented by AM is legal for
/// Return true if the addressing mode represented by AM is legal for
/// this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
/// mode is legal for a load/store of any legal type.
@ -467,7 +467,7 @@ public:
unsigned AddrSpace = 0,
Instruction *I = nullptr) const;
/// \brief Return true if LSR cost of C1 is lower than C1.
/// Return true if LSR cost of C1 is lower than C2.
bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
TargetTransformInfo::LSRCost &C2) const;
@ -480,12 +480,12 @@ public:
/// addressing mode expressions.
bool shouldFavorPostInc() const;
/// \brief Return true if the target supports masked load/store
/// Return true if the target supports masked load/store
/// AVX2 and AVX-512 targets allow masks for consecutive load and store
bool isLegalMaskedStore(Type *DataType) const;
bool isLegalMaskedLoad(Type *DataType) const;
/// \brief Return true if the target supports masked gather/scatter
/// Return true if the target supports masked gather/scatter
/// AVX-512 fully supports gather and scatter for vectors with 32 and 64
/// bits scalar type.
bool isLegalMaskedScatter(Type *DataType) const;
@ -508,7 +508,7 @@ public:
/// Return true if target doesn't mind addresses in vectors.
bool prefersVectorizedAddressing() const;
/// \brief Return the cost of the scaling factor used in the addressing
/// Return the cost of the scaling factor used in the addressing
/// mode represented by AM for this target, for a load/store
/// of the specified type.
/// If the AM is supported, the return value must be >= 0.
@ -518,41 +518,41 @@ public:
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace = 0) const;
/// \brief Return true if the loop strength reduce pass should make
/// Return true if the loop strength reduce pass should make
/// Instruction* based TTI queries to isLegalAddressingMode(). This is
/// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
/// immediate offset and no index register.
bool LSRWithInstrQueries() const;
/// \brief Return true if it's free to truncate a value of type Ty1 to type
/// Return true if it's free to truncate a value of type Ty1 to type
/// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
/// by referencing its sub-register AX.
bool isTruncateFree(Type *Ty1, Type *Ty2) const;
/// \brief Return true if it is profitable to hoist instruction in the
/// Return true if it is profitable to hoist instruction in the
/// then/else to before if.
bool isProfitableToHoist(Instruction *I) const;
bool useAA() const;
/// \brief Return true if this type is legal.
/// Return true if this type is legal.
bool isTypeLegal(Type *Ty) const;
/// \brief Returns the target's jmp_buf alignment in bytes.
/// Returns the target's jmp_buf alignment in bytes.
unsigned getJumpBufAlignment() const;
/// \brief Returns the target's jmp_buf size in bytes.
/// Returns the target's jmp_buf size in bytes.
unsigned getJumpBufSize() const;
/// \brief Return true if switches should be turned into lookup tables for the
/// Return true if switches should be turned into lookup tables for the
/// target.
bool shouldBuildLookupTables() const;
/// \brief Return true if switches should be turned into lookup tables
/// Return true if switches should be turned into lookup tables
/// containing this constant value for the target.
bool shouldBuildLookupTablesForConstant(Constant *C) const;
/// \brief Return true if the input function which is cold at all call sites,
/// Return true if the input function which is cold at all call sites,
/// should use coldcc calling convention.
bool useColdCCForColdCall(Function &F) const;
@ -566,10 +566,10 @@ public:
/// the scalarization cost of a load/store.
bool supportsEfficientVectorElementLoadStore() const;
/// \brief Don't restrict interleaved unrolling to small loops.
/// Don't restrict interleaved unrolling to small loops.
bool enableAggressiveInterleaving(bool LoopHasReductions) const;
/// \brief If not nullptr, enable inline expansion of memcmp. IsZeroCmp is
/// If not nullptr, enable inline expansion of memcmp. IsZeroCmp is
/// true if this is the expansion of memcmp(p1, p2, s) == 0.
struct MemCmpExpansionOptions {
// The list of available load sizes (in bytes), sorted in decreasing order.
@ -577,10 +577,10 @@ public:
};
const MemCmpExpansionOptions *enableMemCmpExpansion(bool IsZeroCmp) const;
/// \brief Enable matching of interleaved access groups.
/// Enable matching of interleaved access groups.
bool enableInterleavedAccessVectorization() const;
/// \brief Indicate that it is potentially unsafe to automatically vectorize
/// Indicate that it is potentially unsafe to automatically vectorize
/// floating-point operations because the semantics of vector and scalar
/// floating-point semantics may differ. For example, ARM NEON v7 SIMD math
/// does not support IEEE-754 denormal numbers, while depending on the
@ -589,16 +589,16 @@ public:
/// operations, shuffles, or casts.
bool isFPVectorizationPotentiallyUnsafe() const;
/// \brief Determine if the target supports unaligned memory accesses.
/// Determine if the target supports unaligned memory accesses.
bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
unsigned BitWidth, unsigned AddressSpace = 0,
unsigned Alignment = 1,
bool *Fast = nullptr) const;
/// \brief Return hardware support for population count.
/// Return hardware support for population count.
PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
/// \brief Return true if the hardware has a fast square-root instruction.
/// Return true if the hardware has a fast square-root instruction.
bool haveFastSqrt(Type *Ty) const;
/// Return true if it is faster to check if a floating-point value is NaN
@ -607,15 +607,15 @@ public:
/// generally as cheap as checking for ordered/unordered.
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;
/// \brief Return the expected cost of supporting the floating point operation
/// Return the expected cost of supporting the floating point operation
/// of the specified type.
int getFPOpCost(Type *Ty) const;
/// \brief Return the expected cost of materializing for the given integer
/// Return the expected cost of materializing for the given integer
/// immediate of the specified type.
int getIntImmCost(const APInt &Imm, Type *Ty) const;
/// \brief Return the expected cost of materialization for the given integer
/// Return the expected cost of materialization for the given integer
/// immediate of the specified type for a given instruction. The cost can be
/// zero if the immediate can be folded into the specified instruction.
int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
@ -623,7 +623,7 @@ public:
int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) const;
/// \brief Return the expected cost for the given integer when optimising
/// Return the expected cost for the given integer when optimising
/// for size. This is different than the other integer immediate cost
/// functions in that it is subtarget agnostic. This is useful when you e.g.
/// target one ISA such as Aarch32 but smaller encodings could be possible
@ -637,7 +637,7 @@ public:
/// \name Vector Target Information
/// @{
/// \brief The various kinds of shuffle patterns for vector queries.
/// The various kinds of shuffle patterns for vector queries.
enum ShuffleKind {
SK_Broadcast, ///< Broadcast element 0 to all other elements.
SK_Reverse, ///< Reverse the order of the vector.
@ -651,7 +651,7 @@ public:
///< shuffle mask.
};
/// \brief Additional information about an operand's possible values.
/// Additional information about an operand's possible values.
enum OperandValueKind {
OK_AnyValue, // Operand can have any value.
OK_UniformValue, // Operand is uniform (splat of a value).
@ -659,7 +659,7 @@ public:
OK_NonUniformConstantValue // Operand is a non uniform constant value.
};
/// \brief Additional properties of an operand's values.
/// Additional properties of an operand's values.
enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };
/// \return The number of scalar or vector registers that the target has.
@ -812,7 +812,7 @@ public:
ArrayRef<unsigned> Indices, unsigned Alignment,
unsigned AddressSpace) const;
/// \brief Calculate the cost of performing a vector reduction.
/// Calculate the cost of performing a vector reduction.
///
/// This is the cost of reducing the vector value of type \p Ty to a scalar
/// value using the operation denoted by \p Opcode. The form of the reduction
@ -906,7 +906,7 @@ public:
bool areInlineCompatible(const Function *Caller,
const Function *Callee) const;
/// \brief The type of load/store indexing.
/// The type of load/store indexing.
enum MemIndexedMode {
MIM_Unindexed, ///< No indexing.
MIM_PreInc, ///< Pre-incrementing.
@ -972,19 +972,19 @@ public:
/// @}
private:
/// \brief Estimate the latency of specified instruction.
/// Estimate the latency of specified instruction.
/// Returns 1 as the default value.
int getInstructionLatency(const Instruction *I) const;
/// \brief Returns the expected throughput cost of the instruction.
/// Returns the expected throughput cost of the instruction.
/// Returns -1 if the cost is unknown.
int getInstructionThroughput(const Instruction *I) const;
/// \brief The abstract base class used to type erase specific TTI
/// The abstract base class used to type erase specific TTI
/// implementations.
class Concept;
/// \brief The template model for the base class which wraps a concrete
/// The template model for the base class which wraps a concrete
/// implementation in a type erased interface.
template <typename T> class Model;
@ -1574,7 +1574,7 @@ template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
: TTIImpl(new Model<T>(Impl)) {}
/// \brief Analysis pass providing the \c TargetTransformInfo.
/// Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
@ -1589,13 +1589,13 @@ class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
public:
typedef TargetTransformInfo Result;
/// \brief Default construct a target IR analysis.
/// Default construct a target IR analysis.
///
/// This will use the module's datalayout to construct a baseline
/// conservative TTI result.
TargetIRAnalysis();
/// \brief Construct an IR analysis pass around a target-provide callback.
/// Construct an IR analysis pass around a target-provided callback.
///
/// The callback will be called with a particular function for which the TTI
/// is needed and must return a TTI object for that function.
@ -1621,7 +1621,7 @@ private:
friend AnalysisInfoMixin<TargetIRAnalysis>;
static AnalysisKey Key;
/// \brief The callback used to produce a result.
/// The callback used to produce a result.
///
/// We use a completely opaque callback so that targets can provide whatever
/// mechanism they desire for constructing the TTI for a given function.
@ -1633,11 +1633,11 @@ private:
/// the external TargetMachine, and that reference needs to never dangle.
std::function<Result(const Function &)> TTICallback;
/// \brief Helper function used as the callback in the default constructor.
/// Helper function used as the callback in the default constructor.
static Result getDefaultTTI(const Function &F);
};
/// \brief Wrapper pass for TargetTransformInfo.
/// Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
@ -1650,7 +1650,7 @@ class TargetTransformInfoWrapperPass : public ImmutablePass {
public:
static char ID;
/// \brief We must provide a default constructor for the pass but it should
/// We must provide a default constructor for the pass but it should
/// never be used.
///
/// Use the constructor below or call one of the creation routines.
@ -1661,7 +1661,7 @@ public:
TargetTransformInfo &getTTI(const Function &F);
};
/// \brief Create an analysis pass wrapper around a TTI object.
/// Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.

View File

@ -27,7 +27,7 @@
namespace llvm {
/// \brief Base class for use as a mix-in that aids implementing
/// Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
@ -651,7 +651,7 @@ protected:
}
};
/// \brief CRTP base class for use as a mix-in that aids implementing
/// CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {

View File

@ -288,7 +288,7 @@ class Value;
return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
}
/// \brief This method is similar to GetUnderlyingObject except that it can
/// This method is similar to GetUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
@ -461,7 +461,7 @@ class Value;
/// the parent of I.
bool programUndefinedIfFullPoison(const Instruction *PoisonI);
/// \brief Specific patterns of select instructions we can match.
/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
SPF_UNKNOWN = 0,
SPF_SMIN, /// Signed minimum
@ -474,7 +474,7 @@ class Value;
SPF_NABS /// Negated absolute value
};
/// \brief Behavior when a floating point min/max is given one NaN and one
/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
SPNB_NA = 0, /// NaN behavior not applicable.

View File

@ -33,50 +33,50 @@ namespace Intrinsic {
enum ID : unsigned;
}
/// \brief Identify if the intrinsic is trivially vectorizable.
/// Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
bool isTriviallyVectorizable(Intrinsic::ID ID);
/// \brief Identifies if the intrinsic has a scalar operand. It checks for
/// Identifies if the intrinsic has a scalar operand. It checks for
/// ctlz,cttz and powi special intrinsics whose argument is scalar.
bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
/// \brief Returns intrinsic ID for call.
/// Returns intrinsic ID for call.
/// For the input call instruction it finds mapping intrinsic and returns
/// its intrinsic ID; if it is not found, it returns not_intrinsic.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
const TargetLibraryInfo *TLI);
/// \brief Find the operand of the GEP that should be checked for consecutive
/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned getGEPInductionOperand(const GetElementPtrInst *Gep);
/// \brief If the argument is a GEP, then returns the operand identified by
/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
/// \brief If a value has only one user that is a CastInst, return it.
/// If a value has only one user that is a CastInst, return it.
Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty);
/// \brief Get the stride of a pointer access in a loop. Looks for symbolic
/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
/// \brief Given a vector and an element number, see if the scalar value is
/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *findScalarElement(Value *V, unsigned EltNo);
/// \brief Get splat value if the input is a splat vector or return nullptr.
/// Get splat value if the input is a splat vector or return nullptr.
/// The value may be extracted from a splat constants vector or from
/// a sequence of instructions that broadcast a single value into a vector.
const Value *getSplatValue(const Value *V);
/// \brief Compute a map of integer instructions to their minimum legal type
/// Compute a map of integer instructions to their minimum legal type
/// size.
///
/// C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
@ -124,7 +124,7 @@ computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
/// This function always sets a (possibly null) value for each K in Kinds.
Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
/// \brief Create an interleave shuffle mask.
/// Create an interleave shuffle mask.
///
/// This function creates a shuffle mask for interleaving \p NumVecs vectors of
/// vectorization factor \p VF into a single wide vector. The mask is of the
@ -138,7 +138,7 @@ Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
Constant *createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
unsigned NumVecs);
/// \brief Create a stride shuffle mask.
/// Create a stride shuffle mask.
///
/// This function creates a shuffle mask whose elements begin at \p Start and
/// are incremented by \p Stride. The mask can be used to deinterleave an
@ -153,7 +153,7 @@ Constant *createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
Constant *createStrideMask(IRBuilder<> &Builder, unsigned Start,
unsigned Stride, unsigned VF);
/// \brief Create a sequential shuffle mask.
/// Create a sequential shuffle mask.
///
/// This function creates shuffle mask whose elements are sequential and begin
/// at \p Start. The mask contains \p NumInts integers and is padded with \p
@ -167,7 +167,7 @@ Constant *createStrideMask(IRBuilder<> &Builder, unsigned Start,
Constant *createSequentialMask(IRBuilder<> &Builder, unsigned Start,
unsigned NumInts, unsigned NumUndefs);
/// \brief Concatenate a list of vectors.
/// Concatenate a list of vectors.
///
/// This function generates code that concatenates the vectors in \p Vecs into a
/// single large vector. The number of vectors should be greater than one, and

View File

@ -30,7 +30,7 @@ class Type;
/// Module (intermediate representation) with the corresponding features. Note
/// that this does not verify that the generated Module is valid, so you should
/// run the verifier after parsing the file to check that it is okay.
/// \brief Parse LLVM Assembly from a file
/// Parse LLVM Assembly from a file
/// \param Filename The name of the file to parse
/// \param Error Error result info.
/// \param Context Context in which to allocate globals info.
@ -50,7 +50,7 @@ parseAssemblyFile(StringRef Filename, SMDiagnostic &Error, LLVMContext &Context,
/// Module (intermediate representation) with the corresponding features. Note
/// that this does not verify that the generated Module is valid, so you should
/// run the verifier after parsing the file to check that it is okay.
/// \brief Parse LLVM Assembly from a string
/// Parse LLVM Assembly from a string
/// \param AsmString The string containing assembly
/// \param Error Error result info.
/// \param Context Context in which to allocate globals info.
@ -68,7 +68,7 @@ std::unique_ptr<Module> parseAssemblyString(StringRef AsmString,
StringRef DataLayoutString = "");
/// parseAssemblyFile and parseAssemblyString are wrappers around this function.
/// \brief Parse LLVM Assembly from a MemoryBuffer.
/// Parse LLVM Assembly from a MemoryBuffer.
/// \param F The MemoryBuffer containing assembly
/// \param Err Error result info.
/// \param Slots The optional slot mapping that will be initialized during

View File

@ -105,7 +105,7 @@ class raw_ostream;
const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex);
};
/// \brief Write the specified module to the specified raw output stream.
/// Write the specified module to the specified raw output stream.
///
/// For streams where it matters, the given stream should be in "binary"
/// mode.

View File

@ -23,7 +23,7 @@ class Module;
class ModulePass;
class raw_ostream;
/// \brief Create and return a pass that writes the module to the specified
/// Create and return a pass that writes the module to the specified
/// ostream. Note that this pass is designed for use with the legacy pass
/// manager.
///
@ -40,7 +40,7 @@ ModulePass *createBitcodeWriterPass(raw_ostream &Str,
bool EmitSummaryIndex = false,
bool EmitModuleHash = false);
/// \brief Pass for writing a module of IR out to a bitcode file.
/// Pass for writing a module of IR out to a bitcode file.
///
/// Note that this is intended for use with the new pass manager. To construct
/// a pass for the legacy pass manager, use the function above.
@ -51,7 +51,7 @@ class BitcodeWriterPass : public PassInfoMixin<BitcodeWriterPass> {
bool EmitModuleHash;
public:
/// \brief Construct a bitcode writer pass around a particular output stream.
/// Construct a bitcode writer pass around a particular output stream.
///
/// If \c ShouldPreserveUseListOrder, encode use-list order so it can be
/// reproduced when deserialized.
@ -65,7 +65,7 @@ public:
: OS(OS), ShouldPreserveUseListOrder(ShouldPreserveUseListOrder),
EmitSummaryIndex(EmitSummaryIndex), EmitModuleHash(EmitModuleHash) {}
/// \brief Run the bitcode writer pass, and output the module to the selected
/// Run the bitcode writer pass, and output the module to the selected
/// output stream.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};

View File

@ -90,10 +90,10 @@ public:
assert(BlockScope.empty() && CurAbbrevs.empty() && "Block imbalance");
}
/// \brief Retrieve the current position in the stream, in bits.
/// Retrieve the current position in the stream, in bits.
uint64_t GetCurrentBitNo() const { return GetBufferOffset() * 8 + CurBit; }
/// \brief Retrieve the number of bits currently used to encode an abbrev ID.
/// Retrieve the number of bits currently used to encode an abbrev ID.
unsigned GetAbbrevIDWidth() const { return CurCodeSize; }
//===--------------------------------------------------------------------===//

View File

@ -36,7 +36,7 @@ class SDValue;
class SelectionDAG;
struct EVT;
/// \brief Compute the linearized index of a member in a nested
/// Compute the linearized index of a member in a nested
/// aggregate/struct/array.
///
/// Given an LLVM IR aggregate type and a sequence of insertvalue or

View File

@ -343,10 +343,10 @@ public:
/// Lower the specified LLVM Constant to an MCExpr.
virtual const MCExpr *lowerConstant(const Constant *CV);
/// \brief Print a general LLVM constant to the .s file.
/// Print a general LLVM constant to the .s file.
void EmitGlobalConstant(const DataLayout &DL, const Constant *CV);
/// \brief Unnamed constant global variables solely contaning a pointer to
/// Unnamed constant global variables solely containing a pointer to
/// another globals variable act like a global variable "proxy", or GOT
/// equivalents, i.e., it's only used to hold the address of the latter. One
/// optimization is to replace accesses to these proxies by using the GOT
@ -356,7 +356,7 @@ public:
/// accesses to GOT entries.
void computeGlobalGOTEquivs(Module &M);
/// \brief Constant expressions using GOT equivalent globals may not be
/// Constant expressions using GOT equivalent globals may not be
/// eligible for PC relative GOT entry conversion, in such cases we need to
/// emit the proxies we previously omitted in EmitGlobalVariable.
void emitGlobalGOTEquivs();
@ -541,10 +541,10 @@ public:
// Dwarf Lowering Routines
//===------------------------------------------------------------------===//
/// \brief Emit frame instruction to describe the layout of the frame.
/// Emit frame instruction to describe the layout of the frame.
void emitCFIInstruction(const MCCFIInstruction &Inst) const;
/// \brief Emit Dwarf abbreviation table.
/// Emit Dwarf abbreviation table.
template <typename T> void emitDwarfAbbrevs(const T &Abbrevs) const {
// For each abbreviation.
for (const auto &Abbrev : Abbrevs)
@ -556,7 +556,7 @@ public:
void emitDwarfAbbrev(const DIEAbbrev &Abbrev) const;
/// \brief Recursively emit Dwarf DIE tree.
/// Recursively emit Dwarf DIE tree.
void emitDwarfDIE(const DIE &Die) const;
//===------------------------------------------------------------------===//

View File

@ -26,7 +26,7 @@ using CreateCmpXchgInstFun =
function_ref<void(IRBuilder<> &, Value *, Value *, Value *, AtomicOrdering,
Value *&, Value *&)>;
/// \brief Expand an atomic RMW instruction into a loop utilizing
/// Expand an atomic RMW instruction into a loop utilizing
/// cmpxchg. You'll want to make sure your target machine likes cmpxchg
/// instructions in the first place and that there isn't another, better,
/// transformation available (for example AArch32/AArch64 have linked loads).

View File

@ -65,7 +65,7 @@ class TargetMachine;
extern cl::opt<unsigned> PartialUnrollingThreshold;
/// \brief Base class which can be used to help build a TTI implementation.
/// Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
@ -101,12 +101,12 @@ private:
return Cost;
}
/// \brief Local query method delegates up to T which *must* implement this!
/// Local query method delegates up to T which *must* implement this!
const TargetSubtargetInfo *getST() const {
return static_cast<const T *>(this)->getST();
}
/// \brief Local query method delegates up to T which *must* implement this!
/// Local query method delegates up to T which *must* implement this!
const TargetLoweringBase *getTLI() const {
return static_cast<const T *>(this)->getTLI();
}
@ -1204,7 +1204,7 @@ public:
return SingleCallCost;
}
/// \brief Compute a cost of the given call instruction.
/// Compute a cost of the given call instruction.
///
/// Compute the cost of calling function F with return type RetTy and
/// argument types Tys. F might be nullptr, in this case the cost of an
@ -1365,7 +1365,7 @@ public:
/// @}
};
/// \brief Concrete BasicTTIImpl that can be used if no further customization
/// Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
using BaseT = BasicTTIImplBase<BasicTTIImpl>;

View File

@ -22,7 +22,7 @@ class MachineFunction;
class MachineLoopInfo;
class VirtRegMap;
/// \brief Normalize the spill weight of a live interval
/// Normalize the spill weight of a live interval
///
/// The spill weight of a live interval is computed as:
///
@ -42,7 +42,7 @@ class VirtRegMap;
return UseDefFreq / (Size + 25*SlotIndex::InstrDist);
}
/// \brief Calculate auxiliary information for a virtual register such as its
/// Calculate auxiliary information for a virtual register such as its
/// spill weight and allocation hint.
class VirtRegAuxInfo {
public:
@ -64,10 +64,10 @@ class VirtRegMap;
NormalizingFn norm = normalizeSpillWeight)
: MF(mf), LIS(lis), VRM(vrm), Loops(loops), MBFI(mbfi), normalize(norm) {}
/// \brief (re)compute li's spill weight and allocation hint.
/// (re)compute li's spill weight and allocation hint.
void calculateSpillWeightAndHint(LiveInterval &li);
/// \brief Compute future expected spill weight of a split artifact of li
/// Compute future expected spill weight of a split artifact of li
/// that will span between start and end slot indexes.
/// \param li The live interval to be split.
/// \param start The expected beginning of the split artifact. Instructions
@ -78,7 +78,7 @@ class VirtRegMap;
/// negative weight for unspillable li.
float futureWeight(LiveInterval &li, SlotIndex start, SlotIndex end);
/// \brief Helper function for weight calculations.
/// Helper function for weight calculations.
/// (Re)compute li's spill weight and allocation hint, or, for non null
/// start and end - compute future expected spill weight of a split
/// artifact of li that will span between start and end slot indexes.
@ -94,7 +94,7 @@ class VirtRegMap;
SlotIndex *end = nullptr);
};
/// \brief Compute spill weights and allocation hints for all virtual register
/// Compute spill weights and allocation hints for all virtual register
/// live intervals.
void calculateSpillWeightsAndHints(LiveIntervals &LIS, MachineFunction &MF,
VirtRegMap *VRM,

View File

@ -349,7 +349,7 @@ LLVM_ATTRIBUTE_UNUSED static std::vector<std::string> getFeatureList() {
return Features.getFeatures();
}
/// \brief Set function attributes of functions in Module M based on CPU,
/// Set function attributes of functions in Module M based on CPU,
/// Features, and command line flags.
LLVM_ATTRIBUTE_UNUSED static void
setFunctionAttributes(StringRef CPU, StringRef Features, Module &M) {

View File

@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Cost tables and simple lookup functions
/// Cost tables and simple lookup functions
///
//===----------------------------------------------------------------------===//

View File

@ -136,7 +136,7 @@ class DIEAbbrevSet {
/// The bump allocator to use when creating DIEAbbrev objects in the uniqued
/// storage container.
BumpPtrAllocator &Alloc;
/// \brief FoldingSet that uniques the abbreviations.
/// FoldingSet that uniques the abbreviations.
FoldingSet<DIEAbbrev> AbbreviationsSet;
/// A list of all the unique abbreviations in use.
std::vector<DIEAbbrev *> Abbreviations;

View File

@ -61,7 +61,7 @@ class Type;
class User;
class Value;
/// \brief This is a fast-path instruction selection class that generates poor
/// This is a fast-path instruction selection class that generates poor
/// code and doesn't support illegal types or non-trivial lowering, but runs
/// quickly.
class FastISel {
@ -78,7 +78,7 @@ public:
bool IsReturnValueUsed : 1;
bool IsPatchPoint : 1;
// \brief IsTailCall Should be modified by implementations of FastLowerCall
// IsTailCall Should be modified by implementations of FastLowerCall
// that perform tail call conversions.
bool IsTailCall = false;
@ -215,13 +215,13 @@ protected:
const TargetLibraryInfo *LibInfo;
bool SkipTargetIndependentISel;
/// \brief The position of the last instruction for materializing constants
/// The position of the last instruction for materializing constants
/// for use in the current block. It resets to EmitStartPt when it makes sense
/// (for example, it's usually profitable to avoid function calls between the
/// definition and the use)
MachineInstr *LastLocalValue;
/// \brief The top most instruction in the current block that is allowed for
/// The top most instruction in the current block that is allowed for
/// emitting local variables. LastLocalValue resets to EmitStartPt when it
/// makes sense (for example, on function calls)
MachineInstr *EmitStartPt;
@ -233,56 +233,56 @@ protected:
public:
virtual ~FastISel();
/// \brief Return the position of the last instruction emitted for
/// Return the position of the last instruction emitted for
/// materializing constants for use in the current block.
MachineInstr *getLastLocalValue() { return LastLocalValue; }
/// \brief Update the position of the last instruction emitted for
/// Update the position of the last instruction emitted for
/// materializing constants for use in the current block.
void setLastLocalValue(MachineInstr *I) {
EmitStartPt = I;
LastLocalValue = I;
}
/// \brief Set the current block to which generated machine instructions will
/// Set the current block to which generated machine instructions will
/// be appended.
void startNewBlock();
/// Flush the local value map and sink local values if possible.
void finishBasicBlock();
/// \brief Return current debug location information.
/// Return current debug location information.
DebugLoc getCurDebugLoc() const { return DbgLoc; }
/// \brief Do "fast" instruction selection for function arguments and append
/// Do "fast" instruction selection for function arguments and append
/// the machine instructions to the current block. Returns true when
/// successful.
bool lowerArguments();
/// \brief Do "fast" instruction selection for the given LLVM IR instruction
/// Do "fast" instruction selection for the given LLVM IR instruction
/// and append the generated machine instructions to the current block.
/// Returns true if selection was successful.
bool selectInstruction(const Instruction *I);
/// \brief Do "fast" instruction selection for the given LLVM IR operator
/// Do "fast" instruction selection for the given LLVM IR operator
/// (Instruction or ConstantExpr), and append generated machine instructions
/// to the current block. Return true if selection was successful.
bool selectOperator(const User *I, unsigned Opcode);
/// \brief Create a virtual register and arrange for it to be assigned the
/// Create a virtual register and arrange for it to be assigned the
/// value for the given LLVM value.
unsigned getRegForValue(const Value *V);
/// \brief Look up the value to see if its value is already cached in a
/// Look up the value to see if its value is already cached in a
/// register. It may be defined by instructions across blocks or defined
/// locally.
unsigned lookUpRegForValue(const Value *V);
/// \brief This is a wrapper around getRegForValue that also takes care of
/// This is a wrapper around getRegForValue that also takes care of
/// truncating or sign-extending the given getelementptr index value.
std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
/// \brief We're checking to see if we can fold \p LI into \p FoldInst. Note
/// We're checking to see if we can fold \p LI into \p FoldInst. Note
/// that we could have a sequence where multiple LLVM IR instructions are
/// folded into the same machineinstr. For example we could have:
///
@ -296,7 +296,7 @@ public:
/// If we succeed folding, return true.
bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
/// \brief The specified machine instr operand is a vreg, and that vreg is
/// The specified machine instr operand is a vreg, and that vreg is
/// being provided by the specified load instruction. If possible, try to
/// fold the load as an operand to the instruction, returning true if
/// possible.
@ -307,11 +307,11 @@ public:
return false;
}
/// \brief Reset InsertPt to prepare for inserting instructions into the
/// Reset InsertPt to prepare for inserting instructions into the
/// current block.
void recomputeInsertPt();
/// \brief Remove all dead instructions between the I and E.
/// Remove all dead instructions between the I and E.
void removeDeadCode(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E);
@ -320,11 +320,11 @@ public:
DebugLoc DL;
};
/// \brief Prepare InsertPt to begin inserting instructions into the local
/// Prepare InsertPt to begin inserting instructions into the local
/// value area and return the old insert position.
SavePoint enterLocalValueArea();
/// \brief Reset InsertPt to the given old insert position.
/// Reset InsertPt to the given old insert position.
void leaveLocalValueArea(SavePoint Old);
protected:
@ -332,45 +332,45 @@ protected:
const TargetLibraryInfo *LibInfo,
bool SkipTargetIndependentISel = false);
/// \brief This method is called by target-independent code when the normal
/// This method is called by target-independent code when the normal
/// FastISel process fails to select an instruction. This gives targets a
/// chance to emit code for anything that doesn't fit into FastISel's
/// framework. It returns true if it was successful.
virtual bool fastSelectInstruction(const Instruction *I) = 0;
/// \brief This method is called by target-independent code to do target-
/// This method is called by target-independent code to do target-
/// specific argument lowering. It returns true if it was successful.
virtual bool fastLowerArguments();
/// \brief This method is called by target-independent code to do target-
/// This method is called by target-independent code to do target-
/// specific call lowering. It returns true if it was successful.
virtual bool fastLowerCall(CallLoweringInfo &CLI);
/// \brief This method is called by target-independent code to do target-
/// This method is called by target-independent code to do target-
/// specific intrinsic lowering. It returns true if it was successful.
virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);
/// \brief This method is called by target-independent code to request that an
/// This method is called by target-independent code to request that an
/// instruction with the given type and opcode be emitted.
virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
/// \brief This method is called by target-independent code to request that an
/// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operand be emitted.
virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill);
/// \brief This method is called by target-independent code to request that an
/// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register operands be emitted.
virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill);
/// \brief This method is called by target-independent code to request that an
/// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and register and immediate
/// operands be emitted.
virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
bool Op0IsKill, uint64_t Imm);
/// \brief This method is a wrapper of fastEmit_ri.
/// This method is a wrapper of fastEmit_ri.
///
/// It first tries to emit an instruction with an immediate operand using
/// fastEmit_ri. If that fails, it materializes the immediate into a register
@ -378,80 +378,80 @@ protected:
unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
uint64_t Imm, MVT ImmType);
/// \brief This method is called by target-independent code to request that an
/// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and immediate operand be emitted.
virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
/// \brief This method is called by target-independent code to request that an
/// This method is called by target-independent code to request that an
/// instruction with the given type, opcode, and floating-point immediate
/// operand be emitted.
virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
const ConstantFP *FPImm);
/// \brief Emit a MachineInstr with no operands and a result register in the
/// Emit a MachineInstr with no operands and a result register in the
/// given register class.
unsigned fastEmitInst_(unsigned MachineInstOpcode,
const TargetRegisterClass *RC);
/// \brief Emit a MachineInstr with one register operand and a result register
/// Emit a MachineInstr with one register operand and a result register
/// in the given register class.
unsigned fastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill);
/// \brief Emit a MachineInstr with two register operands and a result
/// Emit a MachineInstr with two register operands and a result
/// register in the given register class.
unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill);
/// \brief Emit a MachineInstr with three register operands and a result
/// Emit a MachineInstr with three register operands and a result
/// register in the given register class.
unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill);
/// \brief Emit a MachineInstr with a register operand, an immediate, and a
/// Emit a MachineInstr with a register operand, an immediate, and a
/// result register in the given register class.
unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm);
/// \brief Emit a MachineInstr with one register operand and two immediate
/// Emit a MachineInstr with one register operand and two immediate
/// operands.
unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
/// \brief Emit a MachineInstr with a floating point immediate, and a result
/// Emit a MachineInstr with a floating point immediate, and a result
/// register in the given register class.
unsigned fastEmitInst_f(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
const ConstantFP *FPImm);
/// \brief Emit a MachineInstr with two register operands, an immediate, and a
/// Emit a MachineInstr with two register operands, an immediate, and a
/// result register in the given register class.
unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC, unsigned Op0,
bool Op0IsKill, unsigned Op1, bool Op1IsKill,
uint64_t Imm);
/// \brief Emit a MachineInstr with a single immediate operand, and a result
/// Emit a MachineInstr with a single immediate operand, and a result
/// register in the given register class.
unsigned fastEmitInst_i(unsigned MachineInstrOpcode,
const TargetRegisterClass *RC, uint64_t Imm);
/// \brief Emit a MachineInstr for an extract_subreg from a specified index of
/// Emit a MachineInstr for an extract_subreg from a specified index of
/// a superregister to a specified type.
unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
uint32_t Idx);
/// \brief Emit MachineInstrs to compute the value of Op with all but the
/// Emit MachineInstrs to compute the value of Op with all but the
/// least significant bit set to zero.
unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
/// \brief Emit an unconditional branch to the given block, unless it is the
/// Emit an unconditional branch to the given block, unless it is the
/// immediate (fall-through) successor, and update the CFG.
void fastEmitBranch(MachineBasicBlock *MBB, const DebugLoc &DL);
@ -460,7 +460,7 @@ protected:
void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB,
MachineBasicBlock *FalseMBB);
/// \brief Update the value map to include the new mapping for this
/// Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previous
/// determined register.
///
@ -471,26 +471,26 @@ protected:
unsigned createResultReg(const TargetRegisterClass *RC);
/// \brief Try to constrain Op so that it is usable by argument OpNum of the
/// Try to constrain Op so that it is usable by argument OpNum of the
/// provided MCInstrDesc. If this fails, create a new virtual register in the
/// correct class and COPY the value there.
unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
unsigned OpNum);
/// \brief Emit a constant in a register using target-specific logic, such as
/// Emit a constant in a register using target-specific logic, such as
/// constant pool loads.
virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }
/// \brief Emit an alloca address in a register using target-specific logic.
/// Emit an alloca address in a register using target-specific logic.
virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }
/// \brief Emit the floating-point constant +0.0 in a register using target-
/// Emit the floating-point constant +0.0 in a register using target-
/// specific logic.
virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
return 0;
}
/// \brief Check if \c Add is an add that can be safely folded into \c GEP.
/// Check if \c Add is an add that can be safely folded into \c GEP.
///
/// \c Add can be folded into \c GEP if:
/// - \c Add is an add,
@ -499,10 +499,10 @@ protected:
/// - \c Add has a constant operand.
bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
/// \brief Test whether the given value has exactly one use.
/// Test whether the given value has exactly one use.
bool hasTrivialKill(const Value *V);
/// \brief Create a machine mem operand from the given instruction.
/// Create a machine mem operand from the given instruction.
MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;
@ -525,7 +525,7 @@ protected:
}
bool lowerCall(const CallInst *I);
/// \brief Select and emit code for a binary operator instruction, which has
/// Select and emit code for a binary operator instruction, which has
/// an opcode which directly corresponds to the given ISD opcode.
bool selectBinaryOp(const User *I, unsigned ISDOpcode);
bool selectFNeg(const User *I);
@ -542,7 +542,7 @@ protected:
bool selectXRayTypedEvent(const CallInst *II);
private:
/// \brief Handle PHI nodes in successor blocks.
/// Handle PHI nodes in successor blocks.
///
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
@ -551,21 +551,21 @@ private:
/// correspond to a different MBB than the end.
bool handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
/// \brief Helper for materializeRegForValue to materialize a constant in a
/// Helper for materializeRegForValue to materialize a constant in a
/// target-independent way.
unsigned materializeConstant(const Value *V, MVT VT);
/// \brief Helper for getRegForVale. This function is called when the value
/// Helper for getRegForVale. This function is called when the value
/// isn't already available in a register and must be materialized with new
/// instructions.
unsigned materializeRegForValue(const Value *V, MVT VT);
/// \brief Clears LocalValueMap and moves the area for the new local variables
/// Clears LocalValueMap and moves the area for the new local variables
/// to the beginning of the block. It helps to avoid spilling cached variables
/// across heavy instructions like calls.
void flushLocalValueMap();
/// \brief Removes dead local value instructions after SavedLastLocalvalue.
/// Removes dead local value instructions after SavedLastLocalvalue.
void removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue);
struct InstOrderMap {
@ -582,10 +582,10 @@ private:
void sinkLocalValueMaterialization(MachineInstr &LocalMI, unsigned DefReg,
InstOrderMap &OrderMap);
/// \brief Insertion point before trying to select the current instruction.
/// Insertion point before trying to select the current instruction.
MachineBasicBlock::iterator SavedInsertPt;
/// \brief Add a stackmap or patchpoint intrinsic call's live variable
/// Add a stackmap or patchpoint intrinsic call's live variable
/// operands to a stackmap or patchpoint machine instruction.
bool addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
const CallInst *CI, unsigned StartIdx);

View File

@ -23,7 +23,7 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
namespace llvm {
/// \brief This is an alternative analysis pass to MachineBlockFrequencyInfo.
/// This is an alternative analysis pass to MachineBlockFrequencyInfo.
/// The difference is that with this pass, the block frequencies are not
/// computed when the analysis pass is executed but rather when the BFI result
/// is explicitly requested by the analysis client.
@ -49,7 +49,7 @@ private:
/// The function.
MachineFunction *MF = nullptr;
/// \brief Calculate MBFI and all other analyses that's not available and
/// Calculate MBFI and all other analyses that's not available and
/// required by BFI.
MachineBlockFrequencyInfo &calculateIfNotAvailable() const;
@ -58,10 +58,10 @@ public:
LazyMachineBlockFrequencyInfoPass();
/// \brief Compute and return the block frequencies.
/// Compute and return the block frequencies.
MachineBlockFrequencyInfo &getBFI() { return calculateIfNotAvailable(); }
/// \brief Compute and return the block frequencies.
/// Compute and return the block frequencies.
const MachineBlockFrequencyInfo &getBFI() const {
return calculateIfNotAvailable();
}

View File

@ -609,7 +609,7 @@ namespace llvm {
void print(raw_ostream &OS) const;
void dump() const;
/// \brief Walk the range and assert if any invariants fail to hold.
/// Walk the range and assert if any invariants fail to hold.
///
/// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
@ -802,7 +802,7 @@ namespace llvm {
void print(raw_ostream &OS) const;
void dump() const;
/// \brief Walks the interval and assert if any invariants fail to hold.
/// Walks the interval and assert if any invariants fail to hold.
///
/// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG

View File

@ -44,7 +44,7 @@ class MachineOperand;
class MachineRegisterInfo;
class raw_ostream;
/// \brief A set of physical registers with utility functions to track liveness
/// A set of physical registers with utility functions to track liveness
/// when walking backward/forward through a basic block.
class LivePhysRegs {
const TargetRegisterInfo *TRI = nullptr;
@ -84,7 +84,7 @@ public:
LiveRegs.insert(*SubRegs);
}
/// \brief Removes a physical register, all its sub-registers, and all its
/// Removes a physical register, all its sub-registers, and all its
/// super-registers from the set.
void removeReg(unsigned Reg) {
assert(TRI && "LivePhysRegs is not initialized.");
@ -98,7 +98,7 @@ public:
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers =
nullptr);
/// \brief Returns true if register \p Reg is contained in the set. This also
/// Returns true if register \p Reg is contained in the set. This also
/// works if only the super register of \p Reg has been defined, because
/// addReg() always adds all sub-registers to the set as well.
/// Note: Returns false if just some sub registers are live, use available()
@ -155,7 +155,7 @@ public:
void dump() const;
private:
/// \brief Adds live-in registers from basic block \p MBB, taking associated
/// Adds live-in registers from basic block \p MBB, taking associated
/// lane masks into consideration.
void addBlockLiveIns(const MachineBasicBlock &MBB);
@ -169,7 +169,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
return OS;
}
/// \brief Computes registers live-in to \p MBB assuming all of its successors
/// Computes registers live-in to \p MBB assuming all of its successors
/// live-in lists are up-to-date. Puts the result into the given LivePhysReg
/// instance \p LiveRegs.
void computeLiveIns(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB);

View File

@ -117,7 +117,7 @@ private:
/// registers are created.
void MRI_NoteNewVirtualRegister(unsigned VReg) override;
/// \brief Check if MachineOperand \p MO is a last use/kill either in the
/// Check if MachineOperand \p MO is a last use/kill either in the
/// main live range of \p LI or in one of the matching subregister ranges.
bool useIsKill(const LiveInterval &LI, const MachineOperand &MO) const;

View File

@ -90,7 +90,7 @@ public:
Units.set(*Unit);
}
/// \brief Adds register units covered by physical register \p Reg that are
/// Adds register units covered by physical register \p Reg that are
/// part of the lanemask \p Mask.
void addRegMasked(unsigned Reg, LaneBitmask Mask) {
for (MCRegUnitMaskIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {

View File

@ -101,7 +101,7 @@ public:
};
LoopTraversal() {}
/// \brief Identifies basic blocks that are part of loops and should to be
/// Identifies basic blocks that are part of loops and should to be
/// visited twice and returns efficient traversal order for all the blocks.
typedef SmallVector<TraversedMBBInfo, 4> TraversalOrder;
TraversalOrder traverse(MachineFunction &MF);

View File

@ -45,7 +45,7 @@ public:
/// \returns nullptr if a parsing error occurred.
std::unique_ptr<Module> parseIRModule();
/// \brief Parses MachineFunctions in the MIR file and add them to the given
/// Parses MachineFunctions in the MIR file and add them to the given
/// MachineModuleInfo \p MMI.
///
/// \returns true if an error occurred.

View File

@ -121,7 +121,7 @@ private:
/// Indicate that this basic block is the entry block of a cleanup funclet.
bool IsCleanupFuncletEntry = false;
/// \brief since getSymbol is a relatively heavy-weight operation, the symbol
/// since getSymbol is a relatively heavy-weight operation, the symbol
/// is only computed once and is cached.
mutable MCSymbol *CachedMCSymbol = nullptr;

View File

@ -45,7 +45,7 @@ using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
/// compute a normal dominator tree.
///
class MachineDominatorTree : public MachineFunctionPass {
/// \brief Helper structure used to hold all the basic blocks
/// Helper structure used to hold all the basic blocks
/// involved in the split of a critical edge.
struct CriticalEdge {
MachineBasicBlock *FromBB;
@ -53,12 +53,12 @@ class MachineDominatorTree : public MachineFunctionPass {
MachineBasicBlock *NewBB;
};
/// \brief Pile up all the critical edges to be split.
/// Pile up all the critical edges to be split.
/// The splitting of a critical edge is local and thus, it is possible
/// to apply several of those changes at the same time.
mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
/// \brief Remember all the basic blocks that are inserted during
/// Remember all the basic blocks that are inserted during
/// edge splitting.
/// Invariant: NewBBs == all the basic blocks contained in the NewBB
/// field of all the elements of CriticalEdgesToSplit.
@ -69,7 +69,7 @@ class MachineDominatorTree : public MachineFunctionPass {
/// The DominatorTreeBase that is used to compute a normal dominator tree
std::unique_ptr<DomTreeBase<MachineBasicBlock>> DT;
/// \brief Apply all the recorded critical edges to the DT.
/// Apply all the recorded critical edges to the DT.
/// This updates the underlying DT information in a way that uses
/// the fast query path of DT as much as possible.
///
@ -228,7 +228,7 @@ public:
void print(raw_ostream &OS, const Module*) const override;
/// \brief Record that the critical edge (FromBB, ToBB) has been
/// Record that the critical edge (FromBB, ToBB) has been
/// split with NewBB.
/// This is best to use this method instead of directly update the
/// underlying information, because this helps mitigating the

View File

@ -96,7 +96,7 @@ template <> struct ilist_callback_traits<MachineBasicBlock> {
struct MachineFunctionInfo {
virtual ~MachineFunctionInfo();
/// \brief Factory function: default behavior is to call new using the
/// Factory function: default behavior is to call new using the
/// supplied allocator.
///
/// This function can be overridden in a derive class.
@ -610,7 +610,7 @@ public:
//===--------------------------------------------------------------------===//
// Internal functions used to automatically number MachineBasicBlocks
/// \brief Adds the MBB to the internal numbering. Returns the unique number
/// Adds the MBB to the internal numbering. Returns the unique number
/// assigned to the MBB.
unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
MBBNumbering.push_back(MBB);
@ -696,7 +696,7 @@ public:
OperandRecycler.deallocate(Cap, Array);
}
/// \brief Allocate and initialize a register mask with @p NumRegister bits.
/// Allocate and initialize a register mask with @p NumRegister bits.
uint32_t *allocateRegisterMask(unsigned NumRegister) {
unsigned Size = (NumRegister + 31) / 32;
uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);

View File

@ -576,7 +576,7 @@ public:
return hasProperty(MCID::FoldableAsLoad, Type);
}
/// \brief Return true if this instruction behaves
/// Return true if this instruction behaves
/// the same way as the generic REG_SEQUENCE instructions.
/// E.g., on ARM,
/// dX VMOVDRR rY, rZ
@ -590,7 +590,7 @@ public:
return hasProperty(MCID::RegSequence, Type);
}
/// \brief Return true if this instruction behaves
/// Return true if this instruction behaves
/// the same way as the generic EXTRACT_SUBREG instructions.
/// E.g., on ARM,
/// rX, rY VMOVRRD dZ
@ -605,7 +605,7 @@ public:
return hasProperty(MCID::ExtractSubreg, Type);
}
/// \brief Return true if this instruction behaves
/// Return true if this instruction behaves
/// the same way as the generic INSERT_SUBREG instructions.
/// E.g., on ARM,
/// dX = VSETLNi32 dY, rZ, Imm
@ -1049,7 +1049,7 @@ public:
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) const;
/// \brief Applies the constraints (def/use) implied by this MI on \p Reg to
/// Applies the constraints (def/use) implied by this MI on \p Reg to
/// the given \p CurRC.
/// If \p ExploreBundle is set and MI is part of a bundle, all the
/// instructions inside the bundle will be taken into account. In other words,
@ -1066,7 +1066,7 @@ public:
const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
bool ExploreBundle = false) const;
/// \brief Applies the constraints (def/use) implied by the \p OpIdx operand
/// Applies the constraints (def/use) implied by the \p OpIdx operand
/// to the given \p CurRC.
///
/// Returns the register class that satisfies both \p CurRC and the
@ -1363,7 +1363,7 @@ private:
/// Slow path for hasProperty when we're dealing with a bundle.
bool hasPropertyInBundle(unsigned Mask, QueryType Type) const;
/// \brief Implements the logic of getRegClassConstraintEffectForVReg for the
/// Implements the logic of getRegClassConstraintEffectForVReg for the
/// this MI and the given operand index \p OpIdx.
/// If the related operand does not constrained Reg, this returns CurRC.
const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(

View File

@ -54,7 +54,7 @@ public:
/// that contains the header.
MachineBasicBlock *getBottomBlock();
/// \brief Find the block that contains the loop control variable and the
/// Find the block that contains the loop control variable and the
/// loop test. This will return the latch block if it's one of the exiting
/// blocks. Otherwise, return the exiting block. Return 'null' when
/// multiple exiting blocks are present.
@ -97,7 +97,7 @@ public:
LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
/// \brief Find the block that either is the loop preheader, or could
/// Find the block that either is the loop preheader, or could
/// speculatively be used as the preheader. This is e.g. useful to place
/// loop setup code. Code that cannot be speculated should not be placed
/// here. SpeculativePreheader is controlling whether it also tries to

View File

@ -677,7 +677,7 @@ public:
/// should stay in sync with the hash_value overload below.
bool isIdenticalTo(const MachineOperand &Other) const;
/// \brief MachineOperand hash_value overload.
/// MachineOperand hash_value overload.
///
/// Note that this includes the same information in the hash that
/// isIdenticalTo uses for comparison. It is thus suited for use in hash

View File

@ -24,7 +24,7 @@ class MachineBasicBlock;
class MachineBlockFrequencyInfo;
class MachineInstr;
/// \brief Common features for diagnostics dealing with optimization remarks
/// Common features for diagnostics dealing with optimization remarks
/// that are used by machine passes.
class DiagnosticInfoMIROptimization : public DiagnosticInfoOptimizationBase {
public:
@ -151,7 +151,7 @@ public:
/// Emit an optimization remark.
void emit(DiagnosticInfoOptimizationBase &OptDiag);
/// \brief Whether we allow for extra compile-time budget to perform more
/// Whether we allow for extra compile-time budget to perform more
/// analysis to be more informative.
///
/// This is useful to enable additional missed optimizations to be reported
@ -164,7 +164,7 @@ public:
.getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
}
/// \brief Take a lambda that returns a remark which will be emitted. Second
/// Take a lambda that returns a remark which will be emitted. Second
/// argument is only used to restrict this to functions.
template <typename T>
void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
@ -192,7 +192,7 @@ private:
/// Similar but use value from \p OptDiag and update hotness there.
void computeHotness(DiagnosticInfoMIROptimization &Remark);
/// \brief Only allow verbose messages if we know we're filtering by hotness
/// Only allow verbose messages if we know we're filtering by hotness
/// (BFI is only set in this case).
bool shouldEmitVerbose() { return MBFI != nullptr; }
};

Some files were not shown because too many files have changed in this diff Show More