//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains some functions that are useful for math stuff.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MATHEXTRAS_H
#define LLVM_SUPPORT_MATHEXTRAS_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/SwapByteOrder.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <type_traits>
#include <limits>

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifdef __ANDROID_NDK__
#include <android/api-level.h>
#endif

namespace llvm {

/// \brief The behavior an operation has on an input of 0.
enum ZeroBehavior {
  /// \brief The returned value is undefined.
  ZB_Undefined,
  /// \brief The returned value is numeric_limits<T>::max()
  ZB_Max,
  /// \brief The returned value is numeric_limits<T>::digits
  ZB_Width
};

namespace detail {
template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;
    if (Val & 0x1)
      return 0;

    // Bisection method.
    std::size_t ZeroBits = 0;
    T Shift = std::numeric_limits<T>::digits >> 1;
    T Mask = std::numeric_limits<T>::max() >> Shift;
    while (Shift) {
      if ((Val & Mask) == 0) {
        Val >>= Shift;
        ZeroBits |= Shift;
      }
      Shift >>= 1;
      Mask >>= Shift;
    }
    return ZeroBits;
  }
};

#if __GNUC__ >= 4 || defined(_MSC_VER)
template <typename T> struct TrailingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward(&Index, Val);
    return Index;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T> struct TrailingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward64(&Index, Val);
    return Index;
#endif
  }
};
#endif
#endif
} // namespace detail

/// \brief Count number of 0's from the least significant bit to the most
///   stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
///   valid arguments.
template <typename T>
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
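
// Illustrative values, assuming the documented ZB_Width default:
//   countTrailingZeros(uint32_t(8)) == 3   (8 == 0b1000)
//   countTrailingZeros(uint32_t(0)) == 32  (ZB_Width returns the bit width)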

namespace detail {
template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;

    // Bisection method.
    std::size_t ZeroBits = 0;
    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
      T Tmp = Val >> Shift;
      if (Tmp)
        Val = Tmp;
      else
        ZeroBits |= Shift;
    }
    return ZeroBits;
  }
};

#if __GNUC__ >= 4 || defined(_MSC_VER)
template <typename T> struct LeadingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse(&Index, Val);
    return Index ^ 31;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T> struct LeadingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse64(&Index, Val);
    return Index ^ 63;
#endif
  }
};
#endif
#endif
} // namespace detail

/// \brief Count number of 0's from the most significant bit to the least
///   stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
///   valid arguments.
template <typename T>
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}
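
// Illustrative values, assuming the documented ZB_Width default:
//   countLeadingZeros(uint32_t(8)) == 28
//   countLeadingZeros(uint16_t(1)) == 15
//   countLeadingZeros(uint32_t(0)) == 32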

/// \brief Get the index of the first set bit starting from the least
///   significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
///   valid arguments.
template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  return countTrailingZeros(Val, ZB_Undefined);
}

/// \brief Get the index of the last set bit starting from the least
///   significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
///   valid arguments.
template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // Use ^ instead of - because both gcc and llvm can remove the associated ^
  // in the __builtin_clz intrinsic on x86.
  return countLeadingZeros(Val, ZB_Undefined) ^
         (std::numeric_limits<T>::digits - 1);
}
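
// Illustrative values, assuming the documented ZB_Max default:
//   findFirstSet(uint32_t(0x18)) == 3
//   findLastSet(uint32_t(0x18))  == 4
//   findFirstSet(uint32_t(0))    == UINT32_MAX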

/// \brief Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
  R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};

/// \brief Reverse the bits in \p Val.
template <typename T>
T reverseBits(T Val) {
  unsigned char in[sizeof(Val)];
  unsigned char out[sizeof(Val)];
  std::memcpy(in, &Val, sizeof(Val));
  for (unsigned i = 0; i < sizeof(Val); ++i)
    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
  std::memcpy(&Val, out, sizeof(Val));
  return Val;
}
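
// Illustrative values (the byte-wise table swap above is host-endian neutral):
//   reverseBits(uint8_t(0x01))    == 0x80
//   reverseBits(uint16_t(0x0001)) == 0x8000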

// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
  return static_cast<uint32_t>(Value >> 32);
}

/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
  return static_cast<uint32_t>(Value);
}

/// Make_64 - This function makes a 64-bit integer from a high / low pair of
///           32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  return ((uint64_t)High << 32) | (uint64_t)Low;
}
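
// Illustrative round trip:
//   Hi_32(0x1234567800000000) == 0x12345678, Lo_32(0x00000000ABCDEF01) == 0xABCDEF01
//   Make_64(0x12345678, 0xABCDEF01) == 0x12345678ABCDEF01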

/// isInt - Checks if an integer fits into the given bit width.
template <unsigned N> constexpr inline bool isInt(int64_t x) {
  return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
// Template specializations to get better code for common cases.
template <> constexpr inline bool isInt<8>(int64_t x) {
  return static_cast<int8_t>(x) == x;
}
template <> constexpr inline bool isInt<16>(int64_t x) {
  return static_cast<int16_t>(x) == x;
}
template <> constexpr inline bool isInt<32>(int64_t x) {
  return static_cast<int32_t>(x) == x;
}

/// isShiftedInt<N,S> - Checks if a signed integer is an N bit number shifted
///                     left by S.
template <unsigned N, unsigned S>
constexpr inline bool isShiftedInt(int64_t x) {
  static_assert(
      N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number).");
  static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
  return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
}
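
// Illustrative values:
//   isInt<8>(127) == true,  isInt<8>(128) == false,  isInt<8>(-128) == true
//   isShiftedInt<8, 2>(-512) == true   (-512 == -128 << 2)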

/// isUInt - Checks if an unsigned integer fits into the given bit width.
///
/// This is written as two functions rather than as simply
///
///   return N >= 64 || X < (UINT64_C(1) << N);
///
/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
/// left too many places.
template <unsigned N>
constexpr inline typename std::enable_if<(N < 64), bool>::type
isUInt(uint64_t X) {
  static_assert(N > 0, "isUInt<0> doesn't make sense");
  return X < (UINT64_C(1) << (N));
}
template <unsigned N>
constexpr inline typename std::enable_if<N >= 64, bool>::type
isUInt(uint64_t X) {
  return true;
}

// Template specializations to get better code for common cases.
template <> constexpr inline bool isUInt<8>(uint64_t x) {
  return static_cast<uint8_t>(x) == x;
}
template <> constexpr inline bool isUInt<16>(uint64_t x) {
  return static_cast<uint16_t>(x) == x;
}
template <> constexpr inline bool isUInt<32>(uint64_t x) {
  return static_cast<uint32_t>(x) == x;
}

/// Checks if an unsigned integer is an N bit number shifted left by S.
template <unsigned N, unsigned S>
constexpr inline bool isShiftedUInt(uint64_t x) {
  static_assert(
      N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
  static_assert(N + S <= 64,
                "isShiftedUInt<N, S> with N + S > 64 is too wide.");
  // Per the two static_asserts above, S must be strictly less than 64. So
  // 1 << S is not undefined behavior.
  return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
}
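
// Illustrative values:
//   isUInt<8>(255) == true,  isUInt<8>(256) == false
//   isShiftedUInt<8, 2>(1020) == true   (1020 == 255 << 2)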

/// Gets the maximum value for an N-bit unsigned integer.
inline uint64_t maxUIntN(uint64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // uint64_t(1) << 64 is undefined behavior, so we can't do
  //   (uint64_t(1) << N) - 1
  // without checking first that N != 64. But this works and doesn't have a
  // branch.
  return UINT64_MAX >> (64 - N);
}

/// Gets the minimum value for an N-bit signed integer.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  return -(UINT64_C(1) << (N - 1));
}

/// Gets the maximum value for an N-bit signed integer.
inline int64_t maxIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // This relies on two's complement wraparound when N == 64, so we convert to
  // int64_t only at the very end to avoid UB.
  return (UINT64_C(1) << (N - 1)) - 1;
}
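
// Illustrative values:
//   maxUIntN(16) == 65535,  minIntN(8) == -128,  maxIntN(8) == 127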

/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
/// bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
  return N >= 64 || x <= maxUIntN(N);
}

/// isIntN - Checks if a signed integer fits into the given (dynamic)
/// bit width.
inline bool isIntN(unsigned N, int64_t x) {
  return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
}

/// isMask_32 - This function returns true if the argument is a non-empty
/// sequence of ones starting at the least significant bit with the remainder
/// zero (32 bit version). Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
  return Value && ((Value + 1) & Value) == 0;
}

/// isMask_64 - This function returns true if the argument is a non-empty
/// sequence of ones starting at the least significant bit with the remainder
/// zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
  return Value && ((Value + 1) & Value) == 0;
}

/// isShiftedMask_32 - This function returns true if the argument contains a
/// non-empty sequence of ones with the remainder zero (32 bit version.)
/// Ex. isShiftedMask_32(0x0000FF00U) == true.
constexpr inline bool isShiftedMask_32(uint32_t Value) {
  return Value && isMask_32((Value - 1) | Value);
}

/// isShiftedMask_64 - This function returns true if the argument contains a
/// non-empty sequence of ones with the remainder zero (64 bit version.)
constexpr inline bool isShiftedMask_64(uint64_t Value) {
  return Value && isMask_64((Value - 1) | Value);
}
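
// Illustrative values:
//   isMask_32(0x0000FFFFU) == true,   isMask_32(0x0000FFFEU) == false
//   isShiftedMask_32(0x0000FF00U) == true,   isShiftedMask_32(0) == false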

/// isPowerOf2_32 - This function returns true if the argument is a power of
/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
  return Value && !(Value & (Value - 1));
}

/// isPowerOf2_64 - This function returns true if the argument is a power of two
/// > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return Value && !(Value & (Value - int64_t(1L)));
}

/// ByteSwap_16 - This function returns a byte-swapped representation of the
/// 16-bit argument, Value.
inline uint16_t ByteSwap_16(uint16_t Value) {
  return sys::SwapByteOrder_16(Value);
}

/// ByteSwap_32 - This function returns a byte-swapped representation of the
/// 32-bit argument, Value.
inline uint32_t ByteSwap_32(uint32_t Value) {
  return sys::SwapByteOrder_32(Value);
}

/// ByteSwap_64 - This function returns a byte-swapped representation of the
/// 64-bit argument, Value.
inline uint64_t ByteSwap_64(uint64_t Value) {
  return sys::SwapByteOrder_64(Value);
}

/// \brief Count the number of ones from the most significant bit to the first
/// zero bit.
///
/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return countLeadingZeros<T>(~Value, ZB);
}

/// \brief Count the number of ones from the least significant bit to the first
/// zero bit.
///
/// Ex. countTrailingOnes(0x00FF00FF) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return countTrailingZeros<T>(~Value, ZB);
}

namespace detail {
template <typename T, std::size_t SizeOfT> struct PopulationCounter {
  static unsigned count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if __GNUC__ >= 4
    return __builtin_popcount(Value);
#else
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
  }
};

template <typename T> struct PopulationCounter<T, 8> {
  static unsigned count(T Value) {
#if __GNUC__ >= 4
    return __builtin_popcountll(Value);
#else
    uint64_t v = Value;
    v = v - ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail

/// \brief Count the number of set bits in a value.
/// Ex. countPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T>
inline unsigned countPopulation(T Value) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}
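
// Illustrative values:
//   countPopulation(uint32_t(0xF000F000)) == 8
//   countPopulation(uint8_t(0x07))        == 3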

/// Log2 - This function returns the log base 2 of the specified value.
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}

/// Log2_32 - This function returns the floor log base 2 of the specified value,
/// -1 if the value is zero. (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
  return 31 - countLeadingZeros(Value);
}

/// Log2_64 - This function returns the floor log base 2 of the specified value,
/// -1 if the value is zero. (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
  return 63 - countLeadingZeros(Value);
}

/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
/// value, 32 if the value is zero. (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
  return 32 - countLeadingZeros(Value - 1);
}

/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified
/// value, 64 if the value is zero. (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return 64 - countLeadingZeros(Value - 1);
}
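
// Illustrative values:
//   Log2_32(33) == 5,  Log2_32_Ceil(33) == 6,  Log2_64(1) == 0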

/// GreatestCommonDivisor64 - Return the greatest common divisor of the two
/// values using Euclid's algorithm.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  while (B) {
    uint64_t T = B;
    B = A % B;
    A = T;
  }
  return A;
}

/// BitsToDouble - This function takes a 64-bit integer and returns the bit
/// equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  union {
    uint64_t L;
    double D;
  } T;
  T.L = Bits;
  return T.D;
}

/// BitsToFloat - This function takes a 32-bit integer and returns the bit
/// equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  union {
    uint32_t I;
    float F;
  } T;
  T.I = Bits;
  return T.F;
}

/// DoubleToBits - This function takes a double and returns the bit
/// equivalent 64-bit integer.  Note that copying doubles around
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  union {
    uint64_t L;
    double D;
  } T;
  T.D = Double;
  return T.L;
}

/// FloatToBits - This function takes a float and returns the bit
/// equivalent 32-bit integer.  Note that copying floats around
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
  union {
    uint32_t I;
    float F;
  } T;
  T.F = Float;
  return T.I;
}

/// MinAlign - A and B are either alignments or offsets. Return the minimum
/// alignment that may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  // The largest power of 2 that divides both A and B.
  //
  // Replace "-Value" by "1+~Value" in the following commented code to avoid
  // MSVC warning C4146
  //    return (A | B) & -(A | B);
  return (A | B) & (1 + ~(A | B));
}
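
// Illustrative values:
//   GreatestCommonDivisor64(12, 18) == 6,   MinAlign(8, 12) == 4
//   FloatToBits(1.0f) == 0x3F800000,  BitsToFloat(0x3F800000) == 1.0f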

/// \brief Aligns \c Addr to \c Alignment bytes, rounding up.
///
/// Alignment should be a power of two.  This method rounds up, so
/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
inline uintptr_t alignAddr(const void *Addr, size_t Alignment) {
  assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
         "Alignment is not a power of two!");

  assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);

  return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
}

/// \brief Returns the necessary adjustment for aligning \c Ptr to \c Alignment
/// bytes, rounding up.
inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) {
  return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
}

/// NextPowerOf2 - Returns the next power of two (in 64-bits)
/// that is strictly greater than A.  Returns zero on overflow.
inline uint64_t NextPowerOf2(uint64_t A) {
  A |= (A >> 1);
  A |= (A >> 2);
  A |= (A >> 4);
  A |= (A >> 8);
  A |= (A >> 16);
  A |= (A >> 32);
  return A + 1;
}

/// Returns the power of two which is less than or equal to the given value.
/// Essentially, it is a floor operation across the domain of powers of two.
inline uint64_t PowerOf2Floor(uint64_t A) {
  if (!A) return 0;
  return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
}

/// Returns the power of two which is greater than or equal to the given value.
/// Essentially, it is a ceil operation across the domain of powers of two.
inline uint64_t PowerOf2Ceil(uint64_t A) {
  if (!A)
    return 0;
  return NextPowerOf2(A - 1);
}
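
// Illustrative values:
//   NextPowerOf2(5) == 8,  PowerOf2Floor(5) == 4,  PowerOf2Ceil(5) == 8
//   PowerOf2Ceil(8) == 8   (a power of two maps to itself)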

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// If non-zero \p Skew is specified, the return value will be a minimal
/// integer that is greater than or equal to \p Value and equal to
/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
///
/// Examples:
/// \code
///   alignTo(5, 8) = 8
///   alignTo(17, 8) = 24
///   alignTo(~0LL, 8) = 0
///   alignTo(321, 255) = 510
///
///   alignTo(5, 8, 7) = 7
///   alignTo(17, 8, 1) = 17
///   alignTo(~0LL, 8, 3) = 3
///   alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value + Align - 1 - Skew) / Align * Align + Skew;
}

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
  static_assert(Align != 0u, "Align must be non-zero");
  return (Value + Align - 1) / Align * Align;
}

/// \c alignTo for contexts where a constant expression is required.
/// \sa alignTo
///
/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
template <uint64_t Align>
struct AlignTo {
  static_assert(Align != 0u, "Align must be non-zero");
  template <uint64_t Value>
  struct from_value {
    static const uint64_t value = (Value + Align - 1) / Align * Align;
  };
};

/// Returns the largest uint64_t less than or equal to \p Value that is
/// \p Skew mod \p Align. \p Align must be non-zero.
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value - Skew) / Align * Align + Skew;
}

/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align. \p Align must be
/// non-zero.
inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
  return alignTo(Value, Align) - Value;
}
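
// Illustrative values:
//   alignDown(17, 8) == 16,  alignDown(17, 8, 1) == 17
//   OffsetToAlignment(5, 8) == 3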

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 32, "Bit width out of range.");
  return int32_t(X << (32 - B)) >> (32 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 32 && "Bit width out of range.");
  return int32_t(X << (32 - B)) >> (32 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 64, "Bit width out of range.");
  return int64_t(x << (64 - B)) >> (64 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 64 && "Bit width out of range.");
  return int64_t(X << (64 - B)) >> (64 - B);
}
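
// Illustrative values:
//   SignExtend32<4>(0xF) == -1,  SignExtend32<4>(0x7) == 7
//   SignExtend64(0x80, 8) == -128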

/// Subtract two unsigned integers, X and Y, of type T and return the absolute
/// value of the result.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
AbsoluteDifference(T X, T Y) {
  return std::max(X, Y) - std::min(X, Y);
}

/// Add two unsigned integers, X and Y, of type T.  Clamp the result to the
/// maximum representable value of T on overflow.  ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
  // Hacker's Delight, p. 29
  T Z = X + Y;
  Overflowed = (Z < X || Z < Y);
  if (Overflowed)
    return std::numeric_limits<T>::max();
  else
    return Z;
}

/// Multiply two unsigned integers, X and Y, of type T.  Clamp the result to the
/// maximum representable value of T on overflow.  ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  if (Log2Z < Log2Max) {
    return X * Y;
  }
  if (Log2Z > Log2Max) {
    Overflowed = true;
    return Max;
  }

  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  if (Z & ~(Max >> 1)) {
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  if (X & 1)
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}
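
// Illustrative values for an 8-bit T:
//   SaturatingAdd<uint8_t>(200, 100)    == 255 (overflow flag set)
//   SaturatingMultiply<uint8_t>(16, 16) == 255 (overflow flag set)
//   SaturatingMultiply<uint8_t>(15, 15) == 225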

/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  T Product = SaturatingMultiply(X, Y, &Overflowed);
  if (Overflowed)
    return Product;

  return SaturatingAdd(A, Product, &Overflowed);
}

/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
extern const float huge_valf;
} // End llvm namespace

#endif