1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-23 03:02:53 +01:00

atomic_t update

This commit is contained in:
Nekotekina 2015-03-13 02:18:38 +03:00
parent 5764ee7a2a
commit 5c31037c04

View File

@@ -7,193 +7,194 @@
#undef InterlockedXor
// Maps a value type T to an unsigned integral "substitute" type of the same size,
// suitable for the Interlocked* intrinsics.
// NOTE(review): this block is a scraped diff view with the +/- markers stripped;
// the two adjacent struct-header lines are the pre-change name (_to_atomic)
// followed by its post-change replacement (_to_atomic_subtype).
template<typename T, size_t size = sizeof(T)>
struct _to_atomic
struct _to_atomic_subtype
{
// Primary template: only sizes with a native interlocked width are accepted.
static_assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 16, "Invalid atomic type");
typedef T type;
};
// Specialization for 1-byte types: the interlocked substitute type is uint8_t.
// NOTE(review): diff view — each adjacent pair of lines is the pre-change line
// (old struct name / typedef) followed by its post-change replacement (new name / using).
template<typename T>
struct _to_atomic<T, 1>
struct _to_atomic_subtype<T, 1>
{
typedef uint8_t type;
using type = uint8_t;
};
// Specialization for 2-byte types: the interlocked substitute type is uint16_t.
// NOTE(review): diff view — each adjacent pair of lines is the pre-change line
// (old struct name / typedef) followed by its post-change replacement (new name / using).
template<typename T>
struct _to_atomic<T, 2>
struct _to_atomic_subtype<T, 2>
{
typedef uint16_t type;
using type = uint16_t;
};
// Specialization for 4-byte types: the interlocked substitute type is uint32_t.
// NOTE(review): diff view — each adjacent pair of lines is the pre-change line
// (old struct name / typedef) followed by its post-change replacement (new name / using).
template<typename T>
struct _to_atomic<T, 4>
struct _to_atomic_subtype<T, 4>
{
typedef uint32_t type;
using type = uint32_t;
};
// Specialization for 8-byte types: the interlocked substitute type is uint64_t.
// NOTE(review): diff view — each adjacent pair of lines is the pre-change line
// (old struct name / typedef) followed by its post-change replacement (new name / using).
template<typename T>
struct _to_atomic<T, 8>
struct _to_atomic_subtype<T, 8>
{
typedef uint64_t type;
using type = uint64_t;
};
// Specialization for 16-byte types: the substitute type is u128
// (a project-declared 128-bit type; not visible in this chunk — TODO confirm definition).
// NOTE(review): diff view — each adjacent pair of lines is the pre-change line
// (old struct name / typedef) followed by its post-change replacement (new name / using).
template<typename T>
struct _to_atomic<T, 16>
struct _to_atomic_subtype<T, 16>
{
typedef u128 type;
using type = u128;
};
// Generic atomic wrapper built on the Interlocked* intrinsics.
// NOTE(review): this entire block is a unified-diff view of one commit with the
// +/- markers stripped (hunk "-7,193 +7,194"): adjacent near-duplicate lines are
// the pre-change line immediately followed by its post-change replacement, so the
// text below is NOT compilable as-is. The commit changes _atomic_base from a class
// holding a typedef'd "atomic_type" member to a union overlaying the value type
// and its same-size unsigned "subtype", drops the volatile qualifiers from the
// member functions, and replaces C-style casts with small punning helpers.
template<typename T>
class _atomic_base
union _atomic_base
{
// pre-change storage: the substitute integral type itself
typedef typename _to_atomic<T, sizeof(T)>::type atomic_type;
atomic_type data;
// post-change: strip cv-qualifiers from T, then pick the equally-sized unsigned type
using type = typename std::remove_cv<T>::type;
using subtype = typename _to_atomic_subtype<type, sizeof(type)>::type;
type data; // unsafe direct access
subtype volatile sub_data; // unsafe direct access to substitute type
// Type-punning helpers between T and its integral substitute (post-change additions).
// NOTE(review): reinterpret_cast punning — presumably relies on T being trivially
// copyable and exactly sizeof(subtype); not checked in this chunk — TODO confirm.
__forceinline static const subtype to_subtype(const type& value)
{
return reinterpret_cast<const subtype&>(value);
}
__forceinline static const type from_subtype(const subtype value)
{
return reinterpret_cast<const type&>(value);
}
__forceinline static type& to_type(subtype& value)
{
return reinterpret_cast<type&>(value);
}
public:
// atomically compare data with cmp, replace with exch if equal, return previous data value anyway
__forceinline const T compare_and_swap(const T& cmp, const T& exch) volatile
__forceinline const type compare_and_swap(const type& cmp, const type& exch)
{
// pre-change body: C-style casts on the raw data member
const atomic_type res = InterlockedCompareExchange(&data, (atomic_type&)(exch), (atomic_type&)(cmp));
return (T&)res;
// post-change body: routed through to_subtype/from_subtype
return from_subtype(InterlockedCompareExchange(&sub_data, to_subtype(exch), to_subtype(cmp)));
}
// atomically compare data with cmp, replace with exch if equal, return true if data was replaced
__forceinline bool compare_and_swap_test(const T& cmp, const T& exch) volatile
__forceinline bool compare_and_swap_test(const type& cmp, const type& exch)
{
return InterlockedCompareExchangeTest(&data, (atomic_type&)(exch), (atomic_type&)(cmp));
return InterlockedCompareExchangeTest(&sub_data, to_subtype(exch), to_subtype(cmp));
}
// read data with memory barrier
__forceinline const T read_sync() const volatile
__forceinline const type read_sync() const
{
// CAS with cmp == exch == 0 is used purely for its barrier: it only stores
// when the value is already 0, so the stored value never changes.
const atomic_type res = InterlockedCompareExchange(const_cast<volatile atomic_type*>(&data), 0, 0);
return (T&)res;
return from_subtype(InterlockedCompareExchange(const_cast<subtype*>(&sub_data), 0, 0));
}
// atomically replace data with exch, return previous data value
__forceinline const T exchange(const T& exch) volatile
__forceinline const type exchange(const type& exch)
{
const atomic_type res = InterlockedExchange(&data, (atomic_type&)(exch));
return (T&)res;
return from_subtype(InterlockedExchange(&sub_data, to_subtype(exch)));
}
// read data without memory barrier
__forceinline const T read_relaxed() const volatile
__forceinline const type read_relaxed() const
{
return (T&)data;
const subtype value = const_cast<const subtype&>(sub_data);
return from_subtype(value);
}
// write data without memory barrier
__forceinline void write_relaxed(const T& value)
__forceinline void write_relaxed(const type& value)
{
data = (atomic_type&)(value);
const_cast<subtype&>(sub_data) = to_subtype(value);
}
// perform atomic operation on data
// Classic CAS retry loop: snapshot, mutate a copy via the callback, attempt to
// publish; repeat until no concurrent writer raced us.
template<typename FT> __forceinline void atomic_op(const FT atomic_proc) volatile
template<typename FT> __forceinline void atomic_op(const FT atomic_proc)
{
while (true)
{
const T old = read_relaxed();
T _new = old;
atomic_proc(_new); // function should accept reference to T type
if (compare_and_swap_test(old, _new)) return;
const subtype old = const_cast<const subtype&>(sub_data);
subtype _new = old;
atomic_proc(to_type(_new)); // function should accept reference to T type
if (InterlockedCompareExchangeTest(&sub_data, _new, old)) return;
}
}
// perform atomic operation on data with special exit condition (if intermediate result != proceed_value)
template<typename RT, typename FT> __forceinline RT atomic_op(const RT proceed_value, const FT atomic_proc) volatile
template<typename RT, typename FT> __forceinline RT atomic_op(const RT proceed_value, const FT atomic_proc)
{
while (true)
{
const T old = read_relaxed();
T _new = old;
RT res = (RT)atomic_proc(_new); // function should accept reference to T type and return some value
const subtype old = const_cast<const subtype&>(sub_data);
subtype _new = old;
auto res = static_cast<RT>(atomic_proc(to_type(_new))); // function should accept reference to T type and return some value
// Bail out early (without publishing) if the callback's result differs
// from proceed_value.
if (res != proceed_value) return res;
if (compare_and_swap_test(old, _new)) return proceed_value;
if (InterlockedCompareExchangeTest(&sub_data, _new, old)) return proceed_value;
}
}
// perform atomic operation on data with additional memory barrier
// Variant that reads the initial snapshot with a barrier and reuses the value
// returned by a failed CAS as the next snapshot (avoids a separate re-read).
template<typename FT> __forceinline void atomic_op_sync(const FT atomic_proc) volatile
template<typename FT> __forceinline void atomic_op_sync(const FT atomic_proc)
{
T old = read_sync();
subtype old = InterlockedCompareExchange(&sub_data, 0, 0);
while (true)
{
T _new = old;
atomic_proc(_new); // function should accept reference to T type
const T val = compare_and_swap(old, _new);
if ((atomic_type&)val == (atomic_type&)old) return;
subtype _new = old;
atomic_proc(to_type(_new)); // function should accept reference to T type
const subtype val = InterlockedCompareExchange(&sub_data, _new, old);
if (val == old) return;
old = val;
}
}
// perform atomic operation on data with additional memory barrier and special exit condition (if intermediate result != proceed_value)
template<typename RT, typename FT> __forceinline RT atomic_op_sync(const RT proceed_value, const FT atomic_proc) volatile
template<typename RT, typename FT> __forceinline RT atomic_op_sync(const RT proceed_value, const FT atomic_proc)
{
T old = read_sync();
subtype old = InterlockedCompareExchange(&sub_data, 0, 0);
while (true)
{
T _new = old;
RT res = (RT)atomic_proc(_new); // function should accept reference to T type and return some value
subtype _new = old;
auto res = static_cast<RT>(atomic_proc(to_type(_new))); // function should accept reference to T type and return some value
if (res != proceed_value) return res;
const T val = compare_and_swap(old, _new);
if ((atomic_type&)val == (atomic_type&)old) return proceed_value;
const subtype val = InterlockedCompareExchange(&sub_data, _new, old);
if (val == old) return proceed_value;
old = val;
}
}
// perform non-atomic operation on data directly without memory barriers
// NOTE(review): only the pre-change version of direct_op appears in this hunk;
// there is no post-change counterpart line — presumably removed by the commit.
template<typename FT> __forceinline void direct_op(const FT direct_proc) volatile
{
direct_proc((T&)data);
}
// atomic bitwise OR, returns previous data
__forceinline const T _or(const T& right) volatile
__forceinline const type _or(const type& right)
{
const atomic_type res = InterlockedOr(&data, (atomic_type&)(right));
return (T&)res;
return from_subtype(InterlockedOr(&sub_data, to_subtype(right)));
}
// atomic bitwise AND, returns previous data
__forceinline const T _and(const T& right) volatile
__forceinline const type _and(const type& right)
{
const atomic_type res = InterlockedAnd(&data, (atomic_type&)(right));
return (T&)res;
return from_subtype(InterlockedAnd(&sub_data, to_subtype(right)));
}
// atomic bitwise AND NOT (inverts right argument), returns previous data
__forceinline const T _and_not(const T& right) volatile
__forceinline const type _and_not(const type& right)
{
const atomic_type res = InterlockedAnd(&data, ~(atomic_type&)(right));
return (T&)res;
return from_subtype(InterlockedAnd(&sub_data, ~to_subtype(right)));
}
// atomic bitwise XOR, returns previous data
__forceinline const T _xor(const T& right) volatile
__forceinline const type _xor(const type& right)
{
const atomic_type res = InterlockedXor(&data, (atomic_type&)(right));
return (T&)res;
return from_subtype(InterlockedXor(&sub_data, to_subtype(right)));
}
// Compound bitwise operators: the interlocked op returns the PREVIOUS value, so the
// same operation is re-applied to the result to yield the post-operation value.
__forceinline const T operator |= (const T& right) volatile
__forceinline const type operator |= (const type& right)
{
const atomic_type res = InterlockedOr(&data, (atomic_type&)(right)) | (atomic_type&)(right);
return (T&)res;
return from_subtype(InterlockedOr(&sub_data, to_subtype(right)) | to_subtype(right));
}
__forceinline const T operator &= (const T& right) volatile
__forceinline const type operator &= (const type& right)
{
const atomic_type res = InterlockedAnd(&data, (atomic_type&)(right)) & (atomic_type&)(right);
return (T&)res;
return from_subtype(InterlockedAnd(&sub_data, to_subtype(right)) & to_subtype(right));
}
__forceinline const T operator ^= (const T& right) volatile
__forceinline const type operator ^= (const type& right)
{
const atomic_type res = InterlockedXor(&data, (atomic_type&)(right)) ^ (atomic_type&)(right);
return (T&)res;
return from_subtype(InterlockedXor(&sub_data, to_subtype(right)) ^ to_subtype(right));
}
};