1
0
(note: the two bare values above are orphaned counters left over from the scraped diff-view web page; their labels were lost during extraction)
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-21 18:22:33 +01:00

Use uptr (std::uintptr_t alias)

This commit is contained in:
Nekotekina 2020-12-12 16:29:55 +03:00
parent b59f142d4e
commit a6a5292cd7
15 changed files with 62 additions and 62 deletions

View File

@ -343,7 +343,7 @@ struct MemoryManager1 : llvm::RTDyldMemoryManager
return {addr, llvm::JITSymbolFlags::Exported};
}
u8* allocate(u64& oldp, std::uintptr_t size, uint align, utils::protection prot)
u8* allocate(u64& oldp, uptr size, uint align, utils::protection prot)
{
if (align > c_page_size)
{
@ -374,12 +374,12 @@ struct MemoryManager1 : llvm::RTDyldMemoryManager
return this->ptr + olda;
}
u8* allocateCodeSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name) override
u8* allocateCodeSection(uptr size, uint align, uint sec_id, llvm::StringRef sec_name) override
{
return allocate(code_ptr, size, align, utils::protection::wx);
}
u8* allocateDataSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name, bool is_ro) override
u8* allocateDataSection(uptr size, uint align, uint sec_id, llvm::StringRef sec_name, bool is_ro) override
{
return allocate(data_ptr, size, align, utils::protection::rw);
}
@ -407,12 +407,12 @@ struct MemoryManager2 : llvm::RTDyldMemoryManager
{
}
u8* allocateCodeSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name) override
u8* allocateCodeSection(uptr size, uint align, uint sec_id, llvm::StringRef sec_name) override
{
return jit_runtime::alloc(size, align, true);
}
u8* allocateDataSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name, bool is_ro) override
u8* allocateDataSection(uptr size, uint align, uint sec_id, llvm::StringRef sec_name, bool is_ro) override
{
return jit_runtime::alloc(size, align, false);
}

View File

@ -19,7 +19,7 @@ struct fmt_unveil
static inline u64 get(const T& arg)
{
return reinterpret_cast<std::uintptr_t>(&arg);
return reinterpret_cast<uptr>(&arg);
}
// Temporary value container (can possibly be created by other fmt_unveil<> specializations)
@ -30,7 +30,7 @@ struct fmt_unveil
// Allow implicit conversion
operator u64() const
{
return reinterpret_cast<std::uintptr_t>(&arg);
return reinterpret_cast<uptr>(&arg);
}
};
@ -93,7 +93,7 @@ struct fmt_unveil<T*, void>
static inline u64 get(type arg)
{
return reinterpret_cast<std::uintptr_t>(arg);
return reinterpret_cast<uptr>(arg);
}
};
@ -104,7 +104,7 @@ struct fmt_unveil<T[N], void>
static inline u64 get(type arg)
{
return reinterpret_cast<std::uintptr_t>(arg);
return reinterpret_cast<uptr>(arg);
}
};
@ -132,7 +132,7 @@ struct fmt_class_string
// Helper function (converts arg to object reference)
static SAFE_BUFFERS FORCE_INLINE const T& get_object(u64 arg)
{
return *reinterpret_cast<const T*>(static_cast<std::uintptr_t>(arg));
return *reinterpret_cast<const T*>(static_cast<uptr>(arg));
}
// Enum -> string function type

View File

@ -2459,8 +2459,8 @@ void thread_ctrl::detect_cpu_layout()
else
{
// Iterate through the buffer until a core with hyperthreading is found
auto ptr = reinterpret_cast<std::uintptr_t>(buffer.data());
const std::uintptr_t end = ptr + buffer_size;
auto ptr = reinterpret_cast<uptr>(buffer.data());
const uptr end = ptr + buffer_size;
while (ptr < end)
{

View File

@ -586,7 +586,7 @@ namespace utils
const uint align = type->align;
const uint ssize = ::align<uint>(sizeof(typemap_block), align) + ::align(type->size, align);
const auto total = std::size_t{ssize} * type->count;
const auto start = std::uintptr_t{::align(m_total, align)};
const auto start = uptr{::align(m_total, align)};
if (total)
{
@ -612,7 +612,7 @@ namespace utils
{
if (m_map[i].m_count)
{
m_map[i].m_ptr = static_cast<uchar*>(m_memory) + reinterpret_cast<std::uintptr_t>(m_map[i].m_ptr);
m_map[i].m_ptr = static_cast<uchar*>(m_memory) + reinterpret_cast<uptr>(m_map[i].m_ptr);
}
}
}

View File

@ -2487,7 +2487,7 @@ public:
#ifdef _WIN32
func->setCallingConv(llvm::CallingConv::Win64);
#endif
m_engine->updateGlobalMapping({lame.data(), lame.size()}, reinterpret_cast<std::uintptr_t>(_func));
m_engine->updateGlobalMapping({lame.data(), lame.size()}, reinterpret_cast<uptr>(_func));
const auto inst = m_ir->CreateCall(func, {args...});
#ifdef _WIN32

View File

@ -132,7 +132,7 @@ static u64 ppu_cache(u32 addr)
g_cfg.core.ppu_decoder == ppu_decoder_type::fast ? &g_ppu_interpreter_fast.get_table() :
(fmt::throw_exception("Invalid PPU decoder"), nullptr));
return reinterpret_cast<std::uintptr_t>(table[ppu_decode(vm::read32(addr))]);
return reinterpret_cast<uptr>(table[ppu_decode(vm::read32(addr))]);
}
static bool ppu_fallback(ppu_thread& ppu, ppu_opcode_t op)
@ -270,7 +270,7 @@ extern void ppu_register_function_at(u32 addr, u32 size, ppu_function_t ptr)
// Initialize specific function
if (ptr)
{
ppu_ref(addr) = reinterpret_cast<std::uintptr_t>(ptr);
ppu_ref(addr) = reinterpret_cast<uptr>(ptr);
return;
}
@ -290,7 +290,7 @@ extern void ppu_register_function_at(u32 addr, u32 size, ppu_function_t ptr)
}
// Initialize interpreter cache
const u64 _break = reinterpret_cast<std::uintptr_t>(ppu_break);
const u64 _break = reinterpret_cast<uptr>(ppu_break);
while (size)
{
@ -335,7 +335,7 @@ extern void ppu_breakpoint(u32 addr, bool isAdding)
return;
}
const u64 _break = reinterpret_cast<std::uintptr_t>(&ppu_break);
const u64 _break = reinterpret_cast<uptr>(&ppu_break);
if (isAdding)
{
@ -357,7 +357,7 @@ extern void ppu_set_breakpoint(u32 addr)
return;
}
const u64 _break = reinterpret_cast<std::uintptr_t>(&ppu_break);
const u64 _break = reinterpret_cast<uptr>(&ppu_break);
if (ppu_ref(addr) != _break)
{
@ -373,7 +373,7 @@ extern void ppu_remove_breakpoint(u32 addr)
return;
}
const auto _break = reinterpret_cast<std::uintptr_t>(&ppu_break);
const auto _break = reinterpret_cast<uptr>(&ppu_break);
if (ppu_ref(addr) == _break)
{
@ -408,8 +408,8 @@ extern bool ppu_patch(u32 addr, u32 value)
*vm::get_super_ptr<u32>(addr) = value;
const u64 _break = reinterpret_cast<std::uintptr_t>(&ppu_break);
const u64 fallback = reinterpret_cast<std::uintptr_t>(&ppu_fallback);
const u64 _break = reinterpret_cast<uptr>(&ppu_break);
const u64 fallback = reinterpret_cast<uptr>(&ppu_fallback);
if (is_exec)
{
@ -1990,7 +1990,7 @@ extern void ppu_initialize(const ppu_module& info)
if (g_cfg.core.ppu_debug && func.size && func.toc != umax)
{
s_ppu_toc->emplace(func.addr, func.toc);
ppu_ref(func.addr) = reinterpret_cast<std::uintptr_t>(&ppu_check_toc);
ppu_ref(func.addr) = reinterpret_cast<uptr>(&ppu_check_toc);
}
}

View File

@ -311,7 +311,7 @@ struct sys_net_linger
struct lv2_socket final
{
#ifdef _WIN32
using socket_type = std::uintptr_t;
using socket_type = uptr;
#else
using socket_type = int;
#endif

View File

@ -567,7 +567,7 @@ void GLGSRender::emit_geometry(u32 sub_index)
const auto subranges = rsx::method_registers.current_draw_clause.get_subranges();
const auto draw_count = subranges.size();
const u32 type_scale = (index_type == GL_UNSIGNED_SHORT) ? 1 : 2;
uintptr_t index_ptr = index_offset;
uptr index_ptr = index_offset;
m_scratch_buffer.resize(draw_count * 16);
GLsizei *counts = reinterpret_cast<GLsizei*>(m_scratch_buffer.data());

View File

@ -227,8 +227,8 @@ namespace rsx
fmt::throw_exception("Unreachable");
}
const uintptr_t addr = uintptr_t(address);
const uintptr_t base = uintptr_t(vm::g_base_addr);
const uptr addr = uptr(address);
const uptr base = uptr(vm::g_base_addr);
ensure(addr > base);
return utils::address_range::start_length(u32(addr - base), range);

View File

@ -80,7 +80,7 @@ namespace vk
u64 pass_value;
u64 config;
}
key{ reinterpret_cast<uintptr_t>(pass), static_cast<u64>(renderpass_config.ia.topology) };
key{ reinterpret_cast<uptr>(pass), static_cast<u64>(renderpass_config.ia.topology) };
return rpcs3::hash_struct(key);
}
}

View File

@ -4,8 +4,8 @@
namespace vk
{
std::unordered_map<uintptr_t, vmm_allocation_t> g_vmm_allocations;
std::unordered_map<uintptr_t, atomic_t<u64>> g_vmm_memory_usage;
std::unordered_map<uptr, vmm_allocation_t> g_vmm_allocations;
std::unordered_map<uptr, atomic_t<u64>> g_vmm_memory_usage;
resource_manager g_resource_manager;
atomic_t<u64> g_event_ctr;
@ -40,7 +40,7 @@ namespace vk
void vmm_notify_memory_allocated(void* handle, u32 memory_type, u64 memory_size)
{
auto key = reinterpret_cast<uintptr_t>(handle);
auto key = reinterpret_cast<uptr>(handle);
const vmm_allocation_t info = { memory_size, memory_type };
if (const auto ins = g_vmm_allocations.insert_or_assign(key, info);
@ -61,7 +61,7 @@ namespace vk
void vmm_notify_memory_freed(void* handle)
{
auto key = reinterpret_cast<uintptr_t>(handle);
auto key = reinterpret_cast<uptr>(handle);
if (auto found = g_vmm_allocations.find(key);
found != g_vmm_allocations.end())
{

View File

@ -794,8 +794,8 @@ namespace rsx
{
public:
virtual ~default_vertex_cache() = default;
virtual storage_type* find_vertex_range(uintptr_t /*local_addr*/, upload_format, u32 /*data_length*/) { return nullptr; }
virtual void store_range(uintptr_t /*local_addr*/, upload_format, u32 /*data_length*/, u32 /*offset_in_heap*/) {}
virtual storage_type* find_vertex_range(uptr /*local_addr*/, upload_format, u32 /*data_length*/) { return nullptr; }
virtual void store_range(uptr /*local_addr*/, upload_format, u32 /*data_length*/, u32 /*offset_in_heap*/) {}
virtual void purge() {}
};
@ -805,7 +805,7 @@ namespace rsx
template <typename upload_format>
struct uploaded_range
{
uintptr_t local_address;
uptr local_address;
upload_format buffer_format;
u32 offset_in_heap;
u32 data_length;
@ -817,11 +817,11 @@ namespace rsx
using storage_type = uploaded_range<upload_format>;
private:
std::unordered_map<uintptr_t, std::vector<storage_type>> vertex_ranges;
std::unordered_map<uptr, std::vector<storage_type>> vertex_ranges;
public:
storage_type* find_vertex_range(uintptr_t local_addr, upload_format fmt, u32 data_length) override
storage_type* find_vertex_range(uptr local_addr, upload_format fmt, u32 data_length) override
{
const auto data_end = local_addr + data_length;
@ -835,7 +835,7 @@ namespace rsx
return nullptr;
}
void store_range(uintptr_t local_addr, upload_format fmt, u32 data_length, u32 offset_in_heap) override
void store_range(uptr local_addr, upload_format fmt, u32 data_length, u32 offset_in_heap) override
{
storage_type v = {};
v.buffer_format = fmt;

View File

@ -25,7 +25,7 @@
static constexpr std::size_t s_hashtable_size = 1u << 17;
// Reference counter combined with shifted pointer (which is assumed to be 47 bit)
static constexpr std::uintptr_t s_ref_mask = (1u << 17) - 1;
static constexpr uptr s_ref_mask = (1u << 17) - 1;
// Fix for silly on-first-use initializer
static bool s_null_wait_cb(const void*, u64, u64){ return true; };
@ -343,7 +343,7 @@ namespace
un_t<std::mutex> mtx;
#endif
void init(std::uintptr_t iptr)
void init(uptr iptr)
{
#ifdef _WIN32
tid = GetCurrentThreadId();
@ -561,7 +561,7 @@ static u32
#ifdef _WIN32
__vectorcall
#endif
cond_alloc(std::uintptr_t iptr, __m128i mask, u32 tls_slot = -1)
cond_alloc(uptr iptr, __m128i mask, u32 tls_slot = -1)
{
// Try to get cond from tls slot instead
u16* ptls = tls_slot >= std::size(s_tls_conds.cond) ? nullptr : s_tls_conds.cond + tls_slot;
@ -713,7 +713,7 @@ static cond_handle*
#ifdef _WIN32
__vectorcall
#endif
cond_id_lock(u32 cond_id, u32 size, __m128i mask, u64 thread_id = 0, std::uintptr_t iptr = 0)
cond_id_lock(u32 cond_id, u32 size, __m128i mask, u64 thread_id = 0, uptr iptr = 0)
{
if (cond_id - 1 < u32{UINT16_MAX})
{
@ -800,12 +800,12 @@ namespace
// Allocation pool, pointers to allocated semaphores
atomic_t<u16> slots[max_threads];
static atomic_t<u16>* slot_alloc(std::uintptr_t ptr) noexcept;
static atomic_t<u16>* slot_alloc(uptr ptr) noexcept;
static void slot_free(std::uintptr_t ptr, atomic_t<u16>* slot, u32 tls_slot) noexcept;
static void slot_free(uptr ptr, atomic_t<u16>* slot, u32 tls_slot) noexcept;
template <typename F>
static auto slot_search(std::uintptr_t iptr, u32 size, u64 thread_id, __m128i mask, F func) noexcept;
static auto slot_search(uptr iptr, u32 size, u64 thread_id, __m128i mask, F func) noexcept;
};
static_assert(sizeof(root_info) == 64);
@ -831,7 +831,7 @@ namespace
u32 id;
// Initialize: PRNG on iptr, split into two 16 bit chunks, choose first chunk
explicit hash_engine(std::uintptr_t iptr)
explicit hash_engine(uptr iptr)
: init(rng(iptr)())
, r0(static_cast<u16>(init >> 48))
, r1(static_cast<u16>(init >> 32))
@ -883,7 +883,7 @@ u64 atomic_wait::get_unique_tsc()
});
}
atomic_t<u16>* root_info::slot_alloc(std::uintptr_t ptr) noexcept
atomic_t<u16>* root_info::slot_alloc(uptr ptr) noexcept
{
atomic_t<u16>* slot = nullptr;
@ -937,13 +937,13 @@ atomic_t<u16>* root_info::slot_alloc(std::uintptr_t ptr) noexcept
return slot;
}
void root_info::slot_free(std::uintptr_t iptr, atomic_t<u16>* slot, u32 tls_slot) noexcept
void root_info::slot_free(uptr iptr, atomic_t<u16>* slot, u32 tls_slot) noexcept
{
const auto begin = reinterpret_cast<std::uintptr_t>(std::begin(s_hashtable));
const auto begin = reinterpret_cast<uptr>(std::begin(s_hashtable));
const auto end = reinterpret_cast<std::uintptr_t>(std::end(s_hashtable));
const auto end = reinterpret_cast<uptr>(std::end(s_hashtable));
const auto ptr = reinterpret_cast<std::uintptr_t>(slot) - begin;
const auto ptr = reinterpret_cast<uptr>(slot) - begin;
if (ptr >= sizeof(s_hashtable))
{
@ -991,7 +991,7 @@ void root_info::slot_free(std::uintptr_t iptr, atomic_t<u16>* slot, u32 tls_slot
}
template <typename F>
FORCE_INLINE auto root_info::slot_search(std::uintptr_t iptr, u32 size, u64 thread_id, __m128i mask, F func) noexcept
FORCE_INLINE auto root_info::slot_search(uptr iptr, u32 size, u64 thread_id, __m128i mask, F func) noexcept
{
u32 index = 0;
u32 total = 0;
@ -1054,11 +1054,11 @@ atomic_wait_engine::wait(const void* data, u32 size, __m128i old_value, u64 time
return;
}
const std::uintptr_t iptr = reinterpret_cast<std::uintptr_t>(data) & (~s_ref_mask >> 17);
const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 17);
uint ext_size = 0;
std::uintptr_t iptr_ext[atomic_wait::max_list - 1]{};
uptr iptr_ext[atomic_wait::max_list - 1]{};
if (ext) [[unlikely]]
{
@ -1077,7 +1077,7 @@ atomic_wait_engine::wait(const void* data, u32 size, __m128i old_value, u64 time
}
}
iptr_ext[ext_size] = reinterpret_cast<std::uintptr_t>(e->data) & (~s_ref_mask >> 17);
iptr_ext[ext_size] = reinterpret_cast<uptr>(e->data) & (~s_ref_mask >> 17);
ext_size++;
}
}
@ -1481,7 +1481,7 @@ bool atomic_wait_engine::raw_notify(const void* data, u64 thread_id)
return false;
}
const std::uintptr_t iptr = reinterpret_cast<std::uintptr_t>(data) & (~s_ref_mask >> 17);
const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 17);
if (s_tls_notify_cb)
s_tls_notify_cb(data, 0);
@ -1520,7 +1520,7 @@ __vectorcall
#endif
atomic_wait_engine::notify_one(const void* data, u32 size, __m128i mask, __m128i new_value)
{
const std::uintptr_t iptr = reinterpret_cast<std::uintptr_t>(data) & (~s_ref_mask >> 17);
const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 17);
if (s_tls_notify_cb)
s_tls_notify_cb(data, 0);
@ -1549,7 +1549,7 @@ __vectorcall
#endif
atomic_wait_engine::notify_all(const void* data, u32 size, __m128i mask)
{
const std::uintptr_t iptr = reinterpret_cast<std::uintptr_t>(data) & (~s_ref_mask >> 17);
const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 17);
if (s_tls_notify_cb)
s_tls_notify_cb(data, 0);

View File

@ -53,7 +53,7 @@ namespace utils
template <typename R, typename... Args>
struct dynamic_import<R(Args...)>
{
atomic_t<std::uintptr_t> ptr;
atomic_t<uptr> ptr;
const char* const lib;
const char* const name;
@ -67,7 +67,7 @@ namespace utils
void init() noexcept
{
ptr.release(reinterpret_cast<std::uintptr_t>(get_proc_address(lib, name)));
ptr.release(reinterpret_cast<uptr>(get_proc_address(lib, name)));
}
operator bool() noexcept

View File

@ -871,7 +871,7 @@ struct pointer_hash
{
std::size_t operator()(T* ptr) const
{
return reinterpret_cast<std::uintptr_t>(ptr) / Align;
return reinterpret_cast<uptr>(ptr) / Align;
}
};