From a6a5292cd745b3259da0e43b9e4bb381871dc67c Mon Sep 17 00:00:00 2001
From: Nekotekina
Date: Sat, 12 Dec 2020 16:29:55 +0300
Subject: [PATCH] Use uptr (std::uintptr_t alias)

---
 Utilities/JIT.cpp                      | 10 +++----
 Utilities/StrFmt.h                     | 10 +++----
 Utilities/Thread.cpp                   |  4 +--
 Utilities/typemap.h                    |  4 +--
 rpcs3/Emu/CPU/CPUTranslator.h          |  2 +-
 rpcs3/Emu/Cell/PPUThread.cpp           | 18 ++++++------
 rpcs3/Emu/Cell/lv2/sys_net.h           |  2 +-
 rpcs3/Emu/RSX/GL/GLDraw.cpp            |  2 +-
 rpcs3/Emu/RSX/RSXOffload.cpp           |  4 +--
 rpcs3/Emu/RSX/VK/VKOverlays.h          |  2 +-
 rpcs3/Emu/RSX/VK/VKResourceManager.cpp |  8 +++---
 rpcs3/Emu/RSX/rsx_cache.h              | 12 ++++----
 rpcs3/util/atomic.cpp                  | 40 +++++++++++++-------------
 rpcs3/util/dyn_lib.hpp                 |  4 +--
 rpcs3/util/types.hpp                   |  2 +-
 15 files changed, 62 insertions(+), 62 deletions(-)

diff --git a/Utilities/JIT.cpp b/Utilities/JIT.cpp
index 2d8431d873..6ef91c9817 100644
--- a/Utilities/JIT.cpp
+++ b/Utilities/JIT.cpp
@@ -343,7 +343,7 @@ struct MemoryManager1 : llvm::RTDyldMemoryManager
 		return {addr, llvm::JITSymbolFlags::Exported};
 	}
 
-	u8* allocate(u64& oldp, std::uintptr_t size, uint align, utils::protection prot)
+	u8* allocate(u64& oldp, uptr size, uint align, utils::protection prot)
 	{
 		if (align > c_page_size)
 		{
@@ -374,12 +374,12 @@ struct MemoryManager1 : llvm::RTDyldMemoryManager
 		return this->ptr + olda;
 	}
 
-	u8* allocateCodeSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name) override
+	u8* allocateCodeSection(uptr size, uint align, uint sec_id, llvm::StringRef sec_name) override
 	{
 		return allocate(code_ptr, size, align, utils::protection::wx);
 	}
 
-	u8* allocateDataSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name, bool is_ro) override
+	u8* allocateDataSection(uptr size, uint align, uint sec_id, llvm::StringRef sec_name, bool is_ro) override
 	{
 		return allocate(data_ptr, size, align, utils::protection::rw);
 	}
@@ -407,12 +407,12 @@ struct MemoryManager2 : llvm::RTDyldMemoryManager
 	{
 	}
 
-	u8* allocateCodeSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name) override
+	u8* allocateCodeSection(uptr size, uint align, uint sec_id, llvm::StringRef sec_name) override
 	{
 		return jit_runtime::alloc(size, align, true);
 	}
 
-	u8* allocateDataSection(std::uintptr_t size, uint align, uint sec_id, llvm::StringRef sec_name, bool is_ro) override
+	u8* allocateDataSection(uptr size, uint align, uint sec_id, llvm::StringRef sec_name, bool is_ro) override
 	{
 		return jit_runtime::alloc(size, align, false);
 	}
diff --git a/Utilities/StrFmt.h b/Utilities/StrFmt.h
index fd18d08f11..3b274c934c 100644
--- a/Utilities/StrFmt.h
+++ b/Utilities/StrFmt.h
@@ -19,7 +19,7 @@ struct fmt_unveil
 
 	static inline u64 get(const T& arg)
 	{
-		return reinterpret_cast<std::uintptr_t>(&arg);
+		return reinterpret_cast<uptr>(&arg);
 	}
 
 	// Temporary value container (can possibly be created by other fmt_unveil<> specializations)
@@ -30,7 +30,7 @@ struct fmt_unveil
 		// Allow implicit conversion
 		operator u64() const
 		{
-			return reinterpret_cast<std::uintptr_t>(&arg);
+			return reinterpret_cast<uptr>(&arg);
 		}
 	};
 
@@ -93,7 +93,7 @@ struct fmt_unveil
 
 	static inline u64 get(type arg)
 	{
-		return reinterpret_cast<std::uintptr_t>(arg);
+		return reinterpret_cast<uptr>(arg);
 	}
 };
 
@@ -104,7 +104,7 @@ struct fmt_unveil
 
 	static inline u64 get(type arg)
 	{
-		return reinterpret_cast<std::uintptr_t>(arg);
+		return reinterpret_cast<uptr>(arg);
 	}
 };
 
@@ -132,7 +132,7 @@ struct fmt_class_string
 	// Helper function (converts arg to object reference)
 	static SAFE_BUFFERS FORCE_INLINE const T& get_object(u64 arg)
 	{
-		return *reinterpret_cast<const T*>(static_cast<std::uintptr_t>(arg));
+		return *reinterpret_cast<const T*>(static_cast<uptr>(arg));
 	}
 
 	// Enum -> string function type
diff --git a/Utilities/Thread.cpp b/Utilities/Thread.cpp
index 23a4975952..cb9e073587 100644
--- a/Utilities/Thread.cpp
+++ b/Utilities/Thread.cpp
@@ -2459,8 +2459,8 @@ void thread_ctrl::detect_cpu_layout()
 	else
 	{
 		// Iterate through the buffer until a core with hyperthreading is found
-		auto ptr = reinterpret_cast<std::uintptr_t>(buffer.data());
-		const std::uintptr_t end = ptr + buffer_size;
+		auto ptr = reinterpret_cast<uptr>(buffer.data());
+		const uptr end = ptr + buffer_size;
 
 		while (ptr < end)
 		{
diff --git a/Utilities/typemap.h b/Utilities/typemap.h
index ed1bb7bcdc..3bc2b4053f 100644
--- a/Utilities/typemap.h
+++ b/Utilities/typemap.h
@@ -586,7 +586,7 @@ namespace utils
 			const uint align = type->align;
 			const uint ssize = ::align(sizeof(typemap_block), align) + ::align(type->size, align);
 			const auto total = std::size_t{ssize} * type->count;
-			const auto start = std::uintptr_t{::align(m_total, align)};
+			const auto start = uptr{::align(m_total, align)};
 
 			if (total)
 			{
@@ -612,7 +612,7 @@ namespace utils
 			{
 				if (m_map[i].m_count)
 				{
-					m_map[i].m_ptr = static_cast<uchar*>(m_memory) + reinterpret_cast<std::uintptr_t>(m_map[i].m_ptr);
+					m_map[i].m_ptr = static_cast<uchar*>(m_memory) + reinterpret_cast<uptr>(m_map[i].m_ptr);
 				}
 			}
 		}
diff --git a/rpcs3/Emu/CPU/CPUTranslator.h b/rpcs3/Emu/CPU/CPUTranslator.h
index fbd371ede8..abfa0089b4 100644
--- a/rpcs3/Emu/CPU/CPUTranslator.h
+++ b/rpcs3/Emu/CPU/CPUTranslator.h
@@ -2487,7 +2487,7 @@ public:
 #ifdef _WIN32
 		func->setCallingConv(llvm::CallingConv::Win64);
 #endif
-		m_engine->updateGlobalMapping({lame.data(), lame.size()}, reinterpret_cast<std::uintptr_t>(_func));
+		m_engine->updateGlobalMapping({lame.data(), lame.size()}, reinterpret_cast<uptr>(_func));
 
 		const auto inst = m_ir->CreateCall(func, {args...});
 #ifdef _WIN32
diff --git a/rpcs3/Emu/Cell/PPUThread.cpp b/rpcs3/Emu/Cell/PPUThread.cpp
index acee0e540e..83a2c42631 100644
--- a/rpcs3/Emu/Cell/PPUThread.cpp
+++ b/rpcs3/Emu/Cell/PPUThread.cpp
@@ -132,7 +132,7 @@ static u64 ppu_cache(u32 addr)
 		g_cfg.core.ppu_decoder == ppu_decoder_type::fast ? &g_ppu_interpreter_fast.get_table() :
 		(fmt::throw_exception("Invalid PPU decoder"), nullptr));
 
-	return reinterpret_cast<std::uintptr_t>(table[ppu_decode(vm::read32(addr))]);
+	return reinterpret_cast<uptr>(table[ppu_decode(vm::read32(addr))]);
 }
 
 static bool ppu_fallback(ppu_thread& ppu, ppu_opcode_t op)
@@ -270,7 +270,7 @@ extern void ppu_register_function_at(u32 addr, u32 size, ppu_function_t ptr)
 	// Initialize specific function
 	if (ptr)
 	{
-		ppu_ref(addr) = reinterpret_cast<std::uintptr_t>(ptr);
+		ppu_ref(addr) = reinterpret_cast<uptr>(ptr);
 		return;
 	}
 
@@ -290,7 +290,7 @@ extern void ppu_register_function_at(u32 addr, u32 size, ppu_function_t ptr)
 	}
 
 	// Initialize interpreter cache
-	const u64 _break = reinterpret_cast<std::uintptr_t>(ppu_break);
+	const u64 _break = reinterpret_cast<uptr>(ppu_break);
 
 	while (size)
 	{
@@ -335,7 +335,7 @@ extern void ppu_breakpoint(u32 addr, bool isAdding)
 		return;
 	}
 
-	const u64 _break = reinterpret_cast<std::uintptr_t>(&ppu_break);
+	const u64 _break = reinterpret_cast<uptr>(&ppu_break);
 
 	if (isAdding)
 	{
@@ -357,7 +357,7 @@ extern void ppu_set_breakpoint(u32 addr)
 		return;
 	}
 
-	const u64 _break = reinterpret_cast<std::uintptr_t>(&ppu_break);
+	const u64 _break = reinterpret_cast<uptr>(&ppu_break);
 
 	if (ppu_ref(addr) != _break)
 	{
@@ -373,7 +373,7 @@ extern void ppu_remove_breakpoint(u32 addr)
 		return;
 	}
 
-	const auto _break = reinterpret_cast<std::uintptr_t>(&ppu_break);
+	const auto _break = reinterpret_cast<uptr>(&ppu_break);
 
 	if (ppu_ref(addr) == _break)
 	{
@@ -408,8 +408,8 @@ extern bool ppu_patch(u32 addr, u32 value)
 
 	*vm::get_super_ptr<u32>(addr) = value;
 
-	const u64 _break = reinterpret_cast<std::uintptr_t>(&ppu_break);
-	const u64 fallback = reinterpret_cast<std::uintptr_t>(&ppu_fallback);
+	const u64 _break = reinterpret_cast<uptr>(&ppu_break);
+	const u64 fallback = reinterpret_cast<uptr>(&ppu_fallback);
 
 	if (is_exec)
 	{
@@ -1990,7 +1990,7 @@ extern void ppu_initialize(const ppu_module& info)
 		if (g_cfg.core.ppu_debug && func.size && func.toc != umax)
 		{
 			s_ppu_toc->emplace(func.addr, func.toc);
-			ppu_ref(func.addr) = reinterpret_cast<std::uintptr_t>(&ppu_check_toc);
+			ppu_ref(func.addr) = reinterpret_cast<uptr>(&ppu_check_toc);
 		}
 	}
 
diff --git a/rpcs3/Emu/Cell/lv2/sys_net.h b/rpcs3/Emu/Cell/lv2/sys_net.h
index ae6b10571a..b457880a7e 100644
--- a/rpcs3/Emu/Cell/lv2/sys_net.h
+++ b/rpcs3/Emu/Cell/lv2/sys_net.h
@@ -311,7 +311,7 @@ struct sys_net_linger
 struct lv2_socket final
 {
 #ifdef _WIN32
-	using socket_type = std::uintptr_t;
+	using socket_type = uptr;
 #else
 	using socket_type = int;
 #endif
diff --git a/rpcs3/Emu/RSX/GL/GLDraw.cpp b/rpcs3/Emu/RSX/GL/GLDraw.cpp
index f1d9a860a0..259e66880f 100644
--- a/rpcs3/Emu/RSX/GL/GLDraw.cpp
+++ b/rpcs3/Emu/RSX/GL/GLDraw.cpp
@@ -567,7 +567,7 @@ void GLGSRender::emit_geometry(u32 sub_index)
 		const auto subranges = rsx::method_registers.current_draw_clause.get_subranges();
 		const auto draw_count = subranges.size();
 		const u32 type_scale = (index_type == GL_UNSIGNED_SHORT) ? 1 : 2;
-		uintptr_t index_ptr = index_offset;
+		uptr index_ptr = index_offset;
 
 		m_scratch_buffer.resize(draw_count * 16);
 		GLsizei *counts = reinterpret_cast<GLsizei*>(m_scratch_buffer.data());
diff --git a/rpcs3/Emu/RSX/RSXOffload.cpp b/rpcs3/Emu/RSX/RSXOffload.cpp
index 49a7e92638..4c9986c7ac 100644
--- a/rpcs3/Emu/RSX/RSXOffload.cpp
+++ b/rpcs3/Emu/RSX/RSXOffload.cpp
@@ -227,8 +227,8 @@ namespace rsx
 			fmt::throw_exception("Unreachable");
 		}
 
-		const uintptr_t addr = uintptr_t(address);
-		const uintptr_t base = uintptr_t(vm::g_base_addr);
+		const uptr addr = uptr(address);
+		const uptr base = uptr(vm::g_base_addr);
 
 		ensure(addr > base);
 		return utils::address_range::start_length(u32(addr - base), range);
diff --git a/rpcs3/Emu/RSX/VK/VKOverlays.h b/rpcs3/Emu/RSX/VK/VKOverlays.h
index 7b12112467..0de15191dc 100644
--- a/rpcs3/Emu/RSX/VK/VKOverlays.h
+++ b/rpcs3/Emu/RSX/VK/VKOverlays.h
@@ -80,7 +80,7 @@ namespace vk
 				u64 pass_value;
 				u64 config;
 			}
-			key{ reinterpret_cast<std::uintptr_t>(pass), static_cast<u64>(renderpass_config.ia.topology) };
+			key{ reinterpret_cast<uptr>(pass), static_cast<u64>(renderpass_config.ia.topology) };
 
 			return rpcs3::hash_struct(key);
 		}
diff --git a/rpcs3/Emu/RSX/VK/VKResourceManager.cpp b/rpcs3/Emu/RSX/VK/VKResourceManager.cpp
index eef3220453..4fe4451221 100644
--- a/rpcs3/Emu/RSX/VK/VKResourceManager.cpp
+++ b/rpcs3/Emu/RSX/VK/VKResourceManager.cpp
@@ -4,8 +4,8 @@
 
 namespace vk
 {
-	std::unordered_map<std::uintptr_t, vmm_allocation_t> g_vmm_allocations;
-	std::unordered_map<std::uintptr_t, atomic_t<u64>> g_vmm_memory_usage;
+	std::unordered_map<uptr, vmm_allocation_t> g_vmm_allocations;
+	std::unordered_map<uptr, atomic_t<u64>> g_vmm_memory_usage;
 	resource_manager g_resource_manager;
 	atomic_t<u64> g_event_ctr;
 
@@ -40,7 +40,7 @@ namespace vk
 
 	void vmm_notify_memory_allocated(void* handle, u32 memory_type, u64 memory_size)
 	{
-		auto key = reinterpret_cast<std::uintptr_t>(handle);
+		auto key = reinterpret_cast<uptr>(handle);
 		const vmm_allocation_t info = { memory_size, memory_type };
 
 		if (const auto ins = g_vmm_allocations.insert_or_assign(key, info);
@@ -61,7 +61,7 @@ namespace vk
 
 	void vmm_notify_memory_freed(void* handle)
 	{
-		auto key = reinterpret_cast<std::uintptr_t>(handle);
+		auto key = reinterpret_cast<uptr>(handle);
 
 		if (auto found = g_vmm_allocations.find(key); found != g_vmm_allocations.end())
 		{
diff --git a/rpcs3/Emu/RSX/rsx_cache.h b/rpcs3/Emu/RSX/rsx_cache.h
index 9b80aa7c87..9060cc1e9b 100644
--- a/rpcs3/Emu/RSX/rsx_cache.h
+++ b/rpcs3/Emu/RSX/rsx_cache.h
@@ -794,8 +794,8 @@ namespace rsx
 	{
 	public:
 		virtual ~default_vertex_cache() = default;
-		virtual storage_type* find_vertex_range(uintptr_t /*local_addr*/, upload_format, u32 /*data_length*/) { return nullptr; }
-		virtual void store_range(uintptr_t /*local_addr*/, upload_format, u32 /*data_length*/, u32 /*offset_in_heap*/) {}
+		virtual storage_type* find_vertex_range(uptr /*local_addr*/, upload_format, u32 /*data_length*/) { return nullptr; }
+		virtual void store_range(uptr /*local_addr*/, upload_format, u32 /*data_length*/, u32 /*offset_in_heap*/) {}
 		virtual void purge() {}
 	};
 
@@ -805,7 +805,7 @@ namespace rsx
 	template <typename upload_format>
 	struct uploaded_range
 	{
-		uintptr_t local_address;
+		uptr local_address;
 		upload_format buffer_format;
 		u32 offset_in_heap;
 		u32 data_length;
@@ -817,11 +817,11 @@ namespace rsx
 		using storage_type = uploaded_range<upload_format>;
 
 	private:
-		std::unordered_map<uintptr_t, std::vector<storage_type>> vertex_ranges;
+		std::unordered_map<uptr, std::vector<storage_type>> vertex_ranges;
 
 	public:
 
-		storage_type* find_vertex_range(uintptr_t local_addr, upload_format fmt, u32 data_length) override
+		storage_type* find_vertex_range(uptr local_addr, upload_format fmt, u32 data_length) override
 		{
 			const auto data_end = local_addr + data_length;
 
@@ -835,7 +835,7 @@ namespace rsx
 			return nullptr;
 		}
 
-		void store_range(uintptr_t local_addr, upload_format fmt, u32 data_length, u32 offset_in_heap) override
+		void store_range(uptr local_addr, upload_format fmt, u32 data_length, u32 offset_in_heap) override
 		{
 			storage_type v = {};
 			v.buffer_format = fmt;
diff --git a/rpcs3/util/atomic.cpp b/rpcs3/util/atomic.cpp
index 6700e94527..38ce30040e 100644
--- a/rpcs3/util/atomic.cpp
+++ b/rpcs3/util/atomic.cpp
@@ -25,7 +25,7 @@
 static constexpr std::size_t s_hashtable_size = 1u << 17;
 
 // Reference counter combined with shifted pointer (which is assumed to be 47 bit)
-static constexpr std::uintptr_t s_ref_mask = (1u << 17) - 1;
+static constexpr uptr s_ref_mask = (1u << 17) - 1;
 
 // Fix for silly on-first-use initializer
 static bool s_null_wait_cb(const void*, u64, u64){ return true; };
@@ -343,7 +343,7 @@ namespace
 		un_t<std::mutex> mtx;
 #endif
 
-		void init(std::uintptr_t iptr)
+		void init(uptr iptr)
 		{
 #ifdef _WIN32
 			tid = GetCurrentThreadId();
@@ -561,7 +561,7 @@ static u32
 #ifdef _WIN32
 __vectorcall
 #endif
-cond_alloc(std::uintptr_t iptr, __m128i mask, u32 tls_slot = -1)
+cond_alloc(uptr iptr, __m128i mask, u32 tls_slot = -1)
 {
 	// Try to get cond from tls slot instead
 	u16* ptls = tls_slot >= std::size(s_tls_conds.cond) ? nullptr : s_tls_conds.cond + tls_slot;
@@ -713,7 +713,7 @@ static cond_handle*
 #ifdef _WIN32
 __vectorcall
 #endif
-cond_id_lock(u32 cond_id, u32 size, __m128i mask, u64 thread_id = 0, std::uintptr_t iptr = 0)
+cond_id_lock(u32 cond_id, u32 size, __m128i mask, u64 thread_id = 0, uptr iptr = 0)
 {
 	if (cond_id - 1 < u32{UINT16_MAX})
 	{
@@ -800,12 +800,12 @@ namespace
 		// Allocation pool, pointers to allocated semaphores
 		atomic_t<u16> slots[max_threads];
 
-		static atomic_t<u16>* slot_alloc(std::uintptr_t ptr) noexcept;
+		static atomic_t<u16>* slot_alloc(uptr ptr) noexcept;
 
-		static void slot_free(std::uintptr_t ptr, atomic_t<u16>* slot, u32 tls_slot) noexcept;
+		static void slot_free(uptr ptr, atomic_t<u16>* slot, u32 tls_slot) noexcept;
 
 		template <typename F>
-		static auto slot_search(std::uintptr_t iptr, u32 size, u64 thread_id, __m128i mask, F func) noexcept;
+		static auto slot_search(uptr iptr, u32 size, u64 thread_id, __m128i mask, F func) noexcept;
 	};
 
 	static_assert(sizeof(root_info) == 64);
@@ -831,7 +831,7 @@ namespace
 		u32 id;
 
 		// Initialize: PRNG on iptr, split into two 16 bit chunks, choose first chunk
-		explicit hash_engine(std::uintptr_t iptr)
+		explicit hash_engine(uptr iptr)
 			: init(rng(iptr)())
 			, r0(static_cast<u16>(init >> 48))
 			, r1(static_cast<u16>(init >> 32))
@@ -883,7 +883,7 @@ u64 atomic_wait::get_unique_tsc()
 	});
 }
 
-atomic_t<u16>* root_info::slot_alloc(std::uintptr_t ptr) noexcept
+atomic_t<u16>* root_info::slot_alloc(uptr ptr) noexcept
 {
 	atomic_t<u16>* slot = nullptr;
 
@@ -937,13 +937,13 @@ atomic_t<u16>* root_info::slot_alloc(std::uintptr_t ptr) noexcept
 	return slot;
 }
 
-void root_info::slot_free(std::uintptr_t iptr, atomic_t<u16>* slot, u32 tls_slot) noexcept
+void root_info::slot_free(uptr iptr, atomic_t<u16>* slot, u32 tls_slot) noexcept
 {
-	const auto begin = reinterpret_cast<std::uintptr_t>(std::begin(s_hashtable));
+	const auto begin = reinterpret_cast<uptr>(std::begin(s_hashtable));
 
-	const auto end = reinterpret_cast<std::uintptr_t>(std::end(s_hashtable));
+	const auto end = reinterpret_cast<uptr>(std::end(s_hashtable));
 
-	const auto ptr = reinterpret_cast<std::uintptr_t>(slot) - begin;
+	const auto ptr = reinterpret_cast<uptr>(slot) - begin;
 
 	if (ptr >= sizeof(s_hashtable))
 	{
@@ -991,7 +991,7 @@ void root_info::slot_free(std::uintptr_t iptr, atomic_t<u16>* slot, u32 tls_slot
 }
 
 template <typename F>
-FORCE_INLINE auto root_info::slot_search(std::uintptr_t iptr, u32 size, u64 thread_id, __m128i mask, F func) noexcept
+FORCE_INLINE auto root_info::slot_search(uptr iptr, u32 size, u64 thread_id, __m128i mask, F func) noexcept
 {
 	u32 index = 0;
 	u32 total = 0;
@@ -1054,11 +1054,11 @@ atomic_wait_engine::wait(const void* data, u32 size, __m128i old_value, u64 time
 		return;
 	}
 
-	const std::uintptr_t iptr = reinterpret_cast<std::uintptr_t>(data) & (~s_ref_mask >> 17);
+	const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 17);
 
 	uint ext_size = 0;
 
-	std::uintptr_t iptr_ext[atomic_wait::max_list - 1]{};
+	uptr iptr_ext[atomic_wait::max_list - 1]{};
 
 	if (ext) [[unlikely]]
 	{
@@ -1077,7 +1077,7 @@ atomic_wait_engine::wait(const void* data, u32 size, __m128i old_value, u64 time
 				}
 			}
 
-			iptr_ext[ext_size] = reinterpret_cast<std::uintptr_t>(e->data) & (~s_ref_mask >> 17);
+			iptr_ext[ext_size] = reinterpret_cast<uptr>(e->data) & (~s_ref_mask >> 17);
 			ext_size++;
 		}
 	}
@@ -1481,7 +1481,7 @@ bool atomic_wait_engine::raw_notify(const void* data, u64 thread_id)
 		return false;
 	}
 
-	const std::uintptr_t iptr = reinterpret_cast<std::uintptr_t>(data) & (~s_ref_mask >> 17);
+	const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 17);
 
 	if (s_tls_notify_cb)
 		s_tls_notify_cb(data, 0);
@@ -1520,7 +1520,7 @@ __vectorcall
 #endif
 atomic_wait_engine::notify_one(const void* data, u32 size, __m128i mask, __m128i new_value)
 {
-	const std::uintptr_t iptr = reinterpret_cast<std::uintptr_t>(data) & (~s_ref_mask >> 17);
+	const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 17);
 
 	if (s_tls_notify_cb)
 		s_tls_notify_cb(data, 0);
@@ -1549,7 +1549,7 @@ __vectorcall
 #endif
 atomic_wait_engine::notify_all(const void* data, u32 size, __m128i mask)
 {
-	const std::uintptr_t iptr = reinterpret_cast<std::uintptr_t>(data) & (~s_ref_mask >> 17);
+	const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 17);
 
 	if (s_tls_notify_cb)
 		s_tls_notify_cb(data, 0);
diff --git a/rpcs3/util/dyn_lib.hpp b/rpcs3/util/dyn_lib.hpp
index c2c5a693c5..ccee7b8e4f 100644
--- a/rpcs3/util/dyn_lib.hpp
+++ b/rpcs3/util/dyn_lib.hpp
@@ -53,7 +53,7 @@ namespace utils
 	template <typename F>
 	struct dynamic_import
 	{
-		atomic_t<std::uintptr_t> ptr;
+		atomic_t<uptr> ptr;
 
 		const char* const lib;
 		const char* const name;
@@ -67,7 +67,7 @@ namespace utils
 
 		void init() noexcept
 		{
-			ptr.release(reinterpret_cast<std::uintptr_t>(get_proc_address(lib, name)));
+			ptr.release(reinterpret_cast<uptr>(get_proc_address(lib, name)));
 		}
 
 		operator bool() noexcept
diff --git a/rpcs3/util/types.hpp b/rpcs3/util/types.hpp
index 9f5e558539..d0c784cd04 100644
--- a/rpcs3/util/types.hpp
+++ b/rpcs3/util/types.hpp
@@ -871,7 +871,7 @@ struct pointer_hash
 {
 	std::size_t operator()(T* ptr) const
 	{
-		return reinterpret_cast<std::uintptr_t>(ptr) / Align;
+		return reinterpret_cast<uptr>(ptr) / Align;
 	}
 };
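
Note on the alias itself: the patch assumes uptr is already usable as a project-wide alias for std::uintptr_t (per the subject line); the hunks above only convert the consumers, plus pointer_hash in rpcs3/util/types.hpp. Below is a minimal sketch of the assumed alias and of the pointer-to-integer cast pattern the patch standardizes on; the to_uptr helper is purely illustrative and is not part of the patch:

    #include <cstdint>

    // Assumed project-wide alias (declared in rpcs3/util/types.hpp; the exact
    // declaration is not shown by this diff).
    using uptr = std::uintptr_t;

    // Illustrative helper demonstrating the cast pattern the patch converges on:
    // raw pointers become integers via uptr instead of spelling out std::uintptr_t.
    inline uptr to_uptr(const void* ptr)
    {
        return reinterpret_cast<uptr>(ptr);
    }

For example, rpcs3/util/atomic.cpp derives its hashtable keys exactly this way after the change: const uptr iptr = reinterpret_cast<uptr>(data) & (~s_ref_mask >> 17);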