Mirror of https://github.com/RPCS3/rpcs3.git (synced 2024-11-22 10:42:36 +01:00)
commit 5c1f79ab26 (parent 799c4837d3)

vm: remove g_mutex, use g_range_lock instead

Simplification and performance improvements.
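The core of the change: the global vm::g_mutex (a shared_mutex) is removed, and every path that used it now synchronizes through the atomic g_range_lock word instead — a writer installs a non-zero packed value with a compare-and-swap, and waiters spin until the word returns to zero. Below is a minimal sketch of that acquire/release pattern, distilled from the hunks that follow; busy_wait and the value encoding are simplified stand-ins, not the real RPCS3 helpers.

#include <atomic>
#include <cstdint>
#include <thread>

// Stand-in for vm::g_range_lock: one atomic word, zero when free.
// The real word packs addr | (u64{size} << 32) | flags.
std::atomic<std::uint64_t> g_range_lock{0};

// Stand-in for the utils busy_wait; the real one spins with pause hints.
void busy_wait(int cycles) { for (volatile int i = 0; i < cycles; i++) {} }

// Acquire: install a non-zero value (value == 0 would be a no-op, which is
// why the default writer_lock in this commit delegates to writer_lock(0, 1)).
void range_lock_acquire(std::uint64_t value)
{
    for (std::uint64_t i = 0;; i++)
    {
        std::uint64_t expected = 0;
        if (g_range_lock.compare_exchange_strong(expected, value))
            return; // word installed: lock held

        // Bounded spin, then yield (the 100/200 constants match the diff).
        if (i < 100)
            busy_wait(200);
        else
            std::this_thread::yield();
    }
}

// Release: writer_lock::~writer_lock() reduces to storing zero.
void range_lock_release()
{
    g_range_lock.store(0);
}

The apparent trade-off, consistent with the commit message: former shared (reader) acquisitions become exclusive, in exchange for an uncontended fast path that is a single CAS on one cache line.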
@@ -1430,7 +1430,7 @@ bool handle_access_violation(u32 addr, bool is_writing, ucontext_t* context) noexcept
         return false;
     }
 
-    if (vm::reader_lock rlock; vm::check_addr(addr, 0))
+    if (vm::writer_lock mlock; vm::check_addr(addr, 0))
     {
         // For allocated memory with protection lower than required (such as protection::no or read-only while writing to it)
         utils::memory_protect(vm::base(addr & -0x1000), 0x1000, utils::protection::rw);
@@ -1485,7 +1485,7 @@ bool handle_access_violation(u32 addr, bool is_writing, ucontext_t* context) noexcept
 
     u64 data3;
     {
-        vm::reader_lock rlock;
+        vm::writer_lock rlock;
         if (vm::check_addr(addr, is_writing ? vm::page_writable : vm::page_readable))
         {
             // Memory was allocated inbetween, retry
@@ -772,7 +772,7 @@ extern bool ppu_patch(u32 addr, u32 value)
         return false;
     }
 
-    vm::reader_lock rlock;
+    vm::writer_lock rlock;
 
     if (!vm::check_addr(addr))
     {
@@ -182,7 +182,7 @@ error_code sys_memory_get_page_attribute(cpu_thread& cpu, u32 addr, vm::ptr<sys_
 
     sys_memory.trace("sys_memory_get_page_attribute(addr=0x%x, attr=*0x%x)", addr, attr);
 
-    vm::reader_lock rlock;
+    vm::writer_lock rlock;
 
     if (!vm::check_addr(addr) || addr >= SPU_FAKE_BASE_ADDR)
     {
@@ -62,7 +62,13 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
     ppu.state += cpu_flag::wait;
 
     // Need to wait until the current writer finish
-    if (ppu.state & cpu_flag::memory) vm::g_mutex.lock_unlock();
+    if (ppu.state & cpu_flag::memory)
+    {
+        while (vm::g_range_lock)
+        {
+            busy_wait(200);
+        }
+    }
 
     sys_ppu_thread.trace("_sys_ppu_thread_exit(errorcode=0x%llx)", errorcode);
 
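Note what the hunk above buys: the exiting PPU thread no longer touches the lock at all. The old vm::g_mutex.lock_unlock() had to acquire and release the mutex just to drain a pending writer; the new code only spins until g_range_lock reads zero. A self-contained sketch of that wait-for-writer idiom, with stand-ins for vm::g_range_lock and the utils busy_wait:

#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> g_range_lock{0}; // stand-in for vm::g_range_lock

void busy_wait(int cycles) { for (volatile int i = 0; i < cycles; i++) {} } // stand-in

// Wait until the current writer (if any) finishes. Nothing is acquired,
// so the waiter cannot deadlock with the writer it is waiting out.
void wait_for_writer()
{
    while (g_range_lock.load() != 0)
    {
        busy_wait(200);
    }
}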
@@ -318,7 +318,7 @@ error_code sys_rsx_context_iomap(cpu_thread& cpu, u32 context_id, u32 io, u32 ea
     // Wait until we have no active RSX locks and reserve iomap for use. Must do so before acquiring vm lock to avoid deadlocks
     rsx::reservation_lock<true> rsx_lock(ea, size);
 
-    vm::reader_lock rlock;
+    vm::writer_lock rlock;
 
     for (u32 addr = ea, end = ea + size; addr < end; addr += 0x100000)
     {
@@ -379,7 +379,7 @@ error_code sys_rsx_context_iounmap(cpu_thread& cpu, u32 context_id, u32 io, u32
         sys_rsx.warning("sys_rsx_context_iounmap(): RSX is not idle while unmapping io");
     }
 
-    vm::reader_lock rlock;
+    vm::writer_lock rlock;
 
     std::scoped_lock lock(render->sys_rsx_mtx);
 
@@ -627,7 +627,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
 
     if (location == CELL_GCM_LOCATION_MAIN && bound)
     {
-        vm::reader_lock rlock;
+        vm::writer_lock rlock;
 
         for (u32 io = (offset >> 20), end = (range.end >> 20); io <= end; io++)
         {
@@ -13,7 +13,6 @@
 #include "Emu/Cell/SPURecompiler.h"
 #include "Emu/perf_meter.hpp"
 #include <deque>
-#include <shared_mutex>
 
 #include "util/vm.hpp"
 #include "util/asm.hpp"
@@ -64,9 +63,6 @@ namespace vm
     // Memory locations
     alignas(64) std::vector<std::shared_ptr<block_t>> g_locations;
 
-    // Memory mutex core
-    shared_mutex g_mutex;
-
     // Memory mutex acknowledgement
     thread_local atomic_t<cpu_thread*>* g_tls_locked = nullptr;
 
@@ -199,23 +195,6 @@ namespace vm
             {
                 break;
             }
-        }
-
-        // Wait a bit before accessing g_mutex
-        range_lock->store(0);
-        busy_wait(200);
-
-        std::shared_lock lock(g_mutex, std::try_to_lock);
-
-        if (!lock && i < 15)
-        {
-            busy_wait(200);
-            continue;
-        }
-        else if (!lock)
-        {
-            lock.lock();
-        }
 
         u32 test = 0;
 
@@ -230,16 +209,18 @@ namespace vm
 
         if (test)
         {
-            lock.unlock();
+            range_lock->release(0);
 
-            // Try tiggering a page fault (write)
+            // Try triggering a page fault (write)
             // TODO: Read memory if needed
             vm::_ref<atomic_t<u8>>(test) += 0;
             continue;
         }
+    }
 
-        range_lock->release(begin | (u64{size} << 32));
-        break;
+    // Wait a bit before accessing global lock
+    range_lock->store(0);
+    busy_wait(200);
     }
 
     if (_cpu)
@@ -295,7 +276,6 @@ namespace vm
         if (size == 0)
         {
             vm_log.warning("Tried to lock empty range (flags=0x%x, addr=0x%x)", flags >> 32, addr);
-            g_range_lock.release(0);
             return;
         }
 
@@ -350,7 +330,7 @@ namespace vm
             cpu.state -= cpu_flag::memory;
         }
 
-        if (g_mutex.is_lockable())
+        if (!g_range_lock)
         {
             return;
         }
@@ -360,12 +340,21 @@ namespace vm
 
         if (!ok || cpu.state & cpu_flag::memory)
         {
-            while (true)
+            for (u64 i = 0;; i++)
             {
-                g_mutex.lock_unlock();
+                if (i < 100)
+                    busy_wait(200);
+                else
+                    std::this_thread::yield();
+
+                if (g_range_lock)
+                {
+                    continue;
+                }
+
                 cpu.state -= cpu_flag::memory;
 
-                if (g_mutex.is_lockable()) [[likely]]
+                if (!g_range_lock) [[likely]]
                 {
                     return;
                 }
@@ -405,7 +394,12 @@ namespace vm
         }
     }
 
-    reader_lock::reader_lock()
+    writer_lock::writer_lock()
+        : writer_lock(0, 1)
+    {
+    }
+
+    writer_lock::writer_lock(u32 const addr, u32 const size, u64 const flags)
     {
         auto cpu = get_current_cpu_thread();
 
@@ -421,55 +415,21 @@ namespace vm
             }
         }
 
-        g_mutex.lock_shared();
-
-        if (cpu)
-        {
-            cpu->state -= cpu_flag::memory + cpu_flag::wait;
-        }
-    }
-
-    reader_lock::~reader_lock()
-    {
-        if (m_upgraded)
-        {
-            g_mutex.unlock();
-        }
-        else
-        {
-            g_mutex.unlock_shared();
-        }
-    }
-
-    void reader_lock::upgrade()
-    {
-        if (m_upgraded)
-        {
-            return;
-        }
-
-        g_mutex.lock_upgrade();
-        m_upgraded = true;
-    }
-
-    writer_lock::writer_lock(u32 addr /*mutable*/)
-    {
-        auto cpu = get_current_cpu_thread();
-
-        if (cpu)
-        {
-            if (!g_tls_locked || *g_tls_locked != cpu || cpu->state & cpu_flag::wait)
-            {
-                cpu = nullptr;
-            }
-            else
-            {
-                cpu->state += cpu_flag::wait;
-            }
-        }
-
-        g_mutex.lock();
-
+        for (u64 i = 0;; i++)
+        {
+            if (g_range_lock || !g_range_lock.compare_and_swap_test(0, addr | u64{size} << 32 | flags))
+            {
+                if (i < 100)
+                    busy_wait(200);
+                else
+                    std::this_thread::yield();
+            }
+            else
+            {
+                break;
+            }
+        }
+
         if (addr >= 0x10000)
         {
             perf_meter<"SUSPEND"_u64> perf0;
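The constructor rewrite above is the heart of the commit: instead of g_mutex.lock(), a writer publishes addr | u64{size} << 32 | flags into g_range_lock with one compare-and-swap, spinning and then yielding until it succeeds, and the destructor clears the word. A hypothetical RAII reduction of that shape follows — not the real vm::writer_lock, which additionally suspends other CPU threads when addr >= 0x10000 (the "SUSPEND" path above):

#include <atomic>
#include <cstdint>
#include <thread>

class range_writer_lock
{
    static inline std::atomic<std::uint64_t> s_lock{0}; // stand-in for vm::g_range_lock

    static void spin(int n) { for (volatile int i = 0; i < n; i++) {} } // stand-in for busy_wait

public:
    range_writer_lock() : range_writer_lock(0, 1) {} // delegating default ctor, as in the diff

    // flags defaults to 0 here; the real default is vm::range_locked.
    range_writer_lock(std::uint32_t addr, std::uint32_t size, std::uint64_t flags = 0)
    {
        // Pack the range exactly like the diff: addr | u64{size} << 32 | flags.
        const std::uint64_t value = addr | (std::uint64_t{size} << 32) | flags;

        for (std::uint64_t i = 0;; i++)
        {
            std::uint64_t expected = 0;
            if (s_lock.compare_exchange_strong(expected, value))
                break; // non-zero word installed: lock held

            if (i < 100)
                spin(200);
            else
                std::this_thread::yield();
        }
    }

    ~range_writer_lock()
    {
        s_lock.store(0); // mirrors writer_lock::~writer_lock(): g_range_lock = 0
    }
};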
@@ -490,8 +450,6 @@ namespace vm
             addr1 = static_cast<u16>(addr) | is_shared;
         }
 
-        g_range_lock = addr | range_locked;
-
         utils::prefetch_read(g_range_lock_set + 0);
         utils::prefetch_read(g_range_lock_set + 2);
         utils::prefetch_read(g_range_lock_set + 4);
@@ -546,8 +504,7 @@ namespace vm
 
     writer_lock::~writer_lock()
     {
-        g_range_lock.release(0);
-        g_mutex.unlock();
+        g_range_lock = 0;
     }
 
     u64 reservation_lock_internal(u32 addr, atomic_t<u64>& res)
@@ -764,16 +721,13 @@ namespace vm
                 fmt::throw_exception("Concurrent access (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)", addr, size, flags, i * 4096);
             }
         }
-
-        // Unlock
-        g_range_lock.release(0);
     }
 
     bool page_protect(u32 addr, u32 size, u8 flags_test, u8 flags_set, u8 flags_clear)
     {
         perf_meter<"PAGE_PRO"_u64> perf0;
 
-        vm::writer_lock lock(0);
+        vm::writer_lock lock;
 
         if (!size || (size | addr) % 4096)
         {
@@ -837,18 +791,12 @@ namespace vm
                         utils::memory_protect(g_base_addr + start * 4096, page_size, protection);
                     }
                 }
-                else
-                {
-                    g_range_lock.release(0);
-                }
 
                 start_value = new_val;
                 start = i;
             }
         }
 
-        g_range_lock.release(0);
-
         return true;
     }
 
@@ -943,9 +891,6 @@ namespace vm
             }
         }
 
-        // Unlock
-        g_range_lock.release(0);
-
         return size;
     }
 
@@ -1268,7 +1213,7 @@ namespace vm
             return 0;
         }
 
-        vm::writer_lock lock(0);
+        vm::writer_lock lock;
 
         if (!is_valid())
         {
@@ -1338,7 +1283,7 @@ namespace vm
             shm = std::make_shared<utils::shm>(size);
         }
 
-        vm::writer_lock lock(0);
+        vm::writer_lock lock;
 
         if (!is_valid())
         {
@@ -1358,7 +1303,7 @@ namespace vm
     {
         auto& m_map = (m.*block_map)();
         {
-            vm::writer_lock lock(0);
+            vm::writer_lock lock;
 
             const auto found = m_map.find(addr - (flags & stack_guarded ? 0x1000 : 0));
 
@@ -1408,7 +1353,7 @@ namespace vm
 
         auto& m_map = (m.*block_map)();
 
-        vm::reader_lock lock;
+        vm::writer_lock lock;
 
         const auto upper = m_map.upper_bound(addr);
 
@@ -1454,7 +1399,7 @@ namespace vm
 
     u32 block_t::used()
     {
-        vm::writer_lock lock(0);
+        vm::writer_lock lock;
 
         return imp_used(lock);
     }
@@ -1563,14 +1508,14 @@ namespace vm
 
     std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags)
     {
-        vm::writer_lock lock(0);
+        vm::writer_lock lock;
 
         return _map(addr, size, flags);
     }
 
     std::shared_ptr<block_t> find_map(u32 orig_size, u32 align, u64 flags)
     {
-        vm::writer_lock lock(0);
+        vm::writer_lock lock;
 
         // Align to minimal page size
         const u32 size = utils::align(orig_size, 0x10000);
@@ -1603,7 +1548,7 @@ namespace vm
 
         std::pair<std::shared_ptr<block_t>, bool> result{};
 
-        vm::writer_lock lock(0);
+        vm::writer_lock lock;
 
         for (auto it = g_locations.begin() + memory_location_max; it != g_locations.end(); it++)
         {
@@ -1643,14 +1588,14 @@ namespace vm
 
     std::shared_ptr<block_t> get(memory_location_t location, u32 addr)
     {
-        vm::reader_lock lock;
+        vm::writer_lock lock;
 
         return _get_map(location, addr);
     }
 
     std::shared_ptr<block_t> reserve_map(memory_location_t location, u32 addr, u32 area_size, u64 flags)
     {
-        vm::reader_lock lock;
+        vm::writer_lock lock;
 
         auto area = _get_map(location, addr);
 
@@ -1659,8 +1604,6 @@ namespace vm
             return area;
         }
 
-        lock.upgrade();
-
         // Allocation on arbitrary address
         if (location != any && location < g_locations.size())
         {
@@ -1689,7 +1632,7 @@ namespace vm
 
     bool try_access(u32 addr, void* ptr, u32 size, bool is_write)
     {
-        vm::reader_lock lock;
+        vm::writer_lock lock;
 
         if (vm::check_addr(addr, is_write ? page_writable : page_readable, size))
         {
@@ -1771,7 +1714,7 @@ namespace vm
     void close()
     {
         {
-            vm::writer_lock lock(0);
+            vm::writer_lock lock;
 
             for (auto& block : g_locations)
             {
@@ -7,8 +7,6 @@ class shared_mutex;
 
 namespace vm
 {
-    extern shared_mutex g_mutex;
-
     extern thread_local atomic_t<cpu_thread*>* g_tls_locked;
 
     enum range_lock_flags : u64
@@ -110,24 +108,12 @@ namespace vm
     void temporary_unlock(cpu_thread& cpu) noexcept;
     void temporary_unlock() noexcept;
 
-    class reader_lock final
-    {
-        bool m_upgraded = false;
-
-    public:
-        reader_lock(const reader_lock&) = delete;
-        reader_lock& operator=(const reader_lock&) = delete;
-        reader_lock();
-        ~reader_lock();
-
-        void upgrade();
-    };
-
     struct writer_lock final
     {
         writer_lock(const writer_lock&) = delete;
         writer_lock& operator=(const writer_lock&) = delete;
-        writer_lock(u32 addr = 0);
+        writer_lock();
+        writer_lock(u32 addr, u32 size = 0, u64 flags = range_locked);
         ~writer_lock();
     };
 } // namespace vm
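The net effect on call sites, taken directly from the hunks in this commit:

// Shared acquisitions become exclusive ones:
//   before:  vm::reader_lock rlock;   // shared lock on vm::g_mutex
//   after:   vm::writer_lock rlock;   // exclusive g_range_lock word
//
// The explicit "lock everything" form loses its dummy argument:
//   before:  vm::writer_lock lock(0);
//   after:   vm::writer_lock lock;    // delegates to writer_lock(0, 1)
//
// And reader_lock::upgrade() disappears; former upgrade paths start exclusive.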
@@ -314,7 +314,7 @@ u64 memory_string_searcher::OnSearch(std::string wstr, int mode)
 
     static constexpr u32 block_size = 0x2000000;
 
-    vm::reader_lock rlock;
+    vm::writer_lock rlock;
 
     const named_thread_group workers("Memory Searcher "sv, max_threads, [&]()
     {
@@ -15,7 +15,6 @@
 #include <QTextEdit>
 #include <QComboBox>
 #include <QWheelEvent>
-#include <shared_mutex>
 
 #include "util/asm.hpp"
 #include "util/vm.hpp"
@@ -591,8 +590,6 @@ void memory_viewer_panel::ShowImage(QWidget* parent, u32 addr, color_format form
         return;
     }
 
-    std::shared_lock rlock(vm::g_mutex);
-
     const auto originalBuffer = static_cast<u8*>(this->to_ptr(addr, memsize));
     const auto convertedBuffer = new (std::nothrow) u8[memsize];
 
@@ -672,8 +669,6 @@ void memory_viewer_panel::ShowImage(QWidget* parent, u32 addr, color_format form
         }
     }
 
-    rlock.unlock();
-
     // Flip vertically
     if (flipv && height > 1 && memsize > 1)
     {