1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-25 12:12:50 +01:00

vm: Fix vm::unmap

* Make vm::unmap atomic: the memory unmapping process is squashed inside this function, while still using the same VM mutex ownership.
* Make vm::unmap not fail due to random vm::get calls; the shared_ptr reference count is no longer a condition.
* Fix sys_mmapper_free_address spuriously failing with EBUSY due to random vm::get calls.
* Fix sys_vm_unmap race condition due to non-atomic vm::unmap.
* Add an optional verification block pointer argument to vm::unmap, used by patches.
This commit is contained in:
Eladash 2021-09-11 08:26:08 +03:00 committed by Ivan
parent fe6cb4774e
commit 50ad7ba1f6
6 changed files with 99 additions and 23 deletions

View File

@ -558,9 +558,7 @@ void unmap_vm_area(std::shared_ptr<vm::block_t>& ptr)
{
if (ptr && ptr->flags & (1ull << 62))
{
const u32 addr = ptr->addr;
ptr.reset();
vm::unmap(addr, true);
vm::unmap(0, true, &ptr);
}
}
@ -679,7 +677,7 @@ static usz apply_modification(std::basic_string<u32>& applied, const patch_engin
const u32 out_branch = vm::try_get_addr(dst + (offset & -4)).first;
// Allow only if points to a PPU executable instruction
if (out_branch < 0x10000 || out_branch >= 0x4000'0000 || !vm::check_addr<4>(out_branch, vm::alloc_executable))
if (out_branch < 0x10000 || out_branch >= 0x4000'0000 || !vm::check_addr<4>(out_branch, vm::page_executable))
{
continue;
}

View File

@ -2688,7 +2688,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<lv2_
if (!had_ovl)
{
ensure(vm::unmap(0x3000'0000));
ensure(vm::unmap(0x3000'0000).second);
}
g_ps3_process_info.ppc_seg = ppc_seg;

View File

@ -462,24 +462,30 @@ error_code sys_mmapper_free_address(ppu_thread& ppu, u32 addr)
auto& pf_events = g_fxo->get<page_fault_event_entries>();
std::lock_guard pf_lock(pf_events.pf_mutex);
const auto mem = vm::get(vm::any, addr);
if (!mem || mem->addr != addr)
{
return {CELL_EINVAL, addr};
}
for (const auto& ev : pf_events.events)
{
auto mem = vm::get(vm::any, addr);
if (mem && addr <= ev.second && ev.second <= addr + mem->size - 1)
if (addr <= ev.second && ev.second <= addr + mem->size - 1)
{
return CELL_EBUSY;
}
}
// Try to unmap area
const auto area = vm::unmap(addr, true);
const auto [area, success] = vm::unmap(addr, true, &mem);
if (!area)
{
return {CELL_EINVAL, addr};
}
if (area.use_count() != 1)
if (!success)
{
return CELL_EBUSY;
}
@ -586,6 +592,12 @@ error_code sys_mmapper_map_shared_memory(ppu_thread& ppu, u32 addr, u32 mem_id,
if (!area->falloc(addr, mem->size, &mem->shm, mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K : SYS_MEMORY_PAGE_SIZE_1M))
{
mem->counter--;
if (!area->is_valid())
{
return {CELL_EINVAL, addr};
}
return CELL_EBUSY;
}
@ -634,6 +646,12 @@ error_code sys_mmapper_search_and_map(ppu_thread& ppu, u32 start_addr, u32 mem_i
if (!addr)
{
mem->counter--;
if (!area->is_valid())
{
return {CELL_EINVAL, start_addr};
}
return CELL_ENOMEM;
}

View File

@ -116,7 +116,7 @@ error_code sys_vm_unmap(ppu_thread& ppu, u32 addr)
const auto vmo = idm::withdraw<sys_vm_t>(sys_vm_t::find_id(addr), [&](sys_vm_t& vmo)
{
// Free block
ensure(vm::unmap(addr));
ensure(vm::unmap(addr).second);
// Return memory
vmo.ct->used -= vmo.psize;

View File

@ -1170,7 +1170,8 @@ namespace vm
}
block_t::block_t(u32 addr, u32 size, u64 flags)
: addr(addr)
: m_id([](){ static atomic_t<u64> s_id = 1; return s_id++; }())
, addr(addr)
, size(size)
, flags(process_block_flags(flags))
{
@ -1183,12 +1184,12 @@ namespace vm
}
}
block_t::~block_t()
bool block_t::unmap()
{
auto& m_map = (m.*block_map)();
{
vm::writer_lock lock(0);
if (m_id.exchange(0))
{
// Deallocate all memory
for (auto it = m_map.begin(), end = m_map.end(); it != end;)
{
@ -1205,7 +1206,16 @@ namespace vm
m_common->unmap_critical(vm::get_super_ptr(addr));
#endif
}
return true;
}
return false;
}
block_t::~block_t()
{
ensure(!is_valid());
}
u32 block_t::alloc(const u32 orig_size, const std::shared_ptr<utils::shm>* src, u32 align, u64 flags)
@ -1257,6 +1267,12 @@ namespace vm
vm::writer_lock lock(0);
if (!is_valid())
{
// Expired block
return 0;
}
// Search for an appropriate place (unoptimized)
for (;; addr += align)
{
@ -1321,6 +1337,12 @@ namespace vm
vm::writer_lock lock(0);
if (!is_valid())
{
// Expired block
return 0;
}
if (!try_alloc(addr, flags, size, std::move(shm)))
{
return 0;
@ -1569,8 +1591,15 @@ namespace vm
return block;
}
std::shared_ptr<block_t> unmap(u32 addr, bool must_be_empty)
std::pair<std::shared_ptr<block_t>, bool> unmap(u32 addr, bool must_be_empty, const std::shared_ptr<block_t>* ptr)
{
if (ptr)
{
addr = (*ptr)->addr;
}
std::pair<std::shared_ptr<block_t>, bool> result{};
vm::writer_lock lock(0);
for (auto it = g_locations.begin() + memory_location_max; it != g_locations.end(); it++)
@ -1587,18 +1616,26 @@ namespace vm
continue;
}
if (must_be_empty && (it->use_count() != 1 || (*it)->imp_used(lock)))
if (ptr && *it != *ptr)
{
return *it;
return {};
}
auto block = std::move(*it);
if (must_be_empty && (*it)->imp_used(lock))
{
result.first = *it;
return result;
}
result.first = std::move(*it);
g_locations.erase(it);
return block;
ensure(result.first->unmap());
result.second = true;
return result;
}
}
return nullptr;
return {};
}
std::shared_ptr<block_t> get(memory_location_t location, u32 addr)
@ -1730,7 +1767,16 @@ namespace vm
void close()
{
{
vm::writer_lock lock(0);
for (auto& block : g_locations)
{
if (block) block->unmap();
}
g_locations.clear();
}
utils::memory_decommit(g_base_addr, 0x200000000);
utils::memory_decommit(g_exec_addr, 0x200000000);

View File

@ -123,8 +123,13 @@ namespace vm
// Common mapped region for special cases
std::shared_ptr<utils::shm> m_common;
atomic_t<u64> m_id = 0;
bool try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&&) const;
// Unmap block
bool unmap();
public:
block_t(u32 addr, u32 size, u64 flags);
@ -155,6 +160,15 @@ namespace vm
// Internal
u32 imp_used(const vm::writer_lock&) const;
// Returns 0 if invalid, none-zero unique id if valid
u64 is_valid() const
{
return m_id;
}
friend std::pair<std::shared_ptr<block_t>, bool> unmap(u32, bool, const std::shared_ptr<block_t>*);
friend void close();
};
// Create new memory block with specified parameters and return it
@ -163,8 +177,8 @@ namespace vm
// Create new memory block with at arbitrary position with specified alignment
std::shared_ptr<block_t> find_map(u32 size, u32 align, u64 flags = 0);
// Delete existing memory block with specified start address, return it
std::shared_ptr<block_t> unmap(u32 addr, bool must_be_empty = false);
// Delete existing memory block with specified start address, .first=its ptr, .second=success
std::pair<std::shared_ptr<block_t>, bool> unmap(u32 addr, bool must_be_empty = false, const std::shared_ptr<block_t>* ptr = nullptr);
// Get memory block associated with optionally specified memory location or optionally specified address
std::shared_ptr<block_t> get(memory_location_t location, u32 addr = 0);