rsx/vm: Exclude events from VM mutex
Fixes a deadlock introduced by a recent pull request, and possibly also some deadlocks caused by locking both the IDM and VM mutexes.
commit 488814bb2d
parent 4206b022b6
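
The hazard being removed is a lock-order inversion: the VM layer fired RSX notifications while still holding the VM writer lock, while the RSX side can take its own locks and then wait on the VM (or IDM) mutex. A minimal sketch of that inversion, with illustrative names rather than the emulator's actual types:

    #include <mutex>

    std::mutex vm_mutex;  // stand-in for the VM writer lock's mutex
    std::mutex rsx_mutex; // stand-in for an RSX-side lock

    void vm_unmap_old() // old behavior: notify RSX while holding the VM lock
    {
        std::lock_guard vm(vm_mutex);
        std::lock_guard rsx(rsx_mutex); // blocks if the RSX thread holds rsx_mutex...
    }

    void rsx_work()
    {
        std::lock_guard rsx(rsx_mutex);
        std::lock_guard vm(vm_mutex);   // ...while the RSX thread waits on vm_mutex: deadlock
    }

    // The diff below breaks the cycle by deferring RSX events until vm_mutex is released.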
rpcs3/Emu/Memory/vm.cpp
@@ -1009,7 +1009,7 @@ namespace vm
         // the RSX might try to call VirtualProtect on memory that is already unmapped
         if (auto rsxthr = g_fxo->try_get<rsx::thread>())
         {
-            rsxthr->on_notify_memory_unmapped(addr, size);
+            rsxthr->on_notify_pre_memory_unmapped(addr, size);
         }

         // Deregister PPU related data
@@ -1309,7 +1309,7 @@ namespace vm
         }
     }

-    bool block_t::unmap()
+    bool block_t::unmap(std::vector<std::pair<u32, u32>>* unmapped)
     {
         auto& m_map = (m.*block_map)();

@@ -1320,7 +1320,13 @@ namespace vm
         {
             const auto next = std::next(it);
             const auto size = it->second.first;
-            _page_unmap(it->first, size, this->flags, it->second.second.get());
+            auto unmap = std::make_pair(it->first, _page_unmap(it->first, size, this->flags, it->second.second.get()));
+
+            if (unmapped)
+            {
+                unmapped->emplace_back(unmap);
+            }

             it = next;
         }
@@ -1480,6 +1486,20 @@ namespace vm
     {
         auto& m_map = (m.*block_map)();
         {
+            struct notify_t
+            {
+                u32 addr{};
+                u32 size{};
+
+                ~notify_t() noexcept
+                {
+                    if (auto rsxthr = g_fxo->try_get<rsx::thread>(); rsxthr && size)
+                    {
+                        rsxthr->on_notify_post_memory_unmapped(addr, size);
+                    }
+                }
+            } unmap_notification;
+
             vm::writer_lock lock;

             const auto found = m_map.find(addr - (flags & stack_guarded ? 0x1000 : 0));
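
The declaration order above is the point of the patch: unmap_notification is constructed before vm::writer_lock lock, so at scope exit the destructors run in reverse order, the lock is released first, and the notification fires outside the VM mutex. A minimal standalone sketch of the same RAII pattern, assuming a plain std::mutex in place of the VM writer lock:

    #include <cstdio>
    #include <mutex>

    std::mutex g_vm_mutex; // stand-in for the VM writer lock's mutex

    void notify_rsx(unsigned addr, unsigned size) // stand-in for the RSX callback
    {
        std::printf("notified outside the lock: 0x%x, 0x%x\n", addr, size);
    }

    void unmap_example(unsigned addr, unsigned size)
    {
        struct notify_t
        {
            unsigned addr{}, size{};
            ~notify_t() { if (size) notify_rsx(addr, size); } // fires only if armed
        } notification;                   // declared first -> destroyed last

        std::lock_guard lock(g_vm_mutex); // declared second -> destroyed first

        // ... unmap the pages under the lock, then arm the notification ...
        notification.addr = addr;
        notification.size = size;
    } // lock released here, then ~notify_t runs without the mutex held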
@@ -1517,6 +1537,8 @@ namespace vm
             // Remove entry
             m_map.erase(found);

+            unmap_notification.size = size;
+            unmap_notification.addr = addr;
             return size;
         }
     }
@@ -1815,9 +1837,9 @@ namespace vm
         }
     }

-    bool _unmap_block(const std::shared_ptr<block_t>& block)
+    bool _unmap_block(const std::shared_ptr<block_t>& block, std::vector<std::pair<u32, u32>>* unmapped = nullptr)
     {
-        return block->unmap();
+        return block->unmap(unmapped);
     }

     static bool _test_map(u32 addr, u32 size)
@@ -1964,6 +1986,22 @@ namespace vm

         std::pair<std::shared_ptr<block_t>, bool> result{};

+        struct notify_t
+        {
+            std::vector<std::pair<u32, u32>> addr_size_pairs;
+
+            ~notify_t() noexcept
+            {
+                for (const auto [addr, size] : addr_size_pairs)
+                {
+                    if (auto rsxthr = g_fxo->try_get<rsx::thread>())
+                    {
+                        rsxthr->on_notify_post_memory_unmapped(addr, size);
+                    }
+                }
+            }
+        } unmap_notifications;
+
         vm::writer_lock lock;

         for (auto it = g_locations.begin() + memory_location_max; it != g_locations.end(); it++)
@@ -1993,7 +2031,7 @@ namespace vm

             result.first = std::move(*it);
             g_locations.erase(it);
-            ensure(_unmap_block(result.first));
+            ensure(_unmap_block(result.first, &unmap_notifications.addr_size_pairs));
             result.second = true;
             return result;
         }
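
The block-removal path above applies the same deferral in bulk: every unmapped (addr, size) pair is collected into a vector while the writer lock is held and replayed by the notify_t destructor once the lock is gone. A condensed sketch of that collect-then-flush pattern, with illustrative names:

    #include <cstdio>
    #include <mutex>
    #include <utility>
    #include <vector>

    std::mutex g_vm_mutex; // stand-in for the VM writer lock's mutex

    void post_unmap_event(unsigned addr, unsigned size) // stand-in for the RSX callback
    {
        std::printf("post-unmap event: 0x%x, 0x%x\n", addr, size);
    }

    void unmap_blocks_example()
    {
        struct notify_t
        {
            std::vector<std::pair<unsigned, unsigned>> pairs;

            ~notify_t()
            {
                for (const auto& [addr, size] : pairs)
                    post_unmap_event(addr, size); // replayed outside the lock
            }
        } notifications;

        std::lock_guard lock(g_vm_mutex);

        // each region unmapped under the lock appends its (addr, size) pair
        notifications.pairs.emplace_back(0x30000000u, 0x1000u);
    }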
rpcs3/Emu/Memory/vm.h
@@ -133,8 +133,8 @@ namespace vm
         bool try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&&) const;

         // Unmap block
-        bool unmap();
-        friend bool _unmap_block(const std::shared_ptr<block_t>&);
+        bool unmap(std::vector<std::pair<u32, u32>>* unmapped = nullptr);
+        friend bool _unmap_block(const std::shared_ptr<block_t>&, std::vector<std::pair<u32, u32>>* unmapped);

     public:
         block_t(u32 addr, u32 size, u64 flags);
rpcs3/Emu/RSX/GL/GLGSRender.cpp
@@ -1214,7 +1214,7 @@ void GLGSRender::notify_tile_unbound(u32 tile)
     if (false)
     {
         u32 addr = rsx::get_address(tiles[tile].offset, tiles[tile].location);
-        on_notify_memory_unmapped(addr, tiles[tile].size);
+        on_notify_pre_memory_unmapped(addr, tiles[tile].size);
         m_rtts.invalidate_surface_address(addr, false);
     }

rpcs3/Emu/RSX/RSXThread.cpp
@@ -3508,7 +3508,40 @@ namespace rsx
         }
     }

-    void thread::on_notify_memory_unmapped(u32 address, u32 size)
+    void thread::on_notify_pre_memory_unmapped(u32 address, u32 size)
+    {
+        if (rsx_thread_running && address < rsx::constants::local_mem_base)
+        {
+            // Pause RSX thread momentarily to handle unmapping
+            eng_lock elock(this);
+
+            // Queue up memory invalidation
+            std::lock_guard lock(m_mtx_task);
+            const bool existing_range_valid = m_invalidated_memory_range.valid();
+            const auto unmap_range = address_range::start_length(address, size);
+
+            if (existing_range_valid && m_invalidated_memory_range.touches(unmap_range))
+            {
+                // Merge range-to-invalidate in case of consecutive unmaps
+                m_invalidated_memory_range.set_min_max(unmap_range);
+            }
+            else
+            {
+                if (existing_range_valid)
+                {
+                    // We can only delay consecutive unmaps.
+                    // Otherwise, to avoid VirtualProtect failures, we need to do the invalidation here
+                    handle_invalidated_memory_range();
+                }
+
+                m_invalidated_memory_range = unmap_range;
+            }
+
+            m_eng_interrupt_mask |= rsx::memory_config_interrupt;
+        }
+    }
+
+    void thread::on_notify_post_memory_unmapped(u32 address, u32 size)
     {
         if (rsx_thread_running && address < rsx::constants::local_mem_base)
         {
@@ -3559,33 +3592,6 @@ namespace rsx
                     }
                 }
             }
-
-            // Pause RSX thread momentarily to handle unmapping
-            eng_lock elock(this);
-
-            // Queue up memory invalidation
-            std::lock_guard lock(m_mtx_task);
-            const bool existing_range_valid = m_invalidated_memory_range.valid();
-            const auto unmap_range = address_range::start_length(address, size);
-
-            if (existing_range_valid && m_invalidated_memory_range.touches(unmap_range))
-            {
-                // Merge range-to-invalidate in case of consecutive unmaps
-                m_invalidated_memory_range.set_min_max(unmap_range);
-            }
-            else
-            {
-                if (existing_range_valid)
-                {
-                    // We can only delay consecutive unmaps.
-                    // Otherwise, to avoid VirtualProtect failures, we need to do the invalidation here
-                    handle_invalidated_memory_range();
-                }
-
-                m_invalidated_memory_range = unmap_range;
-            }
-
-            m_eng_interrupt_mask |= rsx::memory_config_interrupt;
         }
     }

rpcs3/Emu/RSX/RSXThread.h
@@ -499,11 +499,17 @@ namespace rsx
          */
         void on_notify_memory_mapped(u32 address_base, u32 size);

+        /**
+         * Notify that a section of memory is to be unmapped
+         * Any data held in the defined range is discarded
+         */
+        void on_notify_pre_memory_unmapped(u32 address_base, u32 size);
+
         /**
          * Notify that a section of memory has been unmapped
          * Any data held in the defined range is discarded
          */
-        void on_notify_memory_unmapped(u32 address_base, u32 size);
+        void on_notify_post_memory_unmapped(u32 address_base, u32 size);

         /**
          * Notify to check internal state during semaphore wait
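
The pre/post split implies a calling convention on the VM side: the pre hook runs while the pages are still mapped (so cache flushes and VirtualProtect-style operations remain valid), and the post hook runs after the pages are released, outside the VM mutex. A hedged sketch of a caller honoring that order; the stub type and perform_page_unmap are hypothetical stand-ins, not the emulator's API:

    #include <cstdint>

    using u32 = std::uint32_t;

    struct rsx_thread_stub // hypothetical stand-in for rsx::thread
    {
        void on_notify_pre_memory_unmapped(u32, u32) {}
        void on_notify_post_memory_unmapped(u32, u32) {}
    };

    void perform_page_unmap(u32, u32) {} // hypothetical stand-in for the actual unmap

    void unmap_with_notifications(rsx_thread_stub* rsxthr, u32 addr, u32 size)
    {
        if (rsxthr)
            rsxthr->on_notify_pre_memory_unmapped(addr, size); // pages still mapped here

        perform_page_unmap(addr, size);

        if (rsxthr)
            rsxthr->on_notify_post_memory_unmapped(addr, size); // pages gone, no VM lock held
    }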
rpcs3/Emu/RSX/VK/VKGSRender.cpp
@@ -1257,7 +1257,7 @@ void VKGSRender::notify_tile_unbound(u32 tile)
     if (false)
     {
         u32 addr = rsx::get_address(tiles[tile].offset, tiles[tile].location);
-        on_notify_memory_unmapped(addr, tiles[tile].size);
+        on_notify_pre_memory_unmapped(addr, tiles[tile].size);
         m_rtts.invalidate_surface_address(addr, false);
     }
