RSX/GCM: Fix memory unmapping for HLE GCM

Repository: https://github.com/RPCS3/rpcs3.git
Commit: 2222807624 (parent 488814bb2d)
@@ -945,7 +945,7 @@ namespace vm
         return true;
     }

-    static u32 _page_unmap(u32 addr, u32 max_size, u64 bflags, utils::shm* shm)
+    static u32 _page_unmap(u32 addr, u32 max_size, u64 bflags, utils::shm* shm, std::vector<std::pair<u64, u64>>& unmap_events)
     {
         perf_meter<"PAGE_UNm"_u64> perf0;

@@ -1009,7 +1009,7 @@ namespace vm
         // the RSX might try to call VirtualProtect on memory that is already unmapped
         if (auto rsxthr = g_fxo->try_get<rsx::thread>())
         {
-            rsxthr->on_notify_pre_memory_unmapped(addr, size);
+            rsxthr->on_notify_pre_memory_unmapped(addr, size, unmap_events);
         }

         // Deregister PPU related data
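The effect of the two hunks above is that _page_unmap no longer notifies the RSX synchronously: it only records what has to be signalled into a caller-owned vector of (event, bitmask) pairs, and the caller delivers those once the pages are actually gone. A minimal, self-contained sketch of that collect-then-notify flow follows; fake_page_unmap and collect_unmap_events are illustrative stand-ins, not RPCS3 APIs.

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using u32 = std::uint32_t;
    using u64 = std::uint64_t;

    // Stand-in for the RSX-side pre-unmap hook: records which events should
    // be sent once the pages are gone (names here are illustrative only).
    static void collect_unmap_events(u32 addr, u32 size, std::vector<std::pair<u64, u64>>& out)
    {
        // Pretend one event word covers this range; the real hook derives the
        // event id and bitmask from the io mapping tables.
        out.emplace_back(/*event*/ 0x100, /*bitmask*/ 1ull);
    }

    // Stand-in for _page_unmap: tears pages down, but only *collects* the
    // notification data instead of sending it immediately.
    static u32 fake_page_unmap(u32 addr, u32 size, std::vector<std::pair<u64, u64>>& unmap_events)
    {
        collect_unmap_events(addr, size, unmap_events);
        // ... actual page table teardown would happen here ...
        return size;
    }

    int main()
    {
        std::vector<std::pair<u64, u64>> events;
        fake_page_unmap(0x30000000, 0x100000, events);

        // The caller fires the events only after the unmap has completed.
        for (const auto& [id, mask] : events)
            std::printf("send_event(0, 0x%llx, 0x%llx)\n",
                        (unsigned long long)id, (unsigned long long)mask);
    }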
@@ -1309,7 +1309,7 @@ namespace vm
         }
     }

-    bool block_t::unmap(std::vector<std::pair<u32, u32>>* unmapped)
+    bool block_t::unmap(std::vector<std::pair<u64, u64>>* unmapped)
     {
         auto& m_map = (m.*block_map)();

@@ -1320,12 +1320,9 @@ namespace vm
             {
                 const auto next = std::next(it);
                 const auto size = it->second.first;
-                auto unmap = std::make_pair(it->first, _page_unmap(it->first, size, this->flags, it->second.second.get()));

-                if (unmapped)
-                {
-                    unmapped->emplace_back(unmap);
-                }
+                std::vector<std::pair<u64, u64>> event_data;
+                ensure(size == _page_unmap(it->first, size, this->flags, it->second.second.get(), unmapped ? *unmapped : event_data));

                 it = next;
             }
@@ -1488,14 +1485,16 @@ namespace vm
     {
         struct notify_t
         {
-            u32 addr{};
-            u32 size{};
+            std::vector<std::pair<u64, u64>> event_data;

             ~notify_t() noexcept
             {
-                if (auto rsxthr = g_fxo->try_get<rsx::thread>(); rsxthr && size)
+                if (auto rsxthr = g_fxo->try_get<rsx::thread>())
                 {
-                    rsxthr->on_notify_post_memory_unmapped(addr, size);
+                    for (const auto [event_data1, event_data2] : event_data)
+                    {
+                        rsxthr->on_notify_post_memory_unmapped(event_data1, event_data2);
+                    }
                 }
             }
         } unmap_notification;
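The notify_t local above is a scope-exit helper: _page_unmap appends into its event_data while the mapping is being torn down, and the destructor delivers everything only after the dealloc has finished. A minimal sketch of the same pattern, with hypothetical names:

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using u64 = std::uint64_t;

    // Collect event pairs while doing the work, deliver them all in the
    // destructor, i.e. after everything else in the enclosing scope is done.
    struct unmap_notify
    {
        std::vector<std::pair<u64, u64>> event_data;

        ~unmap_notify()
        {
            for (const auto& [a, b] : event_data)
                std::printf("post-unmap event: 0x%llx 0x%llx\n",
                            (unsigned long long)a, (unsigned long long)b);
        }
    };

    int main()
    {
        {
            unmap_notify notifier;
            // ... unmap pages, appending to notifier.event_data along the way ...
            notifier.event_data.emplace_back(0x100, 0b1010);
            // notifications fire here, when `notifier` goes out of scope
        }
    }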
@@ -1525,7 +1524,7 @@ namespace vm
         }

         // Unmap "real" memory pages
-        ensure(size == _page_unmap(addr, size, this->flags, found->second.second.get()));
+        ensure(size == _page_unmap(addr, size, this->flags, found->second.second.get(), unmap_notification.event_data));

         // Clear stack guards
         if (flags & stack_guarded)
@@ -1537,8 +1536,6 @@ namespace vm
         // Remove entry
         m_map.erase(found);

-        unmap_notification.size = size;
-        unmap_notification.addr = addr;
         return size;
     }
 }
@@ -1837,7 +1834,7 @@ namespace vm
         }
     }

-    bool _unmap_block(const std::shared_ptr<block_t>& block, std::vector<std::pair<u32, u32>>* unmapped = nullptr)
+    bool _unmap_block(const std::shared_ptr<block_t>& block, std::vector<std::pair<u64, u64>>* unmapped = nullptr)
     {
         return block->unmap(unmapped);
     }
@@ -1988,15 +1985,15 @@ namespace vm

         struct notify_t
         {
-            std::vector<std::pair<u32, u32>> addr_size_pairs;
+            std::vector<std::pair<u64, u64>> unmap_data;

             ~notify_t() noexcept
             {
-                for (const auto [addr, size] : addr_size_pairs)
+                if (auto rsxthr = g_fxo->try_get<rsx::thread>())
                 {
-                    if (auto rsxthr = g_fxo->try_get<rsx::thread>())
+                    for (const auto [event_data1, event_data2] : unmap_data)
                     {
-                        rsxthr->on_notify_post_memory_unmapped(addr, size);
+                        rsxthr->on_notify_post_memory_unmapped(event_data1, event_data2);
                     }
                 }
             }
@@ -2031,7 +2028,7 @@ namespace vm

             result.first = std::move(*it);
             g_locations.erase(it);
-            ensure(_unmap_block(result.first, &unmap_notifications.addr_size_pairs));
+            ensure(_unmap_block(result.first, &unmap_notifications.unmap_data));
             result.second = true;
             return result;
         }
@@ -133,8 +133,8 @@ namespace vm
         bool try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&&) const;

         // Unmap block
-        bool unmap(std::vector<std::pair<u32, u32>>* unmapped = nullptr);
-        friend bool _unmap_block(const std::shared_ptr<block_t>&, std::vector<std::pair<u32, u32>>* unmapped);
+        bool unmap(std::vector<std::pair<u64, u64>>* unmapped = nullptr);
+        friend bool _unmap_block(const std::shared_ptr<block_t>&, std::vector<std::pair<u64, u64>>* unmapped);

     public:
         block_t(u32 addr, u32 size, u64 flags);
@@ -1214,7 +1214,7 @@ void GLGSRender::notify_tile_unbound(u32 tile)
     if (false)
     {
         u32 addr = rsx::get_address(tiles[tile].offset, tiles[tile].location);
-        on_notify_pre_memory_unmapped(addr, tiles[tile].size);
+        on_notify_pre_memory_unmapped(addr, tiles[tile].size, *std::make_unique<std::vector<std::pair<u64, u64>>>());
         m_rtts.invalidate_surface_address(addr, false);
     }

@@ -3508,10 +3508,69 @@ namespace rsx
         }
     }

-    void thread::on_notify_pre_memory_unmapped(u32 address, u32 size)
+    void thread::on_notify_pre_memory_unmapped(u32 address, u32 size, std::vector<std::pair<u64, u64>>& event_data)
     {
         if (rsx_thread_running && address < rsx::constants::local_mem_base)
         {
+            // Each bit represents io entry to be unmapped
+            u64 unmap_status[512 / 64]{};
+
+            for (u32 ea = address >> 20, end = ea + (size >> 20); ea < end; ea++)
+            {
+                const u32 io = utils::rol32(iomap_table.io[ea], 32 - 20);
+
+                if (io + 1)
+                {
+                    unmap_status[io / 64] |= 1ull << (io & 63);
+                    iomap_table.io[ea].release(-1);
+                    iomap_table.ea[io].release(-1);
+                }
+            }
+
+            auto& cfg = g_fxo->get<gcm_config>();
+
+            std::unique_lock<shared_mutex> hle_lock;
+
+            for (u32 i = 0; i < std::size(unmap_status); i++)
+            {
+                // TODO: Check order when sending multiple events
+                if (u64 to_unmap = unmap_status[i])
+                {
+                    if (isHLE)
+                    {
+                        if (!hle_lock)
+                        {
+                            hle_lock = std::unique_lock{cfg.gcmio_mutex};
+                        }
+
+                        while (to_unmap)
+                        {
+                            const int bit = (std::countr_zero<u64>(utils::rol64(to_unmap, 0 - bit)) + bit);
+                            to_unmap &= ~(1ull << bit);
+
+                            constexpr u16 null_entry = 0xFFFF;
+                            const u32 ea = std::exchange(cfg.offsetTable.eaAddress[(i * 64 + bit)], null_entry);
+
+                            if (ea < (rsx::constants::local_mem_base >> 20))
+                            {
+                                cfg.offsetTable.eaAddress[ea] = null_entry;
+                            }
+                        }
+
+                        continue;
+                    }
+
+                    // Each 64 entries are grouped by a bit
+                    const u64 io_event = SYS_RSX_EVENT_UNMAPPED_BASE << i;
+                    event_data.emplace_back(io_event, to_unmap);
+                }
+            }
+
+            if (hle_lock)
+            {
+                hle_lock.unlock();
+            }
+
             // Pause RSX thread momentarily to handle unmapping
             eng_lock elock(this);

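Two things happen in the new pre-unmap pass: every affected 1 MB io slot is recorded as one bit in unmap_status, and then either (HLE) the GCM offset-table entries are cleared on the spot by walking each set bit, or (LLE) the whole 64-bit group is queued as a single (event, bitmask) pair for later delivery. A self-contained sketch of that set-bit walk and grouping follows; the 512-slot layout mirrors the code above, while event_base, the slot numbers, and the printf placeholders are illustrative only.

    #include <bit>
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using u64 = std::uint64_t;

    int main()
    {
        // 512 io slots (one per MB of the 512 MB io window), one bit each.
        u64 unmap_status[512 / 64]{};

        // Suppose slots 3, 70 and 130 were just unmapped.
        for (int slot : {3, 70, 130})
            unmap_status[slot / 64] |= 1ull << (slot % 64);

        const u64 event_base = 1ull << 32; // illustrative stand-in for the event id base

        std::vector<std::pair<u64, u64>> event_data;

        for (int i = 0; i < 8; i++)
        {
            u64 word = unmap_status[i];

            // Walk every set bit: each one names a single io slot to clean up.
            while (word)
            {
                const int bit = std::countr_zero(word);
                word &= word - 1; // clear the lowest set bit
                std::printf("clear offset table entry for io slot %d\n", i * 64 + bit);
            }

            // Alternatively, the whole 64-bit group can be reported as one event.
            if (unmap_status[i])
                event_data.emplace_back(event_base << i, unmap_status[i]);
        }

        for (const auto& [id, mask] : event_data)
            std::printf("queued event 0x%llx mask 0x%llx\n",
                        (unsigned long long)id, (unsigned long long)mask);
    }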
@@ -3541,57 +3600,11 @@ namespace rsx
         }
     }

-    void thread::on_notify_post_memory_unmapped(u32 address, u32 size)
+    void thread::on_notify_post_memory_unmapped(u64 event_data1, u64 event_data2)
     {
-        if (rsx_thread_running && address < rsx::constants::local_mem_base)
+        if (!isHLE)
         {
-            if (!isHLE)
-            {
-                // Each bit represents io entry to be unmapped
-                u64 unmap_status[512 / 64]{};
-
-                for (u32 ea = address >> 20, end = ea + (size >> 20); ea < end; ea++)
-                {
-                    const u32 io = utils::rol32(iomap_table.io[ea], 32 - 20);
-
-                    if (io + 1)
-                    {
-                        unmap_status[io / 64] |= 1ull << (io & 63);
-                        iomap_table.ea[io].release(-1);
-                        iomap_table.io[ea].release(-1);
-                    }
-                }
-
-                for (u32 i = 0; i < std::size(unmap_status); i++)
-                {
-                    // TODO: Check order when sending multiple events
-                    if (u64 to_unmap = unmap_status[i])
-                    {
-                        // Each 64 entries are grouped by a bit
-                        const u64 io_event = SYS_RSX_EVENT_UNMAPPED_BASE << i;
-                        send_event(0, io_event, to_unmap);
-                    }
-                }
-            }
-            else
-            {
-                // TODO: Fix this
-                u32 ea = address >> 20, io = iomap_table.io[ea];
-
-                if (io + 1)
-                {
-                    io >>= 20;
-
-                    auto& cfg = g_fxo->get<gcm_config>();
-                    std::lock_guard lock(cfg.gcmio_mutex);
-
-                    for (const u32 end = ea + (size >> 20); ea < end;)
-                    {
-                        cfg.offsetTable.ioAddress[ea++] = 0xFFFF;
-                        cfg.offsetTable.eaAddress[io++] = 0xFFFF;
-                    }
-                }
-            }
+            send_event(0, event_data1, event_data2);
         }
     }

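With the io bookkeeping moved into the pre-unmap pass, the post-unmap hook reduces to replaying the queued pairs, and does nothing for HLE, whose offset table was already patched. A rough sketch of that replay under those assumptions; send_event here is a printf stand-in for the real event-queue write, not the RPCS3 API:

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using u64 = std::uint64_t;

    // Stand-in for the LLE event queue write (illustrative only).
    static void send_event(u64 source, u64 data1, u64 data2)
    {
        std::printf("event: src=%llu d1=0x%llx d2=0x%llx\n",
                    (unsigned long long)source,
                    (unsigned long long)data1,
                    (unsigned long long)data2);
    }

    // Post-unmap replay: every pair collected during the pre-unmap pass is
    // forwarded once the pages are actually gone; nothing is recomputed here.
    static void replay_unmap_events(const std::vector<std::pair<u64, u64>>& event_data, bool is_hle)
    {
        if (is_hle)
            return; // HLE GCM already patched its offset table in the pre-unmap pass

        for (const auto& [data1, data2] : event_data)
            send_event(0, data1, data2);
    }

    int main()
    {
        std::vector<std::pair<u64, u64>> events{{0x100, 0b1}, {0x200, 0b110}};
        replay_unmap_events(events, /*is_hle=*/false);
    }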
@@ -502,14 +502,15 @@ namespace rsx
         /**
          * Notify that a section of memory is to be unmapped
          * Any data held in the defined range is discarded
+         * Sets optional unmap event data
          */
-        void on_notify_pre_memory_unmapped(u32 address_base, u32 size);
+        void on_notify_pre_memory_unmapped(u32 address_base, u32 size, std::vector<std::pair<u64, u64>>& event_data);

         /**
          * Notify that a section of memory has been unmapped
          * Any data held in the defined range is discarded
          */
-        void on_notify_post_memory_unmapped(u32 address_base, u32 size);
+        void on_notify_post_memory_unmapped(u64 event_data1, u64 event_data2);

         /**
          * Notify to check internal state during semaphore wait
@@ -1257,7 +1257,7 @@ void VKGSRender::notify_tile_unbound(u32 tile)
     if (false)
     {
         u32 addr = rsx::get_address(tiles[tile].offset, tiles[tile].location);
-        on_notify_pre_memory_unmapped(addr, tiles[tile].size);
+        on_notify_pre_memory_unmapped(addr, tiles[tile].size, *std::make_unique<std::vector<std::pair<u64, u64>>>());
         m_rtts.invalidate_surface_address(addr, false);
     }
