Mirror of https://github.com/RPCS3/rpcs3.git

SPU: Enable the MFC list optimization for Atomic RSX FIFO

Eladash 2022-10-09 11:06:40 +03:00 committed by Ivan
parent 750e7b73e3
commit a6dfc3be2f
2 changed files with 47 additions and 22 deletions

rpcs3/Emu/Cell/SPUThread.cpp

@@ -2721,10 +2721,8 @@ bool spu_thread::do_list_transfer(spu_mfc_cmd& args)
{
optimization_compatible = 0;
}
else if (optimization_compatible == MFC_PUT_CMD && (g_cfg.video.strict_rendering_mode || g_cfg.core.rsx_fifo_accuracy))
{
optimization_compatible &= ~MFC_PUT_CMD;
}
rsx::reservation_lock<false, 1> rsx_lock(0, 128, optimization_compatible == MFC_PUT_CMD && (g_cfg.video.strict_rendering_mode || (g_cfg.core.rsx_fifo_accuracy && !g_cfg.core.spu_accurate_dma)));
constexpr u32 ts_mask = 0x7fff;
@@ -3038,16 +3036,6 @@ bool spu_thread::do_list_transfer(spu_mfc_cmd& args)
const u32 size = items[index].ts & ts_mask;
const u32 addr = items[index].ea;
auto check_carry_16 = [](u16 addr, u16 size)
{
#ifdef _MSC_VER
u16 out;
return _addcarry_u16(0, addr, size - 1, &out);
#else
return ((addr + size - 1) >> 16) != 0;
#endif
};
// Try to inline the transfer
if (addr < RAW_SPU_BASE_ADDR && size && optimization_compatible == MFC_GET_CMD)
{
@@ -3120,8 +3108,10 @@ bool spu_thread::do_list_transfer(spu_mfc_cmd& args)
arg_lsa += utils::align<u32>(size, 16);
}
// Avoid inlining huge transfers because it intentionally drops range lock unlock
else if (addr < RAW_SPU_BASE_ADDR && size - 1 <= 0x400 - 1 && optimization_compatible == MFC_PUT_CMD && !check_carry_16(static_cast<u16>(addr), static_cast<u16>(size)))
else if (addr < RAW_SPU_BASE_ADDR && size - 1 <= 0x400 - 1 && optimization_compatible == MFC_PUT_CMD && (addr % 0x10000 + (size - 1)) < 0x10000)
{
rsx_lock.update_if_enabled(addr, size, range_lock);
if (!g_use_rtm)
{
vm::range_lock(range_lock, addr & -128, utils::align<u32>(addr + size, 128) - (addr & -128));
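
Note: on the non-TSX path above, vm::range_lock is taken over a 128-byte-aligned window: the base is addr & -128 and the length is the 128-byte-aligned end of the transfer minus that base. A quick standalone check of the rounding with made-up addr/size values (align128 stands in for utils::align<u32>(x, 128)):

#include <cstdint>
#include <cstdio>

// Stand-in for utils::align<u32>(x, 128): round x up to the next multiple of 128
static std::uint32_t align128(std::uint32_t x) { return (x + 127) & ~127u; }

int main()
{
    const std::uint32_t addr = 0x10064, size = 0x90; // made-up example values

    const std::uint32_t lock_base = addr & ~127u;                       // addr & -128: round down to 128
    const std::uint32_t lock_size = align128(addr + size) - lock_base;  // aligned end minus aligned base

    std::printf("lock [0x%x, 0x%x), size 0x%x\n", lock_base, lock_base + lock_size, lock_size);
    // prints: lock [0x10000, 0x10100), size 0x100
}
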
@@ -3198,6 +3188,8 @@ bool spu_thread::do_list_transfer(spu_mfc_cmd& args)
else if (size)
{
range_lock->release(0);
rsx_lock.unlock();
spu_log.trace("LIST: item=0x%016x, lsa=0x%05x", std::bit_cast<be_t<u64>>(items[index]), arg_lsa | (addr & 0xf));
transfer.eal = addr;
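
Note on the condition change in the PUT inline path of this file: (addr % 0x10000 + (size - 1)) < 0x10000 replaces the removed check_carry_16 lambda; both reject a transfer whose low 16 address bits would carry past a 64 KiB boundary. A standalone check of that equivalence (the crosses_64k_* helpers are illustrative, not part of the commit):

#include <cassert>
#include <cstdint>

// Old test: does addr + size - 1 carry out of 16 bits? (non-MSVC branch of the removed lambda)
static bool crosses_64k_old(std::uint16_t addr, std::uint16_t size)
{
    return ((addr + size - 1) >> 16) != 0;
}

// New test, written inline in the condition: the low 16 bits of the full address
// plus (size - 1) must stay below 0x10000 for the transfer to be inlined
static bool crosses_64k_new(std::uint32_t addr, std::uint32_t size)
{
    return (addr % 0x10000 + (size - 1)) >= 0x10000;
}

int main()
{
    // size in this path is non-zero and at most 0x400, so a few spot checks suffice
    assert(crosses_64k_old(0xFFF0, 0x20) == crosses_64k_new(0x1FFF0, 0x20));   // wraps past 64 KiB
    assert(crosses_64k_old(0x0100, 0x400) == crosses_64k_new(0x20100, 0x400)); // stays inside
    return 0;
}
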

rpcs3/Emu/RSX/RSXThread.h

@@ -42,9 +42,11 @@ namespace rsx
struct rsx_iomap_table
{
static constexpr u32 c_lock_stride = 8096;
std::array<atomic_t<u32>, 4096> ea;
std::array<atomic_t<u32>, 4096> io;
std::array<shared_mutex, 0x8'0000> rs;
std::array<shared_mutex, 0x1'0000'0000 / c_lock_stride> rs;
rsx_iomap_table() noexcept;
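
Note: the rs lock table now covers the whole 4 GiB (0x1'0000'0000) range with one shared_mutex per c_lock_stride bytes, and slots are compared by addr / c_lock_stride (see update_if_enabled below), replacing the fixed 0x8'0000-entry array. A small compile-time sketch of that arithmetic (lock_index is an illustrative helper; 8096 is copied as written in the diff):

#include <cstdint>

constexpr std::uint64_t address_space = 0x1'0000'0000ull; // full 4 GiB range
constexpr std::uint32_t c_lock_stride = 8096;             // bytes guarded per shared_mutex, as declared above

// Size of the new table (integer division, exactly as in the array declaration)
constexpr std::uint64_t new_entries = address_space / c_lock_stride;
static_assert(new_entries == 530504);

// If the old fixed 0x8'0000-entry table also covered 4 GiB, its implied granularity was 8192 bytes
static_assert(address_space / 0x8'0000 == 8192);

// Illustrative slot index, mirroring the comparison done in update_if_enabled
constexpr std::uint64_t lock_index(std::uint32_t addr) { return addr / c_lock_stride; }
static_assert(lock_index(0) == lock_index(8095));    // same slot
static_assert(lock_index(8095) != lock_index(8096)); // next slot
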
@@ -892,15 +894,18 @@ namespace rsx
template<bool IsFullLock = false, uint Stride = 128>
class reservation_lock
{
u32 addr = 0, length = 0;
bool locked = false;
u32 addr = 0;
u32 length = 0;
inline void lock_range(u32 addr, u32 length)
{
if (!get_current_renderer()->iomap_table.lock<IsFullLock, Stride>(addr, length, get_current_cpu_thread()))
{
length = 0;
}
this->addr = addr;
this->length = length;
this->locked = get_current_renderer()->iomap_table.lock<IsFullLock, Stride>(addr, length, get_current_cpu_thread());
}
public:
@@ -948,13 +953,41 @@
}
}
~reservation_lock()
// Very special utility for batched transfers (SPU related)
template <typename T = void>
void update_if_enabled(u32 addr, u32 _length, const std::add_pointer_t<T>& lock_release = std::add_pointer_t<void>{})
{
if (locked)
// This check is not perfect but it covers the important cases fast (this check is only an optimization - forcing true disables it)
if (length && (this->addr / rsx_iomap_table::c_lock_stride != addr / rsx_iomap_table::c_lock_stride || (addr % rsx_iomap_table::c_lock_stride + _length) > rsx_iomap_table::c_lock_stride))
{
if constexpr (!std::is_void_v<T>)
{
// See SPUThread.cpp
lock_release->release(0);
}
unlock();
lock_range(addr, _length);
}
}
void unlock(bool destructor = false)
{
if (length)
{
get_current_renderer()->iomap_table.unlock<IsFullLock, Stride>(addr, length);
if (!destructor)
{
length = 0;
}
}
}
~reservation_lock()
{
unlock(true);
}
};
class eng_lock
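
Note: the reworked reservation_lock drops the separate locked flag (length == 0 now means nothing is held), routes the destructor through unlock(true), and gains update_if_enabled, which only re-acquires the lock when a new address/length pair can fall outside the currently held c_lock_stride slot. A self-contained toy model of that slot check (illustrative names and dummy lock bookkeeping, not RPCS3 code):

#include <cstdint>
#include <cstdio>

constexpr std::uint32_t c_lock_stride = 8096; // same stride constant as above

struct toy_lock
{
    std::uint32_t addr = 0;
    std::uint32_t length = 0;
    int relocks = 0; // counts how often a new slot had to be taken

    void lock_range(std::uint32_t a, std::uint32_t len)
    {
        addr = a;
        length = len;
        ++relocks; // stand-in for iomap_table.lock()
    }

    // Mirrors the check in reservation_lock::update_if_enabled: re-lock only when
    // the new range can fall outside the currently held stride slot
    void update_if_enabled(std::uint32_t a, std::uint32_t len)
    {
        if (length && (addr / c_lock_stride != a / c_lock_stride || (a % c_lock_stride + len) > c_lock_stride))
        {
            unlock();
            lock_range(a, len);
        }
    }

    void unlock() { length = 0; } // stand-in for iomap_table.unlock()
};

int main()
{
    toy_lock lock;
    lock.lock_range(0, 128); // initial acquisition, as in do_list_transfer

    // Three list items: the first two stay in slot 0, the third moves to slot 1
    const std::uint32_t items[][2] = { {0x100, 0x80}, {0x200, 0x80}, {0x2000, 0x80} };
    for (const auto& it : items)
        lock.update_if_enabled(it[0], it[1]);

    std::printf("locks taken: %d\n", lock.relocks); // prints: locks taken: 2
}

For batched SPU list transfers, many small items that land in the same 8096-byte slot therefore pay for the RSX-side lock once per slot rather than once per item.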