Mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-23 11:13:19 +01:00

spu: Fix support for multiple lists when one is stalled

This commit is contained in:
eladash 2019-01-07 08:13:17 +02:00 committed by Ivan
parent 58a22d1461
commit f19fd23227
3 changed files with 50 additions and 16 deletions

View File

@@ -2758,8 +2758,17 @@ void spu_recompiler::WRCH(spu_opcode_t op)
}
case MFC_WrListStallAck:
{
auto sub = [](spu_thread* _spu, spu_function_t _ret)
auto sub = [](spu_thread* _spu, spu_function_t _ret, u32 tag)
{
for (u32 i = 0; i < _spu->mfc_size; i++)
{
if (_spu->mfc_queue[i].tag == (tag | 0x80))
{
// Unset stall bit
_spu->mfc_queue[i].tag &= 0x7f;
}
}
_spu->do_mfc(true);
_ret(*_spu, _spu->_ptr<u8>(0), nullptr);
};
@@ -2770,7 +2779,7 @@ void spu_recompiler::WRCH(spu_opcode_t op)
c->btr(SPU_OFF_32(ch_stall_mask), qw0->r32());
c->jnc(ret);
c->lea(*ls, x86::qword_ptr(ret));
c->jmp(imm_ptr<void(*)(spu_thread*, spu_function_t)>(sub));
c->jmp(imm_ptr<void(*)(spu_thread*, spu_function_t, u32)>(sub));
c->align(kAlignCode, 16);
c->bind(ret);
return;

View File

@@ -3347,6 +3347,19 @@ public:
return _spu->do_mfc();
}
static void exec_list_unstall(spu_thread* _spu, u32 tag)
{
for (u32 i = 0; i < _spu->mfc_size; i++)
{
if (_spu->mfc_queue[i].tag == (tag | 0x80))
{
_spu->mfc_queue[i].tag &= 0x7f;
}
}
return exec_mfc(_spu);
}
static bool exec_mfc_cmd(spu_thread* _spu)
{
return _spu->process_mfc_cmd(_spu->ch_mfc_cmd);
@@ -3725,7 +3738,7 @@ public:
const auto _mfc = llvm::BasicBlock::Create(m_context, "", m_function);
m_ir->CreateCondBr(m_ir->CreateICmpNE(_old, _new), _mfc, next);
m_ir->SetInsertPoint(_mfc);
call(&exec_mfc, m_thread);
call(&exec_list_unstall, m_thread, eval(val & 0x1f).value);
m_ir->CreateBr(next);
m_ir->SetInsertPoint(next);
return;

View File

@@ -920,7 +920,7 @@ void spu_thread::do_dma_transfer(const spu_mfc_cmd& args)
bool spu_thread::do_dma_check(const spu_mfc_cmd& args)
{
const u32 mask = 1u << args.tag;
const u32 mask = utils::rol32(1, args.tag);
if (UNLIKELY(mfc_barrier & mask || (args.cmd & (MFC_BARRIER_MASK | MFC_FENCE_MASK) && mfc_fence & mask)))
{
@@ -941,7 +941,7 @@ bool spu_thread::do_dma_check(const spu_mfc_cmd& args)
if (true)
{
const u32 _mask = 1u << mfc_queue[i].tag;
const u32 _mask = utils::rol32(1u, mfc_queue[i].tag);
// A command with barrier hard blocks that tag until it's been dealt with
if (mfc_queue[i].cmd & MFC_BARRIER_MASK)
@@ -981,14 +981,16 @@ bool spu_thread::do_list_transfer(spu_mfc_cmd& args)
{
if (UNLIKELY(item.sb & 0x8000))
{
ch_stall_mask |= (1u << args.tag);
ch_stall_mask |= utils::rol32(1, args.tag);
if (!ch_stall_stat.get_count())
{
ch_event_stat |= SPU_EVENT_SN;
}
ch_stall_stat.set_value((1u << args.tag) | ch_stall_stat.get_value());
ch_stall_stat.set_value(utils::rol32(1, args.tag) | ch_stall_stat.get_value());
args.tag |= 0x80; // Set stalled status
return false;
}
@@ -1100,7 +1102,7 @@ void spu_thread::do_mfc(bool wait)
}
// Select tag bit in the tag mask or the stall mask
const u32 mask = 1u << args.tag;
const u32 mask = utils::rol32(1, args.tag);
if (barrier & mask)
{
@@ -1120,7 +1122,7 @@ void spu_thread::do_mfc(bool wait)
if (args.cmd & MFC_LIST_MASK)
{
if (!(ch_stall_mask & mask))
if (!(args.tag & 0x80))
{
if (do_list_transfer(args))
{
@@ -1358,7 +1360,7 @@ bool spu_thread::process_mfc_cmd(spu_mfc_cmd args)
}
case MFC_PUTQLLUC_CMD:
{
const u32 mask = 1u << args.tag;
const u32 mask = utils::rol32(1, args.tag);
if (UNLIKELY((mfc_barrier | mfc_fence) & mask))
{
@@ -1403,11 +1405,11 @@ bool spu_thread::process_mfc_cmd(spu_mfc_cmd args)
}
mfc_queue[mfc_size++] = args;
mfc_fence |= 1u << args.tag;
mfc_fence |= utils::rol32(1, args.tag);
if (args.cmd & MFC_BARRIER_MASK)
{
mfc_barrier |= 1u << args.tag;
mfc_barrier |= utils::rol32(1, args.tag);
}
return true;
@@ -1427,7 +1429,7 @@ bool spu_thread::process_mfc_cmd(spu_mfc_cmd args)
{
if (LIKELY(args.size <= 0x4000))
{
if (LIKELY(do_dma_check(args) && !(ch_stall_mask & 1u << args.tag)))
if (LIKELY(do_dma_check(args)))
{
if (LIKELY(do_list_transfer(args)))
{
@@ -1436,11 +1438,11 @@ bool spu_thread::process_mfc_cmd(spu_mfc_cmd args)
}
mfc_queue[mfc_size++] = args;
mfc_fence |= 1u << args.tag;
mfc_fence |= utils::rol32(1, args.tag);
if (args.cmd & MFC_BARRIER_MASK)
{
mfc_barrier |= 1u << args.tag;
mfc_barrier |= utils::rol32(1, args.tag);
}
return true;
@@ -2030,11 +2032,21 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
case MFC_WrListStallAck:
{
// Reset stall status for specified tag
const u32 tag_mask = 1u << value;
const u32 tag_mask = utils::rol32(1, value);
if (ch_stall_mask & tag_mask)
{
ch_stall_mask &= ~tag_mask;
for (u32 i = 0; i < mfc_size; i++)
{
if (mfc_queue[i].tag == (value | 0x80))
{
// Unset stall bit
mfc_queue[i].tag &= 0x7f;
}
}
do_mfc(true);
}