mirror of https://github.com/RPCS3/rpcs3.git synced 2024-11-26 12:42:41 +01:00

CellSpurs: JobChain functions and some more (#9080)

Eladash 2020-10-16 20:35:20 +03:00 committed by GitHub
parent 583ed61712
commit 5185ddb8b5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 498 additions and 159 deletions


@ -155,11 +155,6 @@ void fmt_class_string<SpursWorkloadState>::format(std::string& out, u64 arg)
});
}
extern u64 ppu_ldarx(ppu_thread&, u32);
extern u32 ppu_lwarx(ppu_thread&, u32);
extern bool ppu_stwcx(ppu_thread&, u32, u32);
extern bool ppu_stdcx(ppu_thread&, u32, u64);
error_code sys_spu_image_close(ppu_thread&, vm::ptr<sys_spu_image> img);
//----------------------------------------------------------------------------
@ -321,7 +316,7 @@ s32 cellSpursReadyCountAdd(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, v
//s32 cellSpursGetWorkloadInfo();
//s32 cellSpursSetExceptionEventHandler();
//s32 cellSpursUnsetExceptionEventHandler();
//s32 _cellSpursWorkloadFlagReceiver(vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set);
s32 _cellSpursWorkloadFlagReceiver(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set);
//s32 _cellSpursWorkloadFlagReceiver2();
//s32 cellSpursRequestIdleSpu();
@ -459,8 +454,13 @@ namespace _spurs
namespace _spurs
{
s32 check_job_chain_attribute(u32 sdkVer, vm::cptr<u64> jcEntry, u16 sizeJobDescr, u16 maxGrabbedJob
, u64 priorities, u32 maxContention, u8 autoSpuCount, u32 tag1, u32 tag2
, u8 isFixedMemAlloc, u32 maxSizeJob, u32 initSpuCount);
, u64 priorities, u32 maxContention, u8 autoSpuCount, u32 tag1, u32 tag2
, u8 isFixedMemAlloc, u32 maxSizeJob, u32 initSpuCount);
s32 create_job_chain(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::cptr<u64> jobChainEntry, u16 sizeJob
, u16 maxGrabbedJob, vm::cptr<u8[8]> prio, u32 maxContention, b8 autoReadyCount
, u32 tag1, u32 tag2, u32 HaltOnError, vm::cptr<char> name, u32 param_13, u32 param_14);
}
//s32 cellSpursCreateJobChainWithAttribute();
@ -494,9 +494,8 @@ s32 cellSpursRunJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain);
s32 _spurs::get_sdk_version()
{
s32 version = -1;
return process_get_sdk_version(process_getpid(), version) || version == -1 ? 0x465000 : version;
const s32 version = static_cast<s32>(g_ps3_process_info.sdk_ver);
return version == -1 ? 0x485000 : version;
}
bool _spurs::is_libprof_loaded()
@ -772,7 +771,7 @@ s32 _spurs::wakeup_shutdown_completion_waiter(ppu_thread& ppu, vm::ptr<CellSpurs
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
}
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD))
if (wid >= spurs->max_workloads())
{
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
}
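This workload-id bound check, repeated in many functions below, is now delegated to a max_workloads() helper added to CellSpurs in the header further down. A minimal standalone sketch of the same accessor pattern; the constants and flag value here are stand-ins, not the real SPURS definitions:

#include <cstdint>

// Simplified stand-ins for the SPURS constants and flag bit (assumed values, illustration only).
constexpr uint32_t MAX_WORKLOAD      = 16;
constexpr uint32_t MAX_WORKLOAD2     = 32;
constexpr uint8_t  FLAG_32_WORKLOADS = 0x40;

struct SpursLike
{
    uint8_t flags1 = 0;

    // Mirrors the new CellSpurs::max_workloads(): one place decides 16 vs 32 workloads.
    uint32_t max_workloads() const
    {
        return (flags1 & FLAG_32_WORKLOADS) ? MAX_WORKLOAD2 : MAX_WORKLOAD;
    }
};

bool valid_wid(const SpursLike& spurs, uint32_t wid)
{
    return wid < spurs.max_workloads(); // replaces the inline ternary at every call site
}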
@ -1731,7 +1730,7 @@ s32 cellSpursSetMaxContention(vm::ptr<CellSpurs> spurs, u32 wid, u32 maxContenti
return CELL_SPURS_CORE_ERROR_ALIGN;
}
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD))
if (wid >= spurs->max_workloads())
{
return CELL_SPURS_CORE_ERROR_INVAL;
}
@ -1751,7 +1750,7 @@ s32 cellSpursSetMaxContention(vm::ptr<CellSpurs> spurs, u32 wid, u32 maxContenti
maxContention = CELL_SPURS_MAX_SPU;
}
spurs->wklMaxContention[wid % CELL_SPURS_MAX_WORKLOAD].atomic_op([spurs, wid, maxContention](u8& value)
vm::reservation_light_op(spurs->wklMaxContention[wid % CELL_SPURS_MAX_WORKLOAD], [&](atomic_t<u8>& value)
{
value &= wid < CELL_SPURS_MAX_WORKLOAD ? 0xF0 : 0x0F;
value |= wid < CELL_SPURS_MAX_WORKLOAD ? maxContention : maxContention << 4;
@ -1775,7 +1774,7 @@ s32 cellSpursSetPriorities(vm::ptr<CellSpurs> spurs, u32 wid, vm::cptr<u8[8]> pr
return CELL_SPURS_CORE_ERROR_ALIGN;
}
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD))
if (wid >= spurs->max_workloads())
{
return CELL_SPURS_CORE_ERROR_INVAL;
}
@ -1822,7 +1821,7 @@ s32 cellSpursSetPriority(vm::ptr<CellSpurs> spurs, u32 wid, u32 spuId, u32 prior
if (!spurs.aligned())
return CELL_SPURS_CORE_ERROR_ALIGN;
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD))
if (wid >= spurs->max_workloads())
return CELL_SPURS_CORE_ERROR_INVAL;
return CELL_OK;
@ -2194,7 +2193,7 @@ s32 cellSpursTraceStop(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
//----------------------------------------------------------------------------
/// Initialize attributes of a workload
s32 _cellSpursWorkloadAttributeInitialize(vm::ptr<CellSpursWorkloadAttribute> attr, u32 revision, u32 sdkVersion, vm::cptr<void> pm, u32 size, u64 data, vm::cptr<u8[8]> priority, u32 minCnt, u32 maxCnt)
s32 _cellSpursWorkloadAttributeInitialize(ppu_thread& ppu, vm::ptr<CellSpursWorkloadAttribute> attr, u32 revision, u32 sdkVersion, vm::cptr<void> pm, u32 size, u64 data, vm::cptr<u8[8]> priority, u32 minCnt, u32 maxCnt)
{
cellSpurs.warning("_cellSpursWorkloadAttributeInitialize(attr=*0x%x, revision=%d, sdkVersion=0x%x, pm=*0x%x, size=0x%x, data=0x%llx, priority=*0x%x, minCnt=0x%x, maxCnt=0x%x)",
attr, revision, sdkVersion, pm, size, data, priority, minCnt, maxCnt);
@ -2241,7 +2240,7 @@ s32 _cellSpursWorkloadAttributeInitialize(vm::ptr<CellSpursWorkloadAttribute> at
}
/// Set the name of a workload
s32 cellSpursWorkloadAttributeSetName(vm::ptr<CellSpursWorkloadAttribute> attr, vm::cptr<char> nameClass, vm::cptr<char> nameInstance)
s32 cellSpursWorkloadAttributeSetName(ppu_thread& ppu, vm::ptr<CellSpursWorkloadAttribute> attr, vm::cptr<char> nameClass, vm::cptr<char> nameInstance)
{
cellSpurs.warning("cellSpursWorkloadAttributeSetName(attr=*0x%x, nameClass=%s, nameInstance=%s)", attr, nameClass, nameInstance);
@ -2304,7 +2303,7 @@ s32 _spurs::add_workload(vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<vo
u32 wnum;
const u32 wmax = spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD; // TODO: check if can be changed
spurs->wklEnabled.atomic_op([spurs, wmax, &wnum](be_t<u32>& value)
vm::reservation_light_op(spurs->wklEnabled, [&](atomic_be_t<u32>& value)
{
wnum = std::countl_one<u32>(value); // found empty position
if (wnum < wmax)
@ -2387,23 +2386,18 @@ s32 _spurs::add_workload(vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<vo
spurs->wklIdleSpuCountOrReadyCount2[wnum] = 0;
}
if (wnum <= 15)
{
spurs->wklMaxContention[wnum].atomic_op([maxContention](u8& v)
auto [res, rtime] = vm::reservation_lock(spurs.addr());
spurs->wklMaxContention[index].atomic_op([wnum, maxContention](u8& v)
{
v &= ~0xf;
v |= (maxContention > 8 ? 8 : maxContention);
});
spurs->wklSignal1.fetch_and(~(0x8000 >> index)); // clear bit in wklFlag1
}
else
{
spurs->wklMaxContention[index].atomic_op([maxContention](u8& v)
{
v &= ~0xf0;
v &= (wnum <= 15 ? ~0xf : ~0xf0);
v |= (maxContention > 8 ? 8 : maxContention) << 4;
});
spurs->wklSignal2.fetch_and(~(0x8000 >> index)); // clear bit in wklFlag2
(wnum <= 15 ? spurs->wklSignal1 : spurs->wklSignal2).fetch_and(~(0x8000 >> index));
res.release(rtime + 128);
res.notify_all();
}
spurs->wklFlagReceiver.compare_and_swap(wnum, 0xff);
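The two copies of the nibble update above are folded into one atomic_op whose mask and shift are selected by wnum <= 15, and the update now runs under a reservation lock so the signal-bit clear lands in the same 128-byte transaction. A standalone sketch of just the nibble-select update, using std::atomic instead of the RPCS3 reservation API:

#include <atomic>
#include <cstdint>

// One byte holds the contention limit (0..8) for two workloads:
// low nibble for index < 16, high nibble for index >= 16.
void set_max_contention(std::atomic<uint8_t>& cell, uint32_t wnum, uint32_t maxContention)
{
    const uint8_t capped = static_cast<uint8_t>(maxContention > 8 ? 8 : maxContention);

    uint8_t old = cell.load();
    uint8_t desired;
    do
    {
        desired = old;
        desired &= (wnum <= 15 ? 0xF0 : 0x0F);                                    // clear only our nibble
        desired |= (wnum <= 15 ? capped : static_cast<uint8_t>(capped << 4));     // write the new limit
    }
    while (!cell.compare_exchange_weak(old, desired));
}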
@ -2455,7 +2449,7 @@ s32 cellSpursAddWorkload(vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<vo
}
/// Add workload
s32 cellSpursAddWorkloadWithAttribute(vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<CellSpursWorkloadAttribute> attr)
s32 cellSpursAddWorkloadWithAttribute(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid, vm::cptr<CellSpursWorkloadAttribute> attr)
{
cellSpurs.warning("cellSpursAddWorkloadWithAttribute(spurs=*0x%x, wid=*0x%x, attr=*0x%x)", spurs, wid, attr);
@ -2488,60 +2482,58 @@ s32 cellSpursShutdownWorkload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid
if (!spurs.aligned())
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD))
if (wid >= spurs->max_workloads())
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
if (spurs->exception)
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
u32 state_new;
while (true)
bool send_event;
s32 rc, old_state;
if (!vm::reservation_op(vm::unsafe_ptr_cast<spurs_wkl_state_op>(spurs.ptr(&CellSpurs::wklState1)), [&](spurs_wkl_state_op& op)
{
const u32 value = ppu_lwarx(ppu, vm::get_addr(&spurs->wklState(wid & -4)));
union
{
le_t<u32> _u32;
u8 _u8[4];
} data{value};
const u32 state = data._u8[wid % 4];
auto& state = wid < CELL_SPURS_MAX_WORKLOAD ? op.wklState1[wid] : op.wklState2[wid % 16];
if (state <= SPURS_WKL_STATE_PREPARING)
{
// Cleanly leave the function without traces of reservation
ppu.raddr = 0;
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
rc = CELL_SPURS_POLICY_MODULE_ERROR_STAT;
return false;
}
if (state == SPURS_WKL_STATE_SHUTTING_DOWN || state == SPURS_WKL_STATE_REMOVABLE)
{
ppu.raddr = 0;
return CELL_OK;
rc = CELL_OK;
return false;
}
state_new = spurs->wklStatus(wid) ? SPURS_WKL_STATE_SHUTTING_DOWN : SPURS_WKL_STATE_REMOVABLE;
data._u8[wid % 4] = state_new;
auto& status = wid < CELL_SPURS_MAX_WORKLOAD ? op.wklStatus1[wid] : op.wklStatus2[wid % 16];
if (ppu_stwcx(ppu, vm::get_addr(&spurs->wklState(wid & -4)), data._u32))
old_state = state = status ? SPURS_WKL_STATE_SHUTTING_DOWN : SPURS_WKL_STATE_REMOVABLE;
if (state == SPURS_WKL_STATE_SHUTTING_DOWN)
{
break;
op.sysSrvMsgUpdateWorkload = -1;
rc = CELL_OK;
return true;
}
auto& event = wid < CELL_SPURS_MAX_WORKLOAD ? op.wklEvent1[wid] : op.wklEvent2[wid % 16];
send_event = event & 0x12 && !(event & 1);
event |= 1;
rc = CELL_OK;
return true;
}))
{
return rc;
}
if (state_new == SPURS_WKL_STATE_SHUTTING_DOWN)
if (old_state == SPURS_WKL_STATE_SHUTTING_DOWN)
{
spurs->sysSrvMsgUpdateWorkload = -1;
spurs->sysSrvMessage = 0;
spurs->sysSrvMessage = -1;
return CELL_OK;
}
const auto old = vm::reservation_light_op(spurs->wklEvent(wid), [](atomic_t<u8>& v)
{
return v.fetch_or(1);
});
if (old & 0x12 && !(old & 1) && sys_event_port_send(spurs->eventPort, 0, 0, (1u << 31) >> wid))
if (send_event && sys_event_port_send(spurs->eventPort, 0, 0, (1u << 31) >> wid))
{
return CELL_SPURS_CORE_ERROR_STAT;
}
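The manual ppu_lwarx/ppu_stwcx retry loop is replaced by vm::reservation_op over a 128-byte spurs_wkl_state_op view, so workload state, status and event bits are inspected and updated as one transaction that can also refuse to write. A rough standalone analogue of that "commit everything or nothing" shape, built on a std::atomic CAS loop over a packed word; the field packing and state values here are illustrative only:

#include <atomic>
#include <cstdint>
#include <optional>

// Illustrative packing: low byte = workload state, bit 8 = "event" flag.
enum : uint8_t { STATE_READY = 2, STATE_SHUT = 3, STATE_REMOVABLE = 4 };

// Returns the new state on success, or std::nullopt if the transaction refused to run
// (mirrors the reservation_op callback returning false to abort without storing).
std::optional<uint8_t> shutdown_transaction(std::atomic<uint32_t>& packed, bool still_running)
{
    uint32_t old = packed.load();
    for (;;)
    {
        const uint8_t state = old & 0xff;
        if (state != STATE_READY)
        {
            return std::nullopt; // nothing to shut down; no store happens
        }

        const uint8_t new_state = still_running ? STATE_SHUT : STATE_REMOVABLE;
        uint32_t desired = (old & ~0xffu) | new_state; // update the state byte
        desired |= 0x100;                              // and the event bit, atomically with it

        if (packed.compare_exchange_weak(old, desired))
        {
            return new_state;
        }
        // CAS failed: 'old' was refreshed, retry the whole decision.
    }
}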
@ -2550,9 +2542,69 @@ s32 cellSpursShutdownWorkload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid
}
/// Wait for workload shutdown
s32 cellSpursWaitForWorkloadShutdown()
s32 cellSpursWaitForWorkloadShutdown(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid)
{
UNIMPLEMENTED_FUNC(cellSpurs);
cellSpurs.trace("cellSpursWaitForWorkloadShutdown(spurs=*0x%x, wid=0x%x)", spurs, wid);
if (!spurs)
return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
if (!spurs.aligned())
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
if (wid >= spurs->max_workloads())
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
if (!(spurs->wklEnabled & (0x80000000u >> wid)))
return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
if (spurs->exception)
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
auto& info = spurs->wklSyncInfo(wid);
const bool ok = vm::reservation_light_op(info.x28, [](atomic_be_t<u32>& state)
{
return state.fetch_op([](be_t<u32>& val)
{
if (val)
{
return false;
}
val = 2;
return true;
}).second;
});
if (!ok)
{
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
}
const bool wait_sema = vm::reservation_light_op<true>(spurs->wklEvent(wid), [](atomic_t<u8>& event)
{
return event.fetch_op([](u8& event)
{
if ((event & 1) == 0 || (event & 0x22) == 0x2)
{
event |= 0x10;
return true;
}
return false;
}).second;
});
if (wait_sema)
{
verify(HERE), sys_semaphore_wait(ppu, static_cast<u32>(info.sem), 0) == 0;
}
// Reverified
if (spurs->exception)
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
return CELL_OK;
}
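Both guards in cellSpursWaitForWorkloadShutdown rely on atomic fetch_op: a lambda inspects the value, optionally mutates it, and the caller learns whether the mutation was committed. A minimal standalone equivalent of that pattern, a sketch of the idea rather than the real atomic_t API:

#include <atomic>
#include <cstdint>
#include <utility>

// Apply 'op' to a copy of the current value; commit it only if 'op' returns true.
// Returns {old value, whether the commit happened}.
template <typename T, typename F>
std::pair<T, bool> fetch_op(std::atomic<T>& var, F op)
{
    T old = var.load();
    for (;;)
    {
        T next = old;
        if (!op(next))
        {
            return {old, false}; // predicate refused: value left untouched
        }
        if (var.compare_exchange_weak(old, next))
        {
            return {old, true};
        }
    }
}

// Usage mirroring the x28 guard above: only the first caller flips 0 -> 2 and goes on to wait.
bool try_arm_wait(std::atomic<uint32_t>& state)
{
    return fetch_op(state, [](uint32_t& v)
    {
        if (v != 0)
            return false;
        v = 2;
        return true;
    }).second;
}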
@ -2563,10 +2615,59 @@ s32 cellSpursRemoveSystemWorkloadForUtility()
}
/// Remove workload
s32 cellSpursRemoveWorkload()
s32 cellSpursRemoveWorkload(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid)
{
UNIMPLEMENTED_FUNC(cellSpurs);
return CELL_OK;
cellSpurs.warning("cellSpursRemoveWorkload(spurs=*0x%x, wid=%u)", spurs, wid);
if (!spurs)
return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
if (!spurs.aligned())
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
if (wid >= CELL_SPURS_MAX_WORKLOAD2 || (wid >= CELL_SPURS_MAX_WORKLOAD && (spurs->flags1 & SF1_32_WORKLOADS) == 0))
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
if (!(spurs->wklEnabled.load() & (0x80000000u >> wid)))
return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
if (spurs->exception)
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
switch (spurs->wklState(wid))
{
case SPURS_WKL_STATE_SHUTTING_DOWN: return CELL_SPURS_POLICY_MODULE_ERROR_BUSY;
case SPURS_WKL_STATE_REMOVABLE: break;
default: return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
}
if (spurs->wklFlagReceiver == wid)
{
verify(HERE), ppu_execute<&_cellSpursWorkloadFlagReceiver>(ppu, spurs, wid, 0) == 0;
}
s32 rc;
vm::reservation_op(vm::unsafe_ptr_cast<spurs_wkl_state_op>(spurs.ptr(&CellSpurs::wklState1)), [&](spurs_wkl_state_op& op)
{
auto& state = wid < CELL_SPURS_MAX_WORKLOAD ? op.wklState1[wid] : op.wklState2[wid % 16];
// Re-verification, does not exist on realfw
switch (state)
{
case SPURS_WKL_STATE_SHUTTING_DOWN: rc = CELL_SPURS_POLICY_MODULE_ERROR_BUSY; return false;
case SPURS_WKL_STATE_REMOVABLE: break;
default: rc = CELL_SPURS_POLICY_MODULE_ERROR_STAT; return false;
}
state = SPURS_WKL_STATE_NON_EXISTENT;
op.wklEnabled &= ~(0x80000000u >> wid);
op.wklMskB &= ~(0x80000000u >> wid);
rc = CELL_OK;
return true;
});
return rc;
}
s32 cellSpursWakeUp(ppu_thread& ppu, vm::ptr<CellSpurs> spurs)
@ -2633,14 +2734,10 @@ s32 cellSpursSendWorkloadSignal(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 w
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
}
if (wid >= CELL_SPURS_MAX_WORKLOAD)
vm::reservation_light_op<true>(wid < CELL_SPURS_MAX_WORKLOAD ? spurs->wklSignal1 : spurs->wklSignal2, [&](atomic_be_t<u16>& sig)
{
spurs->wklSignal2 |= 0x8000 >> (wid & 0x0F);
}
else
{
spurs->wklSignal1 |= 0x8000 >> wid;
}
sig |= 0x8000 >> (wid % 16);
});
return CELL_OK;
}
@ -2679,7 +2776,7 @@ s32 cellSpursReadyCountStore(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid,
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
}
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD) || value > 0xffu)
if (wid >= spurs->max_workloads() || value > 0xffu)
{
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
}
@ -2717,7 +2814,7 @@ s32 cellSpursReadyCountSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid,
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
}
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD) || swap > 0xffu)
if (wid >= spurs->max_workloads() || swap > 0xffu)
{
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
}
@ -2755,7 +2852,7 @@ s32 cellSpursReadyCountCompareAndSwap(ppu_thread& ppu, vm::ptr<CellSpurs> spurs,
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
}
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD) || (swap | compare) > 0xffu)
if (wid >= spurs->max_workloads() || (swap | compare) > 0xffu)
{
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
}
@ -2796,7 +2893,7 @@ s32 cellSpursReadyCountAdd(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, v
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
}
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD))
if (wid >= spurs->max_workloads())
{
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
}
@ -2887,7 +2984,7 @@ s32 cellSpursUnsetExceptionEventHandler()
}
/// Set/unset the recipient of the workload flag
s32 _cellSpursWorkloadFlagReceiver(vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set)
s32 _cellSpursWorkloadFlagReceiver(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set)
{
cellSpurs.warning("_cellSpursWorkloadFlagReceiver(spurs=*0x%x, wid=%d, is_set=%d)", spurs, wid, is_set);
@ -2901,7 +2998,7 @@ s32 _cellSpursWorkloadFlagReceiver(vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
}
if (wid >= (spurs->flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD))
if (wid >= spurs->max_workloads())
{
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
}
@ -2918,47 +3015,56 @@ s32 _cellSpursWorkloadFlagReceiver(vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set
std::atomic_thread_fence(std::memory_order_acq_rel);
if (s32 res = spurs->wklFlag.flag.atomic_op([spurs, wid, is_set](be_t<u32>& flag) -> s32
struct alignas(128) wklFlagOp
{
if (is_set)
{
if (spurs->wklFlagReceiver != 0xff)
{
return CELL_SPURS_POLICY_MODULE_ERROR_BUSY;
}
}
else
{
if (spurs->wklFlagReceiver != wid)
{
return CELL_SPURS_POLICY_MODULE_ERROR_PERM;
}
}
flag = -1;
return 0;
}))
{
return res;
}
u8 uns[0x6C];
be_t<u32> Flag; // 0x6C
u8 uns2[0x7];
u8 FlagReceiver; // 0x77
};
spurs->wklFlagReceiver.atomic_op([wid, is_set](u8& FR)
s32 res;
vm::reservation_op(vm::unsafe_ptr_cast<wklFlagOp>(spurs), [&](wklFlagOp& val)
{
if (is_set)
{
if (FR == 0xff)
if (val.FlagReceiver != 0xff)
{
FR = static_cast<u8>(wid);
res = CELL_SPURS_POLICY_MODULE_ERROR_BUSY;
return;
}
}
else
{
if (FR == wid)
if (val.FlagReceiver != wid)
{
FR = 0xff;
res = CELL_SPURS_POLICY_MODULE_ERROR_PERM;
return;
}
}
val.Flag = -1;
if (is_set)
{
if (val.FlagReceiver == 0xff)
{
val.FlagReceiver = static_cast<u8>(wid);
}
}
else
{
if (val.FlagReceiver == wid)
{
val.FlagReceiver = 0xff;
}
}
res = CELL_OK;
return;
});
return CELL_OK;
return res;
}
/// Set/unset the recipient of the workload flag
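The rewrite above folds what used to be two separate atomic_ops (resetting wklFlag.flag, then updating wklFlagReceiver) into one reservation_op over a 128-byte overlay, so a reader can never observe the flag cleared without the matching receiver change. A standalone sketch of the same idea, keeping both fields in one atomic word and changing them in a single CAS; the packing is illustrative:

#include <atomic>
#include <cstdint>
#include <optional>

// Low 8 bits: receiver id (0xff = none). Upper 8 bits: flag value (illustrative packing).
constexpr uint16_t pack(uint8_t flag, uint8_t receiver) { return uint16_t(flag) << 8 | receiver; }

// Claim the flag for workload 'wid': succeeds only if no receiver is set yet,
// resetting the flag and installing the receiver in one indivisible step.
std::optional<uint16_t> claim_flag(std::atomic<uint16_t>& cell, uint8_t wid)
{
    uint16_t old = cell.load();
    for (;;)
    {
        if ((old & 0xff) != 0xff)
        {
            return std::nullopt; // someone else is already the receiver (ERROR_BUSY in the real code)
        }
        const uint16_t next = pack(0xff, wid); // flag reset and receiver set together
        if (cell.compare_exchange_weak(old, next))
        {
            return next;
        }
    }
}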
@ -3044,7 +3150,7 @@ s32 cellSpursEventFlagClear(vm::ptr<CellSpursEventFlag> eventFlag, u16 bits)
/// Set a SPURS event flag
s32 cellSpursEventFlagSet(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, u16 bits)
{
cellSpurs.warning("cellSpursEventFlagSet(eventFlag=*0x%x, bits=0x%x)", eventFlag, bits);
cellSpurs.trace("cellSpursEventFlagSet(eventFlag=*0x%x, bits=0x%x)", eventFlag, bits);
if (!eventFlag)
{
@ -3056,7 +3162,7 @@ s32 cellSpursEventFlagSet(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag
return CELL_SPURS_TASK_ERROR_ALIGN;
}
if (eventFlag->direction != CELL_SPURS_EVENT_FLAG_SPU2PPU && eventFlag->direction != CELL_SPURS_EVENT_FLAG_ANY2ANY)
if (auto dir = eventFlag->direction; dir != CELL_SPURS_EVENT_FLAG_SPU2PPU && dir != CELL_SPURS_EVENT_FLAG_ANY2ANY)
{
return CELL_SPURS_TASK_ERROR_PERM;
}
@ -3066,7 +3172,8 @@ s32 cellSpursEventFlagSet(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag
u16 ppuEvents;
u16 pendingRecv;
u16 pendingRecvTaskEvents[16];
eventFlag->ctrl.atomic_op([eventFlag, bits, &send, &ppuWaitSlot, &ppuEvents, &pendingRecv, &pendingRecvTaskEvents](CellSpursEventFlag::ControlSyncVar& ctrl)
vm::reservation_op(vm::unsafe_ptr_cast<CellSpursEventFlag_x00>(eventFlag), [bits, &send, &ppuWaitSlot, &ppuEvents, &pendingRecv, &pendingRecvTaskEvents](CellSpursEventFlag_x00& eventFlag)
{
send = false;
ppuWaitSlot = 0;
@ -3075,7 +3182,8 @@ s32 cellSpursEventFlagSet(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag
u16 eventsToClear = 0;
if (eventFlag->direction == CELL_SPURS_EVENT_FLAG_ANY2ANY && ctrl.ppuWaitMask)
auto& ctrl = eventFlag.ctrl;
if (eventFlag.direction == CELL_SPURS_EVENT_FLAG_ANY2ANY && ctrl.ppuWaitMask)
{
u16 ppuRelevantEvents = (ctrl.events | bits) & ctrl.ppuWaitMask;
@ -3095,17 +3203,17 @@ s32 cellSpursEventFlagSet(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag
s32 i = CELL_SPURS_EVENT_FLAG_MAX_WAIT_SLOTS - 1;
s32 j = 0;
u16 relevantWaitSlots = eventFlag->spuTaskUsedWaitSlots & ~ctrl.spuTaskPendingRecv;
u16 relevantWaitSlots = eventFlag.spuTaskUsedWaitSlots & ~ctrl.spuTaskPendingRecv;
while (relevantWaitSlots)
{
if (relevantWaitSlots & 0x0001)
{
u16 spuTaskRelevantEvents = (ctrl.events | bits) & eventFlag->spuTaskWaitMask[i];
u16 spuTaskRelevantEvents = (ctrl.events | bits) & eventFlag.spuTaskWaitMask[i];
// Unblock the waiting SPU task if either all the bits being waited by the task have been set or
// if the wait mode of the task is OR and at least one bit the thread is waiting on has been set
if ((eventFlag->spuTaskWaitMask[i] & ~spuTaskRelevantEvents) == 0 ||
(((eventFlag->spuTaskWaitMode >> j) & 0x0001) == CELL_SPURS_EVENT_FLAG_OR && spuTaskRelevantEvents != 0))
if ((eventFlag.spuTaskWaitMask[i] & ~spuTaskRelevantEvents) == 0 ||
(((eventFlag.spuTaskWaitMode >> j) & 0x0001) == CELL_SPURS_EVENT_FLAG_OR && spuTaskRelevantEvents != 0))
{
eventsToClear |= spuTaskRelevantEvents;
pendingRecv |= 1 << j;
@ -3122,7 +3230,7 @@ s32 cellSpursEventFlagSet(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFlag
ctrl.spuTaskPendingRecv |= pendingRecv;
// If the clear flag is AUTO then clear the bits consumed by all tasks marked to be unblocked
if (eventFlag->clearMode == CELL_SPURS_EVENT_FLAG_CLEAR_AUTO)
if (eventFlag.clearMode == CELL_SPURS_EVENT_FLAG_CLEAR_AUTO)
{
ctrl.events &= ~eventsToClear;
}
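The wake-up test above unblocks a waiting SPU task either when every bit in its wait mask is already set (AND mode) or, in OR mode, when at least one requested bit is set. A small standalone restatement of just that predicate; the names are illustrative, not the SPURS API:

#include <cstdint>

enum class WaitMode { And, Or };

// 'set_bits' are the event bits currently raised (including the ones being set now),
// 'wait_mask' is what the task asked for.
bool task_unblocked(uint16_t set_bits, uint16_t wait_mask, WaitMode mode)
{
    const uint16_t relevant = set_bits & wait_mask;  // requested bits that are set

    if ((wait_mask & ~relevant) == 0)
        return true;                                 // AND semantics: everything requested is set

    return mode == WaitMode::Or && relevant != 0;    // OR semantics: any one bit is enough
}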
@ -3187,7 +3295,7 @@ s32 _spurs::event_flag_wait(ppu_thread& ppu, vm::ptr<CellSpursEventFlag> eventFl
return CELL_SPURS_TASK_ERROR_INVAL;
}
if (eventFlag->direction != CELL_SPURS_EVENT_FLAG_SPU2PPU && eventFlag->direction != CELL_SPURS_EVENT_FLAG_ANY2ANY)
if (auto dir = eventFlag->direction; dir != CELL_SPURS_EVENT_FLAG_SPU2PPU && dir != CELL_SPURS_EVENT_FLAG_ANY2ANY)
{
return CELL_SPURS_TASK_ERROR_PERM;
}
@ -3670,18 +3778,18 @@ s32 _spurs::create_taskset(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<Ce
taskset->size = size;
vm::var<CellSpursWorkloadAttribute> wkl_attr;
_cellSpursWorkloadAttributeInitialize(wkl_attr, 1 /*revision*/, 0x33 /*sdk_version*/, vm::cptr<void>::make(SPURS_IMG_ADDR_TASKSET_PM), 0x1E40 /*pm_size*/,
taskset.addr(), priority, 8 /*min_contention*/, max_contention);
_cellSpursWorkloadAttributeInitialize(ppu, wkl_attr, 1, 0x330000, vm::cptr<void>::make(SPURS_IMG_ADDR_TASKSET_PM), 0x1E40 /*pm_size*/,
taskset.addr(), priority, 8, max_contention);
// TODO: Check return code
cellSpursWorkloadAttributeSetName(wkl_attr, vm::null, name);
cellSpursWorkloadAttributeSetName(ppu, wkl_attr, vm::null, name);
// TODO: Check return code
// TODO: cellSpursWorkloadAttributeSetShutdownCompletionEventHook(wkl_attr, hook, taskset);
// TODO: Check return code
vm::var<u32> wid;
cellSpursAddWorkloadWithAttribute(spurs, wid, wkl_attr);
cellSpursAddWorkloadWithAttribute(ppu, spurs, wid, wkl_attr);
// TODO: Check return code
taskset->wkl_flag_wait_task = 0x80;
@ -3956,21 +4064,20 @@ s32 _cellSpursSendSignal(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32
}
int signal;
for (;;)
{
const u32 addr = taskset.ptr(&CellSpursTaskset::signalled).ptr(&decltype(CellSpursTaskset::signalled)::values, taskId / 32).addr();
u32 signalled = ppu_lwarx(ppu, addr);
const u32 running = taskset->running.values[taskId / 32];
const u32 ready = taskset->ready.values[taskId / 32];
const u32 waiting = taskset->waiting.values[taskId / 32];
const u32 enabled = taskset->enabled.values[taskId / 32];
const u32 pready = taskset->pending_ready.values[taskId / 32];
vm::reservation_op(vm::unsafe_ptr_cast<spurs_taskset_signal_op>(taskset), [&](spurs_taskset_signal_op& op)
{
const u32 signalled = op.signalled[taskId / 32];
const u32 running = op.running[taskId / 32];
const u32 ready = op.ready[taskId / 32];
const u32 waiting = op.waiting[taskId / 32];
const u32 enabled = op.enabled[taskId / 32];
const u32 pready = op.pending_ready[taskId / 32];
const u32 mask = (1u << 31) >> (taskId % 32);
if ((running & waiting) || (ready & pready) ||
((signalled | waiting | pready | running | ready) & ~enabled) || !(enabled & mask))
((signalled | waiting | pready | running | ready) & ~enabled) || !(enabled & mask))
{
// Error conditions:
// 1) Cannot have a waiting bit and running bit set at the same time
@ -3978,18 +4085,13 @@ s32 _cellSpursSendSignal(ppu_thread& ppu, vm::ptr<CellSpursTaskset> taskset, u32
// 3) Any disabled bit in enabled mask must be not set
// 4) Specified task must be enabled
signal = -1;
}
else
{
signal = !!(~signalled & waiting & mask);
signalled |= mask;
return false;
}
if (ppu_stwcx(ppu, addr, signalled))
{
break;
}
}
signal = !!(~signalled & waiting & mask);
op.signalled[taskId / 32] = signalled | mask;
return true;
});
switch (signal)
{
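The signal path addresses each task through a 32-bit word index (taskId / 32) and an MSB-first mask ((1u << 31) >> (taskId % 32)), and only raises 'signal' for a task that is waiting and not yet signalled. A standalone sketch of that indexing, assuming a flat 128-task bitset:

#include <cstdint>

// 128 task slots stored MSB-first across four 32-bit words, as in the taskset bitsets above.
struct TaskBits
{
    uint32_t words[4]{};

    static uint32_t mask(uint32_t taskId) { return (1u << 31) >> (taskId % 32); }
    bool test(uint32_t taskId) const      { return (words[taskId / 32] & mask(taskId)) != 0; }
    void set(uint32_t taskId)             { words[taskId / 32] |= mask(taskId); }
};

// 'signal' above is raised only for a task that is waiting and not yet signalled;
// the signalled bit is then set in the same transaction.
bool signal_task(TaskBits& signalled, const TaskBits& waiting, uint32_t taskId)
{
    const bool wake = !signalled.test(taskId) && waiting.test(taskId);
    signalled.set(taskId);
    return wake;
}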
@ -4394,24 +4496,169 @@ s32 _spurs::check_job_chain_attribute(u32 sdkVer, vm::cptr<u64> jcEntry, u16 siz
return CELL_OK;
}
s32 cellSpursCreateJobChainWithAttribute()
s32 _spurs::create_job_chain(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::cptr<u64> jobChainEntry, u16 sizeJob
, u16 maxGrabbedJob, vm::cptr<u8[8]> prio, u32 maxContention, b8 autoReadyCount
, u32 tag1, u32 tag2, u32 HaltOnError, vm::cptr<char> name, u32 param_13, u32 param_14)
{
UNIMPLEMENTED_FUNC(cellSpurs);
const s32 sdkVer = _spurs::get_sdk_version();
jobChain->spurs = spurs;
jobChain->jmVer = sdkVer > 0x14ffff ? CELL_SPURS_JOB_REVISION_1 : CELL_SPURS_JOB_REVISION_0;
// Real hack in firmware
jobChain->val2F = Emu.GetTitleID() == "BLJM60093" ? 1 : 0;
jobChain->tag1 = static_cast<u8>(tag1);
jobChain->tag2 = static_cast<u8>(tag2);
jobChain->isHalted = false;
jobChain->maxGrabbedJob = maxGrabbedJob;
jobChain->pc = jobChainEntry;
auto as_job_error = [](s32 error) -> s32
{
switch (error + 0u)
{
case CELL_SPURS_POLICY_MODULE_ERROR_AGAIN: return CELL_SPURS_JOB_ERROR_AGAIN;
case CELL_SPURS_POLICY_MODULE_ERROR_INVAL: return CELL_SPURS_JOB_ERROR_INVAL;
case CELL_SPURS_POLICY_MODULE_ERROR_STAT: return CELL_SPURS_JOB_ERROR_STAT;
default: return error;
}
};
vm::var<CellSpursWorkloadAttribute> attr_wkl;
vm::var<u32> wid;
// TODO
if (auto err = _cellSpursWorkloadAttributeInitialize(ppu, +attr_wkl, 1, 0x330000, vm::null, 0, jobChain.addr(), prio, 1, maxContention))
{
return as_job_error(err);
}
ppu_execute<&cellSpursWorkloadAttributeSetName>(ppu, +attr_wkl, +vm::make_str("JobChain"), name);
if (auto err = ppu_execute<&cellSpursAddWorkloadWithAttribute>(ppu, spurs, +wid, +attr_wkl))
{
return as_job_error(err);
}
jobChain->cause = vm::null;
jobChain->error = 0;
jobChain->workloadId = *wid;
return CELL_OK;
}
s32 cellSpursCreateJobChain()
s32 cellSpursCreateJobChainWithAttribute(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::ptr<CellSpursJobChainAttribute> attr)
{
UNIMPLEMENTED_FUNC(cellSpurs);
cellSpurs.warning("cellSpursCreateJobChainWithAttribute(spurs=*0x%x, jobChain=*0x%x, attr=*0x%x)", spurs, jobChain, attr);
if (!attr)
return CELL_SPURS_JOB_ERROR_NULL_POINTER;
if (!attr.aligned())
return CELL_SPURS_JOB_ERROR_ALIGN;
const u64 prio = std::bit_cast<u64>(attr->priorities);
if (auto err = _spurs::check_job_chain_attribute(attr->sdkVer, attr->jobChainEntry, attr->sizeJobDescriptor, attr->maxGrabbedJob, prio, attr->maxContention
, attr->autoSpuCount, attr->tag1, attr->tag2, attr->isFixedMemAlloc, attr->maxSizeJobDescriptor, attr->initSpuCount))
{
return err;
}
if (!jobChain || !spurs)
return CELL_SPURS_JOB_ERROR_NULL_POINTER;
if (!jobChain.aligned() || !spurs.aligned())
return CELL_SPURS_JOB_ERROR_ALIGN;
std::memset(jobChain.get_ptr(), 0, 0x110);
// Only allowed revisions in this function
if (auto ver = attr->jmVer; ver != CELL_SPURS_JOB_REVISION_2 && ver != CELL_SPURS_JOB_REVISION_3)
{
return CELL_SPURS_JOB_ERROR_INVAL;
}
jobChain->val2C = +attr->isFixedMemAlloc << 7 | (((attr->maxSizeJobDescriptor - 0x100) / 128 & 7) << 4);
if (auto err = _spurs::create_job_chain(ppu, spurs, jobChain, attr->jobChainEntry, attr->sizeJobDescriptor
, attr->maxGrabbedJob, attr.ptr(&CellSpursJobChainAttribute::priorities), attr->maxContention, attr->autoSpuCount
, attr->tag1, attr->tag2, attr->haltOnError, attr->name, 0, 0))
{
return err;
}
jobChain->initSpuCount = attr->initSpuCount;
jobChain->jmVer = attr->jmVer;
jobChain->sdkVer = attr->sdkVer;
jobChain->jobMemoryCheck = +attr->jobMemoryCheck << 1;
return CELL_OK;
}
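val2C packs two attribute-derived values into one byte: bit 7 carries isFixedMemAlloc and bits 4..6 encode how many 128-byte steps maxSizeJobDescriptor lies above 0x100. A worked standalone sketch of that packing; the field meaning is inferred from the expression above, so treat it as an assumption:

#include <cstdint>

// Same packing as the val2C expression: bit 7 = fixed-memory-allocation flag,
// bits 4..6 = (maxSizeJobDescriptor - 0x100) / 128, clamped to 3 bits.
constexpr uint8_t pack_val2C(bool isFixedMemAlloc, uint16_t maxSizeJobDescriptor)
{
    const uint32_t steps = ((maxSizeJobDescriptor - 0x100u) / 128u) & 7u;
    return static_cast<uint8_t>((isFixedMemAlloc ? 1u : 0u) << 7 | steps << 4);
}

// Example: a 0x180-byte descriptor with a fixed allocator gives one 128-byte step, so val2C = 0x90.
static_assert(pack_val2C(true, 0x180) == 0x90, "bit 7 set, one 128-byte step in bits 4..6");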
s32 cellSpursJoinJobChain()
s32 cellSpursCreateJobChain(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursJobChain> jobChain, vm::cptr<u64> jobChainEntry, u16 sizeJobDescriptor
, u16 maxGrabbedJob, vm::cptr<u8[8]> priorities, u32 maxContention, b8 autoReadyCount, u32 tag1, u32 tag2)
{
UNIMPLEMENTED_FUNC(cellSpurs);
cellSpurs.warning("cellSpursCreateJobChain(spurs=*0x%x, jobChain=*0x%x, jobChainEntry=*0x%x, sizeJobDescriptor=0x%x"
", maxGrabbedJob=0x%x, priorities=*0x%x, maxContention=%u, autoReadyCount=%s, tag1=%u, %u)", spurs, jobChain, jobChainEntry, sizeJobDescriptor
, maxGrabbedJob, priorities, maxContention, autoReadyCount, tag1, tag2);
const u64 prio = std::bit_cast<u64>(*priorities);
if (auto err = _spurs::check_job_chain_attribute(UINT32_MAX, jobChainEntry, sizeJobDescriptor, maxGrabbedJob, prio, maxContention
, autoReadyCount, tag1, tag2, 0, 0, 0))
{
return err;
}
std::memset(jobChain.get_ptr(), 0, 0x110);
if (auto err = _spurs::create_job_chain(ppu, spurs, jobChain, jobChainEntry, sizeJobDescriptor, maxGrabbedJob, priorities
, maxContention, autoReadyCount, tag1, tag2, 0, vm::null, 0, 0))
{
return err;
}
return CELL_OK;
}
s32 cellSpursJoinJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain)
{
cellSpurs.trace("cellSpursJoinJobChain(jobChain=*0x%x)", jobChain);
if (!jobChain)
return CELL_SPURS_JOB_ERROR_NULL_POINTER;
if (!jobChain.aligned())
return CELL_SPURS_JOB_ERROR_ALIGN;
const u32 wid = jobChain->workloadId;
const auto spurs = +jobChain->spurs;
if (wid >= CELL_SPURS_MAX_WORKLOAD2)
return CELL_SPURS_JOB_ERROR_INVAL;
auto as_job_error = [](s32 error) -> s32
{
switch (error + 0u)
{
case CELL_SPURS_POLICY_MODULE_ERROR_STAT: return CELL_SPURS_JOB_ERROR_STAT;
default: return error;
}
};
if (auto err = ppu_execute<&cellSpursWaitForWorkloadShutdown>(ppu, +jobChain->spurs, wid))
{
return as_job_error(err);
}
if (auto err = ppu_execute<&cellSpursRemoveWorkload>(ppu, +jobChain->spurs, wid))
{
// Returned as is
return err;
}
jobChain->workloadId = CELL_SPURS_MAX_WORKLOAD2;
return jobChain->error;
}
s32 cellSpursKickJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain, u8 numReadyCount)
{
cellSpurs.trace("cellSpursKickJobChain(jobChain=*0x%x, numReadyCount=0x%x)", jobChain, numReadyCount);
@ -4428,7 +4675,7 @@ s32 cellSpursKickJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain,
if (wid >= CELL_SPURS_MAX_WORKLOAD2)
return CELL_SPURS_JOB_ERROR_INVAL;
if (jobChain->val2D > 1)
if (jobChain->jmVer > CELL_SPURS_JOB_REVISION_1)
return CELL_SPURS_JOB_ERROR_PERM;
if (jobChain->autoReadyCount)
@ -4462,7 +4709,7 @@ s32 _cellSpursJobChainAttributeInitialize(u32 jmRevsion, u32 sdkRevision, vm::pt
const u64 prio = std::bit_cast<u64>(*priorityTable);
if (auto err = _spurs::check_job_chain_attribute(sdkRevision, jobChainEntry, sizeJobDescriptor, maxGrabbedJob, prio, maxContention
, autoRequestSpuCount, tag1, tag2, isFixedMemAlloc, maxSizeJobDescriptor, initialRequestSpuCount))
, autoRequestSpuCount, tag1, tag2, isFixedMemAlloc, maxSizeJobDescriptor, initialRequestSpuCount))
{
return err;
}
@ -4735,7 +4982,7 @@ s32 cellSpursJobGuardNotify(ppu_thread& ppu, vm::ptr<CellSpursJobGuard> jobGuard
auto jobChain = +jobGuard->jobChain;
if (jobChain->val2D <= 1)
if (jobChain->jmVer <= CELL_SPURS_JOB_REVISION_1)
{
ppu_execute<&cellSpursKickJobChain>(ppu, jobChain, static_cast<u8>(jobGuard->requestSpuCount));
}
@ -4780,7 +5027,7 @@ s32 cellSpursRunJobChain(ppu_thread& ppu, vm::ptr<CellSpursJobChain> jobChain)
if (wid >= CELL_SPURS_MAX_WORKLOAD2)
return CELL_SPURS_JOB_ERROR_INVAL;
if (jobChain->val2D <= 1)
if (jobChain->jmVer <= CELL_SPURS_JOB_REVISION_1)
return CELL_SPURS_JOB_ERROR_PERM;
const auto spurs = +jobChain->spurs;


@ -277,6 +277,14 @@ enum CellSpursJobOpcode : u64
CELL_SPURS_JOB_OPCODE_JTS = 0x800000000ull | CELL_SPURS_JOB_OPCODE_LWSYNC,
};
enum CellSpursJobChainRevision : u32
{
CELL_SPURS_JOB_REVISION_0 = 0,
CELL_SPURS_JOB_REVISION_1 = 1,
CELL_SPURS_JOB_REVISION_2 = 2,
CELL_SPURS_JOB_REVISION_3 = 3,
};
// Event flag constants
enum SpursEventFlagConstants
{
@ -446,23 +454,30 @@ struct alignas(128) CellSpursJobChain
u8 unk0[0x3]; // 0x20
b8 isHalted; // 0x23
b8 autoReadyCount; // 0x24
u8 unk1[0x7]; // 0x25
u8 unk1[0x3]; // 0x25
u8 initSpuCount; // 0x28
u8 unk5; // 0x29
u8 tag1; // 0x2A
u8 tag2; // 0x2B
u8 val2C; // 0x2C
u8 val2D; // 0x2D
u8 jmVer; // 0x2D
u8 val2E; // 0x2E
u8 val2F; // 0x2F
atomic_be_t<u64> urgentCmds[4]; // 0x30
u8 unk2[0x22]; // 0x50
u8 unk2[0x20]; // 0x50
be_t<u16> sizeJobDescriptor; // 0x70
atomic_be_t<u16> maxGrabbedJob; // 0x72
be_t<u32> workloadId; // 0x74
vm::bptr<CellSpurs, u64> spurs; // 0x78
be_t<s32> error; // 0x80
be_t<u32> unk3; // 0x84
vm::bptr<void, u64> cause; // 0x88
u8 unk4[0x8]; // 0x90
be_t<u32> sdkVer; // 0x90
u8 jobMemoryCheck; // 0x94 (unlike Attribute::jobMemoryCheck this is not a boolean but appears to be a bitset)
u8 unk4[0x3]; // 0x95
vm::bptr<CellSpursJobChainExceptionEventHandler, u64> exceptionEventHandler; // 0x98
vm::bptr<void, u64> exceptionEventHandlerArgument; // 0xA0
u8 unk5[0x100 - 0xA8];
u8 unk6[0x100 - 0xA8];
};
struct alignas(128) CellSpursJobChain_x00
@ -510,7 +525,7 @@ struct alignas(8) CellSpursJobChainAttribute
be_t<u32> sdkVer; // 0x04
vm::bcptr<u64> jobChainEntry; // 0x08
be_t<u16> sizeJobDescriptor; // 0x0C
be_t<u32> maxGrabbedJob; // 0x0E
be_t<u16> maxGrabbedJob; // 0x0E
u8 priorities[8]; // 0x10
be_t<u32> maxContention; // 0x18
b8 autoSpuCount; // 0x1C
@ -584,7 +599,7 @@ struct alignas(128) CellSpurs
{
u8 unk0[0x20]; // 0x00 - SPU exception handler 0x08 - SPU exception handler args
be_t<u64> sem; // 0x20
be_t<u32> x28; // 0x28
atomic_be_t<u32> x28; // 0x28
be_t<u32> x2C; // 0x2C
vm::bptr<CellSpursShutdownCompletionEventHook, u64> hook; // 0x30
vm::bptr<void, u64> hookArg; // 0x38
@ -729,6 +744,11 @@ struct alignas(128) CellSpurs
_sub_str4 wklH2[0x10]; // 0x1A00
u8 unknown_[0x2000 - 0x1B00];
u32 max_workloads() const
{
return (flags1 & SF1_32_WORKLOADS ? CELL_SPURS_MAX_WORKLOAD2 : CELL_SPURS_MAX_WORKLOAD);
}
atomic_t<SpursWorkloadState>& wklState(u32 wid)
{
if (wid & 0x10)
@ -777,6 +797,18 @@ struct alignas(128) CellSpurs
}
}
_sub_str1& wklSyncInfo(u32 wid)
{
if (wid & 0x10)
{
return wklF2[wid & 0xf];
}
else
{
return wklF1[wid & 0xf];
}
}
_sub_str4& wklName(u32 wid)
{
if (wid & 0x10)
@ -858,6 +890,33 @@ struct alignas(128) CellSpursEventFlag
CHECK_SIZE_ALIGN(CellSpursEventFlag, 128, 128);
struct alignas(128) CellSpursEventFlag_x00
{
struct alignas(8) ControlSyncVar
{
be_t<u16> events; // 0x00 Event bits
be_t<u16> spuTaskPendingRecv; // 0x02 A bit is set to 1 when the conditions of the SPU task using the slot are met and back to 0 when the SPU task unblocks
be_t<u16> ppuWaitMask; // 0x04 Wait mask for blocked PPU thread
u8 ppuWaitSlotAndMode; // 0x06 Top 4 bits: Wait slot number of the blocked PPU threads, Bottom 4 bits: Wait mode of the blocked PPU thread
u8 ppuPendingRecv; // 0x07 Set to 1 when the blocked PPU thread's conditions are met and back to 0 when the PPU thread is unblocked
};
ControlSyncVar ctrl; // 0x00
be_t<u16> spuTaskUsedWaitSlots; // 0x08 A bit is set to 1 if the wait slot corresponding to the bit is used by an SPU task and 0 otherwise
be_t<u16> spuTaskWaitMode; // 0x0A A bit is set to 1 if the wait mode for the SPU task corresponding to the bit is AND and 0 otherwise
u8 spuPort; // 0x0C
u8 isIwl; // 0x0D
u8 direction; // 0x0E
u8 clearMode; // 0x0F
be_t<u16> spuTaskWaitMask[16]; // 0x10 Wait mask for blocked SPU tasks
be_t<u16> pendingRecvTaskEvents[16]; // 0x30 The value of event flag when the wait condition for the thread/task was met
u8 waitingTaskId[16]; // 0x50 Task id of waiting SPU threads
u8 waitingTaskWklId[16]; // 0x60 Workload id of waiting SPU threads
be_t<u64> addr; // 0x70
be_t<u32> eventPortId; // 0x78
be_t<u32> eventQueueId; // 0x7C
};
using CellSpursLFQueue = CellSyncLFQueue;
union CellSpursTaskArgument
@ -997,6 +1056,39 @@ struct alignas(128) CellSpursTaskset
CHECK_SIZE_ALIGN(CellSpursTaskset, 128 * 50, 128);
struct alignas(128) spurs_taskset_signal_op
{
be_t<u32> running[4]; // 0x00
be_t<u32> ready[4]; // 0x10
be_t<u32> pending_ready[4]; // 0x20
be_t<u32> enabled[4]; // 0x30
be_t<u32> signalled[4]; // 0x40
be_t<u32> waiting[4]; // 0x50
vm::bptr<CellSpurs, u64> spurs; // 0x60
be_t<u64> args; // 0x68
u8 enable_clear_ls; // 0x70
u8 x71; // 0x71
u8 wkl_flag_wait_task; // 0x72
u8 last_scheduled_task; // 0x73
be_t<u32> wid; // 0x74
be_t<u64> x78; // 0x78
};
struct alignas(128) spurs_wkl_state_op
{
SpursWorkloadState wklState1[0x10]; // 0x00
u8 wklStatus1[0x10]; // 0x10
u8 wklEvent1[0x10]; // 0x20
be_t<u32> wklEnabled; // 0x30
be_t<u32> wklMskB; // 0x34
u8 uns[0x5]; // 0x38
u8 sysSrvMsgUpdateWorkload; // 0x3D
u8 uns2[0x12]; // 0x3E
SpursWorkloadState wklState2[0x10]; // 0x50
u8 wklStatus2[0x10]; // 0x60
u8 wklEvent2[0x10]; // 0x70
};
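spurs_wkl_state_op mirrors the first 0x80 bytes of CellSpurs so a single reservation_op can touch the state, status, event, enabled and mask fields together. For overlay structs like this, the offset comments can be checked at compile time; a sketch with simplified stand-in types, using the offsets annotated above:

#include <cstddef>
#include <cstdint>

// Simplified stand-in mirroring the commented offsets of spurs_wkl_state_op.
struct wkl_state_mirror
{
    uint8_t  wklState1[16];           // 0x00
    uint8_t  wklStatus1[16];          // 0x10
    uint8_t  wklEvent1[16];           // 0x20
    uint32_t wklEnabled;              // 0x30
    uint32_t wklMskB;                 // 0x34
    uint8_t  uns[5];                  // 0x38
    uint8_t  sysSrvMsgUpdateWorkload; // 0x3D
    uint8_t  uns2[0x12];              // 0x3E
    uint8_t  wklState2[16];           // 0x50
    uint8_t  wklStatus2[16];          // 0x60
    uint8_t  wklEvent2[16];           // 0x70
};

static_assert(offsetof(wkl_state_mirror, wklEnabled) == 0x30, "layout must match the CellSpurs block");
static_assert(offsetof(wkl_state_mirror, wklState2) == 0x50, "layout must match the CellSpurs block");
static_assert(sizeof(wkl_state_mirror) == 0x80, "whole block fits in one 128-byte reservation");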
struct alignas(128) CellSpursTaskset2
{
struct TaskInfo