SPURS: Implement some portions of taskset pm
commit 61342946a4
parent d8bed3b0ce
@@ -1373,10 +1373,7 @@ private:
 	}
 	void FSCRWR(u32 rt, u32 ra)
 	{
-		CPU.FPSCR._u32[3] = CPU.GPR[ra]._u32[3] & 0x00000F07;
-		CPU.FPSCR._u32[2] = CPU.GPR[ra]._u32[2] & 0x00003F07;
-		CPU.FPSCR._u32[1] = CPU.GPR[ra]._u32[1] & 0x00003F07;
-		CPU.FPSCR._u32[0] = CPU.GPR[ra]._u32[0] & 0x00000F07;
+		CPU.FPSCR.Write(CPU.GPR[ra]);
 	}
 	void DFTSV(u32 rt, u32 ra, s32 i7)
 	{
@@ -248,6 +248,15 @@ public:
 	{
 		_u32[1+slice] |= exceptions;
 	}
+
+	// Write the FPSCR
+	void Write(u128 & r)
+	{
+		_u32[3] = r._u32[3] & 0x00000F07;
+		_u32[2] = r._u32[2] & 0x00003F07;
+		_u32[1] = r._u32[1] & 0x00003F07;
+		_u32[0] = r._u32[0] & 0x00000F07;
+	}
 };

 union SPU_SNRConfig_hdr
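
Note: the new Write() helper (and FSCRWR, which now calls it) keeps only the FPSCR bits that exist in each 32-bit slice: 0x00000F07 for slices 0 and 3, 0x00003F07 for slices 1 and 2. A minimal standalone sketch of that masking, using plain C++ types rather than the emulator's u128/CPU classes:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the emulator's 4 x 32-bit FPSCR storage.
    struct FpscrSketch {
        uint32_t u32[4]{};

        // Mirrors the masking in Write(): slices 0 and 3 keep 0x00000F07,
        // slices 1 and 2 keep 0x00003F07; all other bits are forced to zero.
        void write(const uint32_t r[4]) {
            u32[3] = r[3] & 0x00000F07;
            u32[2] = r[2] & 0x00003F07;
            u32[1] = r[1] & 0x00003F07;
            u32[0] = r[0] & 0x00000F07;
        }
    };

    int main() {
        const uint32_t gpr[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
        FpscrSketch fpscr;
        fpscr.write(gpr);
        // Prints: 00000f07 00003f07 00003f07 00000f07
        std::printf("%08x %08x %08x %08x\n", fpscr.u32[0], fpscr.u32[1], fpscr.u32[2], fpscr.u32[3]);
    }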
@@ -40,6 +40,7 @@ enum
 {
 	CELL_SPURS_TASK_ERROR_AGAIN = 0x80410901,
 	CELL_SPURS_TASK_ERROR_INVAL = 0x80410902,
+	CELL_SPURS_TASK_ERROR_NOSYS = 0x80410903,
 	CELL_SPURS_TASK_ERROR_NOMEM = 0x80410904,
 	CELL_SPURS_TASK_ERROR_SRCH = 0x80410905,
 	CELL_SPURS_TASK_ERROR_NOEXEC = 0x80410907,
@@ -213,6 +214,17 @@ enum SpursTaskConstants
 	CELL_SPURS_TASK_EXECUTION_CONTEXT_SIZE = 1024,
 	CELL_SPURS_TASKSET_PM_ENTRY_ADDR = 0xA00,
 	CELL_SPURS_TASKSET_PM_SYSCALL_ADDR = 0xA70,
+
+	// Task syscall numbers
+	CELL_SPURS_TASK_SYSCALL_EXIT = 0,
+	CELL_SPURS_TASK_SYSCALL_YIELD = 1,
+	CELL_SPURS_TASK_SYSCALL_WAIT_SIGNAL = 2,
+	CELL_SPURS_TASK_SYSCALL_POLL = 3,
+	CELL_SPURS_TASK_SYSCALL_RECV_WKL_FLAG = 4,
+
+	// Task poll status
+	CELL_SPURS_TASK_POLL_FOUND_TASK = 1,
+	CELL_SPURS_TASK_POLL_FOUND_WORKLOAD = 2,
 };

 enum CellSpursEventFlagWaitMode
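
Note: the new syscall numbers are consumed by spursTasksetProcessSyscall later in this commit, whose comment says that bit 0x10 marks the "2" variant of a syscall (e.g. cellSpursYield2) and that the low nibble selects the handler. A small illustrative sketch of that decoding, not taken from the emulator:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // The low nibble selects one of the CELL_SPURS_TASK_SYSCALL_* values above;
        // bit 0x10 marks the "2" variant, for which the handler added later in this
        // commit skips the DMA wait and does not mask the number.
        for (const uint32_t raw : { 0x01u, 0x11u }) {
            uint32_t syscallNum = raw;
            const bool secondVariant = (syscallNum & 0x10) != 0;
            if (!secondVariant) {
                // spursTasksetProcessSyscall waits for outstanding DMA at this point,
                // then keeps only the low nibble.
                syscallNum &= 0x0F;
            }
            std::printf("raw=0x%02x -> variant2=%d, dispatched as 0x%02x\n",
                        (unsigned)raw, (int)secondVariant, (unsigned)syscallNum);
        }
    }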
@@ -627,10 +639,10 @@ struct CellSpursTaskset

 	struct TaskInfo
 	{
-		CellSpursTaskArgument args;
-		vm::bptr<u64, 1, u64> elf_addr;
-		be_t<u64> context_save_storage_and_alloc_ls_blocks; // This is (context_save_storage_addr | allocated_ls_blocks)
-		CellSpursTaskLsPattern ls_pattern;
+		CellSpursTaskArgument args; // 0x00
+		vm::bptr<u64, 1, u64> elf_addr; // 0x10
+		be_t<u64> context_save_storage_and_alloc_ls_blocks; // 0x18 This is (context_save_storage_addr | allocated_ls_blocks)
+		CellSpursTaskLsPattern ls_pattern; // 0x20
 	};

 	static_assert(sizeof(TaskInfo) == 0x30, "Wrong TaskInfo size");
@@ -656,7 +668,7 @@ struct CellSpursTaskset
 		u8 x72; // 0x72
 		u8 last_scheduled_task; // 0x73
 		be_t<u32> wid; // 0x74
-		u8 unk1[8]; // 0x78
+		be_t<u64> x78; // 0x78
 		TaskInfo task_info[128]; // 0x80
 		vm::bptr<u64, 1, u64> exception_handler; // 0x1880
 		vm::bptr<u64, 1, u64> exception_handler_arg; // 0x1888
@@ -765,7 +777,7 @@ struct CellSpursTaskset2
 		u8 x72; // 0x72
 		u8 last_scheduled_task; // 0x73
 		be_t<u32> wid; // 0x74
-		u8 unk1[8]; // 0x78
+		be_t<u64> x78; // 0x78
 		TaskInfo task_info[128]; // 0x80
 		vm::bptr<u64, 1, u64> exception_handler; // 0x1880
 		vm::bptr<u64, 1, u64> exception_handler_arg; // 0x1888
@@ -773,9 +785,9 @@ struct CellSpursTaskset2
 		u32 unk2; // 0x1894
 		u32 event_flag_id1; // 0x1898
 		u32 event_flag_id2; // 0x189C
-		u8 unk3[0xE8]; // 0x18A0
-		u64 task_exit_code[256]; // 0x1988
-		u8 unk4[0x778]; // 0x2188
+		u8 unk3[0x1980 - 0x18A0]; // 0x18A0
+		be_t<u128> task_exit_code[128]; // 0x1980
+		u8 unk4[0x2900 - 0x2180]; // 0x2180
 	} m;

 	static_assert(sizeof(_CellSpursTaskset2) == size, "Wrong _CellSpursTaskset2 size");
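
Note: the replacement fields keep the surrounding offsets consistent: unk3 now ends exactly at 0x1980, where the 128 x 16-byte task_exit_code array starts, and unk4 pads the structure out to 0x2900. A quick standalone check of that arithmetic (offsets taken from the comments above; the names are only for this sketch):

    #include <cstddef>

    constexpr std::size_t unk3_offset      = 0x18A0;
    constexpr std::size_t unk3_size        = 0x1980 - 0x18A0;
    constexpr std::size_t exit_code_offset = 0x1980;
    constexpr std::size_t exit_code_size   = 128 * 16;   // 128 entries of be_t<u128>
    constexpr std::size_t unk4_offset      = 0x2180;
    constexpr std::size_t unk4_size        = 0x2900 - 0x2180;

    static_assert(unk3_offset + unk3_size == exit_code_offset, "unk3 must end where task_exit_code begins");
    static_assert(exit_code_offset + exit_code_size == unk4_offset, "task_exit_code must end where unk4 begins");
    static_assert(unk4_offset + unk4_size == 0x2900, "unk4 must end at 0x2900");

    int main() {}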
@@ -939,8 +951,8 @@ struct SpursTasksetPmMgmtData
 	be_t<u32> lowestLoadSegmentAddr; // 0x2FBC
 	be_t<u64> x2FC0; // 0x2FC0
 	be_t<u64> x2FC8; // 0x2FC8
-	be_t<u32> x2FD0; // 0x2FD0
-	be_t<u32> taskExitCode; // 0x2FD4
+	be_t<u32> taskExitCode; // 0x2FD0
+	be_t<u32> x2FD4; // 0x2FD4
 	u8 x2FD8[0x3000 - 0x2FD8]; // 0x2FD8
 };

@@ -18,7 +18,7 @@ void cellSpursModuleExit(SPUThread & spu);
 bool spursDma(SPUThread & spu, u32 cmd, u64 ea, u32 lsa, u32 size, u32 tag);
 u32 spursDmaGetCompletionStatus(SPUThread & spu, u32 tagMask);
 u32 spursDmaWaitForCompletion(SPUThread & spu, u32 tagMask, bool waitForAll = true);
-void spursHalt();
+void spursHalt(SPUThread & spu);

 //
 // SPURS Kernel functions
@@ -44,9 +44,10 @@ bool spursSysServiceWorkloadEntry(SPUThread & spu);
 //
 bool spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 * isWaiting);
 void spursTasksetExit(SPUThread & spu);
+void spursTasksetStartTask(SPUThread & spu, CellSpursTaskArgument & taskArgs);
 void spursTasksetDispatch(SPUThread & spu);
 void spursTasksetProcessPollStatus(SPUThread & spu, u32 pollStatus);
-bool spursTasksetShouldYield(SPUThread & spu);
+bool spursTasksetPollStatus(SPUThread & spu);
 void spursTasksetInit(SPUThread & spu, u32 pollStatus);
 void spursTasksetResumeTask(SPUThread & spu);
 bool spursTasksetEntry(SPUThread & spu);
@@ -1018,6 +1019,8 @@ bool spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32
+	// TODO: Implement cases
+	s32 delta = 0;
 	switch (request + 1) {
 	case -1:
 		break;
 	case 0:
 		break;
 	case 1:
@@ -1033,7 +1036,7 @@ bool spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32
 	case 6:
 		break;
 	default:
-		assert(0);
+		spursHalt(spu);
 		break;
 	}

@@ -1076,9 +1079,24 @@ void spursTasksetExit(SPUThread & spu) {
 	cellSpursModuleExit(spu);
 }

+void spursTasksetStartTask(SPUThread & spu, CellSpursTaskArgument & taskArgs) {
+	auto mgmt = vm::get_ptr<SpursTasksetPmMgmtData>(spu.ls_offset + 0x2700);
+	auto taskset = vm::get_ptr<CellSpursTaskset>(spu.ls_offset + 0x2700);
+
+	spu.GPR[2].clear();
+	spu.GPR[3] = u128::from64(taskArgs.u64[0], taskArgs.u64[1]);
+	spu.GPR[4]._u64[1] = taskset->m.args;
+	spu.GPR[4]._u64[0] = taskset->m.spurs.addr();
+	for (auto i = 5; i < 128; i++) {
+		spu.GPR[i].clear();
+	}
+
+	spu.SetBranch(mgmt->savedContextLr.value()._u32[3]);
+}
+
 void spursTasksetDispatch(SPUThread & spu) {
-	auto mgmt = vm::get_ptr<SpursTasksetPmMgmtData>(spu.ls_offset + 0x2700);
+	auto kernelMgmt = vm::get_ptr<SpursKernelMgmtData>(spu.ls_offset + 0x100);
+	auto mgmt = vm::get_ptr<SpursTasksetPmMgmtData>(spu.ls_offset + 0x2700);
+	auto taskset = vm::get_ptr<CellSpursTaskset>(spu.ls_offset + 0x2700);

 	u32 taskId;
 	u32 isWaiting;
@@ -1089,7 +1107,13 @@ void spursTasksetDispatch(SPUThread & spu) {
 	}

 	mgmt->taskId = taskId;
-	u64 elfAddr = mgmt->taskset->m.task_info[taskId].elf_addr.addr() & 0xFFFFFFFFFFFFFFF8ull;
+
+	// DMA in the task info for the selected task
+	spursDma(spu, MFC_GET_CMD, mgmt->taskset.addr() + offsetof(CellSpursTaskset, m.task_info[taskId]), 0x2780/*LSA*/, sizeof(CellSpursTaskset::TaskInfo), mgmt->dmaTagId);
+	spursDmaWaitForCompletion(spu, 1 << mgmt->dmaTagId);
+	auto taskInfo = vm::get_ptr<CellSpursTaskset::TaskInfo>(spu.ls_offset + 0x2780);
+	auto elfAddr = taskInfo->elf_addr.addr().value();
+	taskInfo->elf_addr.set(taskInfo->elf_addr.addr() & 0xFFFFFFFFFFFFFFF8ull);

 	// Trace - Task: Incident=dispatch
 	CellSpursTracePacket pkt;
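
Note: elf_addr is 8-byte aligned, so the dispatch code above keeps the raw value in elfAddr and writes only the masked address back into taskInfo; the low bits are then tested as flags further down (elfAddr & 5, elfAddr & 2), with their exact meaning still marked TODO in the commit. A sketch of just the address/flag split, using an illustrative value:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t raw     = 0x0000000012345678ull | 0x5;  // example tagged value
        const uint64_t address = raw & 0xFFFFFFFFFFFFFFF8ull;  // what gets written back
        const uint64_t lowBits = raw & 0x7;                    // what the later checks look at

        std::printf("address = 0x%llx, low bits = 0x%llx\n",
                    (unsigned long long)address, (unsigned long long)lowBits);
        std::printf("elfAddr & 5 = %llu, elfAddr & 2 = %llu\n",
                    (unsigned long long)(raw & 5), (unsigned long long)(raw & 2));
    }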
@@ -1097,37 +1121,84 @@ void spursTasksetDispatch(SPUThread & spu) {
 	pkt.header.tag = CELL_SPURS_TRACE_TAG_TASK;
 	pkt.data.task.incident = CELL_SPURS_TRACE_TASK_DISPATCH;
 	pkt.data.task.taskId = taskId;
-	cellSpursModulePutTrace(&pkt, 0x1F);
+	cellSpursModulePutTrace(&pkt, CELL_SPURS_KERNEL_DMA_TAG_ID);

 	if (isWaiting == 0) {
-	}
-
-	if (mgmt->taskset->m.enable_clear_ls) {
-		memset(vm::get_ptr<void>(spu.ls_offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP);
-	}
-	// TODO: Load elf
-	// TODO: halt if rc of Load elf != CELL_OK
-
-	// If the entire LS is saved then there is no need to load the ELF as it will be be saved in the context save area
-	if (mgmt->taskset->m.task_info[taskId].ls_pattern.u64[0] != 0xFFFFFFFFFFFFFFFFull ||
-		(mgmt->taskset->m.task_info[taskId].ls_pattern.u64[0] | 0xFC00000000000000ull) != 0xFFFFFFFFFFFFFFFFull) {
-		// Load the ELF
-		// TODO: Load ELF
-	}
-	spursDmaWaitForCompletion(spu, 1 << mgmt->dmaTagId);
-
-	// Load save context from main memory to LS
-	u64 context_save_storage = mgmt->taskset->m.task_info[taskId].context_save_storage_and_alloc_ls_blocks & 0xFFFFFFFFFFFFFF80ull;
-	for (auto i = 6; i < 128; i++) {
-		bool shouldLoad = mgmt->taskset->m.task_info[taskId].ls_pattern.u64[i < 64 ? 1 : 0] & (0x8000000000000000ull >> i) ? true : false;
-		if (shouldLoad) {
-			memcpy(vm::get_ptr<void>(spu.ls_offset + CELL_SPURS_TASK_TOP + ((i - 6) << 11)),
-				vm::get_ptr<void>((u32)context_save_storage + 0x400 + ((i - 6) << 11)), 0x800);
-		}
-	}
-
-	// Trace - GUID
-	memset(&pkt, 0, sizeof(pkt));
-	pkt.header.tag = CELL_SPURS_TRACE_TAG_GUID;
-	pkt.data.guid = 0; // TODO: Put GUID of taskId here
-	cellSpursModulePutTrace(&pkt, 0x1F);
+		// If we reach here it means that the task is being started and not being resumed
+		mgmt->lowestLoadSegmentAddr = CELL_SPURS_TASK_TOP;
+		mgmt->tasksetMgmtAddr = 0x2700;
+		mgmt->x2FC0 = 0;
+		mgmt->taskExitCode = isWaiting;
+		mgmt->x2FD4 = elfAddr & 5; // TODO: Figure this out
+
+		if ((elfAddr & 5) == 1) {
+			spursDma(spu, MFC_GET_CMD, mgmt->taskset.addr() + offsetof(CellSpursTaskset2, m.task_exit_code[taskId]), 0x2FC0/*LSA*/, 0x10/*size*/, mgmt->dmaTagId);
+		}
+
+		// Trace - GUID
+		memset(&pkt, 0, sizeof(pkt));
+		pkt.header.tag = CELL_SPURS_TRACE_TAG_GUID;
+		pkt.data.guid = 0; // TODO: Put GUID of taskId here
+		cellSpursModulePutTrace(&pkt, 0x1F);
+
+		if (elfAddr & 2) { // TODO: Figure this out
+			spu.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_STOP);
+			spu.Stop();
+			return;
+		}
+
+		spursTasksetStartTask(spu, taskInfo->args);
+	} else {
+		if (taskset->m.enable_clear_ls) {
+			memset(vm::get_ptr<void>(spu.ls_offset + CELL_SPURS_TASK_TOP), 0, CELL_SPURS_TASK_BOTTOM - CELL_SPURS_TASK_TOP);
+		}
+
+		// If the entire LS is saved then there is no need to load the ELF as it will be be saved in the context save area as well
+		if (taskInfo->ls_pattern.u64[0] != 0xFFFFFFFFFFFFFFFFull ||
+			(taskInfo->ls_pattern.u64[1] | 0xFC00000000000000ull) != 0xFFFFFFFFFFFFFFFFull) {
+			// Load the ELF
+			// TODO: Load ELF
+			// TODO: halt if rc of Load elf != CELL_OK
+		}
+
+		// Load saved context from main memory to LS
+		u64 contextSaveStorage = taskInfo->context_save_storage_and_alloc_ls_blocks & 0xFFFFFFFFFFFFFF80ull;
+		for (auto i = 6; i < 128; i++) {
+			bool shouldLoad = taskInfo->ls_pattern.u64[i < 64 ? 1 : 0] & (0x8000000000000000ull >> i) ? true : false;
+			if (shouldLoad) {
+				// TODO: Combine DMA requests for consecutive blocks into a single request
+				spursDma(spu, MFC_GET_CMD, contextSaveStorage + 0x400 + ((i - 6) << 11), CELL_SPURS_TASK_TOP + ((i - 6) << 11), 0x800/*size*/, mgmt->dmaTagId);
+			}
+		}
+
+		spursDmaWaitForCompletion(spu, 1 << mgmt->dmaTagId);
+
+		// Restore saved registers
+		spu.FPSCR.Write(mgmt->savedContextFpscr.value());
+		spu.WriteChannel(MFC_WrTagMask, u128::from32r(mgmt->savedWriteTagGroupQueryMask));
+		spu.WriteChannel(SPU_WrEventMask, u128::from32r(mgmt->savedSpuWriteEventMask));
+
+		// Trace - GUID
+		memset(&pkt, 0, sizeof(pkt));
+		pkt.header.tag = CELL_SPURS_TRACE_TAG_GUID;
+		pkt.data.guid = 0; // TODO: Put GUID of taskId here
+		cellSpursModulePutTrace(&pkt, 0x1F);
+
+		if (elfAddr & 2) { // TODO: Figure this out
+			spu.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_STOP);
+			spu.Stop();
+			return;
+		}
+
+		spu.GPR[3].clear();
+		spursTasksetResumeTask(spu);
+	}
 }

 void spursTasksetProcessPollStatus(SPUThread & spu, u32 pollStatus) {
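
Note: in the restore path above, each set bit of ls_pattern from bit 6 onwards selects one 0x800-byte (2 KB) block of local storage starting at CELL_SPURS_TASK_TOP, which is why the loop uses (i - 6) << 11; bits 0-5 would cover the first 0x3000 bytes, which presumably hold the SPURS kernel and this policy module. A simplified standalone model of that mapping (assumes CELL_SPURS_TASK_TOP = 0x3000, uses a made-up pattern, and writes the bit extraction out explicitly rather than copying it from the loop):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Assumed here: the task area starts right after the taskset PM data, which ends at 0x3000.
        const uint32_t CELL_SPURS_TASK_TOP = 0x3000;
        // Example 128-bit pattern: bits 6 and 7 set (MSB-first), i.e. the first two task blocks.
        const uint64_t pattern[2] = { 0x0000000000000000ull,   // bits 64..127
                                      0x0300000000000000ull }; // bits 0..63

        for (unsigned i = 6; i < 128; i++) {
            const uint64_t half = pattern[i < 64 ? 1 : 0];
            const bool set = (half >> (63 - (i % 64))) & 1;    // bit i, MSB-first within each half
            if (set) {
                const uint32_t lsAddr = CELL_SPURS_TASK_TOP + ((i - 6) << 11);
                std::printf("bit %3u -> load 0x800 bytes at LS 0x%05x\n", i, (unsigned)lsAddr);
            }
        }
    }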
@@ -1136,7 +1207,7 @@ void spursTasksetProcessPollStatus(SPUThread & spu, u32 pollStatus) {
 	}
 }

-bool spursTasksetShouldYield(SPUThread & spu) {
+bool spursTasksetPollStatus(SPUThread & spu) {
 	u32 pollStatus;

 	if (cellSpursModulePollStatus(spu, &pollStatus)) {
@@ -1179,6 +1250,99 @@ void spursTasksetResumeTask(SPUThread & spu) {
 	spu.SetBranch(spu.GPR[0]._u32[3]);
 }

+s32 spursTasksetProcessSyscall(SPUThread & spu, u32 syscallNum, u32 args) {
+	auto mgmt = vm::get_ptr<SpursTasksetPmMgmtData>(spu.ls_offset + 0x2700);
+	auto taskset = vm::get_ptr<CellSpursTaskset>(spu.ls_offset + 0x2700);
+
+	// If the 0x10 bit is set in syscallNum then its the 2nd version of the
+	// syscall (e.g. cellSpursYield2 instead of cellSpursYield) and so don't wait
+	// for DMA completion
+	if ((syscallNum & 0x10) == 0) {
+		spursDmaWaitForCompletion(spu, 0xFFFFFFFF);
+		syscallNum &= 0x0F;
+	}
+
+	s32 rc = 0;
+	u32 incident = 0;
+	switch (syscallNum) {
+	case CELL_SPURS_TASK_SYSCALL_EXIT:
+		if (mgmt->x2FD4 == 4 || (mgmt->x2FC0 & 0xFFFFFFFF) != 0) { // TODO: Figure this out
+			if (mgmt->x2FD4 != 4) {
+				spursTasksetProcessRequest(spu, 0, nullptr, nullptr);
+			}
+
+			auto a = mgmt->x2FD4 == 4 ? taskset->m.x78 : mgmt->x2FC0;
+			auto b = mgmt->x2FD4 == 4 ? 0 : mgmt->x2FC8;
+			// TODO: onTaskExit(a, mgmt->taskId, mgmt->taskExitCode, b)
+		}
+
+		incident = CELL_SPURS_TRACE_TASK_EXIT;
+		break;
+	case CELL_SPURS_TASK_SYSCALL_YIELD:
+		if (spursTasksetPollStatus(spu) || spursTasksetProcessRequest(spu, 3, nullptr, nullptr)) {
+			// If we reach here then it means that either another task can be scheduled or another workload can be scheduled
+			// Save the context of the current task
+			// TODO: rc = saveContext
+			if (rc == CELL_OK) {
+				spursTasksetProcessRequest(spu, 1, nullptr, nullptr);
+				incident = CELL_SPURS_TRACE_TASK_YIELD;
+			}
+		}
+		break;
+	case CELL_SPURS_TASK_SYSCALL_WAIT_SIGNAL:
+		if (spursTasksetProcessRequest(spu, -1, nullptr, nullptr)) {
+			// TODO: rc = saveContext
+			if (rc == CELL_OK) {
+				if (spursTasksetProcessRequest(spu, 2, nullptr, nullptr) == false) {
+					incident = CELL_SPURS_TRACE_TASK_WAIT;
+				}
+			}
+		}
+		break;
+	case CELL_SPURS_TASK_SYSCALL_POLL:
+		rc = spursTasksetPollStatus(spu) ? CELL_SPURS_TASK_POLL_FOUND_WORKLOAD : 0;
+		rc |= spursTasksetProcessRequest(spu, 3, nullptr, nullptr) ? CELL_SPURS_TASK_POLL_FOUND_TASK : 0;
+		break;
+	case CELL_SPURS_TASK_SYSCALL_RECV_WKL_FLAG:
+		if (args == 0) { // TODO: Figure this out
+			spursHalt(spu);
+		}
+
+		if (spursTasksetPollStatus(spu) || spursTasksetProcessRequest(spu, 4, nullptr, nullptr) != true) {
+			// TODO: rc = saveContext
+			if (rc == CELL_OK) {
+				spursTasksetProcessRequest(spu, 1, nullptr, nullptr);
+				incident = CELL_SPURS_TRACE_TASK_WAIT;
+			}
+		}
+		break;
+	default:
+		rc = CELL_SPURS_TASK_ERROR_NOSYS;
+		break;
+	}
+
+	if (incident) {
+		// Trace - TASK
+		CellSpursTracePacket pkt;
+		memset(&pkt, 0, sizeof(pkt));
+		pkt.header.tag = CELL_SPURS_TRACE_TAG_TASK;
+		pkt.data.task.incident = incident;
+		pkt.data.task.taskId = mgmt->taskId;
+		cellSpursModulePutTrace(&pkt, mgmt->dmaTagId);
+
+		// Clear the GUID of the task
+		memset(vm::get_ptr<void>(spu.ls_offset + mgmt->lowestLoadSegmentAddr), 0, 0x10);
+
+		if (spursTasksetPollStatus(spu)) {
+			spursTasksetExit(spu);
+		}
+
+		spursTasksetDispatch(spu);
+	}
+
+	return rc;
+}
+
 bool spursTasksetEntry(SPUThread & spu) {
 	auto mgmt = vm::get_ptr<SpursTasksetPmMgmtData>(spu.ls_offset + 0x2700);
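
Note: the POLL case above composes its return value from the two poll-status bits added earlier in this commit; CELL_SPURS_TASK_POLL_FOUND_TASK (1) and CELL_SPURS_TASK_POLL_FOUND_WORKLOAD (2) can both be set at once. A hypothetical caller-side decoding sketch, with the constants copied and the names shortened:

    #include <cstdio>

    // Values copied from the CELL_SPURS_TASK_POLL_* constants added in this commit.
    enum {
        TASK_POLL_FOUND_TASK     = 1,
        TASK_POLL_FOUND_WORKLOAD = 2,
    };

    int main() {
        // Walk every possible combined value the POLL syscall could return.
        for (int rc = 0; rc <= 3; rc++) {
            std::printf("rc=%d: runnable task=%s, runnable workload=%s\n", rc,
                        (rc & TASK_POLL_FOUND_TASK) ? "yes" : "no",
                        (rc & TASK_POLL_FOUND_WORKLOAD) ? "yes" : "no");
        }
    }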
@@ -1188,6 +1352,7 @@ bool spursTasksetEntry(SPUThread & spu) {
 		auto arg = spu.GPR[4]._u64[1];
 		auto pollStatus = spu.GPR[5]._u32[3];

 		// Initialise memory and save args
 		memset(mgmt, 0, sizeof(*mgmt));
 		mgmt->taskset.set(arg);
 		memcpy(mgmt->moduleId, "SPURSTASK MODULE", 16);
@@ -1212,8 +1377,12 @@ bool spursTasksetEntry(SPUThread & spu) {
 			mgmt->savedContextR80ToR127[i] = spu.GPR[80 + i];
 		}

-		// TODO: Process syscall
-		spursTasksetResumeTask(spu);
+		spu.GPR[3]._u32[3] = spursTasksetProcessSyscall(spu, spu.GPR[3]._u32[3], spu.GPR[4]._u32[3]);
+
+		// Resume the previously executing task if the syscall did not cause a context switch
+		if (spu.m_is_branch == false) {
+			spursTasksetResumeTask(spu);
+		}
 	} else {
 		assert(0);
 	}