mirror of https://github.com/RPCS3/rpcs3.git
synced 2024-11-26 12:42:41 +01:00
commit fc97e3d2b8
@@ -314,32 +314,6 @@ public:
return pop(data, &no_wait);
}

void clear()
{
while (m_sync.atomic_op_sync(SQSVR_OK, [](squeue_sync_var_t& sync) -> u32
{
assert(sync.count <= sq_size);
assert(sync.position < sq_size);

if (sync.pop_lock || sync.push_lock)
{
return SQSVR_LOCKED;
}

sync.pop_lock = 1;
sync.push_lock = 1;
return SQSVR_OK;
}))
{
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}

m_sync.exchange({});
m_wcv.notify_one();
m_rcv.notify_one();
}

bool peek(T& data, u32 start_pos = 0, const volatile bool* do_exit = nullptr)
{
assert(start_pos < sq_size);
@@ -393,4 +367,93 @@ public:
return peek(data, start_pos, &no_wait);
}

class squeue_data_t
{
T* const m_data;
const u32 m_pos;
const u32 m_count;

squeue_data_t(T* data, u32 pos, u32 count)
: m_data(data)
, m_pos(pos)
, m_count(count)
{
}

public:
T& operator [] (u32 index)
{
assert(index < m_count);
index += m_pos;
index = index < sq_size ? index : index - sq_size;
return m_data[index];
}
};

void process(void(*proc)(squeue_data_t data))
{
u32 pos, count;

while (m_sync.atomic_op_sync(SQSVR_OK, [&pos, &count](squeue_sync_var_t& sync) -> u32
{
assert(sync.count <= sq_size);
assert(sync.position < sq_size);

if (sync.pop_lock || sync.push_lock)
{
return SQSVR_LOCKED;
}

pos = sync.position;
count = sync.count;
sync.pop_lock = 1;
sync.push_lock = 1;
return SQSVR_OK;
}))
{
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}

proc(squeue_data_t(m_data, pos, count));

m_sync.atomic_op([](squeue_sync_var_t& sync)
{
assert(sync.count <= sq_size);
assert(sync.position < sq_size);
assert(sync.pop_lock && sync.push_lock);
sync.pop_lock = 0;
sync.push_lock = 0;
});

m_wcv.notify_one();
m_rcv.notify_one();
}

void clear()
{
while (m_sync.atomic_op_sync(SQSVR_OK, [](squeue_sync_var_t& sync) -> u32
{
assert(sync.count <= sq_size);
assert(sync.position < sq_size);

if (sync.pop_lock || sync.push_lock)
{
return SQSVR_LOCKED;
}

sync.pop_lock = 1;
sync.push_lock = 1;
return SQSVR_OK;
}))
{
std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
}

m_sync.exchange({});
m_wcv.notify_one();
m_rcv.notify_one();
}
};
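Editor's sketch (not part of the commit): the new process() hook hands the caller a locked snapshot of the queue, and clear() uses the same locking discipline. A minimal illustration of how a caller might use them; the element type, queue size and function name below are assumptions, not taken from this diff:

// Illustrative only: dump_contents and squeue_t<int, 16> are hypothetical.
static void dump_contents(squeue_t<int, 16>::squeue_data_t data)
{
	// operator[] maps a logical index onto the circular buffer, wrapping at
	// sq_size, so data[0] is the oldest element captured by process().
	// The snapshot is valid only inside this callback: process() holds both
	// pop_lock and push_lock while it runs and releases them afterwards.
}

void example(squeue_t<int, 16>& q)
{
	q.process(dump_contents); // spins until both locks are acquired, then calls back
	q.clear();                // same lock acquisition, then resets m_sync and notifies waiters
}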
@@ -169,6 +169,7 @@ template<typename T> T ARMv7_instrs::Shift_C(T value, SRType type, s32 amount, b
case SRType_ASR: return ASR_C(value, amount, carry_out);
case SRType_ROR: return ROR_C(value, amount, carry_out);
case SRType_RRX: return RRX_C(value, carry_in, carry_out);
default: throw __FUNCTION__;
}
}

@@ -710,6 +711,7 @@ void ARMv7_instrs::B(ARMv7Thread* thr, const ARMv7_encoding type)
jump = 1 + 4 + sign<26, u32>((thr->code.data & 0xffffff) << 2);
break;
}
default: throw __FUNCTION__;
}

if (ConditionPassed(thr, cond))
@@ -1410,7 +1412,7 @@ void ARMv7_instrs::LSL_IMM(ARMv7Thread* thr, const ARMv7_encoding type)
if (ConditionPassed(thr, cond))
{
bool carry;
const u32 res = Shift_C(thr->read_gpr(m), SRType_LSL, shift_n, thr->APSR.C, carry);
const u32 res = Shift_C<u32>(thr->read_gpr(m), SRType_LSL, shift_n, thr->APSR.C, carry);
thr->write_gpr(d, res);
if (set_flags)
{
@@ -3664,7 +3664,14 @@ private:
void DCBZ(u32 ra, u32 rb)
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
auto const cache_line = vm::get_ptr<u8>(addr & ~127);
if ((u32)addr != addr)
{
LOG_ERROR(PPU, "%s(): invalid address (0x%llx)", __FUNCTION__, addr);
Emu.Pause();
return;
}

auto const cache_line = vm::get_ptr<u8>((u32)addr & ~127);
if (cache_line)
memset(cache_line, 0, 128);
}
@@ -451,6 +451,8 @@ namespace PPU_opcodes
class PPUOpcodes
{
public:
virtual ~PPUOpcodes() {}

static u32 branchTarget(const u32 pc, const u32 imm)
{
return pc + (imm & ~0x3ULL);

@@ -229,6 +229,8 @@ namespace SPU_opcodes
class SPUOpcodes
{
public:
virtual ~SPUOpcodes() {}

static u32 branchTarget(const u32 pc, const s32 imm)
{
return (pc + (imm << 2)) & 0x3fffc;
@@ -1095,7 +1095,7 @@ void SPUThread::StopAndSignal(u32 code)
{
case 0:
{
const u32 next = eq->events.count() ? eq->sq.pop(eq->protocol) : 0;
const u32 next = eq->events.count() ? eq->sq.signal(eq->protocol) : 0;
if (next != tid)
{
if (!eq->owner.compare_and_swap_test(tid, next))
@@ -1118,13 +1118,20 @@ void SPUThread::StopAndSignal(u32 code)
SPU.In_MBox.PushUncond((u32)event.data1);
SPU.In_MBox.PushUncond((u32)event.data2);
SPU.In_MBox.PushUncond((u32)event.data3);
if (!eq->sq.invalidate(tid, eq->protocol) && !eq->sq.pop(tid, eq->protocol))
{
assert(!"sys_spu_thread_receive_event() failed (receiving)");
}
return;
}
}

if (!~old_owner)
{
eq->sq.invalidate(tid);
if (!eq->sq.invalidate(tid, eq->protocol))
{
assert(!"sys_spu_thread_receive_event() failed (cancelling)");
}
SPU.In_MBox.PushUncond(CELL_ECANCELED);
return;
}
@@ -1133,7 +1140,6 @@ void SPUThread::StopAndSignal(u32 code)
if (Emu.IsStopped())
{
LOG_WARNING(Log::SPU, "sys_spu_thread_receive_event(spuq=0x%x) aborted", spuq);
eq->sq.invalidate(tid);
return;
}
}
@@ -170,36 +170,28 @@ void MemoryBase::Close()
MemoryBlocks.clear();
}

void MemoryBase::WriteMMIO32(u32 addr, const u32 data)
bool MemoryBase::WriteMMIO32(u32 addr, const u32 data)
{
{
LV2_LOCK(0);
LV2_LOCK(0);

if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] &&
((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Write32(addr, data))
{
return;
}
if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] && ((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Write32(addr, data))
{
return true;
}

*(u32*)((u8*)GetBaseAddr() + addr) = re32(data); // provoke error
return false;
}

u32 MemoryBase::ReadMMIO32(u32 addr)
bool MemoryBase::ReadMMIO32(u32 addr, u32& res)
{
u32 res;
{
LV2_LOCK(0);
LV2_LOCK(0);

if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] &&
((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Read32(addr, &res))
{
return res;
}
if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] && ((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Read32(addr, &res))
{
return true;
}

res = re32(*(u32*)((u8*)GetBaseAddr() + addr)); // provoke error
return res;
return false;
}

bool MemoryBase::Map(const u64 addr, const u32 size)
@@ -127,9 +127,9 @@ public:

void Close();

__noinline void WriteMMIO32(u32 addr, const u32 data);
__noinline bool WriteMMIO32(u32 addr, const u32 data);

__noinline u32 ReadMMIO32(u32 addr);
__noinline bool ReadMMIO32(u32 addr, u32& res);

u32 GetUserMemTotalSize()
{
@@ -64,26 +64,20 @@ namespace vm

static u32 read32(u32 addr)
{
if (addr < RAW_SPU_BASE_ADDR || (addr % RAW_SPU_OFFSET) < RAW_SPU_PROB_OFFSET)
u32 res;
if (addr < RAW_SPU_BASE_ADDR || (addr % RAW_SPU_OFFSET) < RAW_SPU_PROB_OFFSET || !Memory.ReadMMIO32((u32)addr, res))
{
return re32(*(u32*)((u8*)g_base_addr + addr));
}
else
{
return Memory.ReadMMIO32((u32)addr);
res = re32(*(u32*)((u8*)g_base_addr + addr));
}
return res;
}

static void write32(u32 addr, be_t<u32> value)
{
if (addr < RAW_SPU_BASE_ADDR || (addr % RAW_SPU_OFFSET) < RAW_SPU_PROB_OFFSET)
if (addr < RAW_SPU_BASE_ADDR || (addr % RAW_SPU_OFFSET) < RAW_SPU_PROB_OFFSET || !Memory.WriteMMIO32((u32)addr, value))
{
*(be_t<u32>*)((u8*)g_base_addr + addr) = value;
}
else
{
Memory.WriteMMIO32((u32)addr, value);
}
}

static u64 read64(u32 addr)
@@ -67,12 +67,12 @@ namespace vm

__forceinline _ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>& operator *() const
{
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>(m_addr);
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>((u32)m_addr);
}

__forceinline _ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>& operator [](AT index) const
{
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>(m_addr + sizeof(AT)* index);
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>((u32)(m_addr + sizeof(AT)* index));
}

//typedef typename invert_be_t<AT>::type AT2;

@@ -112,7 +112,7 @@ namespace vm

__forceinline T* const operator -> () const
{
return vm::get_ptr<T>(m_addr);
return vm::get_ptr<T>((u32)m_addr);
}

_ptr_base operator++ (int)

@@ -160,17 +160,17 @@ namespace vm

__forceinline T& operator *() const
{
return vm::get_ref<T>(m_addr);
return vm::get_ref<T>((u32)m_addr);
}

__forceinline T& operator [](typename remove_be_t<AT>::type index) const
{
return vm::get_ref<T>(m_addr + sizeof(T)* index);
return vm::get_ref<T>((u32)(m_addr + sizeof(T) * index));
}

__forceinline T& operator [](typename to_be_t<AT>::forced_type index) const
{
return vm::get_ref<T>(m_addr + sizeof(T)* index);
return vm::get_ref<T>((u32)(m_addr + sizeof(T)* index));
}

__forceinline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }

@@ -224,7 +224,7 @@ namespace vm

T* get_ptr() const
{
return vm::get_ptr<T>(m_addr);
return vm::get_ptr<T>((u32)m_addr);
}

static _ptr_base make(AT addr)

@@ -253,7 +253,7 @@ namespace vm

void* get_ptr() const
{
return vm::get_ptr<void>(m_addr);
return vm::get_ptr<void>((u32)m_addr);
}

explicit operator void*() const

@@ -313,7 +313,7 @@ namespace vm

const void* get_ptr() const
{
return vm::get_ptr<const void>(m_addr);
return vm::get_ptr<const void>((u32)m_addr);
}

explicit operator const void*() const

@@ -356,9 +356,9 @@ namespace vm
public:
typedef RT(*type)(T...);

RT call(CPUThread& CPU, T... args) const; // call using specified CPU thread context, defined in CB_FUNC.h
RT call(CPUThread& CPU, T... args) const; // defined in CB_FUNC.h, call using specified CPU thread context

RT operator()(T... args) const; // call using current CPU thread context, defined in CB_FUNC.h
RT operator()(T... args) const; // defined in CB_FUNC.h, call using current CPU thread context

AT addr() const
{
@@ -16,16 +16,18 @@ int cellVideoOutGetScreenSize(u32 videoOut, vm::ptr<float> screenSize)
{
cellAvconfExt->Warning("cellVideoOutGetScreenSize(videoOut=%d, screenSize_addr=0x%x)", videoOut, screenSize.addr());

if (!videoOut == CELL_VIDEO_OUT_PRIMARY)
if (videoOut != CELL_VIDEO_OUT_PRIMARY)
{
return CELL_VIDEO_OUT_ERROR_UNSUPPORTED_VIDEO_OUT;
}

//TODO: Use virtual screen size
#ifdef _WIN32
HDC screen = GetDC(NULL);
float diagonal = round(sqrt((pow(GetDeviceCaps(screen, HORZSIZE), 2) + pow(GetDeviceCaps(screen, VERTSIZE), 2))) * 0.0393);
float diagonal = roundf(sqrtf((powf(float(GetDeviceCaps(screen, HORZSIZE)), 2) + powf(float(GetDeviceCaps(screen, VERTSIZE)), 2))) * 0.0393f);
#else
// TODO: Linux implementation, without using wx
// float diagonal = round(sqrt((pow(wxGetDisplaySizeMM().GetWidth(), 2) + pow(wxGetDisplaySizeMM().GetHeight(), 2))) * 0.0393);
// float diagonal = roundf(sqrtf((powf(wxGetDisplaySizeMM().GetWidth(), 2) + powf(wxGetDisplaySizeMM().GetHeight(), 2))) * 0.0393f);
#endif

if (Ini.GS3DTV.GetValue())

@@ -55,7 +57,8 @@ void cellAvconfExt_init(Module *pxThis)
{
cellAvconfExt = pxThis;

cellAvconfExt->AddFunc(0x4ec8c141, cellVideoOutConvertCursorColor);
cellAvconfExt->AddFunc(0xfaa275a4, cellVideoOutGetScreenSize);
cellAvconfExt->AddFunc(0xc7020f62, cellVideoOutSetGamma);
REG_FUNC(cellAvconfExt, cellVideoOutConvertCursorColor);
REG_FUNC(cellAvconfExt, cellVideoOutGetScreenSize);
REG_FUNC(cellAvconfExt, cellVideoOutGetGamma);
REG_FUNC(cellAvconfExt, cellVideoOutSetGamma);
}
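Editor's note on the screen-size change above: the formula stays the same, only the float-precision calls change. It converts the device size in millimetres to a diagonal in inches (0.0393 in/mm). For a hypothetical 510 mm x 287 mm panel: sqrt(510^2 + 287^2) ~= 585.2 mm, and 585.2 * 0.0393 ~= 23.0, so diagonal rounds to 23 inches. The panel dimensions are made up purely for illustration.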
@@ -92,7 +92,7 @@ ElementaryStream::ElementaryStream(Demuxer* dmux, u32 addr, u32 size, u32 fidMaj
, cbFunc(cbFunc)
, cbArg(cbArg)
, spec(spec)
, put(memAddr)
, put(a128(addr))
, put_count(0)
, got_count(0)
, released(0)
@@ -77,13 +77,13 @@ s64 pngDecOpen(
stream->fd = 0;
stream->src = *src;

switch (src->srcSelect.ToLE())
switch ((u32)src->srcSelect.ToBE())
{
case CELL_PNGDEC_BUFFER:
case se32(CELL_PNGDEC_BUFFER):
stream->fileSize = src->streamSize.ToLE();
break;

case CELL_PNGDEC_FILE:
case se32(CELL_PNGDEC_FILE):
// Get file descriptor
vm::var<be_t<u32>> fd;
int ret = cellFsOpen(vm::ptr<const char>::make(src->fileName.addr()), 0, fd, vm::ptr<u32>::make(0), 0);
@@ -145,7 +145,7 @@ s64 pngReadHeader(
auto buffer_32 = buffer.To<be_t<u32>>();
vm::var<be_t<u64>> pos, nread;

switch (stream->src.srcSelect.ToBE())
switch ((u32)stream->src.srcSelect.ToBE())
{
case se32(CELL_PNGDEC_BUFFER):
memmove(buffer.begin(), stream->src.streamPtr.get_ptr(), buffer.size());
@@ -206,17 +206,21 @@ s64 pngDecSetParameter(
current_outParam.outputHeight = current_info.imageHeight;
current_outParam.outputColorSpace = inParam->outputColorSpace;

switch ((u32)current_outParam.outputColorSpace)
switch ((u32)current_outParam.outputColorSpace.ToBE())
{
case CELL_PNGDEC_PALETTE:
case CELL_PNGDEC_GRAYSCALE: current_outParam.outputComponents = 1; break;
case se32(CELL_PNGDEC_PALETTE):
case se32(CELL_PNGDEC_GRAYSCALE):
current_outParam.outputComponents = 1; break;

case CELL_PNGDEC_GRAYSCALE_ALPHA: current_outParam.outputComponents = 2; break;
case se32(CELL_PNGDEC_GRAYSCALE_ALPHA):
current_outParam.outputComponents = 2; break;

case CELL_PNGDEC_RGB: current_outParam.outputComponents = 3; break;
case se32(CELL_PNGDEC_RGB):
current_outParam.outputComponents = 3; break;

case CELL_PNGDEC_RGBA:
case CELL_PNGDEC_ARGB: current_outParam.outputComponents = 4; break;
case se32(CELL_PNGDEC_RGBA):
case se32(CELL_PNGDEC_ARGB):
current_outParam.outputComponents = 4; break;

default:
cellPngDec->Error("pngDecSetParameter: Unsupported color space (%d)", current_outParam.outputColorSpace.ToLE());
@@ -250,7 +254,7 @@ s64 pngDecodeData(
vm::var<unsigned char[]> png((u32)fileSize);
vm::var<be_t<u64>> pos, nread;

switch (stream->src.srcSelect.ToBE())
switch ((u32)stream->src.srcSelect.ToBE())
{
case se32(CELL_PNGDEC_BUFFER):
memmove(png.begin(), stream->src.streamPtr.get_ptr(), png.size());
@@ -279,10 +283,10 @@ s64 pngDecodeData(
const int bytesPerLine = (u32)dataCtrlParam->outputBytesPerLine;
uint image_size = width * height;

switch ((u32)current_outParam.outputColorSpace)
switch ((u32)current_outParam.outputColorSpace.ToBE())
{
case CELL_PNGDEC_RGB:
case CELL_PNGDEC_RGBA:
case se32(CELL_PNGDEC_RGB):
case se32(CELL_PNGDEC_RGBA):
{
const char nComponents = current_outParam.outputColorSpace == CELL_PNGDEC_RGBA ? 4 : 3;
image_size *= nComponents;
@@ -300,10 +304,10 @@ s64 pngDecodeData(
{
memcpy(data.get_ptr(), image.get(), image_size);
}
}
break;
}

case CELL_PNGDEC_ARGB:
case se32(CELL_PNGDEC_ARGB):
{
const int nComponents = 4;
image_size *= nComponents;
@@ -342,12 +346,12 @@ s64 pngDecodeData(
memcpy(data.get_ptr(), img, image_size);
delete[] img;
}
}
break;
}

case CELL_PNGDEC_GRAYSCALE:
case CELL_PNGDEC_PALETTE:
case CELL_PNGDEC_GRAYSCALE_ALPHA:
case se32(CELL_PNGDEC_GRAYSCALE):
case se32(CELL_PNGDEC_PALETTE):
case se32(CELL_PNGDEC_GRAYSCALE_ALPHA):
cellPngDec->Error("pngDecodeData: Unsupported color space (%d)", current_outParam.outputColorSpace.ToLE());
break;
@@ -2441,7 +2441,7 @@ s64 _cellSpursTasksetAttribute2Initialize(vm::ptr<CellSpursTasksetAttribute2> at
return GetCurrentPPUThread().FastCall2(libsre + 0x1474C, libsre_rtoc);
#else
attribute->revision = revision;
attribute->name_addr = NULL;
attribute->name_addr = 0;
attribute->argTaskset = 0;

for (s32 i = 0; i < 8; i++)
@@ -2554,7 +2554,7 @@ s64 _cellSpursTaskAttribute2Initialize(vm::ptr<CellSpursTaskAttribute2> attribut
#else
attribute->revision = revision;
attribute->sizeContext = 0;
attribute->eaContext = NULL;
attribute->eaContext = 0;

for (s32 c = 0; c < 4; c++)
{

@@ -20,7 +20,7 @@ SemaphoreAttributes SyncPrimManager::GetSemaphoreData(u32 id)
return{};
}

return{ std::string((const char*)&sem->name, 8), sem->value, sem->max };
return{ std::string((const char*)&sem->name, 8), sem->value.read_sync(), sem->max };
}

LwMutexAttributes SyncPrimManager::GetLwMutexData(u32 id)
@@ -67,6 +67,7 @@ std::string SyncPrimManager::GetSyncPrimName(u32 id, IDType type)
}
break;
}
default: break;
}

LOG_ERROR(GENERAL, "SyncPrimManager::GetSyncPrimName(id=%d, type=%d) failed", id, type);
@@ -11,9 +11,13 @@
sleep_queue_t::~sleep_queue_t()
{
for (auto& tid : m_list)
for (auto& tid : m_waiting)
{
LOG_NOTICE(HLE, "~sleep_queue_t('%s'): m_list[%lld]=%d", m_name.c_str(), &tid - m_list.data(), tid);
LOG_NOTICE(HLE, "~sleep_queue_t['%s']: m_waiting[%lld]=%d", m_name.c_str(), &tid - m_waiting.data(), tid);
}
for (auto& tid : m_signaled)
{
LOG_NOTICE(HLE, "~sleep_queue_t['%s']: m_signaled[%lld]=%d", m_name.c_str(), &tid - m_signaled.data(), tid);
}
}

@@ -28,15 +32,27 @@ void sleep_queue_t::push(u32 tid, u32 protocol)
{
std::lock_guard<std::mutex> lock(m_mutex);

for (auto& v : m_list)
for (auto& v : m_waiting)
{
if (v == tid)
{
assert(!"sleep_queue_t::push() failed (duplication)");
LOG_ERROR(HLE, "sleep_queue_t['%s']::push() failed: thread already waiting (%d)", m_name.c_str(), tid);
Emu.Pause();
return;
}
}

m_list.push_back(tid);
for (auto& v : m_signaled)
{
if (v == tid)
{
LOG_ERROR(HLE, "sleep_queue_t['%s']::push() failed: thread already signaled (%d)", m_name.c_str(), tid);
Emu.Pause();
return;
}
}

m_waiting.push_back(tid);
return;
}
case SYS_SYNC_RETRY:
@@ -45,34 +61,86 @@ void sleep_queue_t::push(u32 tid, u32 protocol)
}
}

LOG_ERROR(HLE, "sleep_queue_t::push(): unsupported protocol (0x%x)", protocol);
LOG_ERROR(HLE, "sleep_queue_t['%s']::push() failed: unsupported protocol (0x%x)", m_name.c_str(), protocol);
Emu.Pause();
}

u32 sleep_queue_t::pop(u32 protocol)
bool sleep_queue_t::pop(u32 tid, u32 protocol)
{
assert(tid);

switch (protocol & SYS_SYNC_ATTR_PROTOCOL_MASK)
{
case SYS_SYNC_FIFO:
case SYS_SYNC_PRIORITY:
{
std::lock_guard<std::mutex> lock(m_mutex);

if (m_signaled.size() && m_signaled[0] == tid)
{
m_signaled.erase(m_signaled.begin());
return true;
}

for (auto& v : m_signaled)
{
if (v == tid)
{
return false;
}
}

for (auto& v : m_waiting)
{
if (v == tid)
{
return false;
}
}

LOG_ERROR(HLE, "sleep_queue_t['%s']::pop() failed: thread not found (%d)", m_name.c_str(), tid);
Emu.Pause();
return true; // ???
}
//case SYS_SYNC_RETRY: // ???
//{
//	return true; // ???
//}
}

LOG_ERROR(HLE, "sleep_queue_t['%s']::pop() failed: unsupported protocol (0x%x)", m_name.c_str(), protocol);
Emu.Pause();
return false; // ???
}

u32 sleep_queue_t::signal(u32 protocol)
{
u32 res = ~0;

switch (protocol & SYS_SYNC_ATTR_PROTOCOL_MASK)
{
case SYS_SYNC_FIFO:
{
std::lock_guard<std::mutex> lock(m_mutex);

if (m_list.size())
if (m_waiting.size())
{
const u32 res = m_list[0];
res = m_waiting[0];
if (!Emu.GetIdManager().CheckID(res))
{
LOG_ERROR(HLE, "sleep_queue_t::pop(SYS_SYNC_FIFO): invalid thread (%d)", res);
LOG_ERROR(HLE, "sleep_queue_t['%s']::signal(SYS_SYNC_FIFO) failed: invalid thread (%d)", m_name.c_str(), res);
Emu.Pause();
}

m_list.erase(m_list.begin());
return res;
m_waiting.erase(m_waiting.begin());
m_signaled.push_back(res);
}
else
{
return 0;
res = 0;
}

return res;
}
case SYS_SYNC_PRIORITY:
{
@@ -80,7 +148,7 @@ u32 sleep_queue_t::pop(u32 protocol)

u64 highest_prio = ~0ull;
u64 sel = ~0ull;
for (auto& v : m_list)
for (auto& v : m_waiting)
{
if (std::shared_ptr<CPUThread> t = Emu.GetCPU().GetThread(v))
{
@@ -88,20 +156,21 @@ u32 sleep_queue_t::pop(u32 protocol)
if (prio < highest_prio)
{
highest_prio = prio;
sel = &v - m_list.data();
sel = &v - m_waiting.data();
}
}
else
{
LOG_ERROR(HLE, "sleep_queue_t::pop(SYS_SYNC_PRIORITY): invalid thread (%d)", v);
LOG_ERROR(HLE, "sleep_queue_t['%s']::signal(SYS_SYNC_PRIORITY) failed: invalid thread (%d)", m_name.c_str(), v);
Emu.Pause();
}
}

if (~sel)
{
const u32 res = m_list[sel];
m_list.erase(m_list.begin() + sel);
res = m_waiting[sel];
m_waiting.erase(m_waiting.begin() + sel);
m_signaled.push_back(res);
return res;
}
// fallthrough
@@ -112,22 +181,69 @@ u32 sleep_queue_t::pop(u32 protocol)
}
}

LOG_ERROR(HLE, "sleep_queue_t::pop(): unsupported protocol (0x%x)", protocol);
LOG_ERROR(HLE, "sleep_queue_t['%s']::signal(): unsupported protocol (0x%x)", m_name.c_str(), protocol);
Emu.Pause();
return 0;
}

bool sleep_queue_t::invalidate(u32 tid)
bool sleep_queue_t::invalidate(u32 tid, u32 protocol)
{
assert(tid);

switch (protocol & SYS_SYNC_ATTR_PROTOCOL_MASK)
{
case SYS_SYNC_FIFO:
case SYS_SYNC_PRIORITY:
{
std::lock_guard<std::mutex> lock(m_mutex);

for (auto& v : m_waiting)
{
if (v == tid)
{
m_waiting.erase(m_waiting.begin() + (&v - m_waiting.data()));
return true;
}
}

for (auto& v : m_signaled)
{
if (v == tid)
{
if (&v == m_signaled.data())
{
return false; // if the thread is signaled, pop() should be used
}
m_signaled.erase(m_signaled.begin() + (&v - m_signaled.data()));
return true;
}
}

return false;
}
case SYS_SYNC_RETRY:
{
return true;
}
}

LOG_ERROR(HLE, "sleep_queue_t['%s']::invalidate(): unsupported protocol (0x%x)", m_name.c_str(), protocol);
Emu.Pause();
return 0;
}

bool sleep_queue_t::signal_selected(u32 tid)
{
assert(tid);

std::lock_guard<std::mutex> lock(m_mutex);

for (auto& v : m_list)
for (auto& v : m_waiting)
{
if (v == tid)
{
m_list.erase(m_list.begin() + (&v - m_list.data()));
m_waiting.erase(m_waiting.begin() + (&v - m_waiting.data()));
m_signaled.push_back(tid);
return true;
}
}
@@ -139,5 +255,5 @@ u32 sleep_queue_t::count()
{
std::lock_guard<std::mutex> lock(m_mutex);

return (u32)m_list.size();
return (u32)m_waiting.size() + (u32)m_signaled.size();
}
@@ -28,7 +28,8 @@ enum

class sleep_queue_t
{
std::vector<u32> m_list;
std::vector<u32> m_waiting;
std::vector<u32> m_signaled;
std::mutex m_mutex;
std::string m_name;

@@ -46,7 +47,9 @@ public:
const std::string& get_full_name() { return m_name; }

void push(u32 tid, u32 protocol);
u32 pop(u32 protocol);
bool invalidate(u32 tid);
bool pop(u32 tid, u32 protocol);
u32 signal(u32 protocol);
bool signal_selected(u32 tid);
bool invalidate(u32 tid, u32 protocol);
u32 count();
};
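Editor's sketch (not repository code): the syscalls later in this commit all use the reworked sleep_queue_t in the same way, so a condensed version of the waiter-side handshake may help when reading them. Names such as wait_on and the timed_out flag are placeholders; the real syscalls compute the timeout with get_system_time() and also re-acquire the associated lock after being signaled.

// Waiter side (cf. sys_cond_wait / sys_mutex_lock / sys_lwmutex_t::lock below).
s32 wait_on(sleep_queue_t& queue, u32 tid, u32 protocol, bool timed_out)
{
	queue.push(tid, protocol);                // register this thread as waiting

	while (!queue.pop(tid, protocol))         // true once this tid reaches the head of the signaled list
	{
		std::this_thread::sleep_for(std::chrono::milliseconds(1)); // same 1 ms polling hack used below

		if (timed_out)
		{
			if (!queue.invalidate(tid, protocol)) // remove self from the waiting list
			{
				assert(!"wait failed (timeout)");
			}
			return CELL_ETIMEDOUT;
		}
	}

	return CELL_OK;
}

The signaling side calls queue.signal(protocol) to move one waiter onto the signaled list (or signal_selected(tid) for a specific thread, as sys_cond_signal_to does), which is exactly the substitution made throughout the hunks below.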
@@ -36,7 +36,7 @@ s32 sys_cond_create(vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribu
*cond_id = id;
mutex->cond_count++; // TODO: check safety

sys_cond.Warning("*** condition created [%s] (mutex_id=%d): id = %d", std::string(attr->name, 8).c_str(), mutex_id, id);
sys_cond.Warning("*** cond created [%s] (mutex_id=%d): id = %d", std::string(attr->name, 8).c_str(), mutex_id, id);
return CELL_OK;
}

@@ -70,16 +70,7 @@ s32 sys_cond_signal(u32 cond_id)
return CELL_ESRCH;
}

if (u32 target = cond->queue.pop(cond->mutex->protocol))
{
cond->signal.push(target);

if (Emu.IsStopped())
{
sys_cond.Warning("sys_cond_signal(id=%d) aborted", cond_id);
}
}

u32 target = cond->queue.signal(cond->mutex->protocol);
return CELL_OK;
}

@@ -95,10 +86,8 @@ s32 sys_cond_signal_all(u32 cond_id)

Mutex* mutex = cond->mutex.get();

while (u32 target = cond->queue.pop(mutex->protocol))
while (u32 target = cond->queue.signal(mutex->protocol))
{
cond->signal.push(target);

if (Emu.IsStopped())
{
sys_cond.Warning("sys_cond_signal_all(id=%d) aborted", cond_id);
@@ -124,21 +113,10 @@ s32 sys_cond_signal_to(u32 cond_id, u32 thread_id)
return CELL_ESRCH;
}

if (!cond->queue.invalidate(thread_id))
if (!cond->queue.signal_selected(thread_id))
{
return CELL_EPERM;
}

u32 target = thread_id;
{
cond->signal.push(target);
}

if (Emu.IsStopped())
{
sys_cond.Warning("sys_cond_signal_to(id=%d, to=%d) aborted", cond_id, thread_id);
}

return CELL_OK;
}

@@ -146,6 +124,8 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
{
sys_cond.Log("sys_cond_wait(cond_id=%d, timeout=%lld)", cond_id, timeout);

const u64 start_time = get_system_time();

std::shared_ptr<Cond> cond;
if (!Emu.GetIdManager().GetIDData(cond_id, cond))
{
@@ -164,17 +144,15 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)

auto old_recursive = mutex->recursive_count.load();
mutex->recursive_count = 0;
if (!mutex->owner.compare_and_swap_test(tid, mutex->queue.pop(mutex->protocol)))
if (!mutex->owner.compare_and_swap_test(tid, mutex->queue.signal(mutex->protocol)))
{
assert(!"sys_cond_wait() failed");
}

bool pushed_in_sleep_queue = false;
const u64 time_start = get_system_time();
bool pushed_in_sleep_queue = false, signaled = false;
while (true)
{
u32 signaled;
if (cond->signal.try_peek(signaled) && signaled == tid) // check signaled threads
if ((signaled = signaled || cond->queue.pop(tid, mutex->protocol))) // check if signaled
{
if (mutex->owner.compare_and_swap_test(0, tid)) // try to lock
{
@@ -188,12 +166,7 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
}

auto old_owner = mutex->owner.compare_and_swap(0, tid);
if (!old_owner)
{
mutex->queue.invalidate(tid);
break;
}
if (old_owner == tid)
if (!old_owner || old_owner == tid)
{
break;
}
@@ -201,9 +174,12 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)

std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack

if (timeout && get_system_time() - time_start > timeout)
if (timeout && get_system_time() - start_time > timeout)
{
cond->queue.invalidate(tid);
if (!cond->queue.invalidate(tid, mutex->protocol))
{
assert(!"sys_cond_wait() failed (timeout)");
}
CPU.owned_mutexes--; // ???
return CELL_ETIMEDOUT; // mutex not locked
}
@@ -215,7 +191,10 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
}
}

if (pushed_in_sleep_queue && !mutex->queue.invalidate(tid, mutex->protocol) && !mutex->queue.pop(tid, mutex->protocol))
{
assert(!"sys_cond_wait() failed (locking)");
}
mutex->recursive_count = old_recursive;
cond->signal.pop(cond_id /* unused result */);
return CELL_OK;
}

@@ -4,7 +4,7 @@ struct sys_cond_attribute
{
be_t<u32> pshared;
be_t<u64> ipc_key;
be_t<int> flags;
be_t<s32> flags;
union
{
char name[8];
@@ -15,7 +15,6 @@ struct sys_cond_attribute
struct Cond
{
std::shared_ptr<Mutex> mutex; // associated with mutex
squeue_t<u32, 32> signal;
sleep_queue_t queue;

Cond(std::shared_ptr<Mutex>& mutex, u64 name)
@@ -7,6 +7,7 @@
#include "Emu/Cell/PPUThread.h"
#include "Emu/Event.h"
#include "sleep_queue_type.h"
#include "sys_time.h"
#include "sys_process.h"
#include "sys_event.h"

@@ -156,6 +157,8 @@ s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event,
sys_event.Log("sys_event_queue_receive(equeue_id=%d, dummy_event_addr=0x%x, timeout=%lld)",
equeue_id, dummy_event.addr(), timeout);

const u64 start_time = get_system_time();

std::shared_ptr<EventQueue> eq;
if (!Emu.GetIdManager().GetIDData(equeue_id, eq))
{
@@ -167,22 +170,20 @@ s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event,
return CELL_EINVAL;
}

u32 tid = GetCurrentPPUThread().GetId();
const u32 tid = GetCurrentPPUThread().GetId();

eq->sq.push(tid, eq->protocol); // add thread to sleep queue

timeout = timeout ? (timeout / 1000) : ~0;
u64 counter = 0;
while (true)
{
u32 old_owner = eq->owner.compare_and_swap(0, tid);
const u32 old_owner = eq->owner.compare_and_swap(0, tid);
const s32 res = old_owner ? (old_owner == tid ? 1 : 2) : 0;

switch (res)
{
case 0:
{
const u32 next = eq->events.count() ? eq->sq.pop(eq->protocol) : 0;
const u32 next = eq->events.count() ? eq->sq.signal(eq->protocol) : 0;
if (next != tid)
{
if (!eq->owner.compare_and_swap_test(tid, next))
@@ -209,23 +210,39 @@ s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event,
t.GPR[5] = event.data1;
t.GPR[6] = event.data2;
t.GPR[7] = event.data3;
if (!eq->sq.invalidate(tid, eq->protocol) && !eq->sq.pop(tid, eq->protocol))
{
assert(!"sys_event_queue_receive() failed (receiving)");
}
return CELL_OK;
}
}

if (!~old_owner)
{
eq->sq.invalidate(tid);
if (!eq->sq.invalidate(tid, eq->protocol))
{
assert(!"sys_event_queue_receive() failed (cancelling)");
}
return CELL_ECANCELED;
}

std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (counter++ > timeout || Emu.IsStopped())

if (timeout && get_system_time() - start_time > timeout)
{
if (Emu.IsStopped()) sys_event.Warning("sys_event_queue_receive(equeue=%d) aborted", equeue_id);
eq->sq.invalidate(tid);
if (!eq->sq.invalidate(tid, eq->protocol))
{
assert(!"sys_event_queue_receive() failed (timeout)");
}
return CELL_ETIMEDOUT;
}

if (Emu.IsStopped())
{
sys_event.Warning("sys_event_queue_receive(equeue=%d) aborted", equeue_id);
return CELL_OK;
}
}
}
@@ -43,6 +43,10 @@ u32 EventFlag::check()
assert(!"EventFlag::check(): waiter not found");
}
}
else
{
assert(!"EventFlag::check(): unknown protocol");
}
}
}

@@ -51,8 +55,7 @@ u32 EventFlag::check()

s32 sys_event_flag_create(vm::ptr<u32> eflag_id, vm::ptr<sys_event_flag_attr> attr, u64 init)
{
sys_event_flag.Warning("sys_event_flag_create(eflag_id_addr=0x%x, attr_addr=0x%x, init=0x%llx)",
eflag_id.addr(), attr.addr(), init);
sys_event_flag.Warning("sys_event_flag_create(eflag_id_addr=0x%x, attr_addr=0x%x, init=0x%llx)", eflag_id.addr(), attr.addr(), init);

if (!eflag_id)
{
@@ -76,7 +79,9 @@ s32 sys_event_flag_create(vm::ptr<u32> eflag_id, vm::ptr<sys_event_flag_attr> at
}

if (attr->pshared.ToBE() != se32(0x200))
{
return CELL_EINVAL;
}

switch (attr->type.ToBE())
{
@@ -67,10 +67,8 @@ s32 sys_lwcond_signal(vm::ptr<sys_lwcond_t> lwcond)

auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex.addr());

if (u32 target = lw->queue.pop(mutex->attribute))
if (u32 target = lw->queue.signal(mutex->attribute))
{
lw->signal.push(target);

if (Emu.IsStopped())
{
sys_lwcond.Warning("sys_lwcond_signal(id=%d) aborted", (u32)lwcond->lwcond_queue);
@@ -93,10 +91,8 @@ s32 sys_lwcond_signal_all(vm::ptr<sys_lwcond_t> lwcond)

auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex.addr());

while (u32 target = lw->queue.pop(mutex->attribute))
while (u32 target = lw->queue.signal(mutex->attribute))
{
lw->signal.push(target);

if (Emu.IsStopped())
{
sys_lwcond.Warning("sys_lwcond_signal_all(id=%d) aborted", (u32)lwcond->lwcond_queue);
@@ -122,22 +118,11 @@ s32 sys_lwcond_signal_to(vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
return CELL_ESRCH;
}

if (!lw->queue.invalidate(ppu_thread_id))
if (!lw->queue.signal_selected(ppu_thread_id))
{
return CELL_EPERM;
}

u32 target = ppu_thread_id;
{
lw->signal.push(target);

if (Emu.IsStopped())
{
sys_lwcond.Warning("sys_lwcond_signal_to(id=%d, to=%d) aborted", (u32)lwcond->lwcond_queue, ppu_thread_id);
return CELL_OK;
}
}

return CELL_OK;
}

@@ -145,6 +130,8 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
{
sys_lwcond.Log("sys_lwcond_wait(lwcond_addr=0x%x, timeout=%lld)", lwcond.addr(), timeout);

const u64 start_time = get_system_time();

std::shared_ptr<Lwcond> lw;
if (!Emu.GetIdManager().GetIDData((u32)lwcond->lwcond_queue, lw))
{
@@ -173,19 +160,18 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
auto old_recursive = mutex->recursive_count.read_relaxed();
mutex->recursive_count.exchange(be_t<u32>::make(0));

be_t<u32> target = be_t<u32>::make(sq->pop(mutex->attribute));
be_t<u32> target = be_t<u32>::make(sq->signal(mutex->attribute));
if (!mutex->owner.compare_and_swap_test(tid, target))
{
assert(!"sys_lwcond_wait(): mutex unlocking failed");
}

const u64 time_start = get_system_time();
bool signaled = false;
while (true)
{
u32 signaled;
if (lw->signal.try_peek(signaled) && signaled == tid_le) // check signaled threads
if ((signaled = signaled || lw->queue.pop(tid, mutex->attribute))) // check signaled threads
{
s32 res = mutex->lock(tid, timeout ? get_system_time() - time_start : 0); // this is bad
s32 res = mutex->lock(tid, timeout ? get_system_time() - start_time : 0); // this is bad
if (res == CELL_OK)
{
break;
@@ -196,35 +182,25 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
case static_cast<int>(CELL_EDEADLK):
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): associated mutex was locked", (u32)lwcond->lwcond_queue);
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_OK; // mutex not locked (but already locked in the incorrect way)
}
case static_cast<int>(CELL_ESRCH):
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): associated mutex not found (%d)", (u32)lwcond->lwcond_queue, (u32)mutex->sleep_queue);
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_ESRCH; // mutex not locked
}
case static_cast<int>(CELL_ETIMEDOUT):
{
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_ETIMEDOUT; // mutex not locked
}
case static_cast<int>(CELL_EINVAL):
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): invalid associated mutex (%d)", (u32)lwcond->lwcond_queue, (u32)mutex->sleep_queue);
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_EINVAL; // mutex not locked
}
default:
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): mutex->lock() returned 0x%x", (u32)lwcond->lwcond_queue, res);
lw->queue.invalidate(tid_le);
lw->signal.pop(tid_le /* unused result */);
return CELL_EINVAL; // mutex not locked
}
}
@@ -232,9 +208,12 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)

std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack

if (timeout && get_system_time() - time_start > timeout)
if (timeout && get_system_time() - start_time > timeout)
{
lw->queue.invalidate(tid_le);
if (!lw->queue.invalidate(tid_le, mutex->attribute))
{
assert(!"sys_lwcond_wait() failed (timeout)");
}
return CELL_ETIMEDOUT; // mutex not locked
}

@@ -246,6 +225,5 @@ s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
}

mutex->recursive_count.exchange(old_recursive);
lw->signal.pop(tid_le /* unused result */);
return CELL_OK;
}

@@ -19,7 +19,6 @@ struct sys_lwcond_t

struct Lwcond
{
squeue_t<u32, 32> signal;
sleep_queue_t queue;

const u32 addr;
@@ -157,7 +157,7 @@ s32 sys_lwmutex_t::unlock(be_t<u32> tid)
return CELL_ESRCH;
}

if (!owner.compare_and_swap_test(tid, be_t<u32>::make(sq->pop(attribute))))
if (!owner.compare_and_swap_test(tid, be_t<u32>::make(sq->signal(attribute))))
{
assert(!"sys_lwmutex_t::unlock() failed");
}
@@ -168,6 +168,8 @@ s32 sys_lwmutex_t::unlock(be_t<u32> tid)

s32 sys_lwmutex_t::lock(be_t<u32> tid, u64 timeout)
{
const u64 start_time = get_system_time();

switch (s32 res = trylock(tid))
{
case static_cast<s32>(CELL_EBUSY): break;
@@ -182,25 +184,22 @@ s32 sys_lwmutex_t::lock(be_t<u32> tid, u64 timeout)

sq->push(tid, attribute);

const u64 time_start = get_system_time();
while (true)
{
auto old_owner = owner.compare_and_swap(be_t<u32>::make(0), tid);
if (!old_owner.ToBE())
{
sq->invalidate(tid);
break;
}
if (old_owner == tid)
if (!old_owner.ToBE() || old_owner == tid)
{
break;
}

std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack

if (timeout && get_system_time() - time_start > timeout)
if (timeout && get_system_time() - start_time > timeout)
{
sq->invalidate(tid);
if (!sq->invalidate(tid, attribute))
{
assert(!"sys_lwmutex_t::lock() failed (timeout)");
}
return CELL_ETIMEDOUT;
}

@@ -211,6 +210,10 @@ s32 sys_lwmutex_t::lock(be_t<u32> tid, u64 timeout)
}
}

if (!sq->invalidate(tid, attribute) && !sq->pop(tid, attribute))
{
assert(!"sys_lwmutex_t::lock() failed (locking)");
}
recursive_count.exchange(be_t<u32>::make(1));
return CELL_OK;
}
@@ -81,26 +81,11 @@ s32 sys_mutex_destroy(PPUThread& CPU, u32 mutex_id)
return CELL_EPERM;
}

const u32 tid = CPU.GetId();

if (!mutex->owner.compare_and_swap_test(0, tid)) // check if locked
if (!mutex->owner.compare_and_swap_test(0, ~0)) // check if locked and make unusable
{
return CELL_EBUSY;
}

if (mutex->queue.count()) // TODO: safely make object unusable
{
if (!mutex->owner.compare_and_swap_test(tid, 0))
{
assert(!"sys_mutex_destroy() failed (busy)");
}
return CELL_EBUSY;
}

if (!mutex->owner.compare_and_swap_test(tid, ~0))
{
assert(!"sys_mutex_destroy() failed");
}
Emu.GetIdManager().RemoveID(mutex_id);
return CELL_OK;
}
@@ -109,6 +94,8 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
{
sys_mutex.Log("sys_mutex_lock(mutex_id=%d, timeout=%lld)", mutex_id, timeout);

const u64 start_time = get_system_time();

std::shared_ptr<Mutex> mutex;
if (!Emu.GetIdManager().GetIDData(mutex_id, mutex))
{
@@ -117,7 +104,12 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)

const u32 tid = CPU.GetId();

if (mutex->owner.read_sync() == tid)
const u32 old_owner = mutex->owner.compare_and_swap(0, tid);
if (!~old_owner)
{
return CELL_ESRCH; // mutex is going to be destroyed
}
if (old_owner == tid)
{
if (mutex->is_recursive)
{
@@ -133,8 +125,7 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
return CELL_EDEADLK;
}
}

if (mutex->owner.compare_and_swap_test(0, tid))
else if (!old_owner)
{
mutex->recursive_count = 1;
CPU.owned_mutexes++;
@@ -143,25 +134,22 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)

mutex->queue.push(tid, mutex->protocol);

const u64 time_start = get_system_time();
while (true)
{
auto old_owner = mutex->owner.compare_and_swap(0, tid);
if (!old_owner)
{
mutex->queue.invalidate(tid);
break;
}
if (old_owner == tid)
if (!old_owner || old_owner == tid)
{
break;
}

std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack

if (timeout && get_system_time() - time_start > timeout)
if (timeout && get_system_time() - start_time > timeout)
{
mutex->queue.invalidate(tid);
if (!mutex->queue.invalidate(tid, mutex->protocol))
{
assert(!"sys_mutex_lock() failed (timeout)");
}
return CELL_ETIMEDOUT;
}

@@ -172,6 +160,10 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
}
}

if (!mutex->queue.invalidate(tid, mutex->protocol) && !mutex->queue.pop(tid, mutex->protocol))
{
assert(!"sys_mutex_lock() failed (locking)");
}
mutex->recursive_count = 1;
CPU.owned_mutexes++;
return CELL_OK;
@@ -189,7 +181,12 @@ s32 sys_mutex_trylock(PPUThread& CPU, u32 mutex_id)

const u32 tid = CPU.GetId();

if (mutex->owner.read_sync() == tid)
const u32 old_owner = mutex->owner.compare_and_swap(0, tid);
if (!~old_owner)
{
return CELL_ESRCH; // mutex is going to be destroyed
}
if (old_owner == tid)
{
if (mutex->is_recursive)
{
@@ -205,15 +202,14 @@ s32 sys_mutex_trylock(PPUThread& CPU, u32 mutex_id)
return CELL_EDEADLK;
}
}

if (!mutex->owner.compare_and_swap_test(0, tid))
else if (!old_owner)
{
return CELL_EBUSY;
mutex->recursive_count = 1;
CPU.owned_mutexes++;
return CELL_OK;
}

mutex->recursive_count = 1;
CPU.owned_mutexes++;
return CELL_OK;
return CELL_EBUSY;
}

s32 sys_mutex_unlock(PPUThread& CPU, u32 mutex_id)
@@ -228,7 +224,12 @@ s32 sys_mutex_unlock(PPUThread& CPU, u32 mutex_id)

const u32 tid = CPU.GetId();

if (mutex->owner.read_sync() != tid)
const u32 owner = mutex->owner.read_sync();
if (!~owner)
{
return CELL_ESRCH; // mutex is going to be destroyed
}
if (owner != tid)
{
return CELL_EPERM;
}
@@ -241,7 +242,7 @@ s32 sys_mutex_unlock(PPUThread& CPU, u32 mutex_id)

if (!--mutex->recursive_count)
{
if (!mutex->owner.compare_and_swap_test(tid, mutex->queue.pop(mutex->protocol)))
if (!mutex->owner.compare_and_swap_test(tid, mutex->queue.signal(mutex->protocol)))
{
assert(!"sys_mutex_unlock() failed");
}
@@ -6,6 +6,7 @@

#include "Emu/Cell/PPUThread.h"
#include "sleep_queue_type.h"
#include "sys_time.h"
#include "sys_rwlock.h"

SysCallBase sys_rwlock("sys_rwlock");

@@ -14,31 +15,27 @@ s32 sys_rwlock_create(vm::ptr<u32> rw_lock_id, vm::ptr<sys_rwlock_attribute_t> a
{
sys_rwlock.Warning("sys_rwlock_create(rw_lock_id_addr=0x%x, attr_addr=0x%x)", rw_lock_id.addr(), attr.addr());

if (!attr)
return CELL_EFAULT;

switch (attr->attr_protocol.ToBE())
switch (attr->protocol.ToBE())
{
case se32(SYS_SYNC_PRIORITY): sys_rwlock.Todo("SYS_SYNC_PRIORITY"); break;
case se32(SYS_SYNC_PRIORITY): break;
case se32(SYS_SYNC_RETRY): sys_rwlock.Error("SYS_SYNC_RETRY"); return CELL_EINVAL;
case se32(SYS_SYNC_PRIORITY_INHERIT): sys_rwlock.Todo("SYS_SYNC_PRIORITY_INHERIT"); break;
case se32(SYS_SYNC_FIFO): break;
default: return CELL_EINVAL;
}

if (attr->attr_pshared.ToBE() != se32(0x200))
if (attr->pshared.ToBE() != se32(0x200))
{
sys_rwlock.Error("Invalid attr_pshared(0x%x)", (u32)attr->attr_pshared);
sys_rwlock.Error("Invalid pshared value (0x%x)", (u32)attr->pshared);
return CELL_EINVAL;
}

std::shared_ptr<RWLock> rw(new RWLock((u32)attr->attr_protocol, attr->name_u64));
u32 id = sys_rwlock.GetNewId(rw, TYPE_RWLOCK);
std::shared_ptr<RWLock> rw(new RWLock((u32)attr->protocol, attr->name_u64));
const u32 id = sys_rwlock.GetNewId(rw, TYPE_RWLOCK);
*rw_lock_id = id;
rw->wqueue.set_full_name(fmt::Format("Rwlock(%d)", id));

sys_rwlock.Warning("*** rwlock created [%s] (protocol=0x%x): id = %d",
std::string(attr->name, 8).c_str(), (u32)attr->attr_protocol, id);

sys_rwlock.Warning("*** rwlock created [%s] (protocol=0x%x): id = %d", std::string(attr->name, 8).c_str(), rw->protocol, id);
return CELL_OK;
}

@@ -47,14 +44,17 @@ s32 sys_rwlock_destroy(u32 rw_lock_id)
sys_rwlock.Warning("sys_rwlock_destroy(rw_lock_id=%d)", rw_lock_id);

std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
return CELL_ESRCH;
}

std::lock_guard<std::mutex> lock(rw->m_lock);

if (rw->wlock_queue.size() || rw->rlock_list.size() || rw->wlock_thread) return CELL_EBUSY;
if (!rw->sync.compare_and_swap_test({ 0, 0 }, { ~0u, ~0u })) // check if locked and make unusable
{
return CELL_EBUSY;
}

Emu.GetIdManager().RemoveID(rw_lock_id);

return CELL_OK;
}

@@ -62,37 +62,46 @@ s32 sys_rwlock_rlock(u32 rw_lock_id, u64 timeout)
{
sys_rwlock.Log("sys_rwlock_rlock(rw_lock_id=%d, timeout=%lld)", rw_lock_id, timeout);

const u64 start_time = get_system_time();

std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
const u32 tid = GetCurrentPPUThread().GetId();

if (rw->rlock_trylock(tid)) return CELL_OK;

u64 counter = 0;
const u64 max_counter = timeout ? (timeout / 1000) : 20000;
do
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
if (Emu.IsStopped())
return CELL_ESRCH;
}

while (true)
{
bool succeeded;
rw->sync.atomic_op_sync([&succeeded](RWLock::sync_var_t& sync)
{
sys_rwlock.Warning("sys_rwlock_rlock(rw_lock_id=%d, ...) aborted", rw_lock_id);
return CELL_ETIMEDOUT;
assert(~sync.readers);
if ((succeeded = !sync.writer))
{
sync.readers++;
}
});

if (succeeded)
{
break;
}

std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack

if (rw->rlock_trylock(tid)) return CELL_OK;

if (counter++ > max_counter)
if (timeout && get_system_time() - start_time > timeout)
{
if (!timeout)
{
counter = 0;
}
else
{
return CELL_ETIMEDOUT;
}
}
} while (true);
return CELL_ETIMEDOUT;
}

if (Emu.IsStopped())
{
sys_rwlock.Warning("sys_rwlock_rlock(id=%d) aborted", rw_lock_id);
return CELL_OK;
}
}

return CELL_OK;
}

s32 sys_rwlock_tryrlock(u32 rw_lock_id)
@ -100,11 +109,27 @@ s32 sys_rwlock_tryrlock(u32 rw_lock_id)
|
||||
sys_rwlock.Log("sys_rwlock_tryrlock(rw_lock_id=%d)", rw_lock_id);
|
||||
|
||||
std::shared_ptr<RWLock> rw;
|
||||
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
|
||||
if (!sys_rwlock.CheckId(rw_lock_id, rw))
|
||||
{
|
||||
return CELL_ESRCH;
|
||||
}
|
||||
|
||||
if (!rw->rlock_trylock(GetCurrentPPUThread().GetId())) return CELL_EBUSY;
|
||||
bool succeeded;
|
||||
rw->sync.atomic_op_sync([&succeeded](RWLock::sync_var_t& sync)
|
||||
{
|
||||
assert(~sync.readers);
|
||||
if ((succeeded = !sync.writer))
|
||||
{
|
||||
sync.readers++;
|
||||
}
|
||||
});
|
||||
|
||||
return CELL_OK;
|
||||
if (succeeded)
|
||||
{
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
return CELL_EBUSY;
|
||||
}
|
||||
|
||||
s32 sys_rwlock_runlock(u32 rw_lock_id)
|
||||
@ -112,75 +137,134 @@ s32 sys_rwlock_runlock(u32 rw_lock_id)
|
||||
sys_rwlock.Log("sys_rwlock_runlock(rw_lock_id=%d)", rw_lock_id);
|
||||
|
||||
std::shared_ptr<RWLock> rw;
|
||||
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
|
||||
if (!sys_rwlock.CheckId(rw_lock_id, rw))
|
||||
{
|
||||
return CELL_ESRCH;
|
||||
}
|
||||
|
||||
if (!rw->rlock_unlock(GetCurrentPPUThread().GetId())) return CELL_EPERM;
|
||||
bool succeeded;
|
||||
rw->sync.atomic_op_sync([&succeeded](RWLock::sync_var_t& sync)
|
||||
{
|
||||
if ((succeeded = sync.readers != 0))
|
||||
{
|
||||
assert(!sync.writer);
|
||||
sync.readers--;
|
||||
}
|
||||
});
|
||||
|
||||
return CELL_OK;
|
||||
if (succeeded)
|
||||
{
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
return CELL_EPERM;
|
||||
}
|
||||
|
||||
s32 sys_rwlock_wlock(u32 rw_lock_id, u64 timeout)
s32 sys_rwlock_wlock(PPUThread& CPU, u32 rw_lock_id, u64 timeout)
{
sys_rwlock.Log("sys_rwlock_wlock(rw_lock_id=%d, timeout=%lld)", rw_lock_id, timeout);

const u64 start_time = get_system_time();

std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
const u32 tid = GetCurrentPPUThread().GetId();

if (!rw->wlock_check(tid)) return CELL_EDEADLK;

if (rw->wlock_trylock(tid, true)) return CELL_OK;

u64 counter = 0;
const u64 max_counter = timeout ? (timeout / 1000) : 20000;
do
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
if (Emu.IsStopped())
return CELL_ESRCH;
}

const u32 tid = CPU.GetId();

if (rw->sync.compare_and_swap_test({ 0, 0 }, { 0, tid }))
{
return CELL_OK;
}

if (rw->sync.read_relaxed().writer == tid)
{
return CELL_EDEADLK;
}

rw->wqueue.push(tid, rw->protocol);

while (true)
{
auto old_sync = rw->sync.compare_and_swap({ 0, 0 }, { 0, tid });
if (!old_sync.readers && (!old_sync.writer || old_sync.writer == tid))
{
sys_rwlock.Warning("sys_rwlock_wlock(rw_lock_id=%d, ...) aborted", rw_lock_id);
return CELL_ETIMEDOUT;
break;
}

std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack

if (rw->wlock_trylock(tid, true)) return CELL_OK;

if (counter++ > max_counter)
if (timeout && get_system_time() - start_time > timeout)
{
if (!timeout)
if (!rw->wqueue.invalidate(tid, rw->protocol))
{
counter = 0;
assert(!"sys_rwlock_wlock() failed (timeout)");
}
else
{
return CELL_ETIMEDOUT;
}
}
} while (true);
return CELL_ETIMEDOUT;
}

if (Emu.IsStopped())
{
sys_rwlock.Warning("sys_rwlock_wlock(id=%d) aborted", rw_lock_id);
return CELL_OK;
}
}

if (!rw->wqueue.invalidate(tid, rw->protocol) && !rw->wqueue.pop(tid, rw->protocol))
{
assert(!"sys_rwlock_wlock() failed (locking)");
}
return CELL_OK;
}

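Aside (not part of the commit): the rewritten sys_rwlock_wlock acquires the write lock with a compare-and-swap from { readers = 0, writer = 0 } to { 0, tid }, polling until the swap succeeds or the microsecond timeout elapses. A standalone sketch of that loop under the same assumed 64-bit packing as above (sleep-queue and protocol handling omitted; this is illustrative, not the emulator's API):

#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

std::atomic<uint64_t> rw_sync{0}; // low 32 bits: readers, high 32 bits: writer tid

enum class wlock_result { ok, deadlock, timedout };

wlock_result write_lock(uint32_t tid, uint64_t timeout_us)
{
    const auto start = std::chrono::steady_clock::now();
    for (;;)
    {
        uint64_t expected = 0; // free: no readers, no writer
        if (rw_sync.compare_exchange_strong(expected, uint64_t{tid} << 32))
        {
            return wlock_result::ok; // -> CELL_OK
        }
        if ((expected >> 32) == tid)
        {
            return wlock_result::deadlock; // recursive write lock -> CELL_EDEADLK
        }
        if (timeout_us && std::chrono::steady_clock::now() - start > std::chrono::microseconds(timeout_us))
        {
            return wlock_result::timedout; // -> CELL_ETIMEDOUT
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(1)); // same polling hack as the diff
    }
}
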
s32 sys_rwlock_trywlock(u32 rw_lock_id)
s32 sys_rwlock_trywlock(PPUThread& CPU, u32 rw_lock_id)
{
sys_rwlock.Log("sys_rwlock_trywlock(rw_lock_id=%d)", rw_lock_id);

std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
const u32 tid = GetCurrentPPUThread().GetId();
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
return CELL_ESRCH;
}

if (!rw->wlock_check(tid)) return CELL_EDEADLK;
const u32 tid = CPU.GetId();

if (!rw->wlock_trylock(tid, false)) return CELL_EBUSY;
if (rw->sync.compare_and_swap_test({ 0, 0 }, { 0, tid }))
{
return CELL_OK;
}

return CELL_OK;
if (rw->sync.read_relaxed().writer == tid)
{
return CELL_EDEADLK;
}

return CELL_EBUSY;
}

s32 sys_rwlock_wunlock(u32 rw_lock_id)
s32 sys_rwlock_wunlock(PPUThread& CPU, u32 rw_lock_id)
{
sys_rwlock.Log("sys_rwlock_wunlock(rw_lock_id=%d)", rw_lock_id);

std::shared_ptr<RWLock> rw;
if (!sys_rwlock.CheckId(rw_lock_id, rw)) return CELL_ESRCH;
if (!sys_rwlock.CheckId(rw_lock_id, rw))
{
return CELL_ESRCH;
}

if (!rw->wlock_unlock(GetCurrentPPUThread().GetId())) return CELL_EPERM;
const u32 tid = CPU.GetId();
const u32 target = rw->wqueue.signal(rw->protocol);

return CELL_OK;
if (rw->sync.compare_and_swap_test({ 0, tid }, { 0, target }))
{
if (!target)
{
// TODO: signal readers
}
return CELL_OK;
}

return CELL_EPERM;
}

@ -2,11 +2,11 @@

struct sys_rwlock_attribute_t
{
be_t<u32> attr_protocol;
be_t<u32> attr_pshared; // == 0x200 (NOT SHARED)
be_t<u64> key; // process-shared key (not used)
be_t<s32> flags; // process-shared flags (not used)
be_t<u32> pad; // not used
be_t<u32> protocol;
be_t<u32> pshared;
be_t<u64> ipc_key;
be_t<s32> flags;
be_t<u32> pad;
union
{
char name[8];
@ -16,136 +16,22 @@ struct sys_rwlock_attribute_t

struct RWLock
{
std::mutex m_lock; // internal lock
u32 wlock_thread; // write lock owner
std::vector<u32> wlock_queue; // write lock queue
std::vector<u32> rlock_list; // read lock list
u32 m_protocol; // TODO

union
struct sync_var_t
{
u64 m_name_u64;
char m_name[8];
u32 readers; // reader count
u32 writer; // writer thread id
};

sleep_queue_t wqueue;
atomic_le_t<sync_var_t> sync;

const u32 protocol;

RWLock(u32 protocol, u64 name)
: m_protocol(protocol)
, m_name_u64(name)
, wlock_thread(0)
: protocol(protocol)
, wqueue(name)
{
}

bool rlock_trylock(u32 tid)
{
std::lock_guard<std::mutex> lock(m_lock);

if (!wlock_thread && !wlock_queue.size())
{
rlock_list.push_back(tid);
return true;
}
return false;
}

bool rlock_unlock(u32 tid)
{
std::lock_guard<std::mutex> lock(m_lock);

for (u32 i = (u32)rlock_list.size() - 1; ~i; i--)
{
if (rlock_list[i] == tid)
{
rlock_list.erase(rlock_list.begin() + i);
return true;
}
}
return false;
}

bool wlock_check(u32 tid)
{
std::lock_guard<std::mutex> lock(m_lock);

if (wlock_thread == tid)
{
return false; // deadlock
}
for (u32 i = (u32)rlock_list.size() - 1; ~i; i--)
{
if (rlock_list[i] == tid)
{
return false; // deadlock
}
}
return true;
}

bool wlock_trylock(u32 tid, bool enqueue)
{
std::lock_guard<std::mutex> lock(m_lock);

if (wlock_thread || rlock_list.size()) // already locked
{
if (!enqueue)
{
return false; // do not enqueue
}
for (u32 i = (u32)wlock_queue.size() - 1; ~i; i--)
{
if (wlock_queue[i] == tid)
{
return false; // already enqueued
}
}
wlock_queue.push_back(tid); // enqueue new thread
return false;
}
else
{
if (wlock_queue.size())
{
// SYNC_FIFO only yet
if (wlock_queue[0] == tid)
{
wlock_thread = tid;
wlock_queue.erase(wlock_queue.begin());
return true;
}
else
{
if (!enqueue)
{
return false; // do not enqueue
}
for (u32 i = (u32)wlock_queue.size() - 1; ~i; i--)
{
if (wlock_queue[i] == tid)
{
return false; // already enqueued
}
}
wlock_queue.push_back(tid); // enqueue new thread
return false;
}
}
else
{
wlock_thread = tid; // easy way
return true;
}
}
}

bool wlock_unlock(u32 tid)
{
std::lock_guard<std::mutex> lock(m_lock);

if (wlock_thread == tid)
{
wlock_thread = 0;
return true;
}
return false;
sync.write_relaxed({ 0, 0 });
}
};

@ -155,6 +41,6 @@ s32 sys_rwlock_destroy(u32 rw_lock_id);
s32 sys_rwlock_rlock(u32 rw_lock_id, u64 timeout);
s32 sys_rwlock_tryrlock(u32 rw_lock_id);
s32 sys_rwlock_runlock(u32 rw_lock_id);
s32 sys_rwlock_wlock(u32 rw_lock_id, u64 timeout);
s32 sys_rwlock_trywlock(u32 rw_lock_id);
s32 sys_rwlock_wunlock(u32 rw_lock_id);
s32 sys_rwlock_wlock(PPUThread& CPU, u32 rw_lock_id, u64 timeout);
s32 sys_rwlock_trywlock(PPUThread& CPU, u32 rw_lock_id);
s32 sys_rwlock_wunlock(PPUThread& CPU, u32 rw_lock_id);

@ -88,6 +88,8 @@ s32 sys_semaphore_wait(u32 sem_id, u64 timeout)
{
sys_semaphore.Log("sys_semaphore_wait(sem_id=%d, timeout=%lld)", sem_id, timeout);

const u64 start_time = get_system_time();

std::shared_ptr<Semaphore> sem;
if (!Emu.GetIdManager().GetIDData(sem_id, sem))
{
@ -95,46 +97,58 @@ s32 sys_semaphore_wait(u32 sem_id, u64 timeout)
}

const u32 tid = GetCurrentPPUThread().GetId();
const u64 start_time = get_system_time();
s32 old_value;

{
std::lock_guard<std::mutex> lock(sem->mutex);
if (sem->value > 0)
sem->value.atomic_op_sync([&old_value](s32& value)
{
old_value = value;
if (value > 0)
{
value--;
}
});

if (old_value > 0)
{
sem->value--;
return CELL_OK;
}

sem->queue.push(tid, sem->protocol);
}


while (true)
{
if (sem->queue.pop(tid, sem->protocol))
{
break;
}

assert(!sem->value.read_sync());
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack

if (timeout && get_system_time() - start_time > timeout)
{
if (!sem->queue.invalidate(tid, sem->protocol))
{
if (sem->queue.pop(tid, sem->protocol))
{
return CELL_OK;
}
assert(!"sys_semaphore_wait() failed (timeout)");
}
return CELL_ETIMEDOUT;
}

if (Emu.IsStopped())
{
sys_semaphore.Warning("sys_semaphore_wait(%d) aborted", sem_id);
return CELL_OK;
}

if (timeout && get_system_time() - start_time > timeout)
{
sem->queue.invalidate(tid);
return CELL_ETIMEDOUT;
}

if (tid == sem->signal)
{
std::lock_guard<std::mutex> lock(sem->mutex);

if (tid != sem->signal)
{
continue;
}
sem->signal = 0;
return CELL_OK;
}

std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
}

return CELL_OK;
}

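Aside (not part of the commit): the semaphore fast path above is the classic decrement-only-if-positive pattern, now done as one atomic_op_sync instead of a mutex-guarded check. A minimal equivalent with std::atomic (illustrative sketch only; names are not RPCS3 API):

#include <atomic>

std::atomic<int> sem_value{1};

bool try_decrement()
{
    int old_val = sem_value.load();
    while (old_val > 0)
    {
        if (sem_value.compare_exchange_weak(old_val, old_val - 1))
        {
            return true; // counter was positive and has been decremented -> CELL_OK
        }
        // old_val was refreshed by the failed CAS; the loop re-checks positivity
    }
    return false; // counter is zero -> caller queues up (wait) or returns CELL_EBUSY (trywait)
}
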
s32 sys_semaphore_trywait(u32 sem_id)
@ -147,11 +161,19 @@ s32 sys_semaphore_trywait(u32 sem_id)
return CELL_ESRCH;
}

std::lock_guard<std::mutex> lock(sem->mutex);
s32 old_value;

if (sem->value > 0)
sem->value.atomic_op_sync([&old_value](s32& value)
{
old_value = value;
if (value > 0)
{
value--;
}
});

if (old_value > 0)
{
sem->value--;
return CELL_OK;
}
else
@ -175,7 +197,7 @@ s32 sys_semaphore_post(u32 sem_id, s32 count)
return CELL_EINVAL;
}

if (count + sem->value - (s32)sem->queue.count() > sem->max)
if (count + sem->value.read_sync() - (s32)sem->queue.count() > sem->max)
{
return CELL_EBUSY;
}
@ -188,22 +210,16 @@ s32 sys_semaphore_post(u32 sem_id, s32 count)
return CELL_OK;
}

std::lock_guard<std::mutex> lock(sem->mutex);

if (sem->signal && sem->queue.count())
{
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
continue;
}

if (u32 target = sem->queue.pop(sem->protocol))
if (u32 target = sem->queue.signal(sem->protocol))
{
count--;
sem->signal = target;
}
else
{
sem->value += count;
sem->value.atomic_op([count](s32& value)
{
value += count;
});
count = 0;
}
}
@ -217,7 +233,7 @@ s32 sys_semaphore_get_value(u32 sem_id, vm::ptr<s32> count)

if (!count)
{
sys_semaphore.Error("sys_semaphore_get_value(): invalid memory access (count=0x%x)", count.addr());
sys_semaphore.Error("sys_semaphore_get_value(): invalid memory access (addr=0x%x)", count.addr());
return CELL_EFAULT;
}

@ -227,9 +243,6 @@ s32 sys_semaphore_get_value(u32 sem_id, vm::ptr<s32> count)
return CELL_ESRCH;
}

std::lock_guard<std::mutex> lock(sem->mutex);

*count = sem->value;

*count = sem->value.read_sync();
return CELL_OK;
}

@ -16,22 +16,19 @@ struct sys_semaphore_attribute

struct Semaphore
{
std::mutex mutex;
sleep_queue_t queue;
s32 value;
u32 signal;
atomic_le_t<s32> value;

const s32 max;
const u32 protocol;
const u64 name;

Semaphore(s32 initial_count, s32 max_count, u32 protocol, u64 name)
: value(initial_count)
, signal(0)
, max(max_count)
: max(max_count)
, protocol(protocol)
, name(name)
{
value.write_relaxed(initial_count);
}
};