types.hpp: implement smin, smax, amin, amax
Reworked the following global utility constants: `umax` yields the maximum value and is restricted to unsigned types; `smax` yields the maximum signed value and is restricted to integral types; `smin` yields the minimum signed value and is restricted to signed types; `amin` yields `smin` or zero and is less restricted; `amax` yields `smax` or `umax` and is less restricted. Also fixed operators == and <=> so the synthesized (rewritten) relational operators work correctly.
commit 160b131de3 (parent 613777afde)
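For readers unfamiliar with these helpers, here is a minimal usage sketch (not part of the commit itself). It assumes the util/types.hpp touched by this commit is on the include path and that u32/s16/s32/u64 are RPCS3's fixed-width integer aliases from that header.

#include "util/types.hpp"

// umax converts to (and compares as) the all-ones maximum of any unsigned type
static_assert(u32{umax} == 0xffff'ffffu);

// smin/smax convert to the minimum signed / maximum integral value of the target type
static_assert(s32{smin} == -2147483647 - 1);
static_assert(s16{smax} == 32767);

// amin is zero for unsigned types and smin for signed types
static_assert(u32{amin} == 0u && s32{amin} == s32{smin});

// amax is umax for unsigned types and smax for signed types
static_assert(u32{amax} == u32{umax} && s32{amax} == s32{smax});

// The == and <=> operators (plus their synthesized forms) allow direct comparison
constexpr u64 sentinel = u64{umax};
static_assert(sentinel == umax);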
@@ -252,7 +252,7 @@ namespace cfg
 static_assert(Min < Max, "Invalid cfg::_int range");

 // Prefer 32 bit type if possible
-using int_type = std::conditional_t<Min >= INT32_MIN && Max <= INT32_MAX, s32, s64>;
+using int_type = std::conditional_t<Min >= s32{smin} && Max <= s32{smax}, s32, s64>;

 atomic_t<int_type> m_value;

@@ -314,10 +314,10 @@ namespace cfg
 };

 // Alias for 32 bit int
-using int32 = _int<INT32_MIN, INT32_MAX>;
+using int32 = _int<s32{smin}, s32{smax}>;

 // Alias for 64 bit int
-using int64 = _int<INT64_MIN, INT64_MAX>;
+using int64 = _int<s64{smin}, s64{smax}>;

 // Unsigned 32/64-bit integer entry with custom Min/Max range.
 template <u64 Min, u64 Max>
@@ -326,7 +326,7 @@ namespace cfg
 static_assert(Min < Max, "Invalid cfg::uint range");

 // Prefer 32 bit type if possible
-using int_type = std::conditional_t<Max <= UINT32_MAX, u32, u64>;
+using int_type = std::conditional_t<Max <= u32{umax}, u32, u64>;

 atomic_t<int_type> m_value;

@@ -388,10 +388,10 @@ namespace cfg
 };

 // Alias for 32 bit uint
-using uint32 = uint<0, UINT32_MAX>;
+using uint32 = uint<0, u32{umax}>;

 // Alias for 64 bit int
-using uint64 = uint<0, UINT64_MAX>;
+using uint64 = uint<0, u64{umax}>;

 // Simple string entry with mutex
 class string : public _base
@@ -97,7 +97,7 @@ static FILETIME from_time(s64 _time)
 result.dwLowDateTime = 0;
 result.dwHighDateTime = 0;
 }
-else if (_time > INT64_MAX / 10000000ll - 11644473600ll)
+else if (_time > s64{smax} / 10000000ll - 11644473600ll)
 {
 result.dwLowDateTime = 0xffffffff;
 result.dwHighDateTime = 0x7fffffff;
@@ -1644,7 +1644,7 @@ static LONG exception_handler(PEXCEPTION_POINTERS pExp) noexcept
 {
 addr = addr0;
 }
-else if (const usz exec64 = (ptr - vm::g_exec_addr) / 2; exec64 <= UINT32_MAX)
+else if (const usz exec64 = (ptr - vm::g_exec_addr) / 2; exec64 <= u32{umax})
 {
 addr = static_cast<u32>(exec64);
 }
@@ -2808,7 +2808,7 @@ u64 thread_ctrl::get_affinity_mask(thread_class group)
 }
 }

-return UINT64_MAX;
+return -1;
 }

 void thread_ctrl::set_native_priority(int priority)
@@ -41,7 +41,7 @@ namespace utils
 class address_range
 {
 public:
-u32 start = UINT32_MAX; // First address in range
+u32 start = umax; // First address in range
 u32 end = 0; // Last address

 private:
@@ -174,7 +174,7 @@ namespace utils
 address_range get_min_max(const address_range &other) const
 {
 return {
-std::min(valid() ? start : UINT32_MAX, other.valid() ? other.start : UINT32_MAX),
+std::min(valid() ? start : umax, other.valid() ? other.start : umax),
 std::max(valid() ? end : 0, other.valid() ? other.end : 0)
 };
 }
@@ -234,7 +234,7 @@ namespace utils

 void invalidate()
 {
-start = UINT32_MAX;
+start = umax;
 end = 0;
 }

@@ -136,7 +136,7 @@ public:
 void append_title_patches(const std::string& title_id);

 // Apply patch (returns the number of entries applied)
-std::basic_string<u32> apply(const std::string& name, u8* dst, u32 filesz = UINT32_MAX, u32 min_addr = 0);
+std::basic_string<u32> apply(const std::string& name, u8* dst, u32 filesz = -1, u32 min_addr = 0);

 private:
 // Database
@@ -1,7 +1,6 @@
 #pragma once

 #include "util/types.hpp"
-#include <climits>
 #include <string>
 #include <vector>
 #include <algorithm>
@@ -46,7 +45,7 @@ usz cfmt_append(Dst& out, const Char* fmt, Src&& src)

 const auto read_decimal = [&](uint result) -> uint
 {
-while (fmt[0] >= '0' && fmt[0] <= '9' && result <= (UINT_MAX / 10))
+while (fmt[0] >= '0' && fmt[0] <= '9' && result <= (uint{umax} / 10))
 {
 result = result * 10 + (fmt[0] - '0');
 fmt++, ctx.size++;
@@ -9,7 +9,7 @@ void cond_variable::imp_wait(u32 _old, u64 _timeout) noexcept
 ensure(_old);

 // Wait with timeout
-m_value.wait(_old, c_signal_mask, atomic_wait_timeout{_timeout > max_timeout ? UINT64_MAX : _timeout * 1000});
+m_value.wait(_old, c_signal_mask, atomic_wait_timeout{_timeout > max_timeout ? umax : _timeout * 1000});

 // Cleanup
 m_value.atomic_op([](u32& value)
@@ -91,5 +91,5 @@ public:
 }
 }

-static constexpr u64 max_timeout = UINT64_MAX / 1000;
+static constexpr u64 max_timeout = u64{umax} / 1000;
 };
@@ -22,20 +22,20 @@ void semaphore_base::imp_wait()
 const s32 value = m_value.atomic_op([](s32& value)
 {
 // Use sign bit to acknowledge waiter presence
-if (value && value > INT32_MIN)
+if (value && value > smin)
 {
 value--;

 if (value < 0)
 {
 // Remove sign bit
-value -= INT32_MIN;
+value -= s32{smin};
 }
 }
 else
 {
 // Set sign bit
-value = INT32_MIN;
+value = smin;
 }

 return value;
@@ -913,7 +913,7 @@ static NEVER_INLINE error_code savedata_op(ppu_thread& ppu, u32 operation, u32 v
 }
 case CELL_SAVEDATA_FOCUSPOS_LATEST:
 {
-s64 max = INT64_MIN;
+s64 max = smin;

 for (u32 i = 0; i < save_entries.size(); i++)
 {
@@ -928,7 +928,7 @@ static NEVER_INLINE error_code savedata_op(ppu_thread& ppu, u32 operation, u32 v
 }
 case CELL_SAVEDATA_FOCUSPOS_OLDEST:
 {
-s64 min = INT64_MAX;
+s64 min = smax;

 for (u32 i = 0; i < save_entries.size(); i++)
 {
@@ -260,8 +260,8 @@ struct vdec_context final

 packet.data = vm::_ptr<u8>(au_addr);
 packet.size = au_size;
-packet.pts = au_pts != umax ? au_pts : INT64_MIN;
-packet.dts = au_dts != umax ? au_dts : INT64_MIN;
+packet.pts = au_pts != umax ? au_pts : s64{smin};
+packet.dts = au_dts != umax ? au_dts : s64{smin};

 if (next_pts == 0 && au_pts != umax)
 {
@@ -281,8 +281,8 @@ struct vdec_context final
 }
 else
 {
-packet.pts = INT64_MIN;
-packet.dts = INT64_MIN;
+packet.pts = smin;
+packet.dts = smin;
 cellVdec.trace("End sequence...");
 }

@@ -336,12 +336,12 @@ struct vdec_context final
 fmt::throw_exception("Repeated frames not supported (0x%x)", frame->repeat_pict);
 }

-if (frame->pts != INT64_MIN)
+if (frame->pts != smin)
 {
 next_pts = frame->pts;
 }

-if (frame->pkt_dts != INT64_MIN)
+if (frame->pkt_dts != smin)
 {
 next_dts = frame->pkt_dts;
 }
@@ -2880,7 +2880,7 @@ void ppu_acontext::MFOCRF(ppu_opcode_t op)

 void ppu_acontext::LWARX(ppu_opcode_t op)
 {
-gpr[op.rd] = spec_gpr::range(0, UINT32_MAX);
+gpr[op.rd] = spec_gpr::range(0, u32{umax});
 }

 void ppu_acontext::LDX(ppu_opcode_t op)
@@ -11,6 +11,7 @@

 #include <bit>
 #include <cmath>
+#include <climits>

 #include "util/asm.hpp"
 #include "util/v128.hpp"
@@ -1213,7 +1213,7 @@ bool ppu_load_exec(const ppu_exec_object& elf)

 const addr_range r = addr_range::start_length(static_cast<u32>(prog.p_vaddr), static_cast<u32>(prog.p_memsz));

-if ((prog.p_vaddr | prog.p_memsz) > UINT32_MAX || !r.valid() || !r.inside(addr_range::start_length(0x00000000, 0x30000000)))
+if ((prog.p_vaddr | prog.p_memsz) > u32{umax} || !r.valid() || !r.inside(addr_range::start_length(0x00000000, 0x30000000)))
 {
 return false;
 }
@@ -1433,7 +1433,7 @@ bool ppu_load_exec(const ppu_exec_object& elf)
 {
 ppu_loader.notice("TLS info segment found: tls-image=*0x%x, image-size=0x%x, tls-size=0x%x", prog.p_vaddr, prog.p_filesz, prog.p_memsz);

-if ((prog.p_vaddr | prog.p_filesz | prog.p_memsz) > UINT32_MAX)
+if ((prog.p_vaddr | prog.p_filesz | prog.p_memsz) > u32{umax})
 {
 ppu_loader.fatal("ppu_load_exec(): TLS segment is invalid!");
 return false;
@@ -594,7 +594,7 @@ extern bool ppu_patch(u32 addr, u32 value)

 std::array<u32, 2> op_branch_targets(u32 pc, ppu_opcode_t op)
 {
-std::array<u32, 2> res{pc + 4, UINT32_MAX};
+std::array<u32, 2> res{pc + 4, umax};

 switch (const auto type = g_ppu_itype.decode(op.opcode))
 {
@@ -608,7 +608,7 @@ std::array<u32, 2> op_branch_targets(u32 pc, ppu_opcode_t op)
 case ppu_itype::BCLR:
 case ppu_itype::UNK:
 {
-res[0] = UINT32_MAX;
+res[0] = umax;
 break;
 }
 default: break;
@@ -633,7 +633,7 @@ std::string ppu_thread::dump_regs() const
 constexpr u32 max_str_len = 32;
 constexpr u32 hex_count = 8;

-if (reg <= UINT32_MAX && vm::check_addr<max_str_len>(static_cast<u32>(reg)))
+if (reg <= u32{umax} && vm::check_addr<max_str_len>(static_cast<u32>(reg)))
 {
 bool is_function = false;
 u32 toc = 0;
@@ -759,7 +759,7 @@ std::vector<std::pair<u32, u32>> ppu_thread::dump_callstack_list() const
 // Determine stack range
 const u64 r1 = gpr[1];

-if (r1 > UINT32_MAX || r1 % 0x10)
+if (r1 > u32{umax} || r1 % 0x10)
 {
 return {};
 }
@@ -799,7 +799,7 @@ std::vector<std::pair<u32, u32>> ppu_thread::dump_callstack_list() const

 auto is_invalid = [](u64 addr)
 {
-if (addr > UINT32_MAX || addr % 4 || !vm::check_addr(static_cast<u32>(addr), vm::page_executable))
+if (addr > u32{umax} || addr % 4 || !vm::check_addr(static_cast<u32>(addr), vm::page_executable))
 {
 return true;
 }
@@ -1356,7 +1356,7 @@ extern void sse_cellbe_stvrx_v0(u64 addr, __m128i a);

 void ppu_trap(ppu_thread& ppu, u64 addr)
 {
-ensure((addr & (~u64{UINT32_MAX} | 0x3)) == 0);
+ensure((addr & (~u64{0xffff'ffff} | 0x3)) == 0);
 ppu.cia = static_cast<u32>(addr);

 u32 add = static_cast<u32>(g_cfg.core.stub_ppu_traps) * 4;
@@ -1931,7 +1931,7 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
 {
 switch (u64 count = ppu_stcx_accurate_tx(addr & -8, rtime, ppu.rdata, std::bit_cast<u64>(new_data)))
 {
-case UINT64_MAX:
+case umax:
 {
 auto& all_data = *vm::get_super_ptr<spu_rdata_t>(addr & -128);
 auto& sdata = *vm::get_super_ptr<atomic_be_t<u64>>(addr & -8);
@@ -3239,8 +3239,8 @@ void PPUTranslator::DIVW(ppu_opcode_t op)
 {
 const auto a = GetGpr(op.ra, 32);
 const auto b = GetGpr(op.rb, 32);
-const auto o = m_ir->CreateOr(IsZero(b), m_ir->CreateAnd(m_ir->CreateICmpEQ(a, m_ir->getInt32(INT32_MIN)), IsOnes(b)));
-const auto result = m_ir->CreateSDiv(a, m_ir->CreateSelect(o, m_ir->getInt32(INT32_MIN), b));
+const auto o = m_ir->CreateOr(IsZero(b), m_ir->CreateAnd(m_ir->CreateICmpEQ(a, m_ir->getInt32(s32{smin})), IsOnes(b)));
+const auto result = m_ir->CreateSDiv(a, m_ir->CreateSelect(o, m_ir->getInt32(s32{smin}), b));
 SetGpr(op.rd, m_ir->CreateSelect(o, m_ir->getInt32(0), result));
 if (op.rc) SetCrFieldSignedCmp(0, GetGpr(op.rd), m_ir->getInt64(0));
 if (op.oe) SetOverflow(o);
@@ -3499,7 +3499,7 @@ void spu_recompiler::CLGTH(spu_opcode_t op)
 // compare if-greater-than
 const XmmLink& va = XmmGet(op.ra, XmmType::Int);
 const XmmLink& vi = XmmAlloc();
-c->movdqa(vi, XmmConst(_mm_set1_epi16(INT16_MIN)));
+c->movdqa(vi, XmmConst(_mm_set1_epi16(smin)));
 c->pxor(va, vi);
 c->pxor(vi, SPU_OFF_128(gpr, op.rb));
 c->pcmpgtw(va, vi);
@@ -3595,7 +3595,7 @@ void spu_recompiler::CLGTB(spu_opcode_t op)
 // compare if-greater-than
 const XmmLink& va = XmmGet(op.ra, XmmType::Int);
 const XmmLink& vi = XmmAlloc();
-c->movdqa(vi, XmmConst(_mm_set1_epi8(INT8_MIN)));
+c->movdqa(vi, XmmConst(_mm_set1_epi8(smin)));
 c->pxor(va, vi);
 c->pxor(vi, SPU_OFF_128(gpr, op.rb));
 c->pcmpgtb(va, vi);
@@ -3721,7 +3721,7 @@ void spu_recompiler::CGX(spu_opcode_t op) //nf
 c->paddd(res, vb);
 }

-c->movdqa(sign, XmmConst(_mm_set1_epi32(INT32_MIN)));
+c->movdqa(sign, XmmConst(_mm_set1_epi32(smin)));
 c->pxor(va, sign);
 c->pxor(res, sign);
 c->pcmpgtd(va, res);
@@ -3754,7 +3754,7 @@ void spu_recompiler::BGX(spu_opcode_t op) //nf
 }

 c->pand(vt, temp);
-c->movdqa(sign, XmmConst(_mm_set1_epi32(INT32_MIN)));
+c->movdqa(sign, XmmConst(_mm_set1_epi32(smin)));
 c->pxor(va, sign);
 c->pxor(vb, sign);
 c->pcmpgtd(vb, va);
@@ -4484,7 +4484,7 @@ void spu_recompiler::CLGTI(spu_opcode_t op)
 void spu_recompiler::CLGTHI(spu_opcode_t op)
 {
 const XmmLink& va = XmmGet(op.ra, XmmType::Int);
-c->pxor(va, XmmConst(_mm_set1_epi16(INT16_MIN)));
+c->pxor(va, XmmConst(_mm_set1_epi16(smin)));
 c->pcmpgtw(va, XmmConst(_mm_set1_epi16(op.si10 - 0x8000)));
 c->movdqa(SPU_OFF_128(gpr, op.rt), va);
 }
@@ -4492,7 +4492,7 @@ void spu_recompiler::CLGTHI(spu_opcode_t op)
 void spu_recompiler::CLGTBI(spu_opcode_t op)
 {
 const XmmLink& va = XmmGet(op.ra, XmmType::Int);
-c->psubb(va, XmmConst(_mm_set1_epi8(INT8_MIN)));
+c->psubb(va, XmmConst(_mm_set1_epi8(smin)));
 c->pcmpgtb(va, XmmConst(_mm_set1_epi8(op.si10 - 0x80)));
 c->movdqa(SPU_OFF_128(gpr, op.rt), va);
 }
@@ -716,7 +716,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst)
 // Compute the distance
 const s64 rel = taddr - reinterpret_cast<u64>(raw) - (op != 0xe9 ? 6 : 5);

-ensure(rel >= INT32_MIN && rel <= INT32_MAX);
+ensure(rel >= s32{smin} && rel <= s32{smax});

 if (op != 0xe9)
 {
@@ -757,7 +757,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst)
 u32 size2 = w.size - size1;
 std::advance(it2, w.size / 2);

-while (ensure(w.level < UINT16_MAX))
+while (ensure(w.level < umax))
 {
 it = it2;
 size1 = w.size - size2;
@@ -1150,11 +1150,11 @@ void spu_recompiler_base::branch(spu_thread& spu, void*, u8* rip)
 u64 result;
 };

-if (rel >= INT32_MIN && rel <= INT32_MAX)
+if (rel >= s32{smin} && rel <= s32{smax})
 {
 const s64 rel8 = (rel + 5) - 2;

-if (rel8 >= INT8_MIN && rel8 <= INT8_MAX)
+if (rel8 >= s8{smin} && rel8 <= s8{smax})
 {
 bytes[0] = 0xeb; // jmp rel8
 bytes[1] = static_cast<s8>(rel8);
@@ -5810,7 +5810,7 @@ public:
 const auto completed = m_ir->CreateAnd(tag_mask, m_ir->CreateNot(mfc_fence));
 const auto upd_ptr = spu_ptr<u32>(&spu_thread::ch_tag_upd);
 const auto stat_ptr = spu_ptr<u64>(&spu_thread::ch_tag_stat);
-const auto stat_val = m_ir->CreateOr(m_ir->CreateZExt(completed, get_type<u64>()), INT64_MIN);
+const auto stat_val = m_ir->CreateOr(m_ir->CreateZExt(completed, get_type<u64>()), s64{smin});

 const auto next = llvm::BasicBlock::Create(m_context, "", m_function);
 const auto next0 = llvm::BasicBlock::Create(m_context, "", m_function);
@@ -6012,7 +6012,7 @@ public:
 switch (csize)
 {
 case 0:
-case UINT64_MAX:
+case umax:
 {
 break;
 }
@@ -8034,11 +8034,11 @@ public:
 {
 if (data[i] >= std::exp2(31.f))
 {
-result._s32[i] = INT32_MAX;
+result._s32[i] = smax;
 }
 else if (data[i] < std::exp2(-31.f))
 {
-result._s32[i] = INT32_MIN;
+result._s32[i] = smin;
 }
 else
 {
@@ -8108,7 +8108,7 @@ public:
 {
 if (data[i] >= std::exp2(32.f))
 {
-result._u32[i] = UINT32_MAX;
+result._u32[i] = umax;
 }
 else if (data[i] < 0.)
 {
@@ -368,7 +368,7 @@ namespace spu

 std::array<u32, 2> op_branch_targets(u32 pc, spu_opcode_t op)
 {
-std::array<u32, 2> res{spu_branch_target(pc + 4), UINT32_MAX};
+std::array<u32, 2> res{spu_branch_target(pc + 4), umax};

 switch (const auto type = s_spu_itype.decode(op.opcode))
 {
@@ -396,7 +396,7 @@ std::array<u32, 2> op_branch_targets(u32 pc, spu_opcode_t op)

 case spu_itype::UNK:
 {
-res[0] = UINT32_MAX;
+res[0] = umax;
 break;
 }
 default: break;
@@ -2728,7 +2728,7 @@ bool spu_thread::do_putllc(const spu_mfc_cmd& args)
 {
 switch (u64 count = spu_putllc_tx(addr, rtime, rdata, to_write))
 {
-case UINT64_MAX:
+case umax:
 {
 auto& data = *vm::get_super_ptr<spu_rdata_t>(addr);

@@ -89,7 +89,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6
 {
 ppu_thread* cpu = nullptr;

-if (ppu_thread_id != UINT32_MAX)
+if (ppu_thread_id != u32{umax})
 {
 cpu = idm::check_unlocked<named_thread<ppu_thread>>(static_cast<u32>(ppu_thread_id));

@@ -164,7 +164,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6

 if (!cond.ret)
 {
-if (ppu_thread_id == UINT32_MAX)
+if (ppu_thread_id == u32{umax})
 {
 if (mode == 3)
 {
@@ -289,7 +289,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 // Try to increment lwmutex's lwcond's waiters count
 if (!mutex->lwcond_waiters.fetch_op([](s32& val)
 {
-if (val == INT32_MIN)
+if (val == smin)
 {
 return false;
 }
@@ -381,7 +381,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 }
 }

-if (--mutex->lwcond_waiters == INT32_MIN)
+if (--mutex->lwcond_waiters == smin)
 {
 // Notify the thread destroying lwmutex on last waiter
 mutex->lwcond_waiters.notify_all();
@@ -52,7 +52,7 @@ error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
 if (std::scoped_lock lock(mutex->mutex); mutex->sq.empty())
 {
 // Set "destroyed" bit
-if (mutex->lwcond_waiters.fetch_or(INT32_MIN) & 0x7fff'ffff)
+if (mutex->lwcond_waiters.fetch_or(smin) & 0x7fff'ffff)
 {
 // Deschedule if waiters were found
 lv2_obj::sleep(ppu);
@@ -64,7 +64,7 @@ error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
 }

 // Wait for all lwcond waiters to quit
-if (const s32 old = mutex->lwcond_waiters; old != INT32_MIN)
+if (const s32 old = mutex->lwcond_waiters; old != smin)
 {
 if (old >= 0)
 {
@@ -125,7 +125,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)

 if (old)
 {
-if (old == INT32_MIN)
+if (old == smin)
 {
 ppu.gpr[3] = CELL_EBUSY;
 }
@@ -274,7 +274,7 @@ error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
 return;
 }

-mutex.signaled |= INT32_MIN;
+mutex.signaled |= smin;
 });

 if (!mutex)
@@ -86,7 +86,7 @@ struct lv2_lwmutex final : lv2_obj
 // Turn off the "destroying" bit as we are adding an lwmutex waiter
 val &= 0x7fff'ffff;
 return true;
-}).first; old != INT32_MIN)
+}).first; old != smin)
 {
 sq.emplace_back(cpu);

@@ -65,7 +65,7 @@ error_code sys_mmapper_allocate_address(ppu_thread& ppu, u64 size, u64 flags, u6
 return CELL_EALIGN;
 }

-if (size > UINT32_MAX)
+if (size > u32{umax})
 {
 return CELL_ENOMEM;
 }
@@ -591,7 +591,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64
 return CELL_EINVAL;
 }

-u32 limit = UINT32_MAX;
+u32 limit = -1;

 switch (location)
 {
@@ -74,7 +74,7 @@ struct lv2_obj
 private:
 enum thread_cmd : s32
 {
-yield_cmd = INT32_MIN,
+yield_cmd = smin,
 enqueue_cmd,
 };

@@ -166,7 +166,7 @@ private:
 static bool awake_unlocked(cpu_thread*, s32 prio = enqueue_cmd);

 public:
-static constexpr u64 max_timeout = UINT64_MAX / 1000;
+static constexpr u64 max_timeout = u64{umax} / 1000;

 static void sleep(cpu_thread& cpu, const u64 timeout = 0);

@@ -319,12 +319,12 @@ public:
 template <bool IsUsleep = false, bool Scale = true>
 static bool wait_timeout(u64 usec, cpu_thread* const cpu = {})
 {
-static_assert(UINT64_MAX / max_timeout >= 100, "max timeout is not valid for scaling");
+static_assert(u64{umax} / max_timeout >= 100, "max timeout is not valid for scaling");

 if constexpr (Scale)
 {
 // Scale time
-usec = std::min<u64>(usec, UINT64_MAX / 100) * 100 / g_cfg.core.clocks_scale;
+usec = std::min<u64>(usec, u64{umax} / 100) * 100 / g_cfg.core.clocks_scale;
 }

 // Clamp
@@ -53,7 +53,7 @@ namespace id_manager

 static constexpr std::pair<u32, u32> invl_range = invl_range_extract_impl<T>::invl_range;

-static_assert(count && step && u64{step} * (count - 1) + base < u64{UINT32_MAX} + (base != 0 ? 1 : 0), "ID traits: invalid object range");
+static_assert(count && step && u64{step} * (count - 1) + base < u32{umax} + u64{base != 0 ? 1 : 0}, "ID traits: invalid object range");

 // TODO: Add more conditions
 static_assert(!invl_range.second || (u64{invl_range.second} + invl_range.first <= 32 /*....*/ ));
@@ -226,7 +226,7 @@ namespace rsx

 const int target_area = (external_subresource_desc.width * external_subresource_desc.height * threshold) / 100;
 int covered_area = 0;
-areai bbox{ INT_MAX, INT_MAX, 0, 0 };
+areai bbox{smax, smax, 0, 0};

 for (const auto& section : external_subresource_desc.sections_to_copy)
 {
@@ -1699,14 +1699,14 @@ namespace gl
 region.width == m_width && region.height == m_height && region.depth == m_depth)
 {
 if (caps.ARB_dsa_supported)
-glGetTextureImage(m_id, level, static_cast<GLenum>(format), static_cast<GLenum>(type), INT32_MAX, dst);
+glGetTextureImage(m_id, level, static_cast<GLenum>(format), static_cast<GLenum>(type), s32{smax}, dst);
 else
 glGetTextureImageEXT(m_id, static_cast<GLenum>(m_target), level, static_cast<GLenum>(format), static_cast<GLenum>(type), dst);
 }
 else if (caps.ARB_dsa_supported)
 {
 glGetTextureSubImage(m_id, level, region.x, region.y, region.z, region.width, region.height, region.depth,
-static_cast<GLenum>(format), static_cast<GLenum>(type), INT32_MAX, dst);
+static_cast<GLenum>(format), static_cast<GLenum>(type), s32{smax}, dst);
 }
 else
 {
@@ -24,7 +24,7 @@ struct cfg_root : cfg::node
 cfg::_bool ppu_debug{ this, "PPU Debug" };
 cfg::_bool llvm_logs{ this, "Save LLVM logs" };
 cfg::string llvm_cpu{ this, "Use LLVM CPU" };
-cfg::_int<0, INT32_MAX> llvm_threads{ this, "Max LLVM Compile Threads", 0 };
+cfg::_int<0, 1024> llvm_threads{ this, "Max LLVM Compile Threads", 0 };
 cfg::_bool ppu_llvm_greedy_mode{ this, "PPU LLVM Greedy Mode", false, false };
 cfg::_bool ppu_llvm_precompilation{ this, "PPU LLVM Precompilation", true };
 cfg::_enum<thread_scheduler_mode> thread_scheduler{this, "Thread Scheduler Mode", thread_scheduler_mode::os};
@@ -89,7 +89,7 @@ protected:
 const s32 rem = calibData.sens_numer % calibData.sens_denom;
 const s32 output = (quot * biased) + ((rem * biased) / calibData.sens_denom);

-return static_cast<s16>(std::clamp<s32>(output, INT16_MIN, INT16_MAX));
+return static_cast<s16>(std::clamp<s32>(output, s16{smin}, s16{smax}));
 }

 static s16 read_s16(const void* buf)
@@ -585,7 +585,7 @@ void memory_viewer_panel::SetPC(const uint pc)
 void memory_viewer_panel::ShowImage(QWidget* parent, u32 addr, color_format format, u32 width, u32 height, bool flipv) const
 {
 // If exceeds 32-bits it is invalid as well, UINT32_MAX always fails checks
-const u32 memsize = static_cast<u32>(std::min<u64>(4ull * width * height, UINT32_MAX));
+const u32 memsize = static_cast<u32>(std::min<u64>(4ull * width * height, u32{umax}));
 if (memsize == 0)
 {
 return;
@@ -13,7 +13,6 @@

 #include <cstdlib>
 #include <cstring>
-#include <climits>
 #include <string>
 #include <memory>
 #include <vector>
@@ -71,7 +71,7 @@ static NEVER_INLINE bool ptr_cmp(const void* data, u32 _size, u128 old128, u128
 {
 u64 new_value = 0;
 u64 old_value = static_cast<u64>(old128);
-u64 mask = static_cast<u64>(mask128) & (UINT64_MAX >> ((64 - size * 8) & 63));
+u64 mask = static_cast<u64>(mask128) & (u64{umax} >> ((64 - size * 8) & 63));

 // Don't load memory on empty mask
 switch (mask ? size : 0)
@@ -482,10 +482,10 @@ static atomic_t<u128> s_cond_sem2[8]{{1}};
 static atomic_t<u128> s_cond_sem3[64]{{1}};

 // Allocation bits (level 4) - guarantee 1 free bit
-static atomic_t<u64> s_cond_bits[(UINT16_MAX + 1) / 64]{1};
+static atomic_t<u64> s_cond_bits[65536 / 64]{1};

 // Max allowed thread number is chosen to fit in 16 bits
-static cond_handle s_cond_list[UINT16_MAX + 1]{};
+static cond_handle s_cond_list[65536]{};

 namespace
 {
@@ -590,7 +590,7 @@ static u32 cond_alloc(uptr iptr, u128 mask, u32 tls_slot = -1)

 static void cond_free(u32 cond_id, u32 tls_slot = -1)
 {
-if (cond_id - 1 >= u32{UINT16_MAX}) [[unlikely]]
+if (cond_id - 1 >= u16{umax}) [[unlikely]]
 {
 fmt::throw_exception("bad id %u", cond_id);
 }
@@ -664,7 +664,7 @@ static void cond_free(u32 cond_id, u32 tls_slot = -1)

 static cond_handle* cond_id_lock(u32 cond_id, u128 mask, uptr iptr = 0)
 {
-if (cond_id - 1 < u32{UINT16_MAX})
+if (cond_id - 1 < u16{umax})
 {
 const auto cond = s_cond_list + cond_id;

@@ -842,7 +842,7 @@ atomic_t<u16>* root_info::slot_alloc(uptr ptr) noexcept
 slot = _this->bits.atomic_op([&](slot_allocator& bits) -> atomic_t<u16>*
 {
 // Increment reference counter on every hashtable slot we attempt to allocate on
-if (bits.ref == UINT16_MAX)
+if (bits.ref == u16{umax})
 {
 fmt::throw_exception("Thread limit (65535) reached for a single hashtable slot.");
 return nullptr;
@@ -1354,7 +1354,7 @@ SAFE_BUFFERS(void) atomic_wait_engine::notify_all(const void* data, u32 size, u1
 {
 u32 res = alert_sema<true>(cond_id, size, mask);

-if (res && ~res <= UINT16_MAX)
+if (res && ~res <= u16{umax})
 {
 // Add to the end of the "stack"
 *(std::end(cond_ids) - ++count) = ~res;
@@ -1381,7 +1381,7 @@ SAFE_BUFFERS(void) atomic_wait_engine::notify_all(const void* data, u32 size, u1
 {
 const u32 cond_id = *(std::end(cond_ids) - i - 1);

-if (cond_id <= UINT16_MAX)
+if (cond_id <= u16{umax})
 {
 if (s_cond_list[cond_id].try_alert_native())
 {
@@ -1398,7 +1398,7 @@ SAFE_BUFFERS(void) atomic_wait_engine::notify_all(const void* data, u32 size, u1
 {
 const u32 cond_id = *(std::end(cond_ids) - i - 1);

-if (cond_id <= UINT16_MAX)
+if (cond_id <= u16{umax})
 {
 s_cond_list[cond_id].alert_native();
 if (s_tls_notify_cb)
@@ -180,7 +180,7 @@ namespace atomic_wait
 } any_value;

 template <typename X, typename T = decltype(std::declval<X>().observe())>
-constexpr u128 default_mask = sizeof(T) <= 8 ? u128{UINT64_MAX >> ((64 - sizeof(T) * 8) & 63)} : u128(-1);
+constexpr u128 default_mask = sizeof(T) <= 8 ? u128{u64{umax} >> ((64 - sizeof(T) * 8) & 63)} : u128(-1);

 template <typename X, typename T = decltype(std::declval<X>().observe())>
 constexpr u128 get_value(X&, T value = T{}, ...)
@@ -19,7 +19,7 @@ namespace stx
 class type_info final : public Info
 {
 // Current type id (starts from 0)
-u32 type = UINT32_MAX;
+u32 type = umax;

 u32 size = 1;
 u32 align = 1;
@@ -8,6 +8,7 @@
 #include <chrono>
 #include <array>
 #include <tuple>
+#include <compare>

 using std::chrono::steady_clock;

@@ -522,25 +523,133 @@ struct get_int_impl<16>
 // Return magic value for any unsigned type
 constexpr struct umax_impl_t
 {
-template <typename T> requires (std::is_unsigned_v<std::common_type_t<T>>) || (std::is_same_v<std::common_type_t<T>, u128>)
+template <typename T>
+static constexpr T value = static_cast<T>(-1);
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_unsigned_v<CT>) || (std::is_same_v<CT, u128>)
 constexpr bool operator==(const T& rhs) const
 {
-return rhs == static_cast<std::common_type_t<T>>(-1);
+return rhs == value<CT>;
 }

-template <typename T> requires (std::is_unsigned_v<std::common_type_t<T>>) || (std::is_same_v<std::common_type_t<T>, u128>)
-constexpr bool operator<(const T& rhs) const
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_unsigned_v<CT>) || (std::is_same_v<CT, u128>)
+constexpr std::strong_ordering operator<=>(const T& rhs) const
 {
-return rhs < static_cast<std::common_type_t<T>>(-1);
+return rhs == value<CT> ? std::strong_ordering::equal : std::strong_ordering::greater;
 }

-template <typename T> requires (std::is_unsigned_v<std::common_type_t<T>>) || (std::is_same_v<std::common_type_t<T>, u128>)
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_unsigned_v<CT>) || (std::is_same_v<CT, u128>)
 constexpr operator T() const
 {
-return static_cast<std::common_type_t<T>>(-1);
+return value<CT>;
 }
 } umax;

+constexpr struct smin_impl_t
+{
+template <typename T>
+static constexpr T value = static_cast<T>(-1) << (sizeof(T) * 8 - 1);
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_signed_v<CT>) || (std::is_same_v<CT, s128>)
+constexpr bool operator==(const T& rhs) const
+{
+return rhs == value<CT>;
+}
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_signed_v<CT>) || (std::is_same_v<CT, s128>)
+constexpr std::strong_ordering operator<=>(const T& rhs) const
+{
+return rhs == value<CT> ? std::strong_ordering::equal : std::strong_ordering::less;
+}
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_signed_v<CT>) || (std::is_same_v<CT, s128>)
+constexpr operator T() const
+{
+return value<CT>;
+}
+} smin;
+
+constexpr struct smax_impl_t
+{
+template <typename T>
+static constexpr T value = static_cast<T>(~smin_impl_t::value<T>);
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
+constexpr bool operator==(const T& rhs) const
+{
+return rhs == value<CT>;
+}
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
+constexpr std::strong_ordering operator<=>(const T& rhs) const
+{
+if constexpr (std::is_signed_v<CT> || std::is_same_v<CT, s128>)
+{
+return rhs == value<CT> ? std::strong_ordering::equal : std::strong_ordering::greater;
+}
+else
+{
+return value<CT> <=> rhs;
+}
+}
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
+constexpr operator T() const
+{
+return value<CT>;
+}
+} smax;
+
+// Compare signed or unsigned type with its max value
+constexpr struct amax_impl_t
+{
+template <typename T>
+static constexpr T value = (std::is_unsigned_v<T> || std::is_same_v<T, u128>) ? umax_impl_t::value<T> : smax_impl_t::value<T>;
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
+constexpr bool operator ==(const T& rhs) const
+{
+return rhs == value<CT>;
+}
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
+constexpr std::strong_ordering operator <=>(const T& rhs) const
+{
+return value<CT> <=> rhs;
+}
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
+constexpr operator T() const
+{
+return value<CT>;
+}
+} amax;
+
+// Compare signed or unsigned type with its minimal value (like zero or INT_MIN)
+constexpr struct amin_impl_t
+{
+template <typename T>
+static constexpr T value = (std::is_signed_v<T> || std::is_same_v<T, s128>) ? smin_impl_t::value<T> : 0;
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
+constexpr bool operator ==(const T& rhs) const
+{
+return rhs == value<CT>;
+}
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<T>) || (std::is_same_v<T, s128>) || (std::is_same_v<T, u128>)
+constexpr std::strong_ordering operator <=>(const T& rhs) const
+{
+return value<CT> <=> rhs;
+}
+
+template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<T>) || (std::is_same_v<T, s128>) || (std::is_same_v<T, u128>)
+constexpr operator T() const
+{
+return value<CT>;
+}
+} amin;

 enum class f16 : u16{};

 using f32 = float;
@@ -881,7 +990,7 @@ template <typename CT, typename = decltype(static_cast<u32>(std::declval<CT>().s
 template <typename T, usz Size>
 [[nodiscard]] constexpr u32 size32(const T (&)[Size])
 {
-static_assert(Size < UINT32_MAX, "Array is too big for 32-bit");
+static_assert(Size < u32{umax}, "Array is too big for 32-bit");
 return static_cast<u32>(Size);
 }

@@ -174,5 +174,5 @@ inline v128 operator^(const v128& left, const v128& right)

 inline v128 operator~(const v128& other)
 {
-return other ^ v128::from32p(UINT32_MAX); // XOR with ones
+return other ^ v128::from32p(umax); // XOR with ones
 }
@@ -124,7 +124,7 @@ namespace utils

 auto ptr = ::mmap(use_addr, size, PROT_NONE, MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0);

-if (ptr == reinterpret_cast<void*>(UINT64_MAX))
+if (ptr == reinterpret_cast<void*>(uptr{umax}))
 {
 return nullptr;
 }
@@ -195,7 +195,7 @@ namespace utils
 ensure(::VirtualFree(pointer, size, MEM_DECOMMIT));
 #else
 const u64 ptr64 = reinterpret_cast<u64>(pointer);
-ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(UINT64_MAX));
+ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE | c_map_noreserve, -1, 0) != reinterpret_cast<void*>(uptr{umax}));

 if constexpr (c_madv_no_dump != 0)
 {
@@ -215,7 +215,7 @@ namespace utils
 memory_commit(pointer, size, prot);
 #else
 const u64 ptr64 = reinterpret_cast<u64>(pointer);
-ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(UINT64_MAX));
+ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(uptr{umax}));

 if constexpr (c_madv_hugepage != 0)
 {
@@ -513,7 +513,7 @@ namespace utils
 #else
 const auto result = reinterpret_cast<u8*>(::mmap(reinterpret_cast<void*>(target), m_size, +prot, (cow ? MAP_PRIVATE : MAP_SHARED), m_file, 0));

-if (result == reinterpret_cast<void*>(UINT64_MAX))
+if (result == reinterpret_cast<void*>(uptr{umax}))
 {
 [[unlikely]] return nullptr;
 }