mirror of
https://github.com/RPCS3/rpcs3.git
synced 2024-11-22 02:32:36 +01:00
C-style cast cleanup (partial)
Replace C-style casts with C++ casts.
This commit is contained in:
parent
8bd52c9843
commit
5b9df53c13
@ -1234,7 +1234,7 @@ fs::file::file(const std::string& path, bs_t<open_mode> mode)
|
||||
static_assert(sizeof(iovec) == sizeof(iovec_clone), "Weird iovec size");
|
||||
static_assert(offsetof(iovec, iov_len) == offsetof(iovec_clone, iov_len), "Weird iovec::iov_len offset");
|
||||
|
||||
const auto result = ::writev(m_fd, (const iovec*)buffers, buf_count);
|
||||
const auto result = ::writev(m_fd, reinterpret_cast<const iovec*>(buffers), buf_count);
|
||||
verify("file::write_gather" HERE), result != -1;
|
||||
|
||||
return result;
|
||||
|
@ -391,10 +391,10 @@ logs::file_writer::file_writer(const std::string& name)
|
||||
// Initialize memory mapped file
|
||||
#ifdef _WIN32
|
||||
m_fmap = CreateFileMappingW(m_file.get_handle(), 0, PAGE_READWRITE, s_log_size >> 32, s_log_size & 0xffffffff, 0);
|
||||
m_fptr = m_fmap ? (uchar*)MapViewOfFile(m_fmap, FILE_MAP_WRITE, 0, 0, 0) : nullptr;
|
||||
m_fptr = m_fmap ? static_cast<uchar*>(MapViewOfFile(m_fmap, FILE_MAP_WRITE, 0, 0, 0)) : nullptr;
|
||||
#else
|
||||
m_file.trunc(s_log_size);
|
||||
m_fptr = (uchar*)::mmap(0, s_log_size, PROT_READ | PROT_WRITE, MAP_SHARED, m_file.get_handle(), 0);
|
||||
m_fptr = static_cast<uchar*>(::mmap(0, s_log_size, PROT_READ | PROT_WRITE, MAP_SHARED, m_file.get_handle(), 0));
|
||||
#endif
|
||||
|
||||
verify(name.c_str()), m_fptr;
|
||||
|
@ -185,7 +185,7 @@ void decode_x64_reg_op(const u8* code, x64_op_t& out_op, x64_reg_t& out_reg, siz
|
||||
{
|
||||
if (lock)
|
||||
{
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): LOCK prefix found twice", (size_t)code - out_length);
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): LOCK prefix found twice", code - out_length);
|
||||
}
|
||||
|
||||
lock = true;
|
||||
@ -195,7 +195,7 @@ void decode_x64_reg_op(const u8* code, x64_op_t& out_op, x64_reg_t& out_reg, siz
|
||||
{
|
||||
if (repne)
|
||||
{
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): REPNE/REPNZ prefix found twice", (size_t)code - out_length);
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): REPNE/REPNZ prefix found twice", code - out_length);
|
||||
}
|
||||
|
||||
repne = true;
|
||||
@ -205,7 +205,7 @@ void decode_x64_reg_op(const u8* code, x64_op_t& out_op, x64_reg_t& out_reg, siz
|
||||
{
|
||||
if (repe)
|
||||
{
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): REP/REPE/REPZ prefix found twice", (size_t)code - out_length);
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): REP/REPE/REPZ prefix found twice", code - out_length);
|
||||
}
|
||||
|
||||
repe = true;
|
||||
@ -221,7 +221,7 @@ void decode_x64_reg_op(const u8* code, x64_op_t& out_op, x64_reg_t& out_reg, siz
|
||||
{
|
||||
if (pg2)
|
||||
{
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): 0x%02x (group 2 prefix) found after 0x%02x", (size_t)code - out_length, prefix, pg2);
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): 0x%02x (group 2 prefix) found after 0x%02x", code - out_length, prefix, pg2);
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -234,7 +234,7 @@ void decode_x64_reg_op(const u8* code, x64_op_t& out_op, x64_reg_t& out_reg, siz
|
||||
{
|
||||
if (oso)
|
||||
{
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): operand-size override prefix found twice", (size_t)code - out_length);
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): operand-size override prefix found twice", code - out_length);
|
||||
}
|
||||
|
||||
oso = true;
|
||||
@ -243,7 +243,7 @@ void decode_x64_reg_op(const u8* code, x64_op_t& out_op, x64_reg_t& out_reg, siz
|
||||
|
||||
case 0x67: // group 4
|
||||
{
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): address-size override prefix found", (size_t)code - out_length, prefix);
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): address-size override prefix found", code - out_length, prefix);
|
||||
out_op = X64OP_NONE;
|
||||
out_reg = X64_NOT_SET;
|
||||
out_size = 0;
|
||||
@ -257,7 +257,7 @@ void decode_x64_reg_op(const u8* code, x64_op_t& out_op, x64_reg_t& out_reg, siz
|
||||
{
|
||||
if (rex)
|
||||
{
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): 0x%02x (REX prefix) found after 0x%02x", (size_t)code - out_length, prefix, rex);
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%016llxh): 0x%02x (REX prefix) found after 0x%02x", code - out_length, prefix, rex);
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -273,17 +273,17 @@ void decode_x64_reg_op(const u8* code, x64_op_t& out_op, x64_reg_t& out_reg, siz
|
||||
|
||||
auto get_modRM_reg = [](const u8* code, const u8 rex) -> x64_reg_t
|
||||
{
|
||||
return (x64_reg_t)(((*code & 0x38) >> 3 | (/* check REX.R bit */ rex & 4 ? 8 : 0)) + X64R_RAX);
|
||||
return x64_reg_t{((*code & 0x38) >> 3 | (/* check REX.R bit */ rex & 4 ? 8 : 0)) + X64R_RAX};
|
||||
};
|
||||
|
||||
auto get_modRM_reg_xmm = [](const u8* code, const u8 rex) -> x64_reg_t
|
||||
{
|
||||
return (x64_reg_t)(((*code & 0x38) >> 3 | (/* check REX.R bit */ rex & 4 ? 8 : 0)) + X64R_XMM0);
|
||||
return x64_reg_t{((*code & 0x38) >> 3 | (/* check REX.R bit */ rex & 4 ? 8 : 0)) + X64R_XMM0};
|
||||
};
|
||||
|
||||
auto get_modRM_reg_lh = [](const u8* code) -> x64_reg_t
|
||||
{
|
||||
return (x64_reg_t)(((*code & 0x38) >> 3) + X64R_AL);
|
||||
return x64_reg_t{((*code & 0x38) >> 3) + X64R_AL};
|
||||
};
|
||||
|
||||
auto get_op_size = [](const u8 rex, const bool oso) -> size_t
|
||||
@ -887,57 +887,57 @@ bool get_x64_reg_value(x64_context* context, x64_reg_t reg, size_t d_size, size_
|
||||
|
||||
switch (d_size)
|
||||
{
|
||||
case 1: out_value = (u8)reg_value; return true;
|
||||
case 2: out_value = (u16)reg_value; return true;
|
||||
case 4: out_value = (u32)reg_value; return true;
|
||||
case 1: out_value = static_cast<u8>(reg_value); return true;
|
||||
case 2: out_value = static_cast<u16>(reg_value); return true;
|
||||
case 4: out_value = static_cast<u32>(reg_value); return true;
|
||||
case 8: out_value = reg_value; return true;
|
||||
}
|
||||
}
|
||||
else if (reg - X64R_AL < 4 && d_size == 1)
|
||||
{
|
||||
out_value = (u8)(*X64REG(context, reg - X64R_AL));
|
||||
out_value = static_cast<u8>(*X64REG(context, reg - X64R_AL));
|
||||
return true;
|
||||
}
|
||||
else if (reg - X64R_AH < 4 && d_size == 1)
|
||||
{
|
||||
out_value = (u8)(*X64REG(context, reg - X64R_AH) >> 8);
|
||||
out_value = static_cast<u8>(*X64REG(context, reg - X64R_AH) >> 8);
|
||||
return true;
|
||||
}
|
||||
else if (reg == X64_IMM8)
|
||||
{
|
||||
// load the immediate value (assuming it's at the end of the instruction)
|
||||
const s8 imm_value = *(s8*)(RIP(context) + i_size - 1);
|
||||
const s8 imm_value = *reinterpret_cast<s8*>(RIP(context) + i_size - 1);
|
||||
|
||||
switch (d_size)
|
||||
{
|
||||
case 1: out_value = (u8)imm_value; return true;
|
||||
case 2: out_value = (u16)imm_value; return true; // sign-extended
|
||||
case 4: out_value = (u32)imm_value; return true; // sign-extended
|
||||
case 8: out_value = (u64)imm_value; return true; // sign-extended
|
||||
case 1: out_value = static_cast<u8>(imm_value); return true;
|
||||
case 2: out_value = static_cast<u16>(imm_value); return true; // sign-extended
|
||||
case 4: out_value = static_cast<u32>(imm_value); return true; // sign-extended
|
||||
case 8: out_value = static_cast<u64>(imm_value); return true; // sign-extended
|
||||
}
|
||||
}
|
||||
else if (reg == X64_IMM16)
|
||||
{
|
||||
const s16 imm_value = *(s16*)(RIP(context) + i_size - 2);
|
||||
const s16 imm_value = *reinterpret_cast<s16*>(RIP(context) + i_size - 2);
|
||||
|
||||
switch (d_size)
|
||||
{
|
||||
case 2: out_value = (u16)imm_value; return true;
|
||||
case 2: out_value = static_cast<u16>(imm_value); return true;
|
||||
}
|
||||
}
|
||||
else if (reg == X64_IMM32)
|
||||
{
|
||||
const s32 imm_value = *(s32*)(RIP(context) + i_size - 4);
|
||||
const s32 imm_value = *reinterpret_cast<s32*>(RIP(context) + i_size - 4);
|
||||
|
||||
switch (d_size)
|
||||
{
|
||||
case 4: out_value = (u32)imm_value; return true;
|
||||
case 8: out_value = (u64)imm_value; return true; // sign-extended
|
||||
case 4: out_value = static_cast<u32>(imm_value); return true;
|
||||
case 8: out_value = static_cast<u64>(imm_value); return true; // sign-extended
|
||||
}
|
||||
}
|
||||
else if (reg == X64R_ECX)
|
||||
{
|
||||
out_value = (u32)RCX(context);
|
||||
out_value = static_cast<u32>(RCX(context));
|
||||
return true;
|
||||
}
|
||||
else if (reg >= X64_BIT_O && reg <= X64_BIT_NLE)
|
||||
@ -964,7 +964,7 @@ bool get_x64_reg_value(x64_context* context, x64_reg_t reg, size_t d_size, size_
|
||||
return true;
|
||||
}
|
||||
|
||||
LOG_ERROR(MEMORY, "get_x64_reg_value(): invalid arguments (reg=%d, d_size=%lld, i_size=%lld)", (u32)reg, d_size, i_size);
|
||||
LOG_ERROR(MEMORY, "get_x64_reg_value(): invalid arguments (reg=%d, d_size=%lld, i_size=%lld)", +reg, d_size, i_size);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -983,7 +983,7 @@ bool put_x64_reg_value(x64_context* context, x64_reg_t reg, size_t d_size, u64 v
|
||||
}
|
||||
}
|
||||
|
||||
LOG_ERROR(MEMORY, "put_x64_reg_value(): invalid destination (reg=%d, d_size=%lld, value=0x%llx)", (u32)reg, d_size, value);
|
||||
LOG_ERROR(MEMORY, "put_x64_reg_value(): invalid destination (reg=%d, d_size=%lld, value=0x%llx)", +reg, d_size, value);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -1038,7 +1038,7 @@ bool set_x64_cmp_flags(x64_context* context, size_t d_size, u64 x, u64 y, bool c
|
||||
EFLAGS(context) &= ~0x800; // clear OF
|
||||
}
|
||||
|
||||
const u8 p1 = (u8)diff ^ ((u8)diff >> 4);
|
||||
const u8 p1 = static_cast<u8>(diff) ^ (static_cast<u8>(diff) >> 4);
|
||||
const u8 p2 = p1 ^ (p1 >> 2);
|
||||
const u8 p3 = p2 ^ (p2 >> 1);
|
||||
|
||||
@ -1145,7 +1145,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
|
||||
}
|
||||
}
|
||||
|
||||
auto code = (const u8*)RIP(context);
|
||||
const u8* const code = reinterpret_cast<u8*>(RIP(context));
|
||||
|
||||
x64_op_t op;
|
||||
x64_reg_t reg;
|
||||
@ -1159,7 +1159,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
|
||||
{
|
||||
if (op == X64OP_NONE)
|
||||
{
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%p): unsupported opcode: %s", code, *(be_t<v128, 1>*)code);
|
||||
LOG_ERROR(MEMORY, "decode_x64_reg_op(%p): unsupported opcode: %s", code, *reinterpret_cast<const be_t<v128, 1>*>(code));
|
||||
}
|
||||
};
|
||||
|
||||
@ -1192,7 +1192,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
|
||||
|
||||
if (a_size != 4 || !d_size || !i_size)
|
||||
{
|
||||
LOG_ERROR(MEMORY, "Invalid or unsupported instruction (op=%d, reg=%d, d_size=%lld, a_size=0x%llx, i_size=%lld)", (u32)op, (u32)reg, d_size, a_size, i_size);
|
||||
LOG_ERROR(MEMORY, "Invalid or unsupported instruction (op=%d, reg=%d, d_size=%lld, a_size=0x%llx, i_size=%lld)", +op, +reg, d_size, a_size, i_size);
|
||||
report_opcode();
|
||||
return false;
|
||||
}
|
||||
@ -1253,7 +1253,8 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!thread->write_reg(addr, op == X64OP_STORE ? se_storage<u32>::swap((u32)reg_value) : (u32)reg_value))
|
||||
u32 val32 = static_cast<u32>(reg_value);
|
||||
if (!thread->write_reg(addr, op == X64OP_STORE ? se_storage<u32>::swap(val32) : val32))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
@ -1264,7 +1265,7 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
|
||||
case X64OP_STOS:
|
||||
default:
|
||||
{
|
||||
LOG_ERROR(MEMORY, "Invalid or unsupported operation (op=%d, reg=%d, d_size=%lld, i_size=%lld)", (u32)op, (u32)reg, d_size, i_size);
|
||||
LOG_ERROR(MEMORY, "Invalid or unsupported operation (op=%d, reg=%d, d_size=%lld, i_size=%lld)", +op, +reg, d_size, i_size);
|
||||
report_opcode();
|
||||
return false;
|
||||
}
|
||||
@ -1475,13 +1476,13 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
|
||||
|
||||
static LONG exception_handler(PEXCEPTION_POINTERS pExp)
|
||||
{
|
||||
const u64 addr64 = pExp->ExceptionRecord->ExceptionInformation[1] - (u64)vm::g_base_addr;
|
||||
const u64 exec64 = (pExp->ExceptionRecord->ExceptionInformation[1] - (u64)vm::g_exec_addr) / 2;
|
||||
const u64 addr64 = pExp->ExceptionRecord->ExceptionInformation[1] - reinterpret_cast<u64>(vm::g_base_addr);
|
||||
const u64 exec64 = (pExp->ExceptionRecord->ExceptionInformation[1] - reinterpret_cast<u64>(vm::g_exec_addr)) / 2;
|
||||
const bool is_writing = pExp->ExceptionRecord->ExceptionInformation[0] != 0;
|
||||
|
||||
if (pExp->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && addr64 < 0x100000000ull)
|
||||
{
|
||||
if (thread_ctrl::get_current() && handle_access_violation((u32)addr64, is_writing, pExp->ContextRecord))
|
||||
if (thread_ctrl::get_current() && handle_access_violation(static_cast<u32>(addr64), is_writing, pExp->ContextRecord))
|
||||
{
|
||||
return EXCEPTION_CONTINUE_EXECUTION;
|
||||
}
|
||||
@ -1489,7 +1490,7 @@ static LONG exception_handler(PEXCEPTION_POINTERS pExp)
|
||||
|
||||
if (pExp->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && exec64 < 0x100000000ull)
|
||||
{
|
||||
if (thread_ctrl::get_current() && handle_access_violation((u32)exec64, is_writing, pExp->ContextRecord))
|
||||
if (thread_ctrl::get_current() && handle_access_violation(static_cast<u32>(exec64), is_writing, pExp->ContextRecord))
|
||||
{
|
||||
return EXCEPTION_CONTINUE_EXECUTION;
|
||||
}
|
||||
@ -1546,7 +1547,7 @@ static LONG exception_filter(PEXCEPTION_POINTERS pExp)
|
||||
MODULEINFO info;
|
||||
if (GetModuleInformation(GetCurrentProcess(), module, &info, sizeof(info)))
|
||||
{
|
||||
const DWORD64 base = (DWORD64)info.lpBaseOfDll;
|
||||
const DWORD64 base = reinterpret_cast<DWORD64>(info.lpBaseOfDll);
|
||||
|
||||
if (pExp->ContextRecord->Rip >= base && pExp->ContextRecord->Rip < base + info.SizeOfImage)
|
||||
{
|
||||
@ -1596,7 +1597,7 @@ const bool s_exception_handler_set = []() -> bool
|
||||
|
||||
static void signal_handler(int sig, siginfo_t* info, void* uct)
|
||||
{
|
||||
x64_context* context = (ucontext_t*)uct;
|
||||
x64_context* context = static_cast<ucontext_t*>(uct);
|
||||
|
||||
#ifdef __APPLE__
|
||||
const bool is_writing = context->uc_mcontext->__es.__err & 0x2;
|
||||
@ -1610,14 +1611,14 @@ static void signal_handler(int sig, siginfo_t* info, void* uct)
|
||||
const bool is_writing = context->uc_mcontext.gregs[REG_ERR] & 0x2;
|
||||
#endif
|
||||
|
||||
const u64 addr64 = (u64)info->si_addr - (u64)vm::g_base_addr;
|
||||
const u64 exec64 = ((u64)info->si_addr - (u64)vm::g_exec_addr) / 2;
|
||||
const u64 addr64 = reinterpret_cast<u64>(info->si_addr) - reinterpret_cast<u64>(vm::g_base_addr);
|
||||
const u64 exec64 = (reinterpret_cast<u64>(info->si_addr) - reinterpret_cast<u64>(vm::g_exec_addr)) / 2;
|
||||
const auto cause = is_writing ? "writing" : "reading";
|
||||
|
||||
if (addr64 < 0x100000000ull)
|
||||
{
|
||||
// Try to process access violation
|
||||
if (thread_ctrl::get_current() && handle_access_violation((u32)addr64, is_writing, context))
|
||||
if (thread_ctrl::get_current() && handle_access_violation(static_cast<u32>(addr64), is_writing, context))
|
||||
{
|
||||
return;
|
||||
}
|
||||
@ -1625,7 +1626,7 @@ static void signal_handler(int sig, siginfo_t* info, void* uct)
|
||||
|
||||
if (exec64 < 0x100000000ull)
|
||||
{
|
||||
if (thread_ctrl::get_current() && handle_access_violation((u32)exec64, is_writing, context))
|
||||
if (thread_ctrl::get_current() && handle_access_violation(static_cast<u32>(exec64), is_writing, context))
|
||||
{
|
||||
return;
|
||||
}
|
||||
@ -1864,9 +1865,9 @@ thread_base::~thread_base()
|
||||
if (m_thread)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
CloseHandle((HANDLE)m_thread.raw());
|
||||
CloseHandle(reinterpret_cast<HANDLE>(m_thread.raw()));
|
||||
#else
|
||||
pthread_detach((pthread_t)m_thread.raw());
|
||||
pthread_detach(reinterpret_cast<pthread_t>(m_thread.raw()));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
@ -1895,10 +1896,10 @@ u64 thread_base::get_cycles()
|
||||
u64 cycles;
|
||||
|
||||
#ifdef _WIN32
|
||||
if (QueryThreadCycleTime((HANDLE)m_thread.load(), &cycles))
|
||||
if (QueryThreadCycleTime(reinterpret_cast<HANDLE>(m_thread.load()), &cycles))
|
||||
{
|
||||
#elif __APPLE__
|
||||
mach_port_name_t port = pthread_mach_thread_np((pthread_t)m_thread.load());
|
||||
mach_port_name_t port = pthread_mach_thread_np(reinterpret_cast<pthread_t>(m_thread.load()));
|
||||
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
|
||||
thread_basic_info_data_t info;
|
||||
kern_return_t ret = thread_info(port, THREAD_BASIC_INFO, (thread_info_t)&info, &count);
|
||||
@ -1909,7 +1910,7 @@ u64 thread_base::get_cycles()
|
||||
#else
|
||||
clockid_t _clock;
|
||||
struct timespec thread_time;
|
||||
if (!pthread_getcpuclockid((pthread_t)m_thread.load(), &_clock) && !clock_gettime(_clock, &thread_time))
|
||||
if (!pthread_getcpuclockid(reinterpret_cast<pthread_t>(m_thread.load()), &_clock) && !clock_gettime(_clock, &thread_time))
|
||||
{
|
||||
cycles = static_cast<u64>(thread_time.tv_sec) * 1'000'000'000 + thread_time.tv_nsec;
|
||||
#endif
|
||||
|
@ -80,7 +80,8 @@ namespace utils
|
||||
#ifdef _WIN32
|
||||
verify(HERE), ::VirtualAlloc(pointer, size, MEM_COMMIT, +prot);
|
||||
#else
|
||||
verify(HERE), ::mprotect((void*)((u64)pointer & -4096), size + ((u64)pointer & 4095), +prot) != -1;
|
||||
const u64 ptr64 = reinterpret_cast<u64>(pointer);
|
||||
verify(HERE), ::mprotect(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), +prot) != -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -115,13 +116,13 @@ namespace utils
|
||||
void memory_protect(void* pointer, std::size_t size, protection prot)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
for (u64 addr = (u64)pointer, end = addr + size; addr < end;)
|
||||
for (u64 addr = reinterpret_cast<u64>(pointer), end = addr + size; addr < end;)
|
||||
{
|
||||
const u64 boundary = (addr + 0x10000) & -0x10000;
|
||||
const u64 block_size = std::min(boundary, end) - addr;
|
||||
|
||||
DWORD old;
|
||||
if (!::VirtualProtect((LPVOID)addr, block_size, +prot, &old))
|
||||
if (!::VirtualProtect(reinterpret_cast<LPVOID>(addr), block_size, +prot, &old))
|
||||
{
|
||||
fmt::throw_exception("VirtualProtect failed (%p, 0x%x, addr=0x%x, error=%#x)", pointer, size, addr, GetLastError());
|
||||
}
|
||||
@ -130,7 +131,8 @@ namespace utils
|
||||
addr += block_size;
|
||||
}
|
||||
#else
|
||||
verify(HERE), ::mprotect((void*)((u64)pointer & -4096), size + ((u64)pointer & 4095), +prot) != -1;
|
||||
const u64 ptr64 = reinterpret_cast<u64>(pointer);
|
||||
verify(HERE), ::mprotect(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), +prot) != -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -202,13 +204,14 @@ namespace utils
|
||||
|
||||
return nullptr;
|
||||
#else
|
||||
return static_cast<u8*>(::mmap((void*)((u64)ptr & -0x10000), m_size, +prot, MAP_SHARED | (ptr ? MAP_FIXED : 0), m_file, 0));
|
||||
const u64 ptr64 = reinterpret_cast<u64>(ptr);
|
||||
return static_cast<u8*>(::mmap(reinterpret_cast<void*>(ptr64 & -0x10000), m_size, +prot, MAP_SHARED | (ptr ? MAP_FIXED : 0), m_file, 0));
|
||||
#endif
|
||||
}
|
||||
|
||||
u8* shm::map_critical(void* ptr, protection prot)
|
||||
{
|
||||
const auto target = (u8*)((u64)ptr & -0x10000);
|
||||
const auto target = reinterpret_cast<u8*>(reinterpret_cast<u64>(ptr) & -0x10000);
|
||||
|
||||
#ifdef _WIN32
|
||||
::MEMORY_BASIC_INFORMATION mem;
|
||||
@ -245,7 +248,7 @@ namespace utils
|
||||
|
||||
void shm::unmap_critical(void* ptr)
|
||||
{
|
||||
const auto target = (u8*)((u64)ptr & -0x10000);
|
||||
const auto target = reinterpret_cast<u8*>(reinterpret_cast<u64>(ptr) & -0x10000);
|
||||
|
||||
this->unmap(target);
|
||||
|
||||
|
@ -341,7 +341,7 @@ std::size_t cfmt_append(Dst& out, const Char* fmt, Src&& src)
|
||||
|
||||
if (!ctx.type)
|
||||
{
|
||||
ctx.type = (u8)src.type(ctx.args);
|
||||
ctx.type = static_cast<u8>(src.type(ctx.args));
|
||||
|
||||
if (!ctx.type)
|
||||
{
|
||||
@ -403,7 +403,7 @@ std::size_t cfmt_append(Dst& out, const Char* fmt, Src&& src)
|
||||
|
||||
if (!ctx.type)
|
||||
{
|
||||
ctx.type = (u8)src.type(ctx.args);
|
||||
ctx.type = static_cast<u8>(src.type(ctx.args));
|
||||
|
||||
if (!ctx.type)
|
||||
{
|
||||
@ -460,7 +460,7 @@ std::size_t cfmt_append(Dst& out, const Char* fmt, Src&& src)
|
||||
|
||||
if (!ctx.type)
|
||||
{
|
||||
ctx.type = (u8)src.type(ctx.args);
|
||||
ctx.type = static_cast<u8>(src.type(ctx.args));
|
||||
|
||||
if (!ctx.type)
|
||||
{
|
||||
@ -524,7 +524,7 @@ std::size_t cfmt_append(Dst& out, const Char* fmt, Src&& src)
|
||||
|
||||
if (!ctx.type)
|
||||
{
|
||||
ctx.type = (u8)src.type(ctx.args);
|
||||
ctx.type = static_cast<u8>(src.type(ctx.args));
|
||||
|
||||
if (!ctx.type)
|
||||
{
|
||||
|
@ -9,7 +9,7 @@
|
||||
|
||||
namespace utils
|
||||
{
|
||||
dynamic_library::dynamic_library(const std::string &path)
|
||||
dynamic_library::dynamic_library(const std::string& path)
|
||||
{
|
||||
load(path);
|
||||
}
|
||||
@ -19,7 +19,7 @@ namespace utils
|
||||
close();
|
||||
}
|
||||
|
||||
bool dynamic_library::load(const std::string &path)
|
||||
bool dynamic_library::load(const std::string& path)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
m_handle = LoadLibraryA(path.c_str());
|
||||
@ -32,19 +32,19 @@ namespace utils
|
||||
void dynamic_library::close()
|
||||
{
|
||||
#ifdef _WIN32
|
||||
FreeLibrary((HMODULE)m_handle);
|
||||
FreeLibrary(reinterpret_cast<HMODULE>(m_handle));
|
||||
#else
|
||||
dlclose(m_handle);
|
||||
#endif
|
||||
m_handle = nullptr;
|
||||
}
|
||||
|
||||
void *dynamic_library::get_impl(const std::string &name) const
|
||||
void* dynamic_library::get_impl(const std::string& name) const
|
||||
{
|
||||
#ifdef _WIN32
|
||||
return (void*)GetProcAddress((HMODULE)m_handle, name.c_str());
|
||||
return reinterpret_cast<void*>(GetProcAddress(reinterpret_cast<HMODULE>(m_handle), name.c_str()));
|
||||
#else
|
||||
return dlsym(m_handle, (char *)name.c_str());
|
||||
return dlsym(m_handle, name.c_str());
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -4,35 +4,35 @@ namespace utils
|
||||
{
|
||||
class dynamic_library
|
||||
{
|
||||
void *m_handle = nullptr;
|
||||
void* m_handle = nullptr;
|
||||
|
||||
public:
|
||||
dynamic_library() = default;
|
||||
dynamic_library(const std::string &path);
|
||||
dynamic_library(const std::string& path);
|
||||
|
||||
~dynamic_library();
|
||||
|
||||
bool load(const std::string &path);
|
||||
bool load(const std::string& path);
|
||||
void close();
|
||||
|
||||
private:
|
||||
void *get_impl(const std::string &name) const;
|
||||
void* get_impl(const std::string& name) const;
|
||||
|
||||
public:
|
||||
template<typename Type = void>
|
||||
Type *get(const std::string &name) const
|
||||
template <typename Type = void>
|
||||
Type* get(const std::string& name) const
|
||||
{
|
||||
Type *result;
|
||||
*(void **)(&result) = get_impl(name);
|
||||
Type* result;
|
||||
*reinterpret_cast<void**>(&result) = get_impl(name);
|
||||
return result;
|
||||
}
|
||||
|
||||
template<typename Type>
|
||||
bool get(Type *&function, const std::string &name) const
|
||||
template <typename Type>
|
||||
bool get(Type*& function, const std::string& name) const
|
||||
{
|
||||
*(void **)(&function) = get_impl(name);
|
||||
*reinterpret_cast<void**>(&function) = get_impl(name);
|
||||
|
||||
return !!function;
|
||||
return function != nullptr;
|
||||
}
|
||||
|
||||
bool loaded() const;
|
||||
@ -51,15 +51,15 @@ namespace utils
|
||||
template <typename R, typename... Args>
|
||||
struct dynamic_import<R(Args...)>
|
||||
{
|
||||
R(*ptr)(Args...);
|
||||
R (*ptr)(Args...);
|
||||
const char* const lib;
|
||||
const char* const name;
|
||||
|
||||
// Constant initialization
|
||||
constexpr dynamic_import(const char* lib, const char* name)
|
||||
: ptr(nullptr)
|
||||
, lib(lib)
|
||||
, name(name)
|
||||
: ptr(nullptr)
|
||||
, lib(lib)
|
||||
, name(name)
|
||||
{
|
||||
}
|
||||
|
||||
@ -68,7 +68,7 @@ namespace utils
|
||||
if (!ptr)
|
||||
{
|
||||
// TODO: atomic
|
||||
ptr = reinterpret_cast<R(*)(Args...)>(get_proc_address(lib, name));
|
||||
ptr = reinterpret_cast<R (*)(Args...)>(get_proc_address(lib, name));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -38,20 +38,20 @@
|
||||
#ifndef GET_UINT32_LE
|
||||
#define GET_UINT32_LE(n,b,i) \
|
||||
{ \
|
||||
(n) = ( (uint32_t) (b)[(i) ] ) \
|
||||
| ( (uint32_t) (b)[(i) + 1] << 8 ) \
|
||||
| ( (uint32_t) (b)[(i) + 2] << 16 ) \
|
||||
| ( (uint32_t) (b)[(i) + 3] << 24 ); \
|
||||
(n) = ( static_cast<uint32_t>((b)[(i) ]) ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 1]) << 8 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 2]) << 16 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 3]) << 24 );\
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef PUT_UINT32_LE
|
||||
#define PUT_UINT32_LE(n,b,i) \
|
||||
{ \
|
||||
(b)[(i) ] = (unsigned char) ( (n) ); \
|
||||
(b)[(i) + 1] = (unsigned char) ( (n) >> 8 ); \
|
||||
(b)[(i) + 2] = (unsigned char) ( (n) >> 16 ); \
|
||||
(b)[(i) + 3] = (unsigned char) ( (n) >> 24 ); \
|
||||
(b)[(i) ] = static_cast<unsigned char> ( (n) ); \
|
||||
(b)[(i) + 1] = static_cast<unsigned char> ( (n) >> 8 ); \
|
||||
(b)[(i) + 2] = static_cast<unsigned char> ( (n) >> 16 ); \
|
||||
(b)[(i) + 3] = static_cast<unsigned char> ( (n) >> 24 ); \
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -326,10 +326,10 @@ static const uint32_t RCON[10] =
|
||||
* Forward S-box & tables
|
||||
*/
|
||||
static unsigned char FSb[256];
|
||||
static uint32_t FT0[256];
|
||||
static uint32_t FT1[256];
|
||||
static uint32_t FT2[256];
|
||||
static uint32_t FT3[256];
|
||||
static uint32_t FT0[256];
|
||||
static uint32_t FT1[256];
|
||||
static uint32_t FT2[256];
|
||||
static uint32_t FT3[256];
|
||||
|
||||
/*
|
||||
* Reverse S-box & tables
|
||||
@ -375,7 +375,7 @@ static void aes_gen_tables( void )
|
||||
*/
|
||||
for( i = 0, x = 1; i < 10; i++ )
|
||||
{
|
||||
RCON[i] = (uint32_t) x;
|
||||
RCON[i] = static_cast<uint32_t>(x;
|
||||
x = XTIME( x ) & 0xFF;
|
||||
}
|
||||
|
||||
@ -408,10 +408,10 @@ static void aes_gen_tables( void )
|
||||
y = XTIME( x ) & 0xFF;
|
||||
z = ( y ^ x ) & 0xFF;
|
||||
|
||||
FT0[i] = ( (uint32_t) y ) ^
|
||||
( (uint32_t) x << 8 ) ^
|
||||
( (uint32_t) x << 16 ) ^
|
||||
( (uint32_t) z << 24 );
|
||||
FT0[i] = ( static_cast<uint32_t>(y ) ^
|
||||
( static_cast<uint32_t>(x << 8 ) ^
|
||||
( static_cast<uint32_t>(x << 16 ) ^
|
||||
( static_cast<uint32_t>(z << 24 );
|
||||
|
||||
FT1[i] = ROTL8( FT0[i] );
|
||||
FT2[i] = ROTL8( FT1[i] );
|
||||
@ -419,10 +419,10 @@ static void aes_gen_tables( void )
|
||||
|
||||
x = RSb[i];
|
||||
|
||||
RT0[i] = ( (uint32_t) MUL( 0x0E, x ) ) ^
|
||||
( (uint32_t) MUL( 0x09, x ) << 8 ) ^
|
||||
( (uint32_t) MUL( 0x0D, x ) << 16 ) ^
|
||||
( (uint32_t) MUL( 0x0B, x ) << 24 );
|
||||
RT0[i] = ( static_cast<uint32_t>(MUL( 0x0E, x ) ) ^
|
||||
( static_cast<uint32_t>(MUL( 0x09, x ) << 8 ) ^
|
||||
( static_cast<uint32_t>(MUL( 0x0D, x ) << 16 ) ^
|
||||
( static_cast<uint32_t>(MUL( 0x0B, x ) << 24 );
|
||||
|
||||
RT1[i] = ROTL8( RT0[i] );
|
||||
RT2[i] = ROTL8( RT1[i] );
|
||||
@ -460,7 +460,7 @@ int aes_setkey_enc( aes_context *ctx, const unsigned char *key, unsigned int key
|
||||
ctx->rk = RK = ctx->buf;
|
||||
|
||||
if( aesni_supports( POLARSSL_AESNI_AES ) )
|
||||
return( aesni_setkey_enc( (unsigned char *) ctx->rk, key, keysize ) );
|
||||
return( aesni_setkey_enc( reinterpret_cast<unsigned char*>(ctx->rk), key, keysize ) );
|
||||
|
||||
for( i = 0; i < (keysize >> 5); i++ )
|
||||
{
|
||||
@ -474,10 +474,10 @@ int aes_setkey_enc( aes_context *ctx, const unsigned char *key, unsigned int key
|
||||
for( i = 0; i < 10; i++, RK += 4 )
|
||||
{
|
||||
RK[4] = RK[0] ^ RCON[i] ^
|
||||
( (uint32_t) FSb[ ( RK[3] >> 8 ) & 0xFF ] ) ^
|
||||
( (uint32_t) FSb[ ( RK[3] >> 16 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) FSb[ ( RK[3] >> 24 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) FSb[ ( RK[3] ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(FSb[ ( RK[3] >> 8 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[3] >> 16 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[3] >> 24 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[3] ) & 0xFF ]) << 24 );
|
||||
|
||||
RK[5] = RK[1] ^ RK[4];
|
||||
RK[6] = RK[2] ^ RK[5];
|
||||
@ -490,10 +490,10 @@ int aes_setkey_enc( aes_context *ctx, const unsigned char *key, unsigned int key
|
||||
for( i = 0; i < 8; i++, RK += 6 )
|
||||
{
|
||||
RK[6] = RK[0] ^ RCON[i] ^
|
||||
( (uint32_t) FSb[ ( RK[5] >> 8 ) & 0xFF ] ) ^
|
||||
( (uint32_t) FSb[ ( RK[5] >> 16 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) FSb[ ( RK[5] >> 24 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) FSb[ ( RK[5] ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(FSb[ ( RK[5] >> 8 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[5] >> 16 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[5] >> 24 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[5] ) & 0xFF ]) << 24 );
|
||||
|
||||
RK[7] = RK[1] ^ RK[6];
|
||||
RK[8] = RK[2] ^ RK[7];
|
||||
@ -508,20 +508,20 @@ int aes_setkey_enc( aes_context *ctx, const unsigned char *key, unsigned int key
|
||||
for( i = 0; i < 7; i++, RK += 8 )
|
||||
{
|
||||
RK[8] = RK[0] ^ RCON[i] ^
|
||||
( (uint32_t) FSb[ ( RK[7] >> 8 ) & 0xFF ] ) ^
|
||||
( (uint32_t) FSb[ ( RK[7] >> 16 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) FSb[ ( RK[7] >> 24 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) FSb[ ( RK[7] ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(FSb[ ( RK[7] >> 8 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[7] >> 16 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[7] >> 24 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[7] ) & 0xFF ]) << 24 );
|
||||
|
||||
RK[9] = RK[1] ^ RK[8];
|
||||
RK[10] = RK[2] ^ RK[9];
|
||||
RK[11] = RK[3] ^ RK[10];
|
||||
|
||||
RK[12] = RK[4] ^
|
||||
( (uint32_t) FSb[ ( RK[11] ) & 0xFF ] ) ^
|
||||
( (uint32_t) FSb[ ( RK[11] >> 8 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) FSb[ ( RK[11] >> 16 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) FSb[ ( RK[11] >> 24 ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(FSb[ ( RK[11] ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[11] >> 8 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[11] >> 16 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( RK[11] >> 24 ) & 0xFF ]) << 24 );
|
||||
|
||||
RK[13] = RK[5] ^ RK[12];
|
||||
RK[14] = RK[6] ^ RK[13];
|
||||
@ -564,8 +564,8 @@ int aes_setkey_dec( aes_context *ctx, const unsigned char *key, unsigned int key
|
||||
|
||||
if( aesni_supports( POLARSSL_AESNI_AES ) )
|
||||
{
|
||||
aesni_inverse_key( (unsigned char *) ctx->rk,
|
||||
(const unsigned char *) cty.rk, ctx->nr );
|
||||
aesni_inverse_key( reinterpret_cast<unsigned char*>(ctx->rk),
|
||||
reinterpret_cast<const unsigned char*>(cty.rk), ctx->nr );
|
||||
goto done;
|
||||
}
|
||||
|
||||
@ -676,28 +676,28 @@ int aes_crypt_ecb( aes_context *ctx,
|
||||
AES_RROUND( Y0, Y1, Y2, Y3, X0, X1, X2, X3 );
|
||||
|
||||
X0 = *RK++ ^ \
|
||||
( (uint32_t) RSb[ ( Y0 ) & 0xFF ] ) ^
|
||||
( (uint32_t) RSb[ ( Y3 >> 8 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) RSb[ ( Y2 >> 16 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) RSb[ ( Y1 >> 24 ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(RSb[ ( Y0 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y3 >> 8 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y2 >> 16 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y1 >> 24 ) & 0xFF ]) << 24 );
|
||||
|
||||
X1 = *RK++ ^ \
|
||||
( (uint32_t) RSb[ ( Y1 ) & 0xFF ] ) ^
|
||||
( (uint32_t) RSb[ ( Y0 >> 8 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) RSb[ ( Y3 >> 16 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) RSb[ ( Y2 >> 24 ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(RSb[ ( Y1 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y0 >> 8 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y3 >> 16 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y2 >> 24 ) & 0xFF ]) << 24 );
|
||||
|
||||
X2 = *RK++ ^ \
|
||||
( (uint32_t) RSb[ ( Y2 ) & 0xFF ] ) ^
|
||||
( (uint32_t) RSb[ ( Y1 >> 8 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) RSb[ ( Y0 >> 16 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) RSb[ ( Y3 >> 24 ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(RSb[ ( Y2 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y1 >> 8 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y0 >> 16 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y3 >> 24 ) & 0xFF ]) << 24 );
|
||||
|
||||
X3 = *RK++ ^ \
|
||||
( (uint32_t) RSb[ ( Y3 ) & 0xFF ] ) ^
|
||||
( (uint32_t) RSb[ ( Y2 >> 8 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) RSb[ ( Y1 >> 16 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) RSb[ ( Y0 >> 24 ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(RSb[ ( Y3 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y2 >> 8 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y1 >> 16 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(RSb[ ( Y0 >> 24 ) & 0xFF ]) << 24 );
|
||||
}
|
||||
else /* AES_ENCRYPT */
|
||||
{
|
||||
@ -710,28 +710,28 @@ int aes_crypt_ecb( aes_context *ctx,
|
||||
AES_FROUND( Y0, Y1, Y2, Y3, X0, X1, X2, X3 );
|
||||
|
||||
X0 = *RK++ ^ \
|
||||
( (uint32_t) FSb[ ( Y0 ) & 0xFF ] ) ^
|
||||
( (uint32_t) FSb[ ( Y1 >> 8 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) FSb[ ( Y2 >> 16 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) FSb[ ( Y3 >> 24 ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(FSb[ ( Y0 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y1 >> 8 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y2 >> 16 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y3 >> 24 ) & 0xFF ]) << 24 );
|
||||
|
||||
X1 = *RK++ ^ \
|
||||
( (uint32_t) FSb[ ( Y1 ) & 0xFF ] ) ^
|
||||
( (uint32_t) FSb[ ( Y2 >> 8 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) FSb[ ( Y3 >> 16 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) FSb[ ( Y0 >> 24 ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(FSb[ ( Y1 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y2 >> 8 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y3 >> 16 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y0 >> 24 ) & 0xFF ]) << 24 );
|
||||
|
||||
X2 = *RK++ ^ \
|
||||
( (uint32_t) FSb[ ( Y2 ) & 0xFF ] ) ^
|
||||
( (uint32_t) FSb[ ( Y3 >> 8 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) FSb[ ( Y0 >> 16 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) FSb[ ( Y1 >> 24 ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(FSb[ ( Y2 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y3 >> 8 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y0 >> 16 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y1 >> 24 ) & 0xFF ]) << 24 );
|
||||
|
||||
X3 = *RK++ ^ \
|
||||
( (uint32_t) FSb[ ( Y3 ) & 0xFF ] ) ^
|
||||
( (uint32_t) FSb[ ( Y0 >> 8 ) & 0xFF ] << 8 ) ^
|
||||
( (uint32_t) FSb[ ( Y1 >> 16 ) & 0xFF ] << 16 ) ^
|
||||
( (uint32_t) FSb[ ( Y2 >> 24 ) & 0xFF ] << 24 );
|
||||
( static_cast<uint32_t>(FSb[ ( Y3 ) & 0xFF ]) ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y0 >> 8 ) & 0xFF ]) << 8 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y1 >> 16 ) & 0xFF ]) << 16 ) ^
|
||||
( static_cast<uint32_t>(FSb[ ( Y2 >> 24 ) & 0xFF ]) << 24 );
|
||||
}
|
||||
|
||||
PUT_UINT32_LE( X0, output, 0 );
|
||||
@ -766,7 +766,7 @@ int aes_crypt_cbc( aes_context *ctx,
|
||||
aes_crypt_ecb( ctx, mode, input, output );
|
||||
|
||||
for( i = 0; i < 16; i++ )
|
||||
output[i] = (unsigned char)( output[i] ^ iv[i] );
|
||||
output[i] ^= iv[i];
|
||||
|
||||
memcpy( iv, temp, 16 );
|
||||
|
||||
@ -780,7 +780,7 @@ int aes_crypt_cbc( aes_context *ctx,
|
||||
while( length > 0 )
|
||||
{
|
||||
for( i = 0; i < 16; i++ )
|
||||
output[i] = (unsigned char)( input[i] ^ iv[i] );
|
||||
output[i] = input[i] ^ iv[i];
|
||||
|
||||
aes_crypt_ecb( ctx, mode, output, output );
|
||||
memcpy( iv, output, 16 );
|
||||
@ -816,8 +816,8 @@ int aes_crypt_cfb128( aes_context *ctx,
|
||||
aes_crypt_ecb( ctx, AES_ENCRYPT, iv, iv );
|
||||
|
||||
c = *input++;
|
||||
*output++ = (unsigned char)( c ^ iv[n] );
|
||||
iv[n] = (unsigned char) c;
|
||||
*output++ = static_cast<unsigned char>( c ^ iv[n] );
|
||||
iv[n] = static_cast<unsigned char>(c);
|
||||
|
||||
n = (n + 1) & 0x0F;
|
||||
}
|
||||
@ -829,7 +829,7 @@ int aes_crypt_cfb128( aes_context *ctx,
|
||||
if( n == 0 )
|
||||
aes_crypt_ecb( ctx, AES_ENCRYPT, iv, iv );
|
||||
|
||||
iv[n] = *output++ = (unsigned char)( iv[n] ^ *input++ );
|
||||
iv[n] = *output++ = static_cast<unsigned char>( iv[n] ^ *input++ );
|
||||
|
||||
n = (n + 1) & 0x0F;
|
||||
}
|
||||
@ -864,7 +864,7 @@ int aes_crypt_ctr( aes_context *ctx,
|
||||
break;
|
||||
}
|
||||
c = *input++;
|
||||
*output++ = (unsigned char)( c ^ stream_block[n] );
|
||||
*output++ = static_cast<unsigned char>( c ^ stream_block[n] );
|
||||
|
||||
n = (n + 1) & 0x0F;
|
||||
}
|
||||
@ -890,7 +890,7 @@ void leftshift_onebit(unsigned char *input, unsigned char *output)
|
||||
int i;
|
||||
unsigned char overflow = 0;
|
||||
|
||||
for (i = 15; i >= 0; i--)
|
||||
for (i = 15; i >= 0; i--)
|
||||
{
|
||||
output[i] = input[i] << 1;
|
||||
output[i] |= overflow;
|
||||
@ -901,7 +901,7 @@ void leftshift_onebit(unsigned char *input, unsigned char *output)
|
||||
void xor_128(unsigned char *a, unsigned char *b, unsigned char *out)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < 16; i++)
|
||||
for (i = 0; i < 16; i++)
|
||||
out[i] = a[i] ^ b[i];
|
||||
}
|
||||
|
||||
@ -910,7 +910,7 @@ void generate_subkey(aes_context *ctx, unsigned char *K1, unsigned char *K2)
|
||||
unsigned char L[16];
|
||||
unsigned char Z[16];
|
||||
unsigned char tmp[16];
|
||||
|
||||
|
||||
int i;
|
||||
for (i = 0; i < 16; i++) Z[i] = 0;
|
||||
|
||||
@ -924,7 +924,7 @@ void generate_subkey(aes_context *ctx, unsigned char *K1, unsigned char *K2)
|
||||
xor_128(tmp,const_Rb,K1);
|
||||
}
|
||||
|
||||
if ((K1[0] & 0x80) == 0)
|
||||
if ((K1[0] & 0x80) == 0)
|
||||
{
|
||||
leftshift_onebit(K1,K2);
|
||||
} else {
|
||||
@ -936,9 +936,9 @@ void generate_subkey(aes_context *ctx, unsigned char *K1, unsigned char *K2)
|
||||
void padding (unsigned char *lastb, unsigned char *pad, int length)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < 16; i++)
|
||||
for (i = 0; i < 16; i++)
|
||||
{
|
||||
if (i < length)
|
||||
if (i < length)
|
||||
pad[i] = lastb[i];
|
||||
else if (i == length)
|
||||
pad[i] = 0x80;
|
||||
@ -955,7 +955,7 @@ void aes_cmac(aes_context *ctx, int length, unsigned char *input, unsigned char
|
||||
generate_subkey(ctx, K1, K2);
|
||||
|
||||
n = (length + 15) / 16;
|
||||
if (n == 0)
|
||||
if (n == 0)
|
||||
{
|
||||
n = 1;
|
||||
flag = 0;
|
||||
@ -966,7 +966,7 @@ void aes_cmac(aes_context *ctx, int length, unsigned char *input, unsigned char
|
||||
flag = 0;
|
||||
}
|
||||
|
||||
if (flag)
|
||||
if (flag)
|
||||
{
|
||||
xor_128(&input[16 * (n - 1)], K1, M_last);
|
||||
} else {
|
||||
@ -975,10 +975,10 @@ void aes_cmac(aes_context *ctx, int length, unsigned char *input, unsigned char
|
||||
}
|
||||
|
||||
for (i = 0; i < 16; i++) X[i] = 0;
|
||||
for (i = 0; i < n - 1; i++)
|
||||
for (i = 0; i < n - 1; i++)
|
||||
{
|
||||
xor_128(X, &input[16*i], Y);
|
||||
aes_crypt_ecb(ctx, AES_ENCRYPT, Y, X);
|
||||
aes_crypt_ecb(ctx, AES_ENCRYPT, Y, X);
|
||||
}
|
||||
|
||||
xor_128(X,M_last,Y);
|
||||
|
@ -416,49 +416,6 @@ static void point_mul(struct point *d, u8 *a, struct point *b) // a is bignum
|
||||
}
|
||||
}
|
||||
|
||||
static void generate_ecdsa(u8 *R, u8 *S, u8 *k, u8 *hash)
|
||||
{
|
||||
u8 e[21];
|
||||
u8 kk[21];
|
||||
u8 m[21];
|
||||
u8 minv[21];
|
||||
struct point mG;
|
||||
|
||||
e[0] = 0;
|
||||
memcpy(e + 1, hash, 20);
|
||||
bn_reduce(e, ec_N, 21);
|
||||
|
||||
try_again:
|
||||
prng(m, 21);
|
||||
m[0] = 0;
|
||||
if (bn_compare(m, ec_N, 21) >= 0)
|
||||
goto try_again;
|
||||
|
||||
// R = (mG).x
|
||||
|
||||
point_mul(&mG, m, &ec_G);
|
||||
point_from_mon(&mG);
|
||||
R[0] = 0;
|
||||
elt_copy(R+1, mG.x);
|
||||
|
||||
// S = m**-1*(e + Rk) (mod N)
|
||||
|
||||
bn_copy(kk, k, 21);
|
||||
bn_reduce(kk, ec_N, 21);
|
||||
bn_to_mon(m, ec_N, 21);
|
||||
bn_to_mon(e, ec_N, 21);
|
||||
bn_to_mon(R, ec_N, 21);
|
||||
bn_to_mon(kk, ec_N, 21);
|
||||
|
||||
bn_mon_mul(S, R, kk, ec_N, 21);
|
||||
bn_add(kk, S, e, ec_N, 21);
|
||||
bn_mon_inv(minv, m, ec_N, 21);
|
||||
bn_mon_mul(S, minv, kk, ec_N, 21);
|
||||
|
||||
bn_from_mon(R, ec_N, 21);
|
||||
bn_from_mon(S, ec_N, 21);
|
||||
}
|
||||
|
||||
static int check_ecdsa(struct point *Q, u8 *R, u8 *S, u8 *hash)
|
||||
{
|
||||
u8 Sinv[21];
|
||||
@ -515,14 +472,14 @@ int ecdsa_set_curve(u8* p, u8* a, u8* b, u8* N, u8* Gx, u8* Gy)
|
||||
memcpy(ec_N, N, 21);
|
||||
memcpy(ec_G.x, Gx, 20);
|
||||
memcpy(ec_G.y, Gy, 20);
|
||||
|
||||
|
||||
bn_to_mon(ec_a, ec_p, 20);
|
||||
bn_to_mon(ec_b, ec_p, 20);
|
||||
|
||||
point_to_mon(&ec_G);
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void ecdsa_set_pub(u8 *Q)
|
||||
{
|
||||
@ -540,8 +497,3 @@ int ecdsa_verify(u8 *hash, u8 *R, u8 *S)
|
||||
{
|
||||
return check_ecdsa(&ec_Q, R, S, hash);
|
||||
}
|
||||
|
||||
void ecdsa_sign(u8 *hash, u8 *R, u8 *S)
|
||||
{
|
||||
generate_ecdsa(R, S, ec_k, hash);
|
||||
}
|
||||
|
@ -11,4 +11,3 @@ int ecdsa_set_curve(unsigned char *p, unsigned char *a, unsigned char *b, unsign
|
||||
void ecdsa_set_pub(unsigned char *Q);
|
||||
void ecdsa_set_priv(unsigned char *k);
|
||||
int ecdsa_verify(unsigned char *hash, unsigned char *R, unsigned char *S);
|
||||
void ecdsa_sign(unsigned char *hash, unsigned char *R, unsigned char *S);
|
||||
|
@ -146,7 +146,7 @@ int decompress(unsigned char *out, unsigned char *in, unsigned int size)
|
||||
result = -1;
|
||||
if (code <= size)
|
||||
{
|
||||
memcpy(out, (const void *)(in + 5), code);
|
||||
memcpy(out, in + 5, code);
|
||||
result = static_cast<int>(start - out);
|
||||
}
|
||||
}
|
||||
@ -165,7 +165,7 @@ int decompress(unsigned char *out, unsigned char *in, unsigned int size)
|
||||
if (start == end) return static_cast<int>(start - out);
|
||||
|
||||
// Locate first section.
|
||||
int sect = (((((((int)(start - out)) & 7) << 8) + prev) >> head) & 7) * 0xFF - 1;
|
||||
int sect = ((((((static_cast<int>(start - out)) & 7) << 8) + prev) >> head) & 7) * 0xFF - 1;
|
||||
tmp_sect1 = tmp + sect;
|
||||
int index = 1;
|
||||
|
||||
@ -198,7 +198,7 @@ int decompress(unsigned char *out, unsigned char *in, unsigned int size)
|
||||
if ((index >= 0) || (bit_flag != 0))
|
||||
{
|
||||
// Locate next section.
|
||||
int sect = (index << 5) | (((((int)(start - out)) << index) & 3) << 3) | (offset & 7);
|
||||
int sect = (index << 5) | ((((static_cast<int>(start - out)) << index) & 3) << 3) | (offset & 7);
|
||||
tmp_sect1 = tmp + 0xBA8 + sect;
|
||||
|
||||
// Decode the data length (8 bit fields).
|
||||
@ -265,7 +265,7 @@ int decompress(unsigned char *out, unsigned char *in, unsigned int size)
|
||||
}
|
||||
|
||||
// Update offset.
|
||||
offset = ((((int)(buf_end - out)) + 1) & 1) + 6;
|
||||
offset = (((static_cast<int>(buf_end - out)) + 1) & 1) + 6;
|
||||
|
||||
// Copy data.
|
||||
do
|
||||
|
@ -35,7 +35,7 @@
|
||||
/* Implementation that should never be optimized out by the compiler */
|
||||
static void mbedtls_zeroize(void* v, size_t n)
|
||||
{
|
||||
volatile unsigned char* p = (volatile unsigned char*)v;
|
||||
auto p = const_cast<volatile char*>(static_cast<char*>(v));
|
||||
while (n--)
|
||||
*p++ = 0;
|
||||
}
|
||||
@ -46,20 +46,20 @@ static void mbedtls_zeroize(void* v, size_t n)
|
||||
#ifndef GET_UINT32_LE
|
||||
#define GET_UINT32_LE(n,b,i) \
|
||||
{ \
|
||||
(n) = ( (uint32_t) (b)[(i) ] ) \
|
||||
| ( (uint32_t) (b)[(i) + 1] << 8 ) \
|
||||
| ( (uint32_t) (b)[(i) + 2] << 16 ) \
|
||||
| ( (uint32_t) (b)[(i) + 3] << 24 ); \
|
||||
(n) = ( static_cast<uint32_t>((b)[(i) ]) ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 1]) << 8 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 2]) << 16 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 3]) << 24 );\
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef PUT_UINT32_LE
|
||||
#define PUT_UINT32_LE(n,b,i) \
|
||||
{ \
|
||||
(b)[(i) ] = (unsigned char) ( ( (n) ) & 0xFF ); \
|
||||
(b)[(i) + 1] = (unsigned char) ( ( (n) >> 8 ) & 0xFF ); \
|
||||
(b)[(i) + 2] = (unsigned char) ( ( (n) >> 16 ) & 0xFF ); \
|
||||
(b)[(i) + 3] = (unsigned char) ( ( (n) >> 24 ) & 0xFF ); \
|
||||
(b)[(i) ] = static_cast<unsigned char>(( (n) ) & 0xFF ); \
|
||||
(b)[(i) + 1] = static_cast<unsigned char>(( (n) >> 8 ) & 0xFF ); \
|
||||
(b)[(i) + 2] = static_cast<unsigned char>(( (n) >> 16 ) & 0xFF ); \
|
||||
(b)[(i) + 3] = static_cast<unsigned char>(( (n) >> 24 ) & 0xFF ); \
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -258,15 +258,15 @@ int mbedtls_md5_update_ret( mbedtls_md5_context *ctx,
|
||||
left = ctx->total[0] & 0x3F;
|
||||
fill = 64 - left;
|
||||
|
||||
ctx->total[0] += (uint32_t) ilen;
|
||||
ctx->total[0] += static_cast<uint32_t>(ilen);
|
||||
ctx->total[0] &= 0xFFFFFFFF;
|
||||
|
||||
if( ctx->total[0] < (uint32_t) ilen )
|
||||
if( ctx->total[0] < static_cast<uint32_t>(ilen) )
|
||||
ctx->total[1]++;
|
||||
|
||||
if( left && ilen >= fill )
|
||||
{
|
||||
memcpy( (void *) (ctx->buffer + left), input, fill );
|
||||
memcpy( ctx->buffer + left, input, fill );
|
||||
if( ( ret = mbedtls_internal_md5_process( ctx, ctx->buffer ) ) != 0 )
|
||||
return( ret );
|
||||
|
||||
@ -286,7 +286,7 @@ int mbedtls_md5_update_ret( mbedtls_md5_context *ctx,
|
||||
|
||||
if( ilen > 0 )
|
||||
{
|
||||
memcpy( (void *) (ctx->buffer + left), input, ilen );
|
||||
memcpy( ctx->buffer + left, input, ilen );
|
||||
}
|
||||
|
||||
return( 0 );
|
||||
|
@ -27,7 +27,7 @@
|
||||
*
|
||||
* http://www.itl.nist.gov/fipspubs/fip180-1.htm
|
||||
*/
|
||||
|
||||
|
||||
#include "sha1.h"
|
||||
|
||||
/*
|
||||
@ -36,20 +36,20 @@
|
||||
#ifndef GET_UINT32_BE
|
||||
#define GET_UINT32_BE(n,b,i) \
|
||||
{ \
|
||||
(n) = ( (uint32_t) (b)[(i) ] << 24 ) \
|
||||
| ( (uint32_t) (b)[(i) + 1] << 16 ) \
|
||||
| ( (uint32_t) (b)[(i) + 2] << 8 ) \
|
||||
| ( (uint32_t) (b)[(i) + 3] ); \
|
||||
(n) = ( static_cast<uint32_t>((b)[(i) ]) << 24 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 1]) << 16 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 2]) << 8 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 3]) );\
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef PUT_UINT32_BE
|
||||
#define PUT_UINT32_BE(n,b,i) \
|
||||
{ \
|
||||
(b)[(i) ] = (unsigned char) ( (n) >> 24 ); \
|
||||
(b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \
|
||||
(b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \
|
||||
(b)[(i) + 3] = (unsigned char) ( (n) ); \
|
||||
(b)[(i) ] = static_cast<unsigned char> ( (n) >> 24 ); \
|
||||
(b)[(i) + 1] = static_cast<unsigned char> ( (n) >> 16 ); \
|
||||
(b)[(i) + 2] = static_cast<unsigned char> ( (n) >> 8 ); \
|
||||
(b)[(i) + 3] = static_cast<unsigned char> ( (n) ); \
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -239,15 +239,15 @@ void sha1_update( sha1_context *ctx, const unsigned char *input, size_t ilen )
|
||||
left = ctx->total[0] & 0x3F;
|
||||
fill = 64 - left;
|
||||
|
||||
ctx->total[0] += (uint32_t) ilen;
|
||||
ctx->total[0] += static_cast<uint32_t>(ilen);
|
||||
ctx->total[0] &= 0xFFFFFFFF;
|
||||
|
||||
if( ctx->total[0] < (uint32_t) ilen )
|
||||
if( ctx->total[0] < static_cast<uint32_t>(ilen) )
|
||||
ctx->total[1]++;
|
||||
|
||||
if( left && ilen >= fill )
|
||||
{
|
||||
memcpy( (void *) (ctx->buffer + left), input, fill );
|
||||
memcpy( ctx->buffer + left, input, fill );
|
||||
sha1_process( ctx, ctx->buffer );
|
||||
input += fill;
|
||||
ilen -= fill;
|
||||
@ -262,7 +262,7 @@ void sha1_update( sha1_context *ctx, const unsigned char *input, size_t ilen )
|
||||
}
|
||||
|
||||
if( ilen > 0 )
|
||||
memcpy( (void *) (ctx->buffer + left), input, ilen );
|
||||
memcpy( ctx->buffer + left, input, ilen );
|
||||
}
|
||||
|
||||
static const unsigned char sha1_padding[64] =
|
||||
@ -336,8 +336,8 @@ void sha1_hmac_starts( sha1_context *ctx, const unsigned char *key, size_t keyle
|
||||
|
||||
for( i = 0; i < keylen; i++ )
|
||||
{
|
||||
ctx->ipad[i] = (unsigned char)( ctx->ipad[i] ^ key[i] );
|
||||
ctx->opad[i] = (unsigned char)( ctx->opad[i] ^ key[i] );
|
||||
ctx->ipad[i] ^= key[i];
|
||||
ctx->opad[i] ^= key[i];
|
||||
}
|
||||
|
||||
sha1_starts( ctx );
|
||||
|
@ -42,8 +42,8 @@
|
||||
#endif /* MBEDTLS_PLATFORM_C */
|
||||
#endif /* MBEDTLS_SELF_TEST */
|
||||
|
||||
#define SHA256_VALIDATE_RET(cond)
|
||||
#define SHA256_VALIDATE(cond)
|
||||
#define SHA256_VALIDATE_RET(cond)
|
||||
#define SHA256_VALIDATE(cond)
|
||||
|
||||
#if !defined(MBEDTLS_SHA256_ALT)
|
||||
|
||||
@ -53,27 +53,27 @@
|
||||
#ifndef GET_UINT32_BE
|
||||
#define GET_UINT32_BE(n,b,i) \
|
||||
do { \
|
||||
(n) = ( (uint32_t) (b)[(i) ] << 24 ) \
|
||||
| ( (uint32_t) (b)[(i) + 1] << 16 ) \
|
||||
| ( (uint32_t) (b)[(i) + 2] << 8 ) \
|
||||
| ( (uint32_t) (b)[(i) + 3] ); \
|
||||
(n) = ( static_cast<uint32_t>((b)[(i) ]) << 24 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 1]) << 16 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 2]) << 8 ) \
|
||||
| ( static_cast<uint32_t>((b)[(i) + 3]) );\
|
||||
} while( 0 )
|
||||
#endif
|
||||
|
||||
#ifndef PUT_UINT32_BE
|
||||
#define PUT_UINT32_BE(n,b,i) \
|
||||
do { \
|
||||
(b)[(i) ] = (unsigned char) ( (n) >> 24 ); \
|
||||
(b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \
|
||||
(b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \
|
||||
(b)[(i) + 3] = (unsigned char) ( (n) ); \
|
||||
(b)[(i) ] = static_cast<unsigned char> ( (n) >> 24 ); \
|
||||
(b)[(i) + 1] = static_cast<unsigned char> ( (n) >> 16 ); \
|
||||
(b)[(i) + 2] = static_cast<unsigned char> ( (n) >> 8 ); \
|
||||
(b)[(i) + 3] = static_cast<unsigned char> ( (n) ); \
|
||||
} while( 0 )
|
||||
#endif
|
||||
|
||||
/* Implementation that should never be optimized out by the compiler */
|
||||
static void mbedtls_zeroize_sha256(void* v, size_t n)
|
||||
{
|
||||
volatile unsigned char* p = (volatile unsigned char*)v;
|
||||
auto p = const_cast<volatile char*>(static_cast<char*>(v));
|
||||
while (n--)
|
||||
*p++ = 0;
|
||||
}
|
||||
@ -288,15 +288,15 @@ int mbedtls_sha256_update_ret( mbedtls_sha256_context *ctx,
|
||||
left = ctx->total[0] & 0x3F;
|
||||
fill = 64 - left;
|
||||
|
||||
ctx->total[0] += (uint32_t) ilen;
|
||||
ctx->total[0] += static_cast<uint32_t>(ilen);
|
||||
ctx->total[0] &= 0xFFFFFFFF;
|
||||
|
||||
if( ctx->total[0] < (uint32_t) ilen )
|
||||
if( ctx->total[0] < static_cast<uint32_t>(ilen) )
|
||||
ctx->total[1]++;
|
||||
|
||||
if( left && ilen >= fill )
|
||||
{
|
||||
memcpy( (void *) (ctx->buffer + left), input, fill );
|
||||
memcpy( ctx->buffer + left, input, fill );
|
||||
|
||||
if( ( ret = mbedtls_internal_sha256_process( ctx, ctx->buffer ) ) != 0 )
|
||||
return( ret );
|
||||
@ -316,7 +316,7 @@ int mbedtls_sha256_update_ret( mbedtls_sha256_context *ctx,
|
||||
}
|
||||
|
||||
if( ilen > 0 )
|
||||
memcpy( (void *) (ctx->buffer + left), input, ilen );
|
||||
memcpy( ctx->buffer + left, input, ilen );
|
||||
|
||||
return( 0 );
|
||||
}
|
||||
|
@ -9,7 +9,7 @@
|
||||
|
||||
#include <memory>
|
||||
|
||||
// Auxiliary functions (endian swap, xor and prng).
|
||||
// Auxiliary functions (endian swap, xor).
|
||||
|
||||
void xor_key(unsigned char *dest, const u8* src1, const u8* src2)
|
||||
{
|
||||
@ -19,18 +19,10 @@ void xor_key(unsigned char *dest, const u8* src1, const u8* src2)
|
||||
}
|
||||
}
|
||||
|
||||
void prng(unsigned char *dest, int size)
|
||||
{
|
||||
srand((u32)time(0));
|
||||
|
||||
for(int i = 0; i < size; i++)
|
||||
dest[i] = (unsigned char)(rand() & 0xFF);
|
||||
}
|
||||
|
||||
// Hex string conversion auxiliary functions.
|
||||
u64 hex_to_u64(const char* hex_str)
|
||||
{
|
||||
u32 length = (u32) strlen(hex_str);
|
||||
auto length = std::strlen(hex_str);
|
||||
u64 tmp = 0;
|
||||
u64 result = 0;
|
||||
char c;
|
||||
@ -54,8 +46,8 @@ u64 hex_to_u64(const char* hex_str)
|
||||
|
||||
void hex_to_bytes(unsigned char* data, const char* hex_str, unsigned int str_length)
|
||||
{
|
||||
u32 strn_length = (str_length > 0) ? str_length : (u32)std::strlen(hex_str);
|
||||
u32 data_length = strn_length / 2;
|
||||
auto strn_length = (str_length > 0) ? str_length : std::strlen(hex_str);
|
||||
auto data_length = strn_length / 2;
|
||||
char tmp_buf[3] = {0, 0, 0};
|
||||
|
||||
// Don't convert if the string length is odd.
|
||||
@ -66,7 +58,7 @@ void hex_to_bytes(unsigned char* data, const char* hex_str, unsigned int str_len
|
||||
tmp_buf[0] = *hex_str++;
|
||||
tmp_buf[1] = *hex_str++;
|
||||
|
||||
*data++ = (u8)(hex_to_u64(tmp_buf) & 0xFF);
|
||||
*data++ = static_cast<u8>(hex_to_u64(tmp_buf) & 0xFF);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -155,6 +147,6 @@ char* extract_file_name(const char* file_path, char real_file_name[MAX_PATH])
|
||||
if (!p) p = strrchr(file_path, '\\');
|
||||
if (p) file_path_len = file_path + file_path_len - p - 1;
|
||||
strncpy(real_file_name, p ? (p + 1) : file_path, file_path_len + 1);
|
||||
|
||||
|
||||
return real_file_name;
|
||||
}
|
||||
|
@ -14,7 +14,7 @@
|
||||
#include "lz.h"
|
||||
#include "ec.h"
|
||||
|
||||
// Auxiliary functions (endian swap, xor, prng and file name).
|
||||
// Auxiliary functions (endian swap, xor, and file name).
|
||||
inline u16 swap16(u16 i)
|
||||
{
|
||||
#if defined(__GNUG__)
|
||||
@ -45,11 +45,10 @@ inline u64 swap64(u64 i)
|
||||
void xor_key(unsigned char *dest, const u8* src1, const u8* src2);
|
||||
inline void xor_key_sse(u8* dest, const u8* src1, const u8* src2)
|
||||
{
|
||||
_mm_storeu_si128(&(((__m128i*)dest)[0]),
|
||||
_mm_xor_si128(_mm_loadu_si128((__m128i*)src1), _mm_loadu_si128((__m128i*)src2)));
|
||||
_mm_storeu_si128(reinterpret_cast<__m128i*>(dest),
|
||||
_mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(src1)), _mm_loadu_si128(reinterpret_cast<const __m128i*>(src2))));
|
||||
}
|
||||
|
||||
void prng(unsigned char *dest, int size);
|
||||
char* extract_file_name(const char* file_path, char real_file_name[MAX_PATH]);
|
||||
|
||||
// Hex string conversion auxiliary functions.
|
||||
|
@ -285,7 +285,7 @@ inline RT ppu_execute(ppu_thread& ppu, Args... args)
|
||||
return func(ppu, args...);
|
||||
}
|
||||
|
||||
#define REG_FNID(module, nid, func) ppu_module_manager::register_static_function<&func>(#module, ppu_select_name(#func, nid), BIND_FUNC(func, ppu.cia = (u32)ppu.lr & ~3), ppu_generate_id(nid))
|
||||
#define REG_FNID(module, nid, func) ppu_module_manager::register_static_function<&func>(#module, ppu_select_name(#func, nid), BIND_FUNC(func, ppu.cia = static_cast<u32>(ppu.lr) & ~3), ppu_generate_id(nid))
|
||||
|
||||
#define REG_FUNC(module, func) REG_FNID(module, #func, func)
|
||||
|
||||
|
@ -171,7 +171,7 @@ namespace vk
|
||||
check_heap();
|
||||
|
||||
const auto size = count * sizeof(f32);
|
||||
m_vao_offset = (u32)m_vao.alloc<16>(size);
|
||||
m_vao_offset = static_cast<u32>(m_vao.alloc<16>(size));
|
||||
auto dst = m_vao.map(m_vao_offset, size);
|
||||
std::memcpy(dst, data, size);
|
||||
m_vao.unmap();
|
||||
@ -352,15 +352,15 @@ namespace vk
|
||||
virtual void set_up_viewport(vk::command_buffer &cmd, u32 x, u32 y, u32 w, u32 h)
|
||||
{
|
||||
VkViewport vp{};
|
||||
vp.x = (f32)x;
|
||||
vp.y = (f32)y;
|
||||
vp.width = (f32)w;
|
||||
vp.height = (f32)h;
|
||||
vp.x = static_cast<f32>(x);
|
||||
vp.y = static_cast<f32>(y);
|
||||
vp.width = static_cast<f32>(w);
|
||||
vp.height = static_cast<f32>(h);
|
||||
vp.minDepth = 0.f;
|
||||
vp.maxDepth = 1.f;
|
||||
vkCmdSetViewport(cmd, 0, 1, &vp);
|
||||
|
||||
VkRect2D vs = { { (s32)x, (s32)y }, { w, h } };
|
||||
VkRect2D vs = { { static_cast<s32>(x), static_cast<s32>(y) }, { w, h } };
|
||||
vkCmdSetScissor(cmd, 0, 1, &vs);
|
||||
}
|
||||
|
||||
@ -373,8 +373,8 @@ namespace vk
rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
rp_begin.renderPass = render_pass;
rp_begin.framebuffer = fbo->value;
rp_begin.renderArea.offset.x = (s32)viewport.x1;
rp_begin.renderArea.offset.y = (s32)viewport.y1;
rp_begin.renderArea.offset.x = static_cast<s32>(viewport.x1);
rp_begin.renderArea.offset.y = static_cast<s32>(viewport.y1);
rp_begin.renderArea.extent.width = viewport.width();
rp_begin.renderArea.extent.height = viewport.height();

@ -440,8 +440,8 @@ namespace vk

void update_uniforms(vk::command_buffer& /*cmd*/, vk::glsl::program* /*program*/) override
{
m_ubo_offset = (u32)m_ubo.alloc<256>(128);
auto dst = (f32*)m_ubo.map(m_ubo_offset, 128);
m_ubo_offset = static_cast<u32>(m_ubo.alloc<256>(128));
auto dst = static_cast<f32*>(m_ubo.map(m_ubo_offset, 128));
dst[0] = src_scale_x;
dst[1] = src_scale_y;
dst[2] = 0.f;
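A sketch of the update_uniforms pattern above, assuming the heap's map() hands back a void* into the mapped uniform buffer (the parameter and function names are stand-ins). static_cast is the idiomatic cast from void* to an object pointer, so reinterpret_cast is not needed here:

#include <cstdint>

using f32 = float;

// Write the scaling parameters into a mapped uniform block.
inline void write_scale_params(void* mapped, f32 scale_x, f32 scale_y)
{
	f32* dst = static_cast<f32*>(mapped);
	dst[0] = scale_x;
	dst[1] = scale_y;
	dst[2] = 0.f;
	dst[3] = 0.f;
}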
@ -454,8 +454,8 @@ namespace vk
auto real_src = src->image();
verify(HERE), real_src;

src_scale_x = f32(src_area.x2) / real_src->width();
src_scale_y = f32(src_area.y2) / real_src->height();
src_scale_x = static_cast<f32>(src_area.x2) / real_src->width();
src_scale_y = static_cast<f32>(src_area.y2) / real_src->height();

overlay_pass::run(cmd, dst_area, dst, src, render_pass);
}
@ -637,7 +637,7 @@ namespace vk
region.bufferRowLength = w;
region.bufferImageHeight = h;
region.imageOffset = {};
region.imageExtent = { (u32)w, (u32)h, 1u};
region.imageExtent = { static_cast<u32>(w), static_cast<u32>(h), 1u};

change_image_layout(cmd, tex.get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, range);
vkCmdCopyBufferToImage(cmd, upload_heap.heap->value, tex->value, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
@ -711,7 +711,7 @@ namespace vk

vk::image_view* find_font(rsx::overlays::font *font, vk::command_buffer &cmd, vk::data_heap &upload_heap)
{
u64 key = (u64)font;
u64 key = reinterpret_cast<u64>(font);
auto found = view_cache.find(key);
if (found != view_cache.end())
return found->second.get();
@ -723,7 +723,7 @@ namespace vk

vk::image_view* find_temp_image(rsx::overlays::image_info *desc, vk::command_buffer &cmd, vk::data_heap &upload_heap, u32 owner_uid)
{
u64 key = (u64)desc;
u64 key = reinterpret_cast<u64>(desc);
auto found = temp_view_cache.find(key);
if (found != temp_view_cache.end())
return found->second.get();
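A sketch of the cache-key pattern in find_font/find_temp_image above, with a stand-in type. Converting a pointer value to an integer is only possible with reinterpret_cast (or via std::uintptr_t); static_cast cannot do it:

#include <cstdint>

using u64 = std::uint64_t;

struct font_stub {};

// Use the object's address as a map key, as the lookup functions above do.
inline u64 cache_key(const font_stub* font)
{
	return reinterpret_cast<u64>(font); // assumes a 64-bit target where u64 can hold a pointer
}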
@ -734,8 +734,8 @@ namespace vk

void update_uniforms(vk::command_buffer& /*cmd*/, vk::glsl::program* /*program*/) override
{
m_ubo_offset = (u32)m_ubo.alloc<256>(128);
auto dst = (f32*)m_ubo.map(m_ubo_offset, 128);
m_ubo_offset = static_cast<u32>(m_ubo.alloc<256>(128));
auto dst = static_cast<f32*>(m_ubo.map(m_ubo_offset, 128));

// regs[0] = scaling parameters
dst[0] = m_scale_offset.r;
@ -752,7 +752,7 @@ namespace vk
// regs[2] = fs config parameters
dst[8] = m_time;
dst[9] = m_pulse_glow? 1.f : 0.f;
dst[10] = m_skip_texture_read? 0.f : (f32)m_texture_type;
dst[10] = m_skip_texture_read? 0.f : static_cast<f32>(m_texture_type);
dst[11] = m_clip_enabled ? 1.f : 0.f;

// regs[3] = clip rect
@ -815,22 +815,22 @@ namespace vk
void run(vk::command_buffer &cmd, const areau& viewport, vk::framebuffer* target, VkRenderPass render_pass,
vk::data_heap &upload_heap, rsx::overlays::overlay &ui)
{
m_scale_offset = color4f((f32)ui.virtual_width, (f32)ui.virtual_height, 1.f, 1.f);
m_time = (f32)(get_system_time() / 1000) * 0.005f;
m_viewport_size = { f32(viewport.width()), f32(viewport.height()) };
m_scale_offset = color4f(ui.virtual_width, ui.virtual_height, 1.f, 1.f);
m_time = static_cast<f32>(get_system_time() / 1000) * 0.005f;
m_viewport_size = { static_cast<f32>(viewport.width()), static_cast<f32>(viewport.height()) };

for (auto &command : ui.get_compiled().draw_commands)
{
num_drawable_elements = (u32)command.verts.size();
num_drawable_elements = static_cast<u32>(command.verts.size());
const u32 value_count = num_drawable_elements * 4;

upload_vertex_data((f32*)command.verts.data(), value_count);
upload_vertex_data(reinterpret_cast<f32*>(command.verts.data()), value_count);
set_primitive_type(command.config.primitives);

m_skip_texture_read = false;
m_color = command.config.color;
m_pulse_glow = command.config.pulse_glow;
m_blur_strength = f32(command.config.blur_strength) * 0.01f;
m_blur_strength = static_cast<f32>(command.config.blur_strength) * 0.01f;
m_clip_enabled = command.config.clip_region;
m_clip_region = command.config.clip_rect;
m_texture_type = 1;
@ -849,7 +849,7 @@ namespace vk
src = find_font(command.config.font_ref, cmd, upload_heap);
break;
case rsx::overlays::image_resource_id::raw_image:
src = find_temp_image((rsx::overlays::image_info*)command.config.external_data_ref, cmd, upload_heap, ui.uid);
src = find_temp_image(static_cast<rsx::overlays::image_info*>(command.config.external_data_ref), cmd, upload_heap, ui.uid);
break;
default:
src = view_cache[command.config.texture_ref].get();
@ -937,10 +937,10 @@ namespace vk
void set_up_viewport(vk::command_buffer &cmd, u32 x, u32 y, u32 w, u32 h) override
{
VkViewport vp{};
vp.x = (f32)x;
vp.y = (f32)y;
vp.width = (f32)w;
vp.height = (f32)h;
vp.x = static_cast<f32>(x);
vp.y = static_cast<f32>(y);
vp.width = static_cast<f32>(w);
vp.height = static_cast<f32>(h);
vp.minDepth = 0.f;
vp.maxDepth = 1.f;
vkCmdSetViewport(cmd, 0, 1, &vp);
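The upload_vertex_data call above views a vertex array as a flat f32 stream. A hedged sketch with a stand-in vertex type: the real vertex layout is not shown in this hunk, and the reinterpret_cast is only sound if the type is standard-layout and padding-free, which the static_assert documents.

#include <vector>

using f32 = float;

struct vertex_stub { f32 x, y, z, w; };
static_assert(sizeof(vertex_stub) == 4 * sizeof(f32), "expected a padding-free vertex");

// View tightly packed vertices as a contiguous float stream for upload.
inline const f32* as_float_stream(const std::vector<vertex_stub>& verts)
{
	return reinterpret_cast<const f32*>(verts.data());
}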
@ -11,7 +11,7 @@ namespace vk
private:
std::unique_ptr<vk::buffer> m_vertex_buffer;
std::unique_ptr<vk::buffer> m_uniforms_buffer;

std::unique_ptr<vk::glsl::program> m_program;
vk::glsl::shader m_vertex_shader;
vk::glsl::shader m_fragment_shader;
@ -42,7 +42,7 @@ namespace vk
m_descriptor_pool.create(dev, descriptor_pools, 1, 120, 2);

VkDescriptorSetLayoutBinding bindings[1] = {};

//Scale and offset data plus output color
bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
bindings[0].descriptorCount = 1;
@ -128,7 +128,7 @@ namespace vk

VkVertexInputAttributeDescription vdesc;
VkVertexInputBindingDescription vbind;

vdesc.binding = 0;
vdesc.format = VK_FORMAT_R32G32_SFLOAT;
vdesc.location = 0;
@ -195,7 +195,7 @@ namespace vk
CHECK_RESULT(vkCreateGraphicsPipelines(dev, nullptr, 1, &info, NULL, &pipeline));

const std::vector<vk::glsl::program_input> unused;
m_program = std::make_unique<vk::glsl::program>((VkDevice)dev, pipeline, unused, unused);
m_program = std::make_unique<vk::glsl::program>(static_cast<VkDevice>(dev), pipeline, unused, unused);
}
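A hypothetical wrapper standing in for the device object passed above: if the wrapper exposes its native handle through a conversion operator, static_cast<VkDevice>(dev) simply invokes that operator, doing the same work as the old C-style cast while ruling out any other conversion path. All names here are illustrative.

struct opaque_device_t;
using device_handle = opaque_device_t*;

struct device_wrapper
{
	device_handle handle = nullptr;
	operator device_handle() const { return handle; }
};

// static_cast selects the user-defined conversion operator.
inline device_handle unwrap(const device_wrapper& dev)
{
	return static_cast<device_handle>(dev);
}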
void load_program(vk::command_buffer &cmd, float scale_x, float scale_y, const float *offsets, size_t nb_offsets, std::array<float, 4> color)
@ -213,7 +213,7 @@ namespace vk

float scale[] = { scale_x, scale_y };
float colors[] = { color[0], color[1], color[2], color[3] };
float *dst = (float*)m_uniforms_buffer->map(m_uniform_buffer_offset, 8192);
float* dst = static_cast<float*>(m_uniforms_buffer->map(m_uniform_buffer_offset, 8192));

//std140 spec demands that arrays be multiples of 16 bytes
for (size_t i = 0; i < nb_offsets; ++i)
@ -232,7 +232,7 @@ namespace vk

vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, m_program->pipeline);
vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline_layout, 0, 1, &m_descriptor_set, 0, nullptr);

VkDeviceSize zero = 0;
vkCmdBindVertexBuffers(cmd, 0, 1, &m_vertex_buffer->value, &zero);
}
@ -264,15 +264,15 @@ namespace vk

m_render_pass = render_pass;
m_uniform_buffer_size = 983040;

init_descriptor_set(dev);
init_program(dev);

GlyphManager glyph_source;
auto points = glyph_source.generate_point_map();
const size_t buffer_size = points.size() * sizeof(GlyphManager::glyph_point);

u8 *dst = (u8*)m_vertex_buffer->map(0, buffer_size);
u8* dst = static_cast<u8*>(m_vertex_buffer->map(0, buffer_size));
memcpy(dst, points.data(), buffer_size);
m_vertex_buffer->unmap();

@ -302,7 +302,7 @@ namespace vk

while (*s)
{
u8 offset = (u8)*s;
u8 offset = static_cast<u8>(*s);
bool to_draw = false; //Can be false for space or unsupported characters

auto o = m_offsets.find(offset);
@ -334,8 +334,8 @@ namespace vk
}

VkViewport vp{};
vp.width = (f32)target_w;
vp.height = (f32)target_h;
vp.width = static_cast<f32>(target_w);
vp.height = static_cast<f32>(target_h);
vp.minDepth = 0.f;
vp.maxDepth = 1.f;
vkCmdSetViewport(cmd, 0, 1, &vp);
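The glyph lookup above indexes a table by character. A short sketch of why the explicit cast matters: plain char may be signed, so characters above 0x7f would be negative if used directly, and static_cast<u8> makes the wrap into the 0..255 index range explicit. The function name is illustrative.

#include <cstdint>

using u8 = std::uint8_t;

// Map a character to its glyph-table index, wrapping signed chars into 0..255.
inline u8 glyph_index(char c)
{
	return static_cast<u8>(c);
}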
@ -590,8 +590,8 @@ struct cfg_root : cfg::node
{
node_sys(cfg::node* _this) : cfg::node(_this, "System") {}

cfg::_enum<CellSysutilLang> language{this, "Language", (CellSysutilLang)1}; // CELL_SYSUTIL_LANG_ENGLISH_US
cfg::_enum<CellKbMappingType> keyboard_type{this, "Keyboard Type", (CellKbMappingType)0}; // CELL_KB_MAPPING_101 = US
cfg::_enum<CellSysutilLang> language{this, "Language", CellSysutilLang{1}}; // CELL_SYSUTIL_LANG_ENGLISH_US
cfg::_enum<CellKbMappingType> keyboard_type{this, "Keyboard Type", CellKbMappingType{0}}; // CELL_KB_MAPPING_101 = US
cfg::_enum<enter_button_assign> enter_button_assignment{this, "Enter button assignment", enter_button_assign::cross};

} sys{this};
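A sketch of the config-default change above with a stand-in enum (the real CellSysutilLang/CellKbMappingType definitions are not shown here): since C++17, an enumeration with a fixed underlying type can be list-initialized from an integer, so Enum{1} replaces the old (Enum)1 cast without naming an enumerator.

#include <cstdint>

// Stand-in for a firmware language enum with a fixed underlying type.
enum class lang_stub : std::int32_t {};

constexpr lang_stub default_language = lang_stub{1}; // stands in for CELL_SYSUTIL_LANG_ENGLISH_US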