
PPU: refactor vector rounding instructions

Fix: nearbyint -> roundeven
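For context: std::nearbyint rounds according to the thread's current floating-point rounding mode, while the PPU vrfin instruction always rounds to nearest with ties to even; the two agree only when the host happens to run in FE_TONEAREST. A minimal standalone sketch of the difference (illustrative only; strictly conforming code also needs FENV_ACCESS enabled for fesetround effects to be observable):

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        // std::nearbyint follows whatever rounding mode is currently set...
        std::fesetround(FE_UPWARD);
        std::printf("%g\n", std::nearbyint(0.5)); // prints 1 (rounded up)

        // ...but ties-to-even must yield 0 for 0.5, whatever the mode:
        std::fesetround(FE_TONEAREST);
        std::printf("%g\n", std::nearbyint(0.5)); // prints 0 (tie goes to even)
    }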
Author: Nekotekina
Date:   2022-01-17 11:32:44 +03:00
Parent: 248f9424ac
Commit: 14cca55b50
5 changed files with 190 additions and 72 deletions

View File

@@ -270,7 +270,7 @@ public:
 	built_function& operator=(const built_function&) = delete;
 
-	template <typename F> requires (std::is_invocable_v<F, native_asm&, native_args&>)
+	template <typename F>
 	built_function(std::string_view name, F&& builder,
 		u32 line = __builtin_LINE(),
 		u32 col = __builtin_COLUMN(),
@@ -280,16 +280,6 @@ public:
 	{
 	}
 
-	template <typename F> requires (std::is_invocable_v<F>)
-	built_function(std::string_view, F&& getter,
-		u32 line = __builtin_LINE(),
-		u32 col = __builtin_COLUMN(),
-		const char* file = __builtin_FILE(),
-		const char* func = __builtin_FUNCTION())
-		: m_func(ensure(getter(), const_str(), line, col, file, func))
-	{
-	}
-
 	operator FT() const noexcept
 	{
 		return m_func;
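With the getter overload removed, only the builder form of the constructor remains, which is why the first hunk can also drop the requires-clause that disambiguated the two overloads. A hedged sketch of the surviving usage pattern, modeled on the sse41_roundf helper added later in this commit (the name and trivial body here are hypothetical; x86 path only):

    // Hypothetical no-op builder: a real one emits an actual instruction
    // sequence into the assembler (see sse41_roundf below).
    static built_function<void(*)()> do_nothing("do_nothing", [](native_asm& c, native_args&)
    {
        c.ret();
    });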

View File

@@ -2305,14 +2305,14 @@ auto VREFP()
 	if constexpr (Build == 0xf1a6)
 		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();
 
-	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
-	{
-		const auto a = _mm_set_ps(1.0f, 1.0f, 1.0f, 1.0f);
-		const auto m = gv_bcst32(ppu.jm_mask, &ppu_thread::jm_mask);
-		const auto b = ppu_flush_denormal<false, Flags...>(m, ppu.vr[op.vb]);
-		const auto result = _mm_div_ps(a, b);
-		ppu.vr[op.vd] = ppu_flush_denormal<true, Flags...>(m, ppu_set_vnan<Flags...>(result, a, b));
-	};
-
-	RETURN_(ppu, op);
+	static const auto exec = [](auto&& d, auto&& b_, auto&& jm_mask)
+	{
+		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
+		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
+		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_divfs(gv_bcstfs(1.0f), b), b));
+	};
+
+	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
 }
 
 template <u32 Build, ppu_exec_bit... Flags>
@@ -2321,19 +2321,14 @@ auto VRFIM()
 	if constexpr (Build == 0xf1a6)
 		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();
 
-	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
-	{
-		const auto m = gv_bcst32(ppu.jm_mask, &ppu_thread::jm_mask);
-		const auto b = ppu_flush_denormal<false, Flags...>(m, ppu.vr[op.vb]);
-		v128 d;
-
-		for (uint w = 0; w < 4; w++)
-		{
-			d._f[w] = std::floor(b._f[w]);
-		}
-
-		ppu.vr[op.vd] = ppu_flush_denormal<true, Flags...>(m, ppu_set_vnan<Flags...>(d, b));
-	};
-
-	RETURN_(ppu, op);
+	static const auto exec = [](auto&& d, auto&& b_, auto&& jm_mask)
+	{
+		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
+		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
+		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_roundfs_floor(b), b));
+	};
+
+	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
 }
 
 template <u32 Build, ppu_exec_bit... Flags>
@@ -2342,18 +2337,13 @@ auto VRFIN()
 	if constexpr (Build == 0xf1a6)
 		return ppu_exec_select<Flags...>::template select<fix_nj, set_vnan, fix_vnan>();
 
-	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
-	{
-		const auto b = ppu.vr[op.vb];
-		v128 d;
-
-		for (uint w = 0; w < 4; w++)
-		{
-			d._f[w] = std::nearbyint(b._f[w]);
-		}
-
-		ppu.vr[op.vd] = ppu_flush_denormal<true, Flags...>(gv_bcst32(ppu.jm_mask, &ppu_thread::jm_mask), ppu_set_vnan<Flags...>(d, b));
-	};
-
-	RETURN_(ppu, op);
+	static const auto exec = [](auto&& d, auto&& b, auto&& jm_mask)
+	{
+		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
+		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_roundfs_even(b), b));
+	};
+
+	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
 }
 
 template <u32 Build, ppu_exec_bit... Flags>
@@ -2362,19 +2352,14 @@ auto VRFIP()
 	if constexpr (Build == 0xf1a6)
 		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();
 
-	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
-	{
-		const auto m = gv_bcst32(ppu.jm_mask, &ppu_thread::jm_mask);
-		const auto b = ppu_flush_denormal<false, Flags...>(m, ppu.vr[op.vb]);
-		v128 d;
-
-		for (uint w = 0; w < 4; w++)
-		{
-			d._f[w] = std::ceil(b._f[w]);
-		}
-
-		ppu.vr[op.vd] = ppu_flush_denormal<true, Flags...>(m, ppu_set_vnan<Flags...>(d, b));
-	};
-
-	RETURN_(ppu, op);
+	static const auto exec = [](auto&& d, auto&& b_, auto&& jm_mask)
+	{
+		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
+		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
+		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_roundfs_ceil(b), b));
+	};
+
+	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
 }
 
 template <u32 Build, ppu_exec_bit... Flags>
@@ -2383,18 +2368,13 @@ auto VRFIZ()
 	if constexpr (Build == 0xf1a6)
 		return ppu_exec_select<Flags...>::template select<fix_nj, set_vnan, fix_vnan>();
 
-	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
-	{
-		const auto b = ppu.vr[op.vb];
-		v128 d;
-
-		for (uint w = 0; w < 4; w++)
-		{
-			d._f[w] = std::truncf(b._f[w]);
-		}
-
-		ppu.vr[op.vd] = ppu_flush_denormal<true, Flags...>(gv_bcst32(ppu.jm_mask, &ppu_thread::jm_mask), ppu_set_vnan<Flags...>(d, b));
-	};
-
-	RETURN_(ppu, op);
+	static const auto exec = [](auto&& d, auto&& b, auto&& jm_mask)
+	{
+		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
+		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_roundfs_trunc(b), b));
+	};
+
+	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
 }
 
 template <u32 Build, ppu_exec_bit... Flags>
@@ -2460,14 +2440,14 @@ auto VRSQRTEFP()
 	if constexpr (Build == 0xf1a6)
 		return ppu_exec_select<Flags...>::template select<use_nj, fix_nj, set_vnan, fix_vnan>();
 
-	static const auto exec = [](ppu_thread& ppu, ppu_opcode_t op)
-	{
-		const auto a = _mm_set_ps(1.0f, 1.0f, 1.0f, 1.0f);
-		const auto m = gv_bcst32(ppu.jm_mask, &ppu_thread::jm_mask);
-		const auto b = ppu_flush_denormal<false, Flags...>(m, ppu.vr[op.vb]);
-		const auto result = _mm_div_ps(a, _mm_sqrt_ps(b));
-		ppu.vr[op.vd] = ppu_flush_denormal<true, Flags...>(m, ppu_set_vnan<Flags...>(result, a, b));
-	};
-
-	RETURN_(ppu, op);
+	static const auto exec = [](auto&& d, auto&& b_, auto&& jm_mask)
+	{
+		auto m = gv_bcst32(jm_mask, &ppu_thread::jm_mask);
+		auto b = ppu_flush_denormal<false, Flags...>(m, std::move(b_));
+		d = ppu_flush_denormal<true, Flags...>(std::move(m), ppu_set_vnan<Flags...>(gv_divfs(gv_bcstfs(1.0f), gv_sqrtfs(b)), b));
+	};
+
+	RETURN_(ppu.vr[op.vd], ppu.vr[op.vb], ppu.jm_mask);
 }
 
 template <u32 Build, ppu_exec_bit... Flags>
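Two things change in every body above: the exec lambda no longer receives the whole ppu_thread/opcode pair (RETURN_ now forwards just the destination register, the source register and jm_mask), and the raw SSE intrinsics plus per-lane scalar loops are replaced by the portable gv_* helpers added later in this commit, so the same bodies build on ARM64. Note also that ppu_set_vnan no longer receives the constant operand a, which can never be NaN. A hedged x86-only equivalence sketch for the VREFP core (function names are illustrative):

    v128 refp_old(const v128& b) { return _mm_div_ps(_mm_set_ps1(1.0f), b); } // old: SSE only
    v128 refp_new(const v128& b) { return gv_divfs(gv_bcstfs(1.0f), b); }     // new: SSE on x64, vdivq_f32 on ARM64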

View File

@@ -1419,7 +1419,7 @@ void PPUTranslator::VRFIM(ppu_opcode_t op)
 
 void PPUTranslator::VRFIN(ppu_opcode_t op)
 {
-	set_vr(op.vd, vec_handle_result(call<f32[4]>(get_intrinsic<f32[4]>(Intrinsic::nearbyint), get_vr<f32[4]>(op.vb))));
+	set_vr(op.vd, vec_handle_result(call<f32[4]>(get_intrinsic<f32[4]>(Intrinsic::roundeven), get_vr<f32[4]>(op.vb))));
 }
 
 void PPUTranslator::VRFIP(ppu_opcode_t op)
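This mirrors the interpreter fix at the LLVM level: llvm.nearbyint is specified to round according to the runtime rounding mode, whereas llvm.roundeven always rounds to nearest with ties to even, which is what vrfin requires regardless of the host FPU state.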

View File

@@ -275,11 +275,11 @@ namespace
 		c.jmp(asmjit::imm_ptr(&copy_data_swap_u32_naive<Compare>));
 	}
-#else
+#elif defined(ARCH_ARM64)
 	template <bool Compare>
-	constexpr auto build_copy_data_swap_u32()
+	void build_copy_data_swap_u32(native_asm& c, native_args& args)
 	{
-		return &copy_data_swap_u32_naive<Compare>;
+		c.b(asmjit::imm_ptr(&copy_data_swap_u32_naive<Compare>));
 	}
 #endif
 }
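Previously every non-x86 build hit the #else branch and simply returned a pointer to the naive implementation at compile time; ARM64 now goes through the same asmjit builder shape as the x86 path above it, emitting an unconditional branch into the naive copy. A hedged sketch of how such a builder is consumed via built_function (the variable name and the copy routine's exact signature are assumptions):

    // Assumed signature: destination, source, count of u32 elements.
    static built_function<void(*)(void*, const void*, u32)> copy_swap_fn(
        "copy_data_swap_u32", &build_copy_data_swap_u32<false>);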

View File

@@ -14,7 +14,6 @@
 #include <immintrin.h>
 #include <emmintrin.h>
-#include <cmath>
 #endif
 
 #if defined(ARCH_ARM64)
@@ -22,6 +21,7 @@
 #endif
 
 #include <cmath>
+#include <math.h>
 #include <cfenv>
 
 namespace asmjit
@@ -1541,6 +1541,24 @@ inline v128 gv_avgs32(const v128& a, const v128& b)
 #endif
 }
 
+inline v128 gv_divfs(const v128& a, const v128& b)
+{
+#if defined(ARCH_X64)
+	return _mm_div_ps(a, b);
+#elif defined(ARCH_ARM64)
+	return vdivq_f32(a, b);
+#endif
+}
+
+inline v128 gv_sqrtfs(const v128& a)
+{
+#if defined(ARCH_X64)
+	return _mm_sqrt_ps(a);
+#elif defined(ARCH_ARM64)
+	return vsqrtq_f32(a);
+#endif
+}
+
 inline v128 gv_fmafs(const v128& a, const v128& b, const v128& c)
 {
 #if defined(ARCH_X64) && defined(__FMA__)
@@ -1925,6 +1943,136 @@ inline v128 gv_cvtfs_tou32(const v128& src)
 #endif
 }
 
+namespace utils
+{
+	inline f32 roundevenf32(f32 arg)
+	{
+		u32 val = std::bit_cast<u32>(arg);
+		u32 exp = (val >> 23) & 0xff;
+		u32 abs = val & 0x7fffffff;
+
+		if (exp >= 127 + 23)
+		{
+			// Big enough, NaN or INF
+			return arg;
+		}
+		else if (exp >= 127)
+		{
+			u32 int_pos = (127 + 23) - exp;
+			u32 half_pos = int_pos - 1;
+			u32 half_bit = 1u << half_pos;
+			u32 int_bit = 1u << int_pos;
+
+			if (val & (int_bit | (half_bit - 1)))
+				val += half_bit;
+
+			val &= ~(int_bit - 1);
+		}
+		else if (exp == 126 && abs > 0x3f000000)
+		{
+			val &= 0x80000000;
+			val |= 0x3f800000;
+		}
+		else
+		{
+			val &= 0x80000000;
+		}
+
+		return std::bit_cast<f32>(val);
+	}
+}
+
+#if defined(ARCH_X64)
+template <uint Mode>
+inline built_function<__m128(*)(__m128)> sse41_roundf("sse41_roundf", [](native_asm& c, native_args&)
+{
+	static_assert(Mode < 4);
+	using namespace asmjit;
+	if (utils::has_avx())
+		c.vroundps(x86::xmm0, x86::xmm0, 8 + Mode);
+	else if (utils::has_sse41())
+		c.roundps(x86::xmm0, x86::xmm0, 8 + Mode);
+	else
+		c.jmp(+[](__m128 a) -> __m128
+		{
+			v128 r = a;
+			for (u32 i = 0; i < 4; i++)
+				if constexpr (Mode == 0)
+					r._f[i] = utils::roundevenf32(r._f[i]);
+				else if constexpr (Mode == 1)
+					r._f[i] = ::floorf(r._f[i]);
+				else if constexpr (Mode == 2)
+					r._f[i] = ::ceilf(r._f[i]);
+				else if constexpr (Mode == 3)
+					r._f[i] = ::truncf(r._f[i]);
+			return r;
+		});
+	c.ret();
+});
+#endif
+
+inline v128 gv_roundfs_even(const v128& a)
+{
+#if defined(__SSE4_1__)
+	return _mm_round_ps(a, 8 + 0);
+#elif defined(ARCH_ARM64)
+	return vrndnq_f32(a);
+#elif defined(ARCH_X64)
+	return sse41_roundf<0>(a);
+#else
+	v128 r;
+	for (u32 i = 0; i < 4; i++)
+		r._f[i] = utils::roundevenf32(a._f[i]);
+	return r;
+#endif
+}
+
+inline v128 gv_roundfs_ceil(const v128& a)
+{
+#if defined(__SSE4_1__)
+	return _mm_round_ps(a, 8 + 2);
+#elif defined(ARCH_ARM64)
+	return vrndpq_f32(a);
+#elif defined(ARCH_X64)
+	return sse41_roundf<2>(a);
+#else
+	v128 r;
+	for (u32 i = 0; i < 4; i++)
+		r._f[i] = ::ceilf(a._f[i]);
+	return r;
+#endif
+}
+
+inline v128 gv_roundfs_floor(const v128& a)
+{
+#if defined(__SSE4_1__)
+	return _mm_round_ps(a, 8 + 1);
+#elif defined(ARCH_ARM64)
+	return vrndmq_f32(a);
+#elif defined(ARCH_X64)
+	return sse41_roundf<1>(a);
+#else
+	v128 r;
+	for (u32 i = 0; i < 4; i++)
+		r._f[i] = ::floorf(a._f[i]);
+	return r;
+#endif
+}
+
+inline v128 gv_roundfs_trunc(const v128& a)
+{
+#if defined(__SSE4_1__)
+	return _mm_round_ps(a, 8 + 3);
+#elif defined(ARCH_ARM64)
+	return vrndq_f32(a);
+#elif defined(ARCH_X64)
+	return sse41_roundf<3>(a);
+#else
+	v128 r;
+	for (u32 i = 0; i < 4; i++)
+		r._f[i] = ::truncf(a._f[i]);
+	return r;
+#endif
+}
+
 inline bool gv_testz(const v128& a)
 {
 #if defined(__SSE4_1__)
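utils::roundevenf32 implements round-half-to-even with pure integer arithmetic: when the integer bit is odd or any fraction bit below the halfway position is set, adding half_bit carries the value up, and the mask then clears everything below the integer position, so exact ties land on the even neighbor. The sse41_roundf trampoline selects, once at startup, AVX vroundps, SSE4.1 roundps, or a jump to the scalar fallback; the immediate 8 + Mode combines _MM_FROUND_NO_EXC (0x8) with an explicit rounding mode, so the result never depends on MXCSR. A quick sanity sketch (not part of the commit) of the values the scalar fallback must produce:

    #include <cassert>
    #include <cmath>

    void sanity_check_roundevenf32()
    {
        assert(utils::roundevenf32(0.5f) == 0.0f);        // tie rounds to even 0
        assert(utils::roundevenf32(1.5f) == 2.0f);        // tie rounds to even 2
        assert(utils::roundevenf32(2.5f) == 2.0f);        // tie rounds to even 2, not 3
        assert(utils::roundevenf32(1.25f) == 1.0f);       // non-tie rounds to nearest
        assert(std::signbit(utils::roundevenf32(-0.5f))); // sign of zero is preserved
    }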