
rsx: Refactor texture cache utils

- Also lays groundwork for optional hashed sections
kd-11 2021-01-25 19:31:00 +03:00 committed by kd-11
parent 8d6504d6e3
commit 1bad9a939f
10 changed files with 465 additions and 395 deletions

View File

@@ -4,23 +4,33 @@
namespace rpcs3
{
constexpr usz fnv_seed = 14695981039346656037ull;
constexpr usz fnv_prime = 1099511628211ull;
template<typename T>
static usz hash_base(T value)
{
return static_cast<usz>(value);
}
template<typename T, typename = std::enable_if_t<std::is_integral<T>::value, bool>>
static inline usz hash64(usz hash_value, const T data)
{
hash_value ^= data;
hash_value *= fnv_prime;
return hash_value;
}
template<typename T, typename U>
static usz hash_struct_base(const T& value)
{
// FNV 64-bit
-usz result = 14695981039346656037ull;
+usz result = fnv_seed;
const U *bits = reinterpret_cast<const U*>(&value);
for (usz n = 0; n < (sizeof(T) / sizeof(U)); ++n)
{
-result ^= bits[n];
-result *= 1099511628211ull;
+result = hash64(result, bits[n]);
}
return result;
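In isolation, the refactored helpers behave as follows (a minimal, self-contained sketch; the usz alias and the sampler_key struct are illustrative stand-ins, not part of the diff): hash64 performs one FNV step per 64-bit word, xor-ing the data into the running hash and multiplying by the 64-bit FNV prime, and hash_struct_base folds every word of a trivially-copyable value through it starting from fnv_seed.

#include <cstdint>
#include <cstdio>

using usz = std::uint64_t; // stand-in for the rpcs3 typedef

constexpr usz fnv_seed  = 14695981039346656037ull;
constexpr usz fnv_prime = 1099511628211ull;

static usz hash64(usz hash_value, usz data)
{
    hash_value ^= data;      // fold in the next 64-bit word
    hash_value *= fnv_prime; // FNV multiply
    return hash_value;
}

struct sampler_key { usz format; usz flags; }; // illustrative payload

int main()
{
    const sampler_key key{0x85, 0x3};
    usz result = fnv_seed;
    const usz* bits = reinterpret_cast<const usz*>(&key);
    for (usz n = 0; n < sizeof(key) / sizeof(usz); ++n)
        result = hash64(result, bits[n]); // same loop body as hash_struct_base
    std::printf("hash = 0x%016llx\n", static_cast<unsigned long long>(result));
    return 0;
}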

View File

@@ -406,6 +406,7 @@ target_sources(rpcs3_emu PRIVATE
RSX/Common/ProgramStateCache.cpp
RSX/Common/surface_store.cpp
RSX/Common/TextureUtils.cpp
RSX/Common/texture_cache.cpp
RSX/Common/VertexProgramDecompiler.cpp
RSX/Null/NullGSRender.cpp
RSX/Overlays/overlay_animation.cpp

View File

@@ -0,0 +1,213 @@
#include "stdafx.h"
#include "texture_cache_utils.h"
#include "Utilities/address_range.h"
#include "Utilities/hash.h"
namespace rsx
{
void buffered_section::init_lockable_range(const address_range& range)
{
locked_range = range.to_page_range();
AUDIT((locked_range.start == page_start(range.start)) || (locked_range.start == next_page(range.start)));
AUDIT(locked_range.end <= page_end(range.end));
ensure(locked_range.is_page_range());
}
void buffered_section::reset(const address_range& memory_range)
{
ensure(memory_range.valid() && locked == false);
cpu_range = address_range(memory_range);
confirmed_range.invalidate();
locked_range.invalidate();
protection = utils::protection::rw;
protection_strat = section_protection_strategy::lock;
locked = false;
init_lockable_range(cpu_range);
if (memory_range.length() < 4096)
{
protection_strat = section_protection_strategy::hash;
mem_hash = 0;
}
}
void buffered_section::invalidate_range()
{
ensure(!locked);
cpu_range.invalidate();
confirmed_range.invalidate();
locked_range.invalidate();
}
void buffered_section::protect(utils::protection new_prot, bool force)
{
if (new_prot == protection && !force) return;
ensure(locked_range.is_page_range());
AUDIT(!confirmed_range.valid() || confirmed_range.inside(cpu_range));
#ifdef TEXTURE_CACHE_DEBUG
if (new_prot != protection || force)
{
if (locked && !force) // When force=true, it is the responsibility of the caller to remove this section from the checker refcounting
tex_cache_checker.remove(locked_range, protection);
if (new_prot != utils::protection::rw)
tex_cache_checker.add(locked_range, new_prot);
}
#endif // TEXTURE_CACHE_DEBUG
if (new_prot == utils::protection::no)
{
// Override
protection_strat = section_protection_strategy::lock;
}
if (protection_strat == section_protection_strategy::lock)
{
rsx::memory_protect(locked_range, new_prot);
}
else if (new_prot != utils::protection::rw)
{
mem_hash = fast_hash_internal();
}
protection = new_prot;
locked = (protection != utils::protection::rw);
if (!locked)
{
// Unprotect range also invalidates secured range
confirmed_range.invalidate();
}
}
void buffered_section::protect(utils::protection prot, const std::pair<u32, u32>& new_confirm)
{
// new_confirm.first is an offset after cpu_range.start
// new_confirm.second is the length (after cpu_range.start + new_confirm.first)
#ifdef TEXTURE_CACHE_DEBUG
// We need to remove the lockable range from page_info as we will be re-protecting with force==true
if (locked)
tex_cache_checker.remove(locked_range, protection);
#endif
if (prot != utils::protection::rw)
{
if (confirmed_range.valid())
{
confirmed_range.start = std::min(confirmed_range.start, cpu_range.start + new_confirm.first);
confirmed_range.end = std::max(confirmed_range.end, cpu_range.start + new_confirm.first + new_confirm.second - 1);
}
else
{
confirmed_range = address_range::start_length(cpu_range.start + new_confirm.first, new_confirm.second);
ensure(!locked || locked_range.inside(confirmed_range.to_page_range()));
}
ensure(confirmed_range.inside(cpu_range));
init_lockable_range(confirmed_range);
}
protect(prot, true);
}
void buffered_section::unprotect()
{
AUDIT(protection != utils::protection::rw);
protect(utils::protection::rw);
}
void buffered_section::discard()
{
#ifdef TEXTURE_CACHE_DEBUG
if (locked)
tex_cache_checker.remove(locked_range, protection);
#endif
protection = utils::protection::rw;
confirmed_range.invalidate();
locked = false;
}
const address_range& buffered_section::get_bounds(section_bounds bounds) const
{
switch (bounds)
{
case section_bounds::full_range:
return cpu_range;
case section_bounds::locked_range:
return locked_range;
case section_bounds::confirmed_range:
return confirmed_range.valid() ? confirmed_range : cpu_range;
default:
fmt::throw_exception("Unreachable");
}
}
u64 buffered_section::fast_hash_internal() const
{
const auto hash_range = confirmed_range.valid() ? confirmed_range : cpu_range;
const auto hash_length = hash_range.length();
const auto cycles = hash_length / 8;
auto rem = hash_length % 8;
auto src = get_ptr<const char>(hash_range.start);
auto data64 = reinterpret_cast<const u64*>(src);
usz hash = rpcs3::fnv_seed;
for (unsigned i = 0; i < cycles; ++i)
{
hash = rpcs3::hash64(hash, data64[i]);
}
if (rem) [[unlikely]] // Data often aligned to some power of 2
{
src += hash_length - rem;
if (rem > 4)
{
hash = rpcs3::hash64(hash, *reinterpret_cast<const u32*>(src));
src += 4;
}
if (rem > 2)
{
hash = rpcs3::hash64(hash, *reinterpret_cast<const u16*>(src));
src += 2;
}
while (rem--)
{
hash = rpcs3::hash64(hash, *reinterpret_cast<const u8*>(src));
src++;
}
}
return hash;
}
bool buffered_section::is_locked(bool actual_page_flags) const
{
if (!actual_page_flags || !locked)
{
return locked;
}
return (protection_strat == section_protection_strategy::lock);
}
bool buffered_section::sync()
{
if (protection_strat == section_protection_strategy::lock || !locked)
{
return true;
}
return (fast_hash_internal() == mem_hash);
}
}
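The new protection_strat field is what the commit message calls "groundwork for optional hashed sections": reset() keeps page protection for anything at least a page (4096 bytes) long but switches sub-page sections to the hash strategy, where protect() records an FNV hash of the confirmed (or full CPU) range instead of touching page tables, and sync() re-hashes that range to detect CPU writes. A self-contained model of that decision and check (types and names here are illustrative stand-ins, not the rpcs3 classes):

#include <cstddef>
#include <cstdint>

enum class strategy { lock, hash };

// Byte-wise FNV fold; the real fast_hash_internal() hashes u64 words with a small tail loop.
static std::uint64_t fnv_fold(const void* data, std::size_t len)
{
    const auto* p = static_cast<const unsigned char*>(data);
    std::uint64_t h = 14695981039346656037ull;
    for (std::size_t i = 0; i < len; ++i)
    {
        h ^= p[i];
        h *= 1099511628211ull;
    }
    return h;
}

struct tiny_section
{
    const std::uint8_t* base = nullptr;
    std::size_t length = 0;
    strategy strat = strategy::lock;
    std::uint64_t mem_hash = 0;

    void reset(const std::uint8_t* ptr, std::size_t len)
    {
        base = ptr;
        length = len;
        // Same threshold as buffered_section::reset(): sub-page ranges are hashed.
        strat = (len < 4096) ? strategy::hash : strategy::lock;
    }

    void protect_readonly()
    {
        if (strat == strategy::hash)
            mem_hash = fnv_fold(base, length); // snapshot contents instead of mprotect
        // else: the real code calls rsx::memory_protect(locked_range, new_prot)
    }

    // Mirrors buffered_section::sync(): lock-protected sections are always in sync
    // (page faults catch writes); hashed sections compare against the snapshot.
    bool sync() const
    {
        return strat == strategy::lock || fnv_fold(base, length) == mem_hash;
    }
};

cached_texture_section::sync_protection() (added further below) builds on this: when sync() fails, the section is discarded and marked dirty, so stale hashed sections drop out of the cache instead of serving old data.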

View File

@@ -670,11 +670,7 @@ namespace rsx
u32 last_dirty_block = UINT32_MAX;
bool repeat_loop = false;
-// Not having full-range protections means some textures will check the confirmed range and not the locked range
-const bool not_full_range_protected = (buffered_section::guard_policy != protection_policy::protect_policy_full_range);
-section_bounds range_it_bounds = not_full_range_protected ? confirmed_range : locked_range;
-auto It = m_storage.range_begin(invalidate_range, range_it_bounds, true); // will iterate through locked sections only
+auto It = m_storage.range_begin(invalidate_range, locked_range, true); // will iterate through locked sections only
while (It != m_storage.range_end())
{
const u32 base = It.get_block().get_start();
@@ -690,9 +686,16 @@ namespace rsx
if (tex.cache_tag != cache_tag) //flushable sections can be 'clean' but unlocked. TODO: Handle this better
{
tex.sync_protection();
if (!tex.is_locked())
{
It++;
continue;
}
const rsx::section_bounds bounds = tex.get_overlap_test_bounds();
-if (range_it_bounds == bounds || tex.overlaps(invalidate_range, bounds))
+if (locked_range == bounds || tex.overlaps(invalidate_range, bounds))
{
const auto new_range = tex.get_min_max(invalidate_range, bounds).to_page_range();
AUDIT(new_range.is_page_range() && invalidate_range.inside(new_range));
@@ -728,7 +731,7 @@ namespace rsx
// repeat_loop==true means some blocks are still dirty and we need to repeat the loop again
if (repeat_loop && It == m_storage.range_end())
{
-It = m_storage.range_begin(invalidate_range, range_it_bounds, true);
+It = m_storage.range_begin(invalidate_range, locked_range, true);
repeat_loop = false;
}
}
@@ -863,7 +866,7 @@ namespace rsx
// Write if and only if no one else has trashed section memory already
// TODO: Proper section management should prevent this from happening
// TODO: Blit engine section merge support and/or partial texture memory buffering
-if (tex.is_dirty() || !tex.test_memory_head() || !tex.test_memory_tail())
+if (tex.is_dirty())
{
// Contents clobbered, destroy this
if (!tex.is_dirty())
@@ -973,18 +976,17 @@ namespace rsx
{
std::vector<section_storage_type*> results;
-for (auto It = m_storage.range_begin(test_range, full_range); It != m_storage.range_end(); It++)
+for (auto It = m_storage.range_begin(test_range, full_range, check_unlocked); It != m_storage.range_end(); It++)
{
auto &tex = *It;
if constexpr (check_unlocked)
{
tex.sync_protection();
}
if (!tex.is_dirty() && (context_mask & static_cast<u32>(tex.get_context())))
{
if constexpr (check_unlocked)
{
if (!tex.is_locked())
continue;
}
if (required_pitch && !rsx::pitch_compatible<false>(&tex, required_pitch, UINT16_MAX))
{
continue;
@@ -1393,6 +1395,13 @@ namespace rsx
return false;
}
void trim_sections()
{
std::lock_guard lock(m_cache_mutex);
m_storage.trim_sections();
}
image_view_type create_temporary_subresource(commandbuffer_type &cmd, deferred_subresource& desc)
{
if (!desc.do_not_cache) [[likely]]

View File

@@ -10,6 +10,31 @@
namespace rsx
{
enum section_bounds
{
full_range,
locked_range,
confirmed_range
};
enum section_protection_strategy
{
lock,
hash
};
static inline void memory_protect(const address_range& range, utils::protection prot)
{
ensure(range.is_page_range());
//rsx_log.error("memory_protect(0x%x, 0x%x, %x)", static_cast<u32>(range.start), static_cast<u32>(range.length()), static_cast<u32>(prot));
utils::memory_protect(vm::base(range.start), range.length(), prot);
#ifdef TEXTURE_CACHE_DEBUG
tex_cache_checker.set_protection(range, prot);
#endif
}
/**
* List structure used in Ranged Storage Blocks
* List of Arrays
@@ -603,6 +628,24 @@ namespace rsx
return any_released;
}
void trim_sections()
{
for (auto it = m_in_use.begin(); it != m_in_use.end(); it++)
{
auto* block = *it;
if (block->get_locked_count() > 256)
{
for (auto& tex : *block)
{
if (tex.is_locked() && !tex.is_locked(true))
{
tex.sync_protection();
}
}
}
}
}
/**
* Callbacks
@@ -854,7 +897,157 @@ namespace rsx
};
class buffered_section
{
private:
address_range locked_range;
address_range cpu_range = {};
address_range confirmed_range;
utils::protection protection = utils::protection::rw;
section_protection_strategy protection_strat = section_protection_strategy::lock;
u64 mem_hash = 0;
bool locked = false;
void init_lockable_range(const address_range& range);
u64 fast_hash_internal() const;
public:
buffered_section() = default;
~buffered_section() = default;
void reset(const address_range& memory_range);
protected:
void invalidate_range();
public:
void protect(utils::protection new_prot, bool force = false);
void protect(utils::protection prot, const std::pair<u32, u32>& new_confirm);
void unprotect();
bool sync();
void discard();
const address_range& get_bounds(section_bounds bounds) const;
bool is_locked(bool actual_page_flags = false) const;
/**
* Overlapping checks
*/
inline bool overlaps(const u32 address, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(address);
}
inline bool overlaps(const address_range& other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other);
}
inline bool overlaps(const address_range_vector& other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other);
}
inline bool overlaps(const buffered_section& other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other.get_bounds(bounds));
}
inline bool inside(const address_range& other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other);
}
inline bool inside(const address_range_vector& other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other);
}
inline bool inside(const buffered_section& other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other.get_bounds(bounds));
}
inline s32 signed_distance(const address_range& other, section_bounds bounds) const
{
return get_bounds(bounds).signed_distance(other);
}
inline u32 distance(const address_range& other, section_bounds bounds) const
{
return get_bounds(bounds).distance(other);
}
/**
* Utilities
*/
inline bool valid_range() const
{
return cpu_range.valid();
}
inline u32 get_section_base() const
{
return cpu_range.start;
}
inline u32 get_section_size() const
{
return cpu_range.valid() ? cpu_range.length() : 0;
}
inline const address_range& get_locked_range() const
{
AUDIT(locked);
return locked_range;
}
inline const address_range& get_section_range() const
{
return cpu_range;
}
const address_range& get_confirmed_range() const
{
return confirmed_range.valid() ? confirmed_range : cpu_range;
}
const std::pair<u32, u32> get_confirmed_range_delta() const
{
if (!confirmed_range.valid())
return { 0, cpu_range.length() };
return { confirmed_range.start - cpu_range.start, confirmed_range.length() };
}
inline bool matches(const address_range& range) const
{
return cpu_range.valid() && cpu_range == range;
}
inline utils::protection get_protection() const
{
return protection;
}
inline address_range get_min_max(const address_range& current_min_max, section_bounds bounds) const
{
return get_bounds(bounds).get_min_max(current_min_max);
}
/**
* Super Pointer
*/
template <typename T = void>
inline T* get_ptr(u32 address) const
{
return reinterpret_cast<T*>(vm::g_sudo_addr + address);
}
};
/**
* Cached Texture Section
@@ -1287,6 +1480,15 @@ namespace rsx
flush_exclusions.clear();
}
void sync_protection()
{
if (!buffered_section::sync())
{
discard(true);
ensure(is_dirty());
}
}
/**
* Flush
@@ -1522,13 +1724,7 @@ namespace rsx
rsx::section_bounds get_overlap_test_bounds() const
{
-if (guard_policy == protection_policy::protect_policy_full_range)
-return rsx::section_bounds::locked_range;
-const bool strict_range_check = g_cfg.video.write_color_buffers || g_cfg.video.write_depth_buffer;
-return (strict_range_check || get_context() == rsx::texture_upload_context::blit_engine_dst) ?
-rsx::section_bounds::confirmed_range :
-rsx::section_bounds::locked_range;
+return rsx::section_bounds::locked_range;
}
rsx::texture_dimension_extended get_image_type() const
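trim_sections(), together with the two-argument is_locked() overload, gives the cache a cheap periodic cleanup for hashed sections: is_locked() reports the logical state while is_locked(true) reports the actual page flags, so the two disagree exactly for hash-protected sections. Blocks holding more than 256 locked sections have those sections re-validated via sync_protection(), which discards anything whose hash no longer matches. A rough standalone model of that pass (all types here are illustrative mocks, not the rpcs3 classes):

#include <cstddef>
#include <vector>

struct mock_section
{
    bool logically_locked = true;  // what is_locked() would report
    bool page_protected   = false; // what is_locked(true) would report; false => hash strategy
    bool hash_still_valid = true;  // what fast_hash_internal() == mem_hash would report
    bool dirty            = false;

    // Mirrors cached_texture_section::sync_protection(): a failed sync discards the section.
    void sync_protection()
    {
        if (!hash_still_valid)
        {
            logically_locked = false;
            dirty = true;
        }
    }
};

struct mock_block
{
    std::vector<mock_section> sections;

    std::size_t locked_count() const
    {
        std::size_t n = 0;
        for (const auto& s : sections)
            n += s.logically_locked ? 1 : 0;
        return n;
    }
};

// Same shape as ranged_storage::trim_sections(): only crowded blocks are scanned,
// and only sections whose "lock" is hash-based are re-validated.
void trim_sections(std::vector<mock_block>& blocks, std::size_t threshold = 256)
{
    for (auto& block : blocks)
    {
        if (block.locked_count() <= threshold)
            continue;

        for (auto& tex : block.sections)
        {
            if (tex.logically_locked && !tex.page_protected)
                tex.sync_protection();
        }
    }
}

The GL and Vulkan backends below simply call trim_sections() from on_frame_end(), so this pass runs once per frame before zombie-object purging.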

View File

@@ -873,6 +873,8 @@
void on_frame_end() override
{
trim_sections();
if (m_storage.m_unreleased_texture_objects >= m_max_zombie_objects)
{
purge_unreleased_sections();

View File

@@ -1098,6 +1098,8 @@
void on_frame_end() override
{
trim_sections();
if (m_storage.m_unreleased_texture_objects >= m_max_zombie_objects ||
m_temporary_memory_size > 0x4000000) //If already holding over 64M in discardable memory, be frugal with memory resources
{

View File

@@ -20,373 +20,6 @@
namespace rsx
{
enum protection_policy
{
protect_policy_one_page, //Only guard one page, preferably one where this section 'wholly' fits
protect_policy_conservative, //Guards as much memory as possible that is guaranteed to only be covered by the defined range without sharing
protect_policy_full_range //Guard the full memory range. Shared pages may be invalidated by access outside the object we're guarding
};
enum section_bounds
{
full_range,
locked_range,
confirmed_range
};
static inline void memory_protect(const address_range& range, utils::protection prot)
{
ensure(range.is_page_range());
//rsx_log.error("memory_protect(0x%x, 0x%x, %x)", static_cast<u32>(range.start), static_cast<u32>(range.length()), static_cast<u32>(prot));
utils::memory_protect(vm::base(range.start), range.length(), prot);
#ifdef TEXTURE_CACHE_DEBUG
tex_cache_checker.set_protection(range, prot);
#endif
}
class buffered_section
{
public:
static const protection_policy guard_policy = protect_policy_full_range;
private:
address_range locked_range;
address_range cpu_range = {};
address_range confirmed_range;
utils::protection protection = utils::protection::rw;
bool locked = false;
inline void init_lockable_range(const address_range &range)
{
locked_range = range.to_page_range();
if ((guard_policy != protect_policy_full_range) && (range.length() >= 4096))
{
const u32 block_start = (locked_range.start < range.start) ? (locked_range.start + 4096u) : locked_range.start;
const u32 block_end = locked_range.end;
if (block_start < block_end)
{
// protect unique page range
locked_range.start = block_start;
locked_range.end = block_end;
}
if (guard_policy == protect_policy_one_page)
{
// protect exactly one page
locked_range.set_length(4096u);
}
}
AUDIT( (locked_range.start == page_start(range.start)) || (locked_range.start == next_page(range.start)) );
AUDIT( locked_range.end <= page_end(range.end) );
ensure(locked_range.is_page_range());
}
public:
buffered_section() = default;
~buffered_section() = default;
void reset(const address_range &memory_range)
{
ensure(memory_range.valid() && locked == false);
cpu_range = address_range(memory_range);
confirmed_range.invalidate();
locked_range.invalidate();
protection = utils::protection::rw;
locked = false;
init_lockable_range(cpu_range);
}
protected:
void invalidate_range()
{
ensure(!locked);
cpu_range.invalidate();
confirmed_range.invalidate();
locked_range.invalidate();
}
public:
void protect(utils::protection new_prot, bool force = false)
{
if (new_prot == protection && !force) return;
ensure(locked_range.is_page_range());
AUDIT( !confirmed_range.valid() || confirmed_range.inside(cpu_range) );
#ifdef TEXTURE_CACHE_DEBUG
if (new_prot != protection || force)
{
if (locked && !force) // When force=true, it is the responsibility of the caller to remove this section from the checker refcounting
tex_cache_checker.remove(locked_range, protection);
if (new_prot != utils::protection::rw)
tex_cache_checker.add(locked_range, new_prot);
}
#endif // TEXTURE_CACHE_DEBUG
rsx::memory_protect(locked_range, new_prot);
protection = new_prot;
locked = (protection != utils::protection::rw);
if (protection == utils::protection::no)
{
tag_memory();
}
else
{
if (!locked)
{
//Unprotect range also invalidates secured range
confirmed_range.invalidate();
}
}
}
void protect(utils::protection prot, const std::pair<u32, u32>& new_confirm)
{
// new_confirm.first is an offset after cpu_range.start
// new_confirm.second is the length (after cpu_range.start + new_confirm.first)
#ifdef TEXTURE_CACHE_DEBUG
// We need to remove the lockable range from page_info as we will be re-protecting with force==true
if (locked)
tex_cache_checker.remove(locked_range, protection);
#endif
if (prot != utils::protection::rw)
{
if (confirmed_range.valid())
{
confirmed_range.start = std::min(confirmed_range.start, cpu_range.start + new_confirm.first);
confirmed_range.end = std::max(confirmed_range.end, cpu_range.start + new_confirm.first + new_confirm.second - 1);
}
else
{
confirmed_range = address_range::start_length(cpu_range.start + new_confirm.first, new_confirm.second);
ensure(!locked || locked_range.inside(confirmed_range.to_page_range()));
}
ensure(confirmed_range.inside(cpu_range));
init_lockable_range(confirmed_range);
}
protect(prot, true);
}
inline void unprotect()
{
AUDIT(protection != utils::protection::rw);
protect(utils::protection::rw);
}
inline void discard()
{
#ifdef TEXTURE_CACHE_DEBUG
if (locked)
tex_cache_checker.remove(locked_range, protection);
#endif
protection = utils::protection::rw;
confirmed_range.invalidate();
locked = false;
}
inline const address_range& get_bounds(section_bounds bounds) const
{
switch (bounds)
{
case section_bounds::full_range:
return cpu_range;
case section_bounds::locked_range:
return locked_range;
case section_bounds::confirmed_range:
return confirmed_range.valid() ? confirmed_range : cpu_range;
default:
fmt::throw_exception("Unreachable");
}
}
/**
* Overlapping checks
*/
inline bool overlaps(const u32 address, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(address);
}
inline bool overlaps(const address_range &other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other);
}
inline bool overlaps(const address_range_vector &other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other);
}
inline bool overlaps(const buffered_section &other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other.get_bounds(bounds));
}
inline bool inside(const address_range &other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other);
}
inline bool inside(const address_range_vector &other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other);
}
inline bool inside(const buffered_section &other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other.get_bounds(bounds));
}
inline s32 signed_distance(const address_range &other, section_bounds bounds) const
{
return get_bounds(bounds).signed_distance(other);
}
inline u32 distance(const address_range &other, section_bounds bounds) const
{
return get_bounds(bounds).distance(other);
}
/**
* Utilities
*/
inline bool valid_range() const
{
return cpu_range.valid();
}
inline bool is_locked() const
{
return locked;
}
inline u32 get_section_base() const
{
return cpu_range.start;
}
inline u32 get_section_size() const
{
return cpu_range.valid() ? cpu_range.length() : 0;
}
inline const address_range& get_locked_range() const
{
AUDIT( locked );
return locked_range;
}
inline const address_range& get_section_range() const
{
return cpu_range;
}
const address_range& get_confirmed_range() const
{
return confirmed_range.valid() ? confirmed_range : cpu_range;
}
const std::pair<u32, u32> get_confirmed_range_delta() const
{
if (!confirmed_range.valid())
return { 0, cpu_range.length() };
return { confirmed_range.start - cpu_range.start, confirmed_range.length() };
}
inline bool matches(const address_range &range) const
{
return cpu_range.valid() && cpu_range == range;
}
inline utils::protection get_protection() const
{
return protection;
}
inline address_range get_min_max(const address_range& current_min_max, section_bounds bounds) const
{
return get_bounds(bounds).get_min_max(current_min_max);
}
/**
* Super Pointer
*/
template <typename T = void>
inline T* get_ptr(u32 address) const
{
return reinterpret_cast<T*>(vm::g_sudo_addr + address);
}
/**
* Memory tagging
*/
private:
inline void tag_memory()
{
// We only need to tag memory if we are in full-range mode
if (guard_policy == protect_policy_full_range)
return;
AUDIT(locked);
const address_range& range = get_confirmed_range();
volatile u32* first = get_ptr<volatile u32>(range.start);
volatile u32* last = get_ptr<volatile u32>(range.end - 3);
*first = range.start;
*last = range.end;
}
public:
bool test_memory_head()
{
if (guard_policy == protect_policy_full_range)
return true;
AUDIT(locked);
const auto& range = get_confirmed_range();
volatile const u32* first = get_ptr<volatile const u32>(range.start);
return (*first == range.start);
}
bool test_memory_tail()
{
if (guard_policy == protect_policy_full_range)
return true;
AUDIT(locked);
const auto& range = get_confirmed_range();
volatile const u32* last = get_ptr<volatile const u32>(range.end-3);
return (*last == range.end);
}
};
template <typename pipeline_storage_type, typename backend_storage>
class shaders_cache
{

View File

@@ -84,6 +84,7 @@
<ClCompile Include="Emu\Io\pad_config_types.cpp" />
<ClCompile Include="Emu\localized_string.cpp" />
<ClCompile Include="Emu\NP\rpcn_config.cpp" />
<ClCompile Include="Emu\RSX\Common\texture_cache.cpp" />
<ClCompile Include="Emu\RSX\Overlays\overlay_osk_panel.cpp" />
<ClCompile Include="Emu\RSX\Overlays\overlay_utils.cpp" />
<ClCompile Include="Emu\RSX\Overlays\Shaders\shader_loading_dialog.cpp" />

View File

@@ -977,6 +977,9 @@
<ClCompile Include="Emu\RSX\RSXDisAsm.cpp">
<Filter>Emu\GPU\RSX</Filter>
</ClCompile>
<ClCompile Include="Emu\RSX\Common\texture_cache.cpp">
<Filter>Emu\GPU\RSX\Common</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Crypto\aes.h">