From 2f01c69710c787ebeb221bf3fd5d4eee65877b60 Mon Sep 17 00:00:00 2001
From: MrPurple666
Date: Sat, 21 Jun 2025 19:32:32 +0000
Subject: [PATCH] LRU Cache Refactor with Thread-Safety (#199)

The cache is now thread-safe using std::shared_mutex, allowing concurrent
reads without blocking, along with some minor cleanups to make the code
easier to maintain.

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/199
Co-authored-by: MrPurple666
Co-committed-by: MrPurple666
---
 src/core/arm/nce/lru_cache.h | 251 +++++++++++++++++++++--------------
 src/core/arm/nce/patcher.cpp |  21 ++-
 src/core/arm/nce/patcher.h   |  13 +-
 3 files changed, 179 insertions(+), 106 deletions(-)

diff --git a/src/core/arm/nce/lru_cache.h b/src/core/arm/nce/lru_cache.h
index aacab368c4..b2d6b2f7e0 100644
--- a/src/core/arm/nce/lru_cache.h
+++ b/src/core/arm/nce/lru_cache.h
@@ -1,136 +1,187 @@
-#pragma once
+// SPDX-FileCopyrightText: 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+#pragma once
 #include <list>
-#include <optional>
 #include <unordered_map>
+#include <mutex>
+#include <shared_mutex>
+#include <utility>
+
 #include "common/logging/log.h"
 
-template <typename KeyType, typename ValueType>
+template <typename KeyType, typename ValueType>
 class LRUCache {
-private:
-    bool enabled = true;
-    size_t capacity;
-    std::list<KeyType> cache_list;
-    std::unordered_map<KeyType, std::pair<typename std::list<KeyType>::iterator, ValueType>> cache_map;
-
 public:
-    explicit LRUCache(size_t capacity, bool enabled = true) : enabled(enabled), capacity(capacity) {
-        cache_map.reserve(capacity);
-        LOG_WARNING(Core, "LRU Cache initialized with state: {}", enabled ? "enabled" : "disabled");
+    using key_type = KeyType;
+    using value_type = ValueType;
+    using size_type = std::size_t;
+
+    struct Statistics {
+        size_type hits = 0;
+        size_type misses = 0;
+        void reset() noexcept { hits = misses = 0; }
+    };
+
+    explicit LRUCache(size_type capacity, bool enabled = true)
+        : enabled_{enabled}, capacity_{capacity} {
+        cache_map_.reserve(capacity_);
+        LOG_WARNING(Core, "LRU Cache initialised (state: {} | capacity: {})", enabled_ ? "enabled" : "disabled", capacity_);
     }
 
-    // Returns pointer to value if found, nullptr otherwise
-    ValueType* get(const KeyType& key) {
-        if (!enabled) return nullptr;
+    // Non-copyable, but movable (both caches are locked for the move)
+    LRUCache(const LRUCache&) = delete;
+    LRUCache& operator=(const LRUCache&) = delete;
+    LRUCache(LRUCache&& other) noexcept { *this = std::move(other); }
+    LRUCache& operator=(LRUCache&& other) noexcept {
+        if (this == &other) return *this;
+        std::unique_lock this_lock(mutex_, std::defer_lock);
+        std::unique_lock other_lock(other.mutex_, std::defer_lock);
+        std::lock(this_lock, other_lock);
+        enabled_ = other.enabled_;
+        capacity_ = other.capacity_;
+        cache_list_ = std::move(other.cache_list_);
+        cache_map_ = std::move(other.cache_map_);
+        stats_ = other.stats_;
+        return *this;
+    }
+    ~LRUCache() = default;
 
-        auto it = cache_map.find(key);
-        if (it == cache_map.end()) {
+    [[nodiscard]] value_type* get(const key_type& key) {
+        if (!enabled_) [[unlikely]] return nullptr;
+        std::unique_lock lock(mutex_);
+        auto it = cache_map_.find(key);
+        if (it == cache_map_.end()) {
+            ++stats_.misses;
             return nullptr;
         }
-
-        // Move the accessed item to the front of the list (most recently used)
-        cache_list.splice(cache_list.begin(), cache_list, it->second.first);
-        return &(it->second.second);
+        move_to_front(it);
+        ++stats_.hits;
+        return &it->second.second;
     }
 
-    // Returns pointer to value if found (without promoting it), nullptr otherwise
-    ValueType* peek(const KeyType& key) const {
-        if (!enabled) return nullptr;
-
-        auto it = cache_map.find(key);
-        return it != cache_map.end() ? &(it->second.second) : nullptr;
+    [[nodiscard]] const value_type* peek(const key_type& key) const {
+        if (!enabled_) [[unlikely]] return nullptr;
+        std::shared_lock lock(mutex_);
+        auto it = cache_map_.find(key);
+        return it == cache_map_.end() ? nullptr : &it->second.second;
     }
 
-    // Inserts or updates a key-value pair
-    void put(const KeyType& key, const ValueType& value) {
-        if (!enabled) return;
+    template <typename V>
+    void put(const key_type& key, V&& value) {
+        if (!enabled_) [[unlikely]] return;
+        std::unique_lock lock(mutex_);
+        insert_or_update(key, std::forward<V>(value));
+    }
 
-        auto it = cache_map.find(key);
-
-        if (it != cache_map.end()) {
-            // Key exists, update value and move to front
-            it->second.second = value;
-            cache_list.splice(cache_list.begin(), cache_list, it->second.first);
-            return;
+    template <typename ValueFactory>
+    value_type& get_or_emplace(const key_type& key, ValueFactory&& factory) {
+        std::unique_lock lock(mutex_);
+        auto it = cache_map_.find(key);
+        if (it != cache_map_.end()) {
+            move_to_front(it);
+            return it->second.second;
         }
-
-        // Remove the least recently used item if cache is full
-        if (cache_map.size() >= capacity) {
-            auto last = cache_list.back();
-            cache_map.erase(last);
-            cache_list.pop_back();
-        }
-
-        // Insert new item at the front
-        cache_list.push_front(key);
-        cache_map[key] = {cache_list.begin(), value};
+        value_type new_value = factory();
+        insert_or_update(key, std::move(new_value));
+        return cache_map_.find(key)->second.second;
     }
 
-    // Enable or disable the LRU cache
-    void setEnabled(bool state) {
-        enabled = state;
-        LOG_WARNING(Core, "LRU Cache state changed to: {}", state ? "enabled" : "disabled");
-        if (!enabled) {
-            clear();
-        }
+    [[nodiscard]] bool contains(const key_type& key) const {
+        if (!enabled_) return false;
+        std::shared_lock lock(mutex_);
+        return cache_map_.find(key) != cache_map_.end();
     }
 
-    // Check if the cache is enabled
-    bool isEnabled() const {
-        return enabled;
-    }
-
-    // Attempts to get value, returns std::nullopt if not found
-    std::optional<ValueType> try_get(const KeyType& key) {
-        auto* val = get(key);
-        return val ? std::optional<ValueType>(*val) : std::nullopt;
-    }
-
-    // Checks if key exists in cache
-    bool contains(const KeyType& key) const {
-        if (!enabled) return false;
-        return cache_map.find(key) != cache_map.end();
-    }
-
-    // Removes a key from the cache if it exists
-    bool erase(const KeyType& key) {
-        if (!enabled) return false;
-
-        auto it = cache_map.find(key);
-        if (it == cache_map.end()) {
-            return false;
-        }
-        cache_list.erase(it->second.first);
-        cache_map.erase(it);
+    bool erase(const key_type& key) {
+        if (!enabled_) return false;
+        std::unique_lock lock(mutex_);
+        auto it = cache_map_.find(key);
+        if (it == cache_map_.end()) return false;
+        cache_list_.erase(it->second.first);
+        cache_map_.erase(it);
         return true;
     }
 
-    // Removes all elements from the cache
     void clear() {
-        cache_map.clear();
-        cache_list.clear();
+        std::unique_lock lock(mutex_);
+        cache_list_.clear();
+        cache_map_.clear();
+        stats_.reset();
     }
 
-    // Returns current number of elements in cache
-    size_t size() const {
-        return enabled ? cache_map.size() : 0;
+    [[nodiscard]] size_type size() const {
+        if (!enabled_) return 0;
+        std::shared_lock lock(mutex_);
+        return cache_map_.size();
     }
 
-    // Returns maximum capacity of cache
-    size_t get_capacity() const {
-        return capacity;
+    [[nodiscard]] size_type get_capacity() const { return capacity_; }
+
+    void resize(size_type new_capacity) {
+        if (!enabled_) return;
+        std::unique_lock lock(mutex_);
+        capacity_ = new_capacity;
+        shrink_if_needed();
+        cache_map_.reserve(capacity_);
     }
 
-    // Resizes the cache, evicting LRU items if new capacity is smaller
-    void resize(size_t new_capacity) {
-        if (!enabled) return;
+    void setEnabled(bool state) {
+        std::unique_lock lock(mutex_);
+        enabled_ = state;
+        LOG_WARNING(Core, "LRU Cache state changed to: {}", state ? "enabled" : "disabled");
+        if (!enabled_) {
+            // Clear in place: calling clear() here would try to re-lock mutex_.
+            cache_list_.clear();
+            cache_map_.clear();
+            stats_.reset();
+        }
+    }
 
-        capacity = new_capacity;
-        while (cache_map.size() > capacity) {
-            auto last = cache_list.back();
-            cache_map.erase(last);
-            cache_list.pop_back();
+    [[nodiscard]] bool isEnabled() const { return enabled_; }
+
+    [[nodiscard]] Statistics stats() const {
+        std::shared_lock lock(mutex_);
+        return stats_;
+    }
+
+private:
+    using list_type = std::list<key_type>;
+    using list_iterator = typename list_type::iterator;
+    using map_value_type = std::pair<list_iterator, value_type>;
+    using map_type = std::unordered_map<key_type, map_value_type>;
+
+    template <typename V>
+    void insert_or_update(const key_type& key, V&& value) {
+        auto it = cache_map_.find(key);
+        if (it != cache_map_.end()) {
+            it->second.second = std::forward<V>(value);
+            move_to_front(it);
+            return;
         }
-        cache_map.reserve(capacity);
+        // evict LRU if full
+        if (cache_map_.size() >= capacity_) {
+            const auto& lru_key = cache_list_.back();
+            cache_map_.erase(lru_key);
+            cache_list_.pop_back();
+        }
+        cache_list_.push_front(key);
+        cache_map_[key] = {cache_list_.begin(), std::forward<V>(value)};
     }
+
+    void move_to_front(typename map_type::iterator it) {
+        cache_list_.splice(cache_list_.begin(), cache_list_, it->second.first);
+        it->second.first = cache_list_.begin();
+    }
+
+    void shrink_if_needed() {
+        while (cache_map_.size() > capacity_) {
+            const auto& lru_key = cache_list_.back();
+            cache_map_.erase(lru_key);
+            cache_list_.pop_back();
+        }
+    }
+
+private:
+    mutable std::shared_mutex mutex_;
+    bool enabled_{true};
+    size_type capacity_;
+    list_type cache_list_;
+    map_type cache_map_;
+    mutable Statistics stats_;
 };
\ No newline at end of file
diff --git a/src/core/arm/nce/patcher.cpp b/src/core/arm/nce/patcher.cpp
index f6563bb20f..6f476caa81 100644
--- a/src/core/arm/nce/patcher.cpp
+++ b/src/core/arm/nce/patcher.cpp
@@ -1,5 +1,5 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #include "common/arm64/native_clock.h"
 #include "common/bit_cast.h"
@@ -14,6 +14,23 @@
 
 namespace Core::NCE {
 
+Patcher::Patcher(Patcher&& other) noexcept
+    : patch_cache(std::move(other.patch_cache)),
+      m_patch_instructions(std::move(other.m_patch_instructions)),
+      c(m_patch_instructions),
+      m_save_context(other.m_save_context),
+      m_load_context(other.m_load_context),
+      mode(other.mode),
+      total_program_size(other.total_program_size),
+      m_relocate_module_index(other.m_relocate_module_index),
+      modules(std::move(other.modules)),
+      curr_patch(nullptr) {
+    if (!modules.empty()) {
+        curr_patch = &modules.back();
+    }
+}
+
+
 using namespace Common::Literals;
 using namespace oaknut::util;
 
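The move constructor above re-seats members that refer to sibling members instead of moving them: the `c` member is rebuilt over this object's `m_patch_instructions`, and `curr_patch` is re-pointed at this object's `modules.back()`. A minimal standalone sketch of that pattern follows; the `Buffer` and `Writer` names are illustrative stand-ins and not types from the Eden tree.

#include <cstddef>
#include <utility>
#include <vector>

// Writer keeps a pointer to Buffer's storage, much like the patcher's code
// writer targets m_patch_instructions.
struct Writer {
    explicit Writer(std::vector<int>& out) : out_{&out} {}
    void push(int v) { out_->push_back(v); }
    std::vector<int>* out_;
};

class Buffer {
public:
    Buffer() : writer_(data_) {}

    // Move: take the data, then rebuild the writer over *our* vector so it
    // never dangles into the moved-from object.
    Buffer(Buffer&& other) noexcept
        : data_(std::move(other.data_)), writer_(data_), last_(nullptr) {
        if (!data_.empty()) {
            last_ = &data_.back(); // re-point, do not copy other.last_
        }
    }

    void append(int v) {
        writer_.push(v);
        last_ = &data_.back();
    }

    std::size_t size() const { return data_.size(); }

private:
    std::vector<int> data_;
    Writer writer_;       // must be re-seated on move, like c
    int* last_ = nullptr; // must be re-pointed on move, like curr_patch
};

int main() {
    Buffer a;
    a.append(1);
    a.append(2);
    Buffer b{std::move(a)}; // b's writer_ and last_ now reference b's own data_
    b.append(3);
    return static_cast<int>(b.size()); // 3
}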
diff --git a/src/core/arm/nce/patcher.h b/src/core/arm/nce/patcher.h
index 8e6363b2d5..11c255ef82 100644
--- a/src/core/arm/nce/patcher.h
+++ b/src/core/arm/nce/patcher.h
@@ -1,5 +1,5 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2025 Eden Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
@@ -15,6 +15,7 @@
 #include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/physical_memory.h"
 #include "lru_cache.h"
+#include
 
 namespace Core::NCE {
 
@@ -30,6 +31,10 @@ using EntryTrampolines = std::unordered_map
 
 class Patcher {
 public:
+    Patcher(const Patcher&) = delete;
+    Patcher& operator=(const Patcher&) = delete;
+    Patcher(Patcher&& other) noexcept;
+    Patcher& operator=(Patcher&&) noexcept = delete;
     explicit Patcher();
     ~Patcher();
 
@@ -62,7 +67,7 @@ private:
     void WriteCntpctHandler(ModuleDestLabel module_dest, oaknut::XReg dest_reg);
 
 private:
-    static constexpr size_t CACHE_SIZE = 4096; // Cache size for patch entries
+    static constexpr size_t CACHE_SIZE = 16384; // Cache size for patch entries
     LRUCache patch_cache{CACHE_SIZE, Settings::values.lru_cache_enabled.GetValue()};
 
     void BranchToPatch(uintptr_t module_dest) {
@@ -70,7 +75,7 @@ private:
         LOG_DEBUG(Core_ARM, "LRU cache lookup for address {:#x}", module_dest);
         // Try to get existing patch entry from cache
         if (auto* cached_patch = patch_cache.get(module_dest)) {
-            LOG_DEBUG(Core_ARM, "LRU cache hit for address {:#x}", module_dest);
+            LOG_WARNING(Core_ARM, "LRU cache hit for address {:#x}", module_dest);
             curr_patch->m_branch_to_patch_relocations.push_back({c.offset(), *cached_patch});
             return;
         }
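For reference, a minimal usage sketch of the refactored cache API. It assumes the header is compiled inside the Eden tree, so that "core/arm/nce/lru_cache.h" and the LOG_WARNING macro from common/logging/log.h resolve; the int and std::string key and value types are arbitrary stand-ins chosen for the example.

#include <cstdio>
#include <string>

#include "core/arm/nce/lru_cache.h" // assumption: built inside the Eden tree

int main() {
    // Stand-in key/value types; capacity of 2 so the eviction path is easy to observe.
    LRUCache<int, std::string> cache{2};

    cache.put(1, std::string{"one"});
    cache.put(2, std::string{"two"});

    // get() promotes key 1 to most-recently-used and counts a hit.
    if (auto* value = cache.get(1)) {
        std::printf("hit: %s\n", value->c_str());
    }

    // Inserting a third entry evicts the least-recently-used key (2).
    cache.put(3, std::string{"three"});
    std::printf("contains(2): %d\n", cache.contains(2) ? 1 : 0);

    // get_or_emplace() runs the factory only on a miss.
    std::string& four = cache.get_or_emplace(4, [] { return std::string{"four"}; });
    std::printf("emplaced: %s\n", four.c_str());

    const auto stats = cache.stats();
    std::printf("hits=%zu misses=%zu size=%zu\n", stats.hits, stats.misses, cache.size());
    return 0;
}

With a capacity of two, the third put() evicts key 2 rather than key 1, because the earlier get() promoted key 1 to most-recently-used.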
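The commit message's "concurrent reads" apply to the paths that only take std::shared_lock: peek(), contains(), size() and stats(). get() has to splice the recency list, so it takes an exclusive lock, as do put(), erase(), resize(), clear() and setEnabled(). A small sketch of mixed readers and a writer, under the same in-tree build assumption as above and with stand-in key and value types:

#include <cstddef>
#include <cstdio>
#include <string>
#include <thread>
#include <vector>

#include "core/arm/nce/lru_cache.h" // assumption: built inside the Eden tree

int main() {
    LRUCache<int, std::string> cache{64};
    for (int i = 0; i < 64; ++i) {
        cache.put(i, std::to_string(i));
    }

    // Readers only use shared-lock operations, so they do not block each other.
    std::vector<std::thread> readers;
    for (int t = 0; t < 4; ++t) {
        readers.emplace_back([&cache] {
            std::size_t seen = 0;
            for (int i = 0; i < 100000; ++i) {
                if (const auto* value = cache.peek(i % 64)) {
                    seen += value->size();
                }
            }
            std::printf("reader done, bytes seen: %zu\n", seen);
        });
    }

    // A single writer goes through the exclusive path in put().
    std::thread writer([&cache] {
        for (int i = 0; i < 10000; ++i) {
            cache.put(i % 64, std::to_string(i));
        }
    });

    for (auto& reader : readers) {
        reader.join();
    }
    writer.join();

    std::printf("entries after mixed access: %zu\n", cache.size());
    return 0;
}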