mirror of
https://git.eden-emu.dev/eden-emu/eden.git
synced 2025-07-20 08:15:46 +00:00
LRU Cache Refactor with Thread-Safety (#199)
The cache is now thread-safe using std::shared_mutex, allowing concurrent reads without blocking, along with some other minor changes for better maintainability. Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/199 Co-authored-by: MrPurple666 <mrpurple666@noreply.localhost> Co-committed-by: MrPurple666 <mrpurple666@noreply.localhost>
This commit is contained in:
parent
2946cdbd2d
commit
2f01c69710
3 changed files with 179 additions and 106 deletions
|
@ -1,136 +1,187 @@
|
||||||
// SPDX-FileCopyrightText: 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later

#pragma once

#include <atomic>
#include <list>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <unordered_map>
#include <utility>

#include "common/logging/log.h"

/// Thread-safe LRU (least-recently-used) cache.
///
/// Read-only queries (peek/contains/size/stats/get_capacity) take a shared
/// lock and may run concurrently; anything that reorders the recency list or
/// mutates entries (get/put/erase/clear/resize/...) takes an exclusive lock.
///
/// WARNING: get()/peek() return raw pointers into the cache. The internal
/// lock is released when the call returns, so the pointer is only safe to
/// dereference while no other thread can erase or evict that entry. Use
/// try_get() when a copy is acceptable.
template <typename KeyType, typename ValueType>
class LRUCache {
public:
    using key_type = KeyType;
    using value_type = ValueType;
    using size_type = std::size_t;

    /// Hit/miss counters. Updated by get() only; peek() deliberately does
    /// not touch them (it is a non-promoting, non-counting lookup).
    struct Statistics {
        size_type hits = 0;
        size_type misses = 0;
        void reset() noexcept { hits = misses = 0; }
    };

    /// @param capacity Maximum number of entries before LRU eviction.
    ///                 A capacity of 0 degenerates to holding at most one
    ///                 entry (eviction is skipped while the list is empty).
    /// @param enabled  When false, every operation is a cheap no-op.
    explicit LRUCache(size_type capacity, bool enabled = true)
        : enabled_{enabled}, capacity_{capacity} {
        cache_map_.reserve(capacity_);
        LOG_WARNING(Core, "LRU Cache initialised (state: {} | capacity: {})",
                    enabled ? "enabled" : "disabled", capacity_);
    }

    // Copying a cache (and its mutex) makes no sense; moving is allowed.
    LRUCache(const LRUCache&) = delete;
    LRUCache& operator=(const LRUCache&) = delete;

    LRUCache(LRUCache&& other) noexcept { *this = std::move(other); }

    LRUCache& operator=(LRUCache&& other) noexcept {
        if (this == &other) {
            return *this;
        }
        // Lock both objects deadlock-free before transferring state.
        std::unique_lock this_lock(mutex_, std::defer_lock);
        std::unique_lock other_lock(other.mutex_, std::defer_lock);
        std::lock(this_lock, other_lock);
        enabled_ = other.enabled_.load(std::memory_order_relaxed);
        capacity_ = other.capacity_;
        cache_list_ = std::move(other.cache_list_);
        cache_map_ = std::move(other.cache_map_);
        stats_ = other.stats_;
        return *this;
    }

    ~LRUCache() = default;

    /// Looks up `key`, promoting it to most-recently-used on a hit.
    /// @return Pointer to the cached value, or nullptr on miss / disabled.
    [[nodiscard]] value_type* get(const key_type& key) {
        if (!isEnabled()) [[unlikely]] {
            return nullptr;
        }
        std::unique_lock lock(mutex_);
        const auto it = cache_map_.find(key);
        if (it == cache_map_.end()) {
            ++stats_.misses;
            return nullptr;
        }
        move_to_front(it);
        ++stats_.hits;
        return &it->second.second;
    }

    /// Looks up `key` without promoting it and without updating statistics.
    /// (Const overload: a const map yields const values, so the previous
    /// `value_type* peek(...) const` signature could not compile.)
    [[nodiscard]] const value_type* peek(const key_type& key) const {
        if (!isEnabled()) [[unlikely]] {
            return nullptr;
        }
        std::shared_lock lock(mutex_);
        const auto it = cache_map_.find(key);
        return it == cache_map_.end() ? nullptr : &it->second.second;
    }

    /// Non-const overload of peek(); identical semantics, mutable result.
    [[nodiscard]] value_type* peek(const key_type& key) {
        return const_cast<value_type*>(std::as_const(*this).peek(key));
    }

    /// Inserts or overwrites `key`, making it most-recently-used.
    template <typename V>
    void put(const key_type& key, V&& value) {
        if (!isEnabled()) [[unlikely]] {
            return;
        }
        std::unique_lock lock(mutex_);
        insert_or_update(key, std::forward<V>(value));
    }

    /// Returns the cached value for `key`, creating it via `factory()` on a
    /// miss. The factory runs while the exclusive lock is held: keep it
    /// cheap and never let it re-enter this cache.
    template <typename ValueFactory>
    value_type& get_or_emplace(const key_type& key, ValueFactory&& factory) {
        std::unique_lock lock(mutex_);
        const auto it = cache_map_.find(key);
        if (it != cache_map_.end()) {
            move_to_front(it);
            return it->second.second;
        }
        // insert_or_update hands back the stored slot, avoiding the second
        // map lookup the previous implementation performed.
        return insert_or_update(key, std::forward<ValueFactory>(factory)());
    }

    /// Copy-out convenience wrapper around get(); safe to use across
    /// threads because no pointer into the cache escapes.
    [[nodiscard]] std::optional<value_type> try_get(const key_type& key) {
        auto* const val = get(key);
        return val ? std::optional<value_type>(*val) : std::nullopt;
    }

    /// True when `key` is cached. Does not promote the entry.
    [[nodiscard]] bool contains(const key_type& key) const {
        if (!isEnabled()) {
            return false;
        }
        std::shared_lock lock(mutex_);
        return cache_map_.find(key) != cache_map_.end();
    }

    /// Removes `key` from the cache.
    /// @return true if an entry was erased, false otherwise.
    bool erase(const key_type& key) {
        if (!isEnabled()) {
            return false;
        }
        std::unique_lock lock(mutex_);
        const auto it = cache_map_.find(key);
        if (it == cache_map_.end()) {
            return false;
        }
        cache_list_.erase(it->second.first);
        cache_map_.erase(it);
        return true;
    }

    /// Removes every entry and resets the hit/miss statistics.
    void clear() {
        std::unique_lock lock(mutex_);
        clear_unlocked();
    }

    /// Current number of cached entries (0 while disabled).
    [[nodiscard]] size_type size() const {
        if (!isEnabled()) {
            return 0;
        }
        std::shared_lock lock(mutex_);
        return cache_map_.size();
    }

    /// Maximum number of entries (read under the lock: resize() may race).
    [[nodiscard]] size_type get_capacity() const {
        std::shared_lock lock(mutex_);
        return capacity_;
    }

    /// Changes the capacity, evicting LRU entries if it shrank.
    void resize(size_type new_capacity) {
        if (!isEnabled()) {
            return;
        }
        std::unique_lock lock(mutex_);
        capacity_ = new_capacity;
        shrink_if_needed();
        cache_map_.reserve(capacity_);
    }

    /// Enables or disables the cache; disabling also clears it.
    /// Clearing goes through clear_unlocked(): calling clear() while this
    /// thread already holds the exclusive lock would self-deadlock, since
    /// std::shared_mutex is not recursive.
    void setEnabled(bool state) {
        std::unique_lock lock(mutex_);
        enabled_.store(state, std::memory_order_relaxed);
        LOG_WARNING(Core, "LRU Cache state changed to: {}", state ? "enabled" : "disabled");
        if (!state) {
            clear_unlocked();
        }
    }

    /// Lock-free enabled check. enabled_ is atomic so the fast-path reads
    /// in get()/put()/... do not race with setEnabled().
    [[nodiscard]] bool isEnabled() const {
        return enabled_.load(std::memory_order_relaxed);
    }

    /// Snapshot of the hit/miss counters.
    [[nodiscard]] Statistics stats() const {
        std::shared_lock lock(mutex_);
        return stats_;
    }

private:
    using list_type = std::list<key_type>;
    using list_iterator = typename list_type::iterator;
    using map_value_type = std::pair<list_iterator, value_type>;
    using map_type = std::unordered_map<key_type, map_value_type>;

    // Caller must hold the exclusive lock.
    template <typename V>
    value_type& insert_or_update(const key_type& key, V&& value) {
        const auto it = cache_map_.find(key);
        if (it != cache_map_.end()) {
            it->second.second = std::forward<V>(value);
            move_to_front(it);
            return it->second.second;
        }
        // Evict the least-recently-used entry when full. The emptiness check
        // keeps a zero-capacity cache from calling back() on an empty list
        // (undefined behaviour in the previous implementation).
        if (cache_map_.size() >= capacity_ && !cache_list_.empty()) {
            cache_map_.erase(cache_list_.back());
            cache_list_.pop_back();
        }
        cache_list_.push_front(key);
        // insert_or_assign avoids operator[]'s requirement that value_type
        // be default-constructible.
        const auto result = cache_map_.insert_or_assign(
            key, map_value_type{cache_list_.begin(), std::forward<V>(value)});
        return result.first->second.second;
    }

    // Caller must hold the exclusive lock.
    void move_to_front(typename map_type::iterator it) {
        cache_list_.splice(cache_list_.begin(), cache_list_, it->second.first);
        it->second.first = cache_list_.begin();
    }

    // Caller must hold the exclusive lock.
    void shrink_if_needed() {
        while (cache_map_.size() > capacity_ && !cache_list_.empty()) {
            cache_map_.erase(cache_list_.back());
            cache_list_.pop_back();
        }
    }

    // Caller must hold the exclusive lock.
    void clear_unlocked() {
        cache_list_.clear();
        cache_map_.clear();
        stats_.reset();
    }

    mutable std::shared_mutex mutex_;
    std::atomic<bool> enabled_{true};
    size_type capacity_;
    list_type cache_list_;
    map_type cache_map_;
    Statistics stats_;
};
|
|
@ -1,5 +1,5 @@
|
||||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
// SPDX-FileCopyrightText: 2025 Eden Emulator Project
|
||||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
|
||||||
#include "common/arm64/native_clock.h"
|
#include "common/arm64/native_clock.h"
|
||||||
#include "common/bit_cast.h"
|
#include "common/bit_cast.h"
|
||||||
|
@ -14,6 +14,23 @@
|
||||||
|
|
||||||
namespace Core::NCE {
|
namespace Core::NCE {
|
||||||
|
|
||||||
|
// Move constructor: transfers the patch cache, the emitted instruction
// buffer and the per-module patch data from `other`, then re-seats the
// code generator `c` on this object's own instruction buffer.
// NOTE(review): `c(m_patch_instructions)` constructs a fresh generator over
// the moved buffer; it does not carry over the moved-from generator's cursor
// state — confirm callers only move a Patcher between emission phases.
Patcher::Patcher(Patcher&& other) noexcept
    : patch_cache(std::move(other.patch_cache)),
      m_patch_instructions(std::move(other.m_patch_instructions)),
      c(m_patch_instructions),
      m_save_context(other.m_save_context),
      m_load_context(other.m_load_context),
      mode(other.mode),
      total_program_size(other.total_program_size),
      m_relocate_module_index(other.m_relocate_module_index),
      modules(std::move(other.modules)),
      curr_patch(nullptr) {
    if (!modules.empty()) {
        curr_patch = &modules.back();
    }
    // Null the moved-from pointer: after `modules` is moved it would dangle
    // into element storage now owned by *this.
    other.curr_patch = nullptr;
}
|
||||||
|
|
||||||
|
|
||||||
using namespace Common::Literals;
|
using namespace Common::Literals;
|
||||||
using namespace oaknut::util;
|
using namespace oaknut::util;
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
|
// SPDX-FileCopyrightText: 2025 Eden Emulator Project
|
||||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
@ -15,6 +15,7 @@
|
||||||
#include "core/hle/kernel/k_typed_address.h"
|
#include "core/hle/kernel/k_typed_address.h"
|
||||||
#include "core/hle/kernel/physical_memory.h"
|
#include "core/hle/kernel/physical_memory.h"
|
||||||
#include "lru_cache.h"
|
#include "lru_cache.h"
|
||||||
|
#include <utility>
|
||||||
|
|
||||||
namespace Core::NCE {
|
namespace Core::NCE {
|
||||||
|
|
||||||
|
@ -30,6 +31,10 @@ using EntryTrampolines = std::unordered_map<ModuleTextAddress, PatchTextAddress>
|
||||||
|
|
||||||
class Patcher {
|
class Patcher {
|
||||||
public:
|
public:
|
||||||
|
Patcher(const Patcher&) = delete;
|
||||||
|
Patcher& operator=(const Patcher&) = delete;
|
||||||
|
Patcher(Patcher&& other) noexcept;
|
||||||
|
Patcher& operator=(Patcher&&) noexcept = delete;
|
||||||
explicit Patcher();
|
explicit Patcher();
|
||||||
~Patcher();
|
~Patcher();
|
||||||
|
|
||||||
|
@ -62,7 +67,7 @@ private:
|
||||||
void WriteCntpctHandler(ModuleDestLabel module_dest, oaknut::XReg dest_reg);
|
void WriteCntpctHandler(ModuleDestLabel module_dest, oaknut::XReg dest_reg);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
static constexpr size_t CACHE_SIZE = 4096; // Cache size for patch entries
|
static constexpr size_t CACHE_SIZE = 16384; // Cache size for patch entries
|
||||||
LRUCache<uintptr_t, PatchTextAddress> patch_cache{CACHE_SIZE, Settings::values.lru_cache_enabled.GetValue()};
|
LRUCache<uintptr_t, PatchTextAddress> patch_cache{CACHE_SIZE, Settings::values.lru_cache_enabled.GetValue()};
|
||||||
|
|
||||||
void BranchToPatch(uintptr_t module_dest) {
|
void BranchToPatch(uintptr_t module_dest) {
|
||||||
|
@ -70,7 +75,7 @@ private:
|
||||||
LOG_DEBUG(Core_ARM, "LRU cache lookup for address {:#x}", module_dest);
|
LOG_DEBUG(Core_ARM, "LRU cache lookup for address {:#x}", module_dest);
|
||||||
// Try to get existing patch entry from cache
|
// Try to get existing patch entry from cache
|
||||||
if (auto* cached_patch = patch_cache.get(module_dest)) {
|
if (auto* cached_patch = patch_cache.get(module_dest)) {
|
||||||
LOG_DEBUG(Core_ARM, "LRU cache hit for address {:#x}", module_dest);
|
LOG_WARNING(Core_ARM, "LRU cache hit for address {:#x}", module_dest);
|
||||||
curr_patch->m_branch_to_patch_relocations.push_back({c.offset(), *cached_patch});
|
curr_patch->m_branch_to_patch_relocations.push_back({c.offset(), *cached_patch});
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue