[texture_cache, buffer_cache] Added TLS handling + changed command queue for GPU threading. (#3579)

(Merge of #3495 + #3108)

This PR simplifies the math operations on hot pointers inside accesses and requests to the buffer cache and texture cache, removing the previous indirection logic and replacing it with a PoD approach.

This reduces the CPU time spent on each request, letting it flow directly into the next stage of the render chain. Likewise, the command queue previously used an internal mutex that constrained the flow of data within the GPU threads; we now move over a single command path, and I verified it is better to keep using plain mutexes instead of an internal mutex plus a per-operation mutex.

In simpler words, this aims to improve performance in those games and on those devices where waiting for the next GPU command was heavier than a single verification.

Co-Authored-by: @CamilleLaVey
Co-Authored-by: @Lizzie
Co-authored-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3579
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
This commit is contained in:
lizzie 2026-02-20 00:52:07 +01:00 committed by crueter
parent 6f9d025ad2
commit c9c136bea7
No known key found for this signature in database
GPG key ID: 425ACD2D4830EBC6
12 changed files with 266 additions and 72 deletions

View file

@ -127,6 +127,11 @@ public:
void UpdatePagesCachedBatch(std::span<const std::pair<DAddr, size_t>> ranges, s32 delta);
private:
// One entry of the small direct-mapped translation cache (t_slot): pairs a
// page-aligned guest address with the host pointer for that page.
struct TranslationEntry {
// Page-aligned guest address this entry translates (address & ~YUZU_PAGEMASK).
DAddr guest_page{};
// Host pointer to the start of the mapped page; nullptr marks the slot invalid.
u8* host_ptr{};
};
// Internal helper that performs the update assuming the caller already holds the necessary lock.
void UpdatePagesCachedCountNoLock(DAddr addr, size_t size, s32 delta);
@ -195,6 +200,8 @@ private:
}
Common::VirtualBuffer<VAddr> cpu_backing_address;
// Tiny 4-entry translation cache used by ReadBlock/ReadBlockUnsafe for
// single-page reads; reset to empty on Map and Unmap to avoid stale pointers.
std::array<TranslationEntry, 4> t_slot{};
// Replacement cursor into t_slot; advanced round-robin (wrapped with & 3).
u32 cache_cursor = 0;
// Per-page cached-count storage: packed u8 counters accessed atomically.
using CounterType = u8;
using CounterAtomicType = std::atomic_uint8_t;
// Number of counters packed into one 8-byte word.
static constexpr size_t subentries = 8 / sizeof(CounterType);

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
@ -247,6 +247,7 @@ void DeviceMemoryManager<Traits>::Map(DAddr address, VAddr virtual_address, size
}
impl->multi_dev_address.Register(new_dev, start_id);
}
t_slot = {};
if (track) {
TrackContinuityImpl(address, virtual_address, size, asid);
}
@ -278,6 +279,7 @@ void DeviceMemoryManager<Traits>::Unmap(DAddr address, size_t size) {
compressed_device_addr[phys_addr - 1] = new_start | MULTI_FLAG;
}
}
t_slot = {};
}
template <typename Traits>
void DeviceMemoryManager<Traits>::TrackContinuityImpl(DAddr address, VAddr virtual_address,
@ -417,6 +419,26 @@ void DeviceMemoryManager<Traits>::WalkBlock(DAddr addr, std::size_t size, auto o
template <typename Traits>
void DeviceMemoryManager<Traits>::ReadBlock(DAddr address, void* dest_pointer, size_t size) {
device_inter->FlushRegion(address, size);
const std::size_t page_offset = address & Memory::YUZU_PAGEMASK;
if (size <= Memory::YUZU_PAGESIZE - page_offset) {
const DAddr guest_page = address & ~static_cast<DAddr>(Memory::YUZU_PAGEMASK);
for (size_t i = 0; i < 4; ++i) {
if (t_slot[i].guest_page == guest_page && t_slot[i].host_ptr != nullptr) {
std::memcpy(dest_pointer, t_slot[i].host_ptr + page_offset, size);
return;
}
}
const std::size_t page_index = address >> Memory::YUZU_PAGEBITS;
const auto phys_addr = compressed_physical_ptr[page_index];
if (phys_addr != 0) {
auto* const mem_ptr = GetPointerFromRaw<u8>((PAddr(phys_addr - 1) << Memory::YUZU_PAGEBITS));
t_slot[cache_cursor % t_slot.size()] = TranslationEntry{.guest_page = guest_page, .host_ptr = mem_ptr};
cache_cursor = (cache_cursor + 1) & 3U;
std::memcpy(dest_pointer, mem_ptr + page_offset, size);
return;
}
}
WalkBlock(
address, size,
[&](size_t copy_amount, DAddr current_vaddr) {
@ -455,6 +477,26 @@ void DeviceMemoryManager<Traits>::WriteBlock(DAddr address, const void* src_poin
template <typename Traits>
void DeviceMemoryManager<Traits>::ReadBlockUnsafe(DAddr address, void* dest_pointer, size_t size) {
const std::size_t page_offset = address & Memory::YUZU_PAGEMASK;
if (size <= Memory::YUZU_PAGESIZE - page_offset) {
const DAddr guest_page = address & ~static_cast<DAddr>(Memory::YUZU_PAGEMASK);
for (size_t i = 0; i < 4; ++i) {
if (t_slot[i].guest_page == guest_page && t_slot[i].host_ptr != nullptr) {
std::memcpy(dest_pointer, t_slot[i].host_ptr + page_offset, size);
return;
}
}
const std::size_t page_index = address >> Memory::YUZU_PAGEBITS;
const auto phys_addr = compressed_physical_ptr[page_index];
if (phys_addr != 0) {
auto* const mem_ptr = GetPointerFromRaw<u8>((PAddr(phys_addr - 1) << Memory::YUZU_PAGEBITS));
t_slot[cache_cursor % t_slot.size()] = TranslationEntry{.guest_page = guest_page, .host_ptr = mem_ptr};
cache_cursor = (cache_cursor + 1) & 3U;
std::memcpy(dest_pointer, mem_ptr + page_offset, size);
return;
}
}
WalkBlock(
address, size,
[&](size_t copy_amount, DAddr current_vaddr) {