Compare commits

...

5 commits

Author   SHA1         Message                                                          Date
lizzie   2f04b26392   LICENSE                                                          2026-04-01 23:41:42 +00:00
lizzie   36dee9bad1   fx                                                               2026-04-01 23:41:42 +00:00
lizzie   fcab05e9e2   license fix                                                      2026-04-01 23:41:42 +00:00
lizzie   6960994bf4   fx                                                               2026-04-01 23:41:42 +00:00
lizzie   a54eadb12b   [memory] nuke HeapTracker, use mprotect() for mappings instead   2026-04-01 23:41:42 +00:00
                      Signed-off-by: lizzie <lizzie@eden-emu.dev>
9 changed files with 58 additions and 504 deletions
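
Note: taken together, the diffs below replace per-region mmap(MAP_FIXED) calls over HostMemory's reservation with plain mprotect() permission flips, which removes the need to budget kernel VMAs against Linux's vm.max_map_count — the job the deleted HeapTracker did on Android. A minimal sketch of the idea, using a plain anonymous reservation rather than Eden's actual memfd-backed HostMemory:

#include <cassert>
#include <sys/mman.h>

int main() {
    const size_t size = 1 << 20;
    // Reserve the whole range once, inaccessible. This is the only mmap()
    // the scheme needs after startup.
    void* base = mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(base != MAP_FAILED);
    // Old approach: "Map" re-ran mmap(..., MAP_SHARED | MAP_FIXED, fd, ...)
    // over the reservation, costing one kernel VMA per live mapping.
    // New approach: the backing never changes; mapping is a protection flip.
    assert(mprotect(base, size, PROT_READ | PROT_WRITE) == 0); // Map
    assert(mprotect(base, size, PROT_NONE) == 0);              // Unmap
    return munmap(base, size);
}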

View file

@@ -66,8 +66,6 @@ add_library(
     fs/path_util.cpp
     fs/path_util.h
     hash.h
-    heap_tracker.cpp
-    heap_tracker.h
     hex_util.cpp
     hex_util.h
     host_memory.cpp

View file

@@ -1,282 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <fstream>
#include "common/heap_tracker.h"
#include "common/logging.h"
#include "common/assert.h"
namespace Common {
namespace {
s64 GetMaxPermissibleResidentMapCount() {
// Default value.
s64 value = 65530;
// Try to read how many mappings we can make.
std::ifstream s("/proc/sys/vm/max_map_count");
s >> value;
// Print, for debug.
LOG_INFO(HW_Memory, "Current maximum map count: {}", value);
// Allow 20000 maps for other code and to account for split inaccuracy.
return std::max<s64>(value - 20000, 0);
}
} // namespace
HeapTracker::HeapTracker(Common::HostMemory& buffer)
: m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {}
HeapTracker::~HeapTracker() = default;
void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
MemoryPermission perm, bool is_separate_heap) {
// When mapping other memory, map pages immediately.
if (!is_separate_heap) {
m_buffer.Map(virtual_offset, host_offset, length, perm, false);
return;
}
{
// We are mapping part of a separate heap.
std::scoped_lock lk{m_lock};
auto* const map = new SeparateHeapMap{
.vaddr = virtual_offset,
.paddr = host_offset,
.size = length,
.tick = m_tick++,
.perm = perm,
.is_resident = false,
};
// Insert into mappings.
m_map_count++;
m_mappings.insert(*map);
}
// Finally, map.
this->DeferredMapSeparateHeap(virtual_offset);
}
void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
// If this is a separate heap...
if (is_separate_heap) {
std::scoped_lock lk{m_lock};
const SeparateHeapMap key{
.vaddr = virtual_offset,
};
// Split at the boundaries of the region we are removing.
this->SplitHeapMapLocked(virtual_offset);
this->SplitHeapMapLocked(virtual_offset + size);
// Erase all mappings in range.
auto it = m_mappings.find(key);
while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
// Get underlying item.
auto* const item = std::addressof(*it);
// If resident, erase from resident map.
if (item->is_resident) {
ASSERT(--m_resident_map_count >= 0);
m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
}
// Erase from map.
ASSERT(--m_map_count >= 0);
it = m_mappings.erase(it);
// Free the item.
delete item;
}
}
// Unmap pages.
m_buffer.Unmap(virtual_offset, size, false);
}
void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
// Ensure no rebuild occurs while reprotecting.
std::shared_lock lk{m_rebuild_lock};
// Split at the boundaries of the region we are reprotecting.
this->SplitHeapMap(virtual_offset, size);
// Declare tracking variables.
const VAddr end = virtual_offset + size;
VAddr cur = virtual_offset;
while (cur < end) {
VAddr next = cur;
bool should_protect = false;
{
std::scoped_lock lk2{m_lock};
const SeparateHeapMap key{
.vaddr = next,
};
// Try to get the next mapping corresponding to this address.
const auto it = m_mappings.nfind(key);
if (it == m_mappings.end()) {
// There are no separate heap mappings remaining.
next = end;
should_protect = true;
} else if (it->vaddr == cur) {
// We are in range.
// Update permission bits.
it->perm = perm;
// Determine next address and whether we should protect.
next = cur + it->size;
should_protect = it->is_resident;
} else /* if (it->vaddr > cur) */ {
// We weren't in range, but there is a block coming up that will be.
next = it->vaddr;
should_protect = true;
}
}
// Clamp to end.
next = (std::min)(next, end);
// Reprotect, if we need to.
if (should_protect) {
m_buffer.Protect(cur, next - cur, perm);
}
// Advance.
cur = next;
}
}
bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
if (m_buffer.IsInVirtualRange(fault_address)) {
return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
}
return false;
}
bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
bool rebuild_required = false;
{
std::scoped_lock lk{m_lock};
// Check to ensure this was a non-resident separate heap mapping.
const auto it = this->GetNearestHeapMapLocked(virtual_offset);
if (it == m_mappings.end() || it->is_resident) {
return false;
}
// Update tick before possible rebuild.
it->tick = m_tick++;
// Check if we need to rebuild.
if (m_resident_map_count > m_max_resident_map_count) {
rebuild_required = true;
}
// Map the area.
m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
// This map is now resident.
it->is_resident = true;
m_resident_map_count++;
m_resident_mappings.insert(*it);
}
if (rebuild_required) {
// A rebuild was required, so perform it now.
this->RebuildSeparateHeapAddressSpace();
}
return true;
}
void HeapTracker::RebuildSeparateHeapAddressSpace() {
std::scoped_lock lk{m_rebuild_lock, m_lock};
ASSERT(!m_resident_mappings.empty());
// Dump half of the mappings.
//
// Despite being worse in theory, this has proven to be better in practice than more
// regularly dumping a smaller amount, because it significantly reduces average case
// lock contention.
std::size_t const desired_count = (std::min)(m_resident_map_count, m_max_resident_map_count) / 2;
std::size_t const evict_count = m_resident_map_count - desired_count;
auto it = m_resident_mappings.begin();
for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
// Unmark and unmap.
it->is_resident = false;
m_buffer.Unmap(it->vaddr, it->size, false);
// Advance.
ASSERT(--m_resident_map_count >= 0);
it = m_resident_mappings.erase(it);
}
}
void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
std::scoped_lock lk{m_lock};
this->SplitHeapMapLocked(offset);
this->SplitHeapMapLocked(offset + size);
}
void HeapTracker::SplitHeapMapLocked(VAddr offset) {
const auto it = this->GetNearestHeapMapLocked(offset);
if (it == m_mappings.end() || it->vaddr == offset) {
// Not contained or no split required.
return;
}
// Cache the original values.
auto* const left = std::addressof(*it);
const size_t orig_size = left->size;
// Adjust the left map.
const size_t left_size = offset - left->vaddr;
left->size = left_size;
// Create the new right map.
auto* const right = new SeparateHeapMap{
.vaddr = left->vaddr + left_size,
.paddr = left->paddr + left_size,
.size = orig_size - left_size,
.tick = left->tick,
.perm = left->perm,
.is_resident = left->is_resident,
};
// Insert the new right map.
m_map_count++;
m_mappings.insert(*right);
// If resident, also insert into resident map.
if (right->is_resident) {
m_resident_map_count++;
m_resident_mappings.insert(*right);
}
}
HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
const SeparateHeapMap key{
.vaddr = offset,
};
return m_mappings.find(key);
}
} // namespace Common
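
For context on the machinery being deleted: DeferredMapSeparateHeap was driven from the guest fault handler, so separate-heap pages stayed unmapped until first touch and were made resident on demand, with LRU eviction once the resident count neared the vm.max_map_count budget. A self-contained model of that fault-driven lazy mapping — the names and structure are illustrative, not Eden's API, and note mprotect() is not formally async-signal-safe even though this pattern is common in practice:

#include <cstdint>
#include <cstdio>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>

static uint8_t* g_base = nullptr;
static size_t g_size = 0;

static void OnFault(int, siginfo_t* info, void*) {
    uint8_t* addr = static_cast<uint8_t*>(info->si_addr);
    if (addr < g_base || addr >= g_base + g_size) {
        _exit(1); // Not our region; a real handler would chain to the old action.
    }
    // Make the faulting page resident; the kernel then retries the access.
    const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    uint8_t* page_base = g_base + ((addr - g_base) / page) * page;
    mprotect(page_base, page, PROT_READ | PROT_WRITE);
}

int main() {
    g_size = 1 << 20;
    g_base = static_cast<uint8_t*>(
        mmap(nullptr, g_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    struct sigaction sa {};
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = OnFault;
    sigaction(SIGSEGV, &sa, nullptr);
    g_base[12345] = 42; // Faults once; succeeds after the handler maps the page.
    std::printf("%d\n", g_base[12345]);
    return 0;
}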

View file

@@ -1,98 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
#include <mutex>
#include <set>
#include <shared_mutex>
#include "common/host_memory.h"
#include "common/intrusive_red_black_tree.h"
namespace Common {
struct SeparateHeapMap {
Common::IntrusiveRedBlackTreeNode addr_node{};
Common::IntrusiveRedBlackTreeNode tick_node{};
VAddr vaddr{};
PAddr paddr{};
size_t size{};
size_t tick{};
MemoryPermission perm{};
bool is_resident{};
};
struct SeparateHeapMapAddrComparator {
static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
if (lhs.vaddr < rhs.vaddr) {
return -1;
} else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
return 0;
} else {
return 1;
}
}
};
struct SeparateHeapMapTickComparator {
static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
if (lhs.tick < rhs.tick) {
return -1;
} else if (lhs.tick > rhs.tick) {
return 1;
} else {
return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
}
}
};
class HeapTracker {
public:
explicit HeapTracker(Common::HostMemory& buffer);
~HeapTracker();
void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
bool is_separate_heap);
void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
u8* VirtualBasePointer() {
return m_buffer.VirtualBasePointer();
}
bool DeferredMapSeparateHeap(u8* fault_address);
bool DeferredMapSeparateHeap(size_t virtual_offset);
private:
using AddrTreeTraits =
Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
using TickTreeTraits =
Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
AddrTree m_mappings{};
TickTree m_resident_mappings{};
private:
void SplitHeapMap(VAddr offset, size_t size);
void SplitHeapMapLocked(VAddr offset);
AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
void RebuildSeparateHeapAddressSpace();
private:
Common::HostMemory& m_buffer;
const s64 m_max_resident_map_count;
std::shared_mutex m_rebuild_lock{};
std::mutex m_lock{};
s64 m_map_count{};
s64 m_resident_map_count{};
size_t m_tick{};
};
} // namespace Common
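
The header shows why the tracker needed intrusive nodes: every SeparateHeapMap sits in two trees at once, one keyed by address (range lookups on faults and splits) and one keyed by tick (least-recently-used eviction order for RebuildSeparateHeapAddressSpace). A non-intrusive sketch of the same two-index idea, using standard containers instead of Eden's IntrusiveRedBlackTree:

#include <cstdint>
#include <map>
#include <memory>

struct Mapping {
    uint64_t vaddr{}, size{}, tick{};
    bool resident{};
};

struct TwoIndex {
    std::map<uint64_t, std::shared_ptr<Mapping>> by_addr; // lookups and splits
    std::map<uint64_t, std::shared_ptr<Mapping>> by_tick; // LRU eviction order

    void Insert(std::shared_ptr<Mapping> m) {
        by_addr.emplace(m->vaddr, m);
        by_tick.emplace(m->tick, m);
    }
    // On access, re-key the entry under a fresh tick so it moves to the
    // "young" end of the eviction order.
    void Touch(const std::shared_ptr<Mapping>& m, uint64_t now) {
        by_tick.erase(m->tick);
        m->tick = now;
        by_tick.emplace(now, m);
    }
    std::shared_ptr<Mapping> Oldest() const {
        return by_tick.empty() ? nullptr : by_tick.begin()->second;
    }
};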

View file

@@ -572,9 +572,8 @@ public:
         if (True(perms & MemoryPermission::Execute))
             prot_flags |= PROT_EXEC;
 #endif
-        int flags = (fd >= 0 ? MAP_SHARED : MAP_PRIVATE) | MAP_FIXED;
-        void* ret = mmap(virtual_base + virtual_offset, length, prot_flags, flags, fd, host_offset);
-        ASSERT_MSG(ret != MAP_FAILED, "mmap: {} {}", strerror(errno), fd);
+        int ret = mprotect(virtual_base + virtual_offset, length, prot_flags);
+        ASSERT_MSG(ret == 0, "mprotect: {} {}", strerror(errno), fd);
     }

     void Unmap(size_t virtual_offset, size_t length) {
@@ -588,8 +587,8 @@ public:
         auto [merged_pointer, merged_size] =
             free_manager.FreeBlock(virtual_base + virtual_offset, length);
-        void* ret = mmap(merged_pointer, merged_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
-        ASSERT_MSG(ret != MAP_FAILED, "mmap: {}", strerror(errno));
+        int ret = mprotect(merged_pointer, merged_size, PROT_NONE);
+        ASSERT_MSG(ret == 0, "mprotect: {}", strerror(errno));
     }

     void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {

View file

@@ -13,13 +13,13 @@
 namespace Common {

 template <typename BaseAddr>
-MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_,
-                                                   std::size_t first_level_bits_,
-                                                   std::size_t page_bits_)
-    : address_space_bits{address_space_bits_},
-      first_level_bits{first_level_bits_}, page_bits{page_bits_} {
+MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_, std::size_t first_level_bits_, std::size_t page_bits_)
+    : address_space_bits{address_space_bits_}
+    , first_level_bits{first_level_bits_}
+    , page_bits{page_bits_}
+{
     if (page_bits == 0) {
         return;
     }

     first_level_shift = address_space_bits - first_level_bits;
     first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
@@ -30,12 +30,9 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bit
     void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
 #else
     void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
-    if (base == MAP_FAILED) {
+    if (base == MAP_FAILED)
         base = nullptr;
-    }
 #endif

     ASSERT(base);
     base_ptr = reinterpret_cast<BaseAddr*>(base);
 }
@@ -56,29 +53,21 @@ template <typename BaseAddr>
 void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
     const u64 new_start = start >> first_level_shift;
     const u64 new_end = (start + size) >> first_level_shift;
-    for (u64 i = new_start; i <= new_end; i++) {
-        if (!first_level_map[i]) {
+    for (u64 i = new_start; i <= new_end; i++)
+        if (!first_level_map[i])
             AllocateLevel(i);
-        }
-    }
 }

 template <typename BaseAddr>
-void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) {
-    void* ptr = reinterpret_cast<char *>(base_ptr) + level * first_level_chunk_size;
+void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 index) {
+    void* ptr = reinterpret_cast<char *>(base_ptr) + index * first_level_chunk_size;
 #ifdef _WIN32
-    void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)};
+    void* base = VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE);
+    ASSERT(base);
 #else
-    void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE,
-                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)};
-    if (base == MAP_FAILED) {
-        base = nullptr;
-    }
+    void* base = ptr;
 #endif
-    ASSERT(base);
-
-    first_level_map[level] = base;
+    first_level_map[index] = base;
 }

 } // namespace Common
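
The POSIX branch of AllocateLevel can drop its per-level mmap() because the constructor already maps the whole first-level range readable and writable, and anonymous memory on Linux is demand-paged: physical pages are committed only when touched. (Windows keeps the explicit VirtualAlloc because MEM_RESERVE memory is not accessible until MEM_COMMIT.) A small demonstration of that assumption:

#include <cstdio>
#include <sys/mman.h>

int main() {
    const size_t alloc_size = 1ull << 32; // 4 GiB of address space
    void* base = mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        return 1; // Can fail under strict overcommit settings.
    }
    char* p = static_cast<char*>(base);
    p[0] = 1;              // commits a single page
    p[alloc_size - 1] = 1; // commits one more page, not 4 GiB
    std::printf("resident memory stays tiny despite the huge mapping\n");
    return munmap(base, alloc_size);
}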

View file

@@ -16,7 +16,6 @@
 #include "common/assert.h"
 #include "common/atomic_ops.h"
 #include "common/common_types.h"
-#include "common/heap_tracker.h"
 #include "common/logging.h"
 #include "common/page_table.h"
 #include "common/scope_exit.h"
@@ -55,37 +54,24 @@ struct Memory::Impl {
         } else {
             current_page_table->fastmem_arena = nullptr;
         }
-#ifdef __ANDROID__
-        heap_tracker.emplace(system.DeviceMemory().buffer);
-        buffer = std::addressof(*heap_tracker);
-#else
         buffer = std::addressof(system.DeviceMemory().buffer);
-#endif
     }

-    void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms,
-                         bool separate_heap) {
+    void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, Common::PhysicalAddress target, Common::MemoryPermission perms, bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
-        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
-                   GetInteger(target));
-        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
-                 Common::PageType::Memory);
+        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", GetInteger(target));
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target, Common::PageType::Memory);

         if (current_page_table->fastmem_arena) {
-            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
-                        separate_heap);
+            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms, separate_heap);
         }
     }

-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                     bool separate_heap) {
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
-        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
-                 Common::PageType::Unmapped);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0, Common::PageType::Unmapped);

         if (current_page_table->fastmem_arena) {
             buffer->Unmap(GetInteger(base), size, separate_heap);
@@ -857,12 +843,7 @@ struct Memory::Impl {
     std::array<Common::ScratchBuffer<u32>, Core::Hardware::NUM_CPU_CORES> scratch_buffers{};
     std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
     std::mutex sys_core_guard;
-#ifdef __ANDROID__
-    std::optional<Common::HeapTracker> heap_tracker;
-    Common::HeapTracker* buffer{};
-#else
     Common::HostMemory* buffer{};
-#endif
 };

 Memory::Memory(Core::System& system_) : system{system_} {
@@ -1055,30 +1036,14 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
     u8* const ptr = impl->GetPointerImpl(
         GetInteger(vaddr),
         [&] {
-            LOG_ERROR(HW_Memory, "Unmapped InvalidateNCE for {} bytes @ {:#x}", size,
-                      GetInteger(vaddr));
+            LOG_ERROR(HW_Memory, "Unmapped InvalidateNCE for {} bytes @ {:#x}", size, GetInteger(vaddr));
             mapped = false;
         },
         [&] { rasterizer = true; });
     if (rasterizer) {
         impl->InvalidateGPUMemory(ptr, size);
     }
-#ifdef __ANDROID__
-    if (!rasterizer && mapped) {
-        impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
-    }
-#endif

     return mapped && ptr != nullptr;
 }

-bool Memory::InvalidateSeparateHeap(void* fault_address) {
-#ifdef __ANDROID__
-    return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
-#else
-    return false;
-#endif
-}
-
 } // namespace Core::Memory

View file

@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later

 // SPDX-FileCopyrightText: 2014 Citra Emulator Project
@@ -490,13 +490,8 @@ public:
      * marked as debug or non-debug.
      */
     void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug);

     void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);

     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);

-    bool InvalidateSeparateHeap(void* fault_address);
-
 private:
     Core::System& system;

View file

@@ -82,8 +82,6 @@ private:
     std::thread thread;

     mach_port_t server_port;
-
-    void MessagePump();
 };

 MachHandler::MachHandler() {
@@ -97,7 +95,30 @@ MachHandler::MachHandler() {
     KCHECK(mach_port_request_notification(mach_task_self(), server_port, MACH_NOTIFY_PORT_DESTROYED, 0, server_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &prev));
 #undef KCHECK

-    thread = std::thread(&MachHandler::MessagePump, this);
+    thread = std::thread([this] {
+        mach_msg_return_t mr;
+        MachMessage request;
+        MachMessage reply;
+
+        while (true) {
+            mr = mach_msg(&request.head, MACH_RCV_MSG | MACH_RCV_LARGE, 0, sizeof(request), server_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+            if (mr != MACH_MSG_SUCCESS) {
+                fmt::print(stderr, "dynarmic: macOS MachHandler: Failed to receive mach message. error: {:#08x} ({})\n", mr, mach_error_string(mr));
+                return;
+            }
+
+            if (!mach_exc_server(&request.head, &reply.head)) {
+                fmt::print(stderr, "dynarmic: macOS MachHandler: Unexpected mach message\n");
+                return;
+            }
+
+            mr = mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+            if (mr != MACH_MSG_SUCCESS) {
+                fmt::print(stderr, "dynarmic: macOS MachHandler: Failed to send mach message. error: {:#08x} ({})\n", mr, mach_error_string(mr));
+                return;
+            }
+        }
+    });
     thread.detach();
 }
@@ -105,31 +126,6 @@ MachHandler::~MachHandler() {
     mach_port_deallocate(mach_task_self(), server_port);
 }

-void MachHandler::MessagePump() {
-    mach_msg_return_t mr;
-    MachMessage request;
-    MachMessage reply;
-
-    while (true) {
-        mr = mach_msg(&request.head, MACH_RCV_MSG | MACH_RCV_LARGE, 0, sizeof(request), server_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
-        if (mr != MACH_MSG_SUCCESS) {
-            fmt::print(stderr, "dynarmic: macOS MachHandler: Failed to receive mach message. error: {:#08x} ({})\n", mr, mach_error_string(mr));
-            return;
-        }
-
-        if (!mach_exc_server(&request.head, &reply.head)) {
-            fmt::print(stderr, "dynarmic: macOS MachHandler: Unexpected mach message\n");
-            return;
-        }
-
-        mr = mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
-        if (mr != MACH_MSG_SUCCESS) {
-            fmt::print(stderr, "dynarmic: macOS MachHandler: Failed to send mach message. error: {:#08x} ({})\n", mr, mach_error_string(mr));
-            return;
-        }
-    }
-}
-
 #if defined(ARCHITECTURE_x86_64)
 kern_return_t MachHandler::HandleRequest(x86_thread_state64_t* ts) {
     std::lock_guard<std::mutex> guard(code_block_infos_mutex);

View file

@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later

 /* This file is part of the dynarmic project.
@@ -46,22 +46,18 @@ class SigHandler {
         });
     }

     static void SigAction(int sig, siginfo_t* info, void* raw_context);

-    void* signal_stack_memory = nullptr;
+    std::vector<u8> signal_stack_memory;
+    bool supports_fast_mem = true;
     ankerl::unordered_dense::map<u64, CodeBlockInfo> code_block_infos;
     std::shared_mutex code_block_infos_mutex;
     struct sigaction old_sa_segv;
     struct sigaction old_sa_bus;
-    std::size_t signal_stack_size;
-    bool supports_fast_mem = true;

 public:
     SigHandler() noexcept {
-        signal_stack_size = std::max<size_t>(SIGSTKSZ, 2 * 1024 * 1024);
-        signal_stack_memory = mmap(nullptr, signal_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+        signal_stack_memory.resize(std::max<std::size_t>(SIGSTKSZ, 2 * 1024 * 1024), 0);

         stack_t signal_stack{};
-        signal_stack.ss_sp = signal_stack_memory;
-        signal_stack.ss_size = signal_stack_size;
+        signal_stack.ss_sp = signal_stack_memory.data();
+        signal_stack.ss_size = signal_stack_memory.size();
         signal_stack.ss_flags = 0;
         if (sigaltstack(&signal_stack, nullptr) != 0) {
             fmt::print(stderr, "dynarmic: POSIX SigHandler: init failure at sigaltstack\n");
@@ -88,10 +84,6 @@ public:
 #endif
     }

-    ~SigHandler() noexcept {
-        munmap(signal_stack_memory, signal_stack_size);
-    }
-
     void AddCodeBlock(u64 offset, CodeBlockInfo cbi) noexcept {
         std::unique_lock guard(code_block_infos_mutex);
         code_block_infos.insert_or_assign(offset, cbi);
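
The signal stack here moves from a hand-managed mmap()/munmap() pair to a std::vector member, which removes the explicit destructor; the only requirement sigaltstack() imposes is that the memory outlive any signal delivered on it, which holds because SigHandler is a long-lived singleton in this file. A minimal standalone version of the same setup (illustrative, not dynarmic's full handler):

#include <algorithm>
#include <cstdint>
#include <signal.h>
#include <vector>

// Static duration: must outlive every signal delivered on this stack.
// Note SIGSTKSZ is a runtime value on glibc >= 2.34, so the size is computed
// at initialization time rather than as a compile-time constant.
static std::vector<uint8_t> alt_stack(std::max<size_t>(SIGSTKSZ, 2 * 1024 * 1024));

int main() {
    stack_t ss{};
    ss.ss_sp = alt_stack.data();
    ss.ss_size = alt_stack.size();
    ss.ss_flags = 0;
    return sigaltstack(&ss, nullptr); // 0 on success
}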