[common, core] remove unneeded memory indirection overhead at startup (#3306)

for core stuff:
just remove unique ptrs that don't need any pointer stability at all (after all, it's an allocation within an allocation, so yeah)

for fibers:
Main reasoning behind this is because virtualBuffer<> is stupidly fucking expensive and it also clutters my fstat view
ALSO mmap is a syscall, syscalls are bad for performance or whatever
ALSO std::vector<> is better suited for handling this kind of "fixed size thing where its like big but not THAT big" (512 KiB isn't going to kill your memory usage for each fiber...)

for core.cpp stuff
- inlines stuff into std::optional<> as opposed to std::unique_ptr<> (because, you know, we are making the Impl from a unique_ptr; allocating within an allocation is unnecessary)
- reorganizes the structures a bit so padding doesn't screw us up (it's not perfect, but eh, it saves a measly 44 bytes)
- removes unused/dead code
- uses std::vector<> instead of std::deque<>

no perf impact expected, maybe some initialisation boost but very minimal impact nonetheless
lto gets rid of most calls anyways - the heavy issue is with shared_ptr and the cache coherency from the atomics... but i clumped them together because well, they kinda do not suffer from cache coherency - hopefully not a mistake

this balloons the size of Impl to about 1.67 MB - which is fine because we throw it in the stack anyways

REST OF INTERFACES: most of them ballooned in size as well, but overhead is ok since its an allocation within an alloc, no stack is used (when it comes to storing these i mean)

Signed-off-by: lizzie <lizzie@eden-emu.dev>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3306
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
This commit is contained in:
lizzie 2026-01-16 23:39:16 +01:00 committed by crueter
parent 5768600c8b
commit 83a28dc251
No known key found for this signature in database
GPG key ID: 425ACD2D4830EBC6
40 changed files with 2602 additions and 2963 deletions

View file

@ -16,170 +16,160 @@ namespace Core {
using namespace Common::Literals;
class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
public:
explicit DynarmicCallbacks32(ArmDynarmic32& parent, Kernel::KProcess* process)
: m_parent{parent}, m_memory(process->GetMemory()),
m_process(process), m_debugger_enabled{parent.m_system.DebuggerEnabled()},
m_check_memory_access{m_debugger_enabled ||
!Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {}
// Binds the callback set to its owning ArmDynarmic32 and caches the process
// memory interface plus the debugger configuration sampled at construction.
DynarmicCallbacks32::DynarmicCallbacks32(ArmDynarmic32& parent, Kernel::KProcess* process)
: m_parent{parent}, m_memory(process->GetMemory())
, m_process(process), m_debugger_enabled{parent.m_system.DebuggerEnabled()}
// Per-access checks are required when debugging (watchpoints) or when the
// user has not opted into ignoring memory aborts.
, m_check_memory_access{m_debugger_enabled || !Settings::values.cpuopt_ignore_memory_aborts.GetValue()}
{}
u8 MemoryRead8(u32 vaddr) override {
CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
return m_memory.Read8(vaddr);
}
u16 MemoryRead16(u32 vaddr) override {
CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
return m_memory.Read16(vaddr);
}
u32 MemoryRead32(u32 vaddr) override {
CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
return m_memory.Read32(vaddr);
}
u64 MemoryRead64(u32 vaddr) override {
CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
return m_memory.Read64(vaddr);
}
std::optional<u32> MemoryReadCode(u32 vaddr) override {
if (!m_memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
return std::nullopt;
}
return m_memory.Read32(vaddr);
}
// Guest memory loads for the 32-bit JIT. Each read first reports the access to
// CheckMemoryAccess (read watchpoints / validity); the load itself is then
// performed unconditionally.
u8 DynarmicCallbacks32::MemoryRead8(u32 vaddr) {
CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
return m_memory.Read8(vaddr);
}
u16 DynarmicCallbacks32::MemoryRead16(u32 vaddr) {
CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
return m_memory.Read16(vaddr);
}
u32 DynarmicCallbacks32::MemoryRead32(u32 vaddr) {
CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
return m_memory.Read32(vaddr);
}
u64 DynarmicCallbacks32::MemoryRead64(u32 vaddr) {
CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
return m_memory.Read64(vaddr);
}
// Instruction fetch for the JIT: returns std::nullopt when the address range
// is unmapped so Dynarmic can raise a fault instead of reading garbage.
std::optional<u32> DynarmicCallbacks32::MemoryReadCode(u32 vaddr) {
if (!m_memory.IsValidVirtualAddressRange(vaddr, sizeof(u32)))
return std::nullopt;
return m_memory.Read32(vaddr);
}
void MemoryWrite8(u32 vaddr, u8 value) override {
if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
m_memory.Write8(vaddr, value);
}
void DynarmicCallbacks32::MemoryWrite8(u32 vaddr, u8 value) {
if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
m_memory.Write8(vaddr, value);
}
void MemoryWrite16(u32 vaddr, u16 value) override {
if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
m_memory.Write16(vaddr, value);
}
}
void DynarmicCallbacks32::MemoryWrite16(u32 vaddr, u16 value) {
if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
m_memory.Write16(vaddr, value);
}
void MemoryWrite32(u32 vaddr, u32 value) override {
if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
m_memory.Write32(vaddr, value);
}
}
void DynarmicCallbacks32::MemoryWrite32(u32 vaddr, u32 value) {
if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
m_memory.Write32(vaddr, value);
}
void MemoryWrite64(u32 vaddr, u64 value) override {
if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
m_memory.Write64(vaddr, value);
}
}
// 64-bit guest store; skipped entirely when the write watchpoint/validity
// check fails (CheckMemoryAccess has already halted the JIT in that case).
void DynarmicCallbacks32::MemoryWrite64(u32 vaddr, u64 value) {
if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
m_memory.Write64(vaddr, value);
}
}
bool MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) override {
return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive8(vaddr, value, expected);
}
bool MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) override {
return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive16(vaddr, value, expected);
}
bool MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) override {
return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive32(vaddr, value, expected);
}
bool MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) override {
return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive64(vaddr, value, expected);
}
// Exclusive stores (guest LDREX/STREX pairs). The store is only attempted when
// the access check passes; the return value reports whether the exclusive
// store succeeded, short-circuiting to false on a denied access.
bool DynarmicCallbacks32::MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) {
return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive8(vaddr, value, expected);
}
bool DynarmicCallbacks32::MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) {
return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive16(vaddr, value, expected);
}
bool DynarmicCallbacks32::MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) {
return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive32(vaddr, value, expected);
}
bool DynarmicCallbacks32::MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) {
return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive64(vaddr, value, expected);
}
void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
m_parent.LogBacktrace(m_process);
LOG_ERROR(Core_ARM,
"Unimplemented instruction @ {:#X} for {} instructions (instr = {:08X})", pc,
num_instructions, m_memory.Read32(pc));
}
// Invoked when Dynarmic hits an instruction it cannot recompile: logs a guest
// backtrace and the offending opcode. No interpreter is actually run here.
void DynarmicCallbacks32::InterpreterFallback(u32 pc, std::size_t num_instructions) {
m_parent.LogBacktrace(m_process);
LOG_ERROR(Core_ARM,
"Unimplemented instruction @ {:#X} for {} instructions (instr = {:08X})", pc,
num_instructions, m_memory.Read32(pc));
}
void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
switch (exception) {
case Dynarmic::A32::Exception::NoExecuteFault:
LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#08x}", pc);
ReturnException(pc, PrefetchAbort);
void DynarmicCallbacks32::ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) {
switch (exception) {
case Dynarmic::A32::Exception::NoExecuteFault:
LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#08x}", pc);
ReturnException(pc, PrefetchAbort);
return;
default:
if (m_debugger_enabled) {
ReturnException(pc, InstructionBreakpoint);
return;
default:
if (m_debugger_enabled) {
ReturnException(pc, InstructionBreakpoint);
return;
}
m_parent.LogBacktrace(m_process);
LOG_CRITICAL(Core_ARM,
"ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
exception, pc, m_memory.Read32(pc), m_parent.IsInThumbMode());
}
m_parent.LogBacktrace(m_process);
LOG_CRITICAL(Core_ARM,
"ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
exception, pc, m_memory.Read32(pc), m_parent.IsInThumbMode());
}
}
void CallSVC(u32 swi) override {
m_parent.m_svc_swi = swi;
m_parent.m_jit->HaltExecution(SupervisorCall);
}
// Records the supervisor-call number and halts the JIT so the HLE kernel can
// service the SVC on the host side before execution resumes.
void DynarmicCallbacks32::CallSVC(u32 swi) {
m_parent.m_svc_swi = swi;
m_parent.m_jit->HaltExecution(SupervisorCall);
}
void AddTicks(u64 ticks) override {
ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
void DynarmicCallbacks32::AddTicks(u64 ticks) {
ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
// Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
// rough approximation of the amount of executed ticks in the system, it may be thrown off
// if not all cores are doing a similar amount of work. Instead of doing this, we should
// device a way so that timing is consistent across all cores without increasing the ticks 4
// times.
u64 amortized_ticks = ticks / Core::Hardware::NUM_CPU_CORES;
// Always execute at least one tick.
amortized_ticks = std::max<u64>(amortized_ticks, 1);
// Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
// rough approximation of the amount of executed ticks in the system, it may be thrown off
// if not all cores are doing a similar amount of work. Instead of doing this, we should
// device a way so that timing is consistent across all cores without increasing the ticks 4
// times.
u64 amortized_ticks = ticks / Core::Hardware::NUM_CPU_CORES;
// Always execute at least one tick.
amortized_ticks = std::max<u64>(amortized_ticks, 1);
m_parent.m_system.CoreTiming().AddTicks(amortized_ticks);
}
m_parent.m_system.CoreTiming().AddTicks(amortized_ticks);
}
u64 GetTicksRemaining() override {
ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
// Number of ticks the JIT may still run before the next scheduled core-timing
// event; clamped so a negative downcount never underflows the u64 result.
// Only meaningful when wall-clock ticking is disabled.
u64 DynarmicCallbacks32::GetTicksRemaining() {
ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
return std::max<s64>(m_parent.m_system.CoreTiming().GetDowncount(), 0);
}
bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
if (!m_check_memory_access) {
return true;
}
if (!m_memory.IsValidVirtualAddressRange(addr, size)) {
LOG_CRITICAL(Core_ARM, "Stopping execution due to unmapped memory access at {:#x}",
addr);
m_parent.m_jit->HaltExecution(PrefetchAbort);
return false;
}
if (!m_debugger_enabled) {
return true;
}
const auto match{m_parent.MatchingWatchpoint(addr, size, type)};
if (match) {
m_parent.m_halted_watchpoint = match;
m_parent.m_jit->HaltExecution(DataAbort);
return false;
}
return std::max<s64>(m_parent.m_system.CoreTiming().GetDowncount(), 0);
}
bool DynarmicCallbacks32::CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
if (!m_check_memory_access) {
return true;
}
void ReturnException(u32 pc, Dynarmic::HaltReason hr) {
m_parent.GetContext(m_parent.m_breakpoint_context);
m_parent.m_breakpoint_context.pc = pc;
m_parent.m_breakpoint_context.r[15] = pc;
m_parent.m_jit->HaltExecution(hr);
if (!m_memory.IsValidVirtualAddressRange(addr, size)) {
LOG_CRITICAL(Core_ARM, "Stopping execution due to unmapped memory access at {:#x}",
addr);
m_parent.m_jit->HaltExecution(PrefetchAbort);
return false;
}
ArmDynarmic32& m_parent;
Core::Memory::Memory& m_memory;
Kernel::KProcess* m_process{};
const bool m_debugger_enabled{};
const bool m_check_memory_access{};
};
if (!m_debugger_enabled) {
return true;
}
std::shared_ptr<Dynarmic::A32::Jit> ArmDynarmic32::MakeJit(Common::PageTable* page_table) const {
const auto match{m_parent.MatchingWatchpoint(addr, size, type)};
if (match) {
m_parent.m_halted_watchpoint = match;
m_parent.m_jit->HaltExecution(DataAbort);
return false;
}
return true;
}
// Snapshots the context at the faulting pc (both the pc field and r[15] are
// set, keeping the A32 view consistent) and halts the JIT with the given
// reason so the caller/debugger can inspect the stop.
void DynarmicCallbacks32::ReturnException(u32 pc, Dynarmic::HaltReason hr) {
m_parent.GetContext(m_parent.m_breakpoint_context);
m_parent.m_breakpoint_context.pc = pc;
m_parent.m_breakpoint_context.r[15] = pc;
m_parent.m_jit->HaltExecution(hr);
}
void ArmDynarmic32::MakeJit(Common::PageTable* page_table) {
Dynarmic::A32::UserConfig config;
config.callbacks = m_cb.get();
config.callbacks = std::addressof(*m_cb);
config.coprocessors[15] = m_cp15;
config.define_unpredictable_behaviour = true;
@ -315,7 +305,7 @@ std::shared_ptr<Dynarmic::A32::Jit> ArmDynarmic32::MakeJit(Common::PageTable* pa
default:
break;
}
return std::make_unique<Dynarmic::A32::Jit>(config);
m_jit.emplace(config);
}
static std::pair<u32, u32> FpscrToFpsrFpcr(u32 fpscr) {
@ -360,21 +350,17 @@ u32 ArmDynarmic32::GetSvcNumber() const {
}
void ArmDynarmic32::GetSvcArguments(std::span<uint64_t, 8> args) const {
Dynarmic::A32::Jit& j = *m_jit;
Dynarmic::A32::Jit const& j = *m_jit;
auto& gpr = j.Regs();
for (size_t i = 0; i < 8; i++) {
for (size_t i = 0; i < 8; i++)
args[i] = gpr[i];
}
}
void ArmDynarmic32::SetSvcArguments(std::span<const uint64_t, 8> args) {
Dynarmic::A32::Jit& j = *m_jit;
auto& gpr = j.Regs();
for (size_t i = 0; i < 8; i++) {
gpr[i] = static_cast<u32>(args[i]);
}
for (size_t i = 0; i < 8; i++)
gpr[i] = u32(args[i]);
}
const Kernel::DebugWatchpoint* ArmDynarmic32::HaltedWatchpoint() const {
@ -387,11 +373,12 @@ void ArmDynarmic32::RewindBreakpointInstruction() {
ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProcess* process,
DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index)
: ArmInterface{uses_wall_clock}, m_system{system}, m_exclusive_monitor{exclusive_monitor},
m_cb(std::make_unique<DynarmicCallbacks32>(*this, process)),
m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
: ArmInterface{uses_wall_clock}, m_system{system}, m_exclusive_monitor{exclusive_monitor}
, m_cb(std::make_optional<DynarmicCallbacks32>(*this, process))
, m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index}
{
auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
m_jit = MakeJit(&page_table_impl);
MakeJit(&page_table_impl);
}
// Defaulted out of line; presumably kept in the .cpp so member types only need
// complete definitions here — TODO confirm against the header's members.
ArmDynarmic32::~ArmDynarmic32() = default;
@ -401,23 +388,18 @@ void ArmDynarmic32::SetTpidrroEl0(u64 value) {
}
void ArmDynarmic32::GetContext(Kernel::Svc::ThreadContext& ctx) const {
Dynarmic::A32::Jit& j = *m_jit;
Dynarmic::A32::Jit const& j = *m_jit;
auto& gpr = j.Regs();
auto& fpr = j.ExtRegs();
for (size_t i = 0; i < 16; i++) {
for (size_t i = 0; i < 16; i++)
ctx.r[i] = gpr[i];
}
ctx.fp = gpr[11];
ctx.sp = gpr[13];
ctx.lr = gpr[14];
ctx.pc = gpr[15];
ctx.pstate = j.Cpsr();
static_assert(sizeof(fpr) <= sizeof(ctx.v));
std::memcpy(ctx.v.data(), &fpr, sizeof(fpr));
auto [fpsr, fpcr] = FpscrToFpsrFpcr(j.Fpscr());
ctx.fpcr = fpcr;
ctx.fpsr = fpsr;
@ -428,16 +410,11 @@ void ArmDynarmic32::SetContext(const Kernel::Svc::ThreadContext& ctx) {
Dynarmic::A32::Jit& j = *m_jit;
auto& gpr = j.Regs();
auto& fpr = j.ExtRegs();
for (size_t i = 0; i < 16; i++) {
gpr[i] = static_cast<u32>(ctx.r[i]);
}
for (size_t i = 0; i < 16; i++)
gpr[i] = u32(ctx.r[i]);
j.SetCpsr(ctx.pstate);
static_assert(sizeof(fpr) <= sizeof(ctx.v));
std::memcpy(&fpr, ctx.v.data(), sizeof(fpr));
j.SetFpscr(FpsrFpcrToFpscr(ctx.fpsr, ctx.fpcr));
m_cp15->uprw = static_cast<u32>(ctx.tpidr);
}

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -12,16 +15,50 @@ namespace Core::Memory {
class Memory;
}
// Forward declarations of kernel types that this header only uses by
// pointer/reference, so the full definitions are not required here.
namespace Kernel {
enum class DebugWatchpointType : u8;
// Fixed typo: was declared as "KPRocess", which declares a nonexistent type;
// every use site (e.g. DynarmicCallbacks32's constructor) spells it KProcess.
class KProcess;
}
namespace Core {
class DynarmicCallbacks32;
class ArmDynarmic32;
class DynarmicCP15;
class System;
// Dynarmic user-callback implementation for the 32-bit core: routes guest
// memory accesses, SVCs, exceptions and timing queries from the JIT back into
// the emulator. One instance is owned per ArmDynarmic32.
class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
public:
explicit DynarmicCallbacks32(ArmDynarmic32& parent, Kernel::KProcess* process);
// Guest loads; each reports the access for watchpoint/validity checking.
u8 MemoryRead8(u32 vaddr) override;
u16 MemoryRead16(u32 vaddr) override;
u32 MemoryRead32(u32 vaddr) override;
u64 MemoryRead64(u32 vaddr) override;
// Instruction fetch; nullopt when the range is unmapped.
std::optional<u32> MemoryReadCode(u32 vaddr) override;
// Guest stores; suppressed when the access check halts execution.
void MemoryWrite8(u32 vaddr, u8 value) override;
void MemoryWrite16(u32 vaddr, u16 value) override;
void MemoryWrite32(u32 vaddr, u32 value) override;
void MemoryWrite64(u32 vaddr, u64 value) override;
// Exclusive stores; return whether the store succeeded.
bool MemoryWriteExclusive8(u32 vaddr, u8 value, u8 expected) override;
bool MemoryWriteExclusive16(u32 vaddr, u16 value, u16 expected) override;
bool MemoryWriteExclusive32(u32 vaddr, u32 value, u32 expected) override;
bool MemoryWriteExclusive64(u32 vaddr, u64 value, u64 expected) override;
InterpreterFallback(u32 pc, std::size_t num_instructions) override;
void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override;
void CallSVC(u32 swi) override;
void AddTicks(u64 ticks) override;
u64 GetTicksRemaining() override;
// Returns false (and halts the JIT) when the access is invalid or hits a
// watchpoint; true when the access may proceed.
bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type);
// Captures context at pc and halts with the given reason.
void ReturnException(u32 pc, Dynarmic::HaltReason hr);
ArmDynarmic32& m_parent;
Core::Memory::Memory& m_memory;
Kernel::KProcess* m_process{};
// Sampled once at construction; see the constructor for the rationale.
const bool m_debugger_enabled{};
const bool m_check_memory_access{};
};
class ArmDynarmic32 final : public ArmInterface {
public:
ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProcess* process,
DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index);
ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProcess* process, DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index);
~ArmDynarmic32() override;
Architecture GetArchitecture() const override {
@ -57,13 +94,13 @@ private:
friend class DynarmicCallbacks32;
friend class DynarmicCP15;
std::shared_ptr<Dynarmic::A32::Jit> MakeJit(Common::PageTable* page_table) const;
void MakeJit(Common::PageTable* page_table);
std::unique_ptr<DynarmicCallbacks32> m_cb{};
std::optional<DynarmicCallbacks32> m_cb{};
std::shared_ptr<DynarmicCP15> m_cp15{};
std::size_t m_core_index{};
std::shared_ptr<Dynarmic::A32::Jit> m_jit{};
std::optional<Dynarmic::A32::Jit> m_jit{};
// SVC callback
u32 m_svc_swi{};

View file

@ -13,223 +13,203 @@
namespace Core {
using Vector = Dynarmic::A64::Vector;
using namespace Common::Literals;
class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
public:
explicit DynarmicCallbacks64(ArmDynarmic64& parent, Kernel::KProcess* process)
: m_parent{parent}, m_memory(process->GetMemory()),
m_process(process), m_debugger_enabled{parent.m_system.DebuggerEnabled()},
m_check_memory_access{m_debugger_enabled ||
!Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {}
// Binds the callback set to its owning ArmDynarmic64 and caches the process
// memory interface plus the debugger configuration sampled at construction.
DynarmicCallbacks64::DynarmicCallbacks64(ArmDynarmic64& parent, Kernel::KProcess* process)
: m_parent{parent}, m_memory(process->GetMemory())
, m_process(process), m_debugger_enabled{parent.m_system.DebuggerEnabled()}
// Per-access checks are required when debugging (watchpoints) or when the
// user has not opted into ignoring memory aborts.
, m_check_memory_access{m_debugger_enabled || !Settings::values.cpuopt_ignore_memory_aborts.GetValue()}
{}
u8 MemoryRead8(u64 vaddr) override {
CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
return m_memory.Read8(vaddr);
}
u16 MemoryRead16(u64 vaddr) override {
CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
return m_memory.Read16(vaddr);
}
u32 MemoryRead32(u64 vaddr) override {
CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
return m_memory.Read32(vaddr);
}
u64 MemoryRead64(u64 vaddr) override {
CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
return m_memory.Read64(vaddr);
}
Vector MemoryRead128(u64 vaddr) override {
CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Read);
return {m_memory.Read64(vaddr), m_memory.Read64(vaddr + 8)};
}
std::optional<u32> MemoryReadCode(u64 vaddr) override {
if (!m_memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
return std::nullopt;
}
return m_memory.Read32(vaddr);
}
// Guest memory loads for the 64-bit core. Each read first reports the access
// to CheckMemoryAccess (read watchpoints / validity); the load itself is then
// performed unconditionally.
u8 DynarmicCallbacks64::MemoryRead8(u64 vaddr) {
CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Read);
return m_memory.Read8(vaddr);
}
u16 DynarmicCallbacks64::MemoryRead16(u64 vaddr) {
CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Read);
return m_memory.Read16(vaddr);
}
u32 DynarmicCallbacks64::MemoryRead32(u64 vaddr) {
CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Read);
return m_memory.Read32(vaddr);
}
u64 DynarmicCallbacks64::MemoryRead64(u64 vaddr) {
CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
return m_memory.Read64(vaddr);
}
// 128-bit load assembled from two 64-bit reads (low half first).
// NOTE(review): the two halves are read non-atomically.
Dynarmic::A64::Vector DynarmicCallbacks64::MemoryRead128(u64 vaddr) {
CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Read);
return {m_memory.Read64(vaddr), m_memory.Read64(vaddr + 8)};
}
// Instruction fetch for the JIT: returns std::nullopt when the address range
// is unmapped so Dynarmic can raise a fault instead of reading garbage.
std::optional<u32> DynarmicCallbacks64::MemoryReadCode(u64 vaddr) {
if (!m_memory.IsValidVirtualAddressRange(vaddr, sizeof(u32)))
return std::nullopt;
return m_memory.Read32(vaddr);
}
void MemoryWrite8(u64 vaddr, u8 value) override {
if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
m_memory.Write8(vaddr, value);
}
void DynarmicCallbacks64::MemoryWrite8(u64 vaddr, u8 value) {
if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
m_memory.Write8(vaddr, value);
}
void MemoryWrite16(u64 vaddr, u16 value) override {
if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
m_memory.Write16(vaddr, value);
}
}
void DynarmicCallbacks64::MemoryWrite16(u64 vaddr, u16 value) {
if (CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write)) {
m_memory.Write16(vaddr, value);
}
void MemoryWrite32(u64 vaddr, u32 value) override {
if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
m_memory.Write32(vaddr, value);
}
}
void DynarmicCallbacks64::MemoryWrite32(u64 vaddr, u32 value) {
if (CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write)) {
m_memory.Write32(vaddr, value);
}
void MemoryWrite64(u64 vaddr, u64 value) override {
if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
m_memory.Write64(vaddr, value);
}
}
void DynarmicCallbacks64::MemoryWrite64(u64 vaddr, u64 value) {
if (CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write)) {
m_memory.Write64(vaddr, value);
}
void MemoryWrite128(u64 vaddr, Vector value) override {
if (CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write)) {
m_memory.Write64(vaddr, value[0]);
m_memory.Write64(vaddr + 8, value[1]);
}
}
// 128-bit store performed as two 64-bit writes (low half first), only when the
// write access check passes. NOTE(review): the halves are written
// non-atomically.
void DynarmicCallbacks64::MemoryWrite128(u64 vaddr, Dynarmic::A64::Vector value) {
if (CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write)) {
m_memory.Write64(vaddr, value[0]);
m_memory.Write64(vaddr + 8, value[1]);
}
}
bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override {
return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive8(vaddr, value, expected);
}
bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override {
return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive16(vaddr, value, expected);
}
bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override {
return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive32(vaddr, value, expected);
}
bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override {
return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive64(vaddr, value, expected);
}
bool MemoryWriteExclusive128(u64 vaddr, Vector value, Vector expected) override {
return CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive128(vaddr, value, expected);
}
// Exclusive stores (guest LDXR/STXR pairs). The store is only attempted when
// the access check passes; the return value reports whether the exclusive
// store succeeded, short-circuiting to false on a denied access.
bool DynarmicCallbacks64::MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) {
return CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive8(vaddr, value, expected);
}
bool DynarmicCallbacks64::MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) {
return CheckMemoryAccess(vaddr, 2, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive16(vaddr, value, expected);
}
bool DynarmicCallbacks64::MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) {
return CheckMemoryAccess(vaddr, 4, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive32(vaddr, value, expected);
}
bool DynarmicCallbacks64::MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) {
return CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive64(vaddr, value, expected);
}
bool DynarmicCallbacks64::MemoryWriteExclusive128(u64 vaddr, Dynarmic::A64::Vector value, Dynarmic::A64::Vector expected) {
return CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Write) &&
m_memory.WriteExclusive128(vaddr, value, expected);
}
void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
m_parent.LogBacktrace(m_process);
LOG_ERROR(Core_ARM,
"Unimplemented instruction @ {:#X} for {} instructions (instr = {:08X})", pc,
num_instructions, m_memory.Read32(pc));
// Invoked when Dynarmic hits an instruction it cannot recompile: logs a guest
// backtrace and the opcode, then raises a PrefetchAbort at pc (unlike the
// 32-bit counterpart, which only logs).
void DynarmicCallbacks64::InterpreterFallback(u64 pc, std::size_t num_instructions) {
m_parent.LogBacktrace(m_process);
LOG_ERROR(Core_ARM, "Unimplemented instruction @ {:#X} for {} instructions (instr = {:08X})", pc,
num_instructions, m_memory.Read32(pc));
ReturnException(pc, PrefetchAbort);
}
// Handles guest instruction-cache maintenance operations by invalidating the
// corresponding translated code, then halts the JIT so invalidation takes
// effect before execution resumes.
void DynarmicCallbacks64::InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op, u64 value) {
    switch (op) {
    case Dynarmic::A64::InstructionCacheOperation::InvalidateByVAToPoU: {
        // Invalidate only the cache line containing the given virtual address.
        static constexpr u64 ICACHE_LINE_SIZE = 64;
        const u64 cache_line_start = value & ~(ICACHE_LINE_SIZE - 1);
        m_parent.InvalidateCacheRange(cache_line_start, ICACHE_LINE_SIZE);
        break;
    }
    case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoU:
        // Drop every translated block.
        m_parent.ClearInstructionCache();
        break;
    case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoUInnerSharable:
    default:
        // Fixed typo in the log message ("Unprocesseed" -> "Unprocessed").
        LOG_DEBUG(Core_ARM, "Unprocessed instruction cache operation: {}", op);
        break;
    }
    m_parent.m_jit->HaltExecution(Dynarmic::HaltReason::CacheInvalidation);
}
void DynarmicCallbacks64::ExceptionRaised(u64 pc, Dynarmic::A64::Exception exception) {
switch (exception) {
case Dynarmic::A64::Exception::WaitForInterrupt:
case Dynarmic::A64::Exception::WaitForEvent:
case Dynarmic::A64::Exception::SendEvent:
case Dynarmic::A64::Exception::SendEventLocal:
case Dynarmic::A64::Exception::Yield:
LOG_TRACE(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})", static_cast<std::size_t>(exception), pc, m_memory.Read32(pc));
return;
case Dynarmic::A64::Exception::NoExecuteFault:
LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#016x}", pc);
ReturnException(pc, PrefetchAbort);
}
void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,
u64 value) override {
switch (op) {
case Dynarmic::A64::InstructionCacheOperation::InvalidateByVAToPoU: {
static constexpr u64 ICACHE_LINE_SIZE = 64;
const u64 cache_line_start = value & ~(ICACHE_LINE_SIZE - 1);
m_parent.InvalidateCacheRange(cache_line_start, ICACHE_LINE_SIZE);
break;
}
case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoU:
m_parent.ClearInstructionCache();
break;
case Dynarmic::A64::InstructionCacheOperation::InvalidateAllToPoUInnerSharable:
default:
LOG_DEBUG(Core_ARM, "Unprocesseed instruction cache operation: {}", op);
break;
}
m_parent.m_jit->HaltExecution(Dynarmic::HaltReason::CacheInvalidation);
}
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception exception) override {
switch (exception) {
case Dynarmic::A64::Exception::WaitForInterrupt:
case Dynarmic::A64::Exception::WaitForEvent:
case Dynarmic::A64::Exception::SendEvent:
case Dynarmic::A64::Exception::SendEventLocal:
case Dynarmic::A64::Exception::Yield:
LOG_TRACE(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})", static_cast<std::size_t>(exception), pc, m_memory.Read32(pc));
return;
case Dynarmic::A64::Exception::NoExecuteFault:
LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#016x}", pc);
ReturnException(pc, PrefetchAbort);
return;
default:
if (m_debugger_enabled) {
ReturnException(pc, InstructionBreakpoint);
} else {
m_parent.LogBacktrace(m_process);
LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})", static_cast<std::size_t>(exception), pc, m_memory.Read32(pc));
}
return;
default:
if (m_debugger_enabled) {
ReturnException(pc, InstructionBreakpoint);
} else {
m_parent.LogBacktrace(m_process);
LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})", static_cast<std::size_t>(exception), pc, m_memory.Read32(pc));
}
}
}
void CallSVC(u32 svc) override {
m_parent.m_svc = svc;
m_parent.m_jit->HaltExecution(SupervisorCall);
}
// Records the supervisor-call number and halts the JIT so the HLE kernel can
// service the SVC on the host side before execution resumes.
void DynarmicCallbacks64::CallSVC(u32 svc) {
m_parent.m_svc = svc;
m_parent.m_jit->HaltExecution(SupervisorCall);
}
void AddTicks(u64 ticks) override {
ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
void DynarmicCallbacks64::AddTicks(u64 ticks) {
ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
// Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
// rough approximation of the amount of executed ticks in the system, it may be thrown off
// if not all cores are doing a similar amount of work. Instead of doing this, we should
// device a way so that timing is consistent across all cores without increasing the ticks 4
// times.
u64 amortized_ticks = ticks / Core::Hardware::NUM_CPU_CORES;
// Always execute at least one tick.
amortized_ticks = std::max<u64>(amortized_ticks, 1);
// Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
// rough approximation of the amount of executed ticks in the system, it may be thrown off
// if not all cores are doing a similar amount of work. Instead of doing this, we should
// device a way so that timing is consistent across all cores without increasing the ticks 4
// times.
u64 amortized_ticks = ticks / Core::Hardware::NUM_CPU_CORES;
// Always execute at least one tick.
amortized_ticks = std::max<u64>(amortized_ticks, 1);
m_parent.m_system.CoreTiming().AddTicks(amortized_ticks);
}
m_parent.m_system.CoreTiming().AddTicks(amortized_ticks);
}
u64 GetTicksRemaining() override {
ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
// Number of ticks the JIT may still run before the next scheduled core-timing
// event; clamped so a negative downcount never underflows the u64 result.
// Only meaningful when wall-clock ticking is disabled.
u64 DynarmicCallbacks64::GetTicksRemaining() {
    // Use ASSERT_MSG like every sibling callback (AddTicks, the 32-bit
    // counterpart) instead of folding the message into the ASSERT condition
    // via `&& "..."`, which obscures the checked expression.
    ASSERT_MSG(!m_parent.m_uses_wall_clock, "Dynarmic ticking disabled");
    return std::max<s64>(m_parent.m_system.CoreTiming().GetDowncount(), 0);
}
return std::max<s64>(m_parent.m_system.CoreTiming().GetDowncount(), 0);
}
u64 GetCNTPCT() override {
return m_parent.m_system.CoreTiming().GetClockTicks();
}
bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
if (!m_check_memory_access) {
return true;
}
if (!m_memory.IsValidVirtualAddressRange(addr, size)) {
LOG_CRITICAL(Core_ARM, "Stopping execution due to unmapped memory access at {:#x}",
addr);
m_parent.m_jit->HaltExecution(PrefetchAbort);
return false;
}
if (!m_debugger_enabled) {
return true;
}
const auto match{m_parent.MatchingWatchpoint(addr, size, type)};
if (match) {
m_parent.m_halted_watchpoint = match;
m_parent.m_jit->HaltExecution(DataAbort);
return false;
}
// Returns the current value of the guest's counter-timer (CNTPCT) as tracked
// by the emulated core-timing clock.
u64 DynarmicCallbacks64::GetCNTPCT() {
return m_parent.m_system.CoreTiming().GetClockTicks();
}
bool DynarmicCallbacks64::CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
if (!m_check_memory_access) {
return true;
}
void ReturnException(u64 pc, Dynarmic::HaltReason hr) {
m_parent.GetContext(m_parent.m_breakpoint_context);
m_parent.m_breakpoint_context.pc = pc;
m_parent.m_jit->HaltExecution(hr);
if (!m_memory.IsValidVirtualAddressRange(addr, size)) {
LOG_CRITICAL(Core_ARM, "Stopping execution due to unmapped memory access at {:#x}",
addr);
m_parent.m_jit->HaltExecution(PrefetchAbort);
return false;
}
ArmDynarmic64& m_parent;
Core::Memory::Memory& m_memory;
u64 m_tpidrro_el0{};
u64 m_tpidr_el0{};
Kernel::KProcess* m_process{};
const bool m_debugger_enabled{};
const bool m_check_memory_access{};
static constexpr u64 MinimumRunCycles = 10000U;
};
if (!m_debugger_enabled) {
return true;
}
std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* page_table,
std::size_t address_space_bits) const {
const auto match{m_parent.MatchingWatchpoint(addr, size, type)};
if (match) {
m_parent.m_halted_watchpoint = match;
m_parent.m_jit->HaltExecution(DataAbort);
return false;
}
return true;
}
void DynarmicCallbacks64::ReturnException(u64 pc, Dynarmic::HaltReason hr) {
m_parent.GetContext(m_parent.m_breakpoint_context);
m_parent.m_breakpoint_context.pc = pc;
m_parent.m_jit->HaltExecution(hr);
}
void ArmDynarmic64::MakeJit(Common::PageTable* page_table, std::size_t address_space_bits) {
Dynarmic::A64::UserConfig config;
// Callbacks
config.callbacks = m_cb.get();
config.callbacks = std::addressof(*m_cb);
// Memory
if (page_table) {
@ -375,7 +355,7 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
default:
break;
}
return std::make_shared<Dynarmic::A64::Jit>(config);
m_jit.emplace(config);
}
HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
@ -393,19 +373,15 @@ u32 ArmDynarmic64::GetSvcNumber() const {
}
void ArmDynarmic64::GetSvcArguments(std::span<uint64_t, 8> args) const {
Dynarmic::A64::Jit& j = *m_jit;
for (size_t i = 0; i < 8; i++) {
Dynarmic::A64::Jit const& j = *m_jit;
for (size_t i = 0; i < 8; i++)
args[i] = j.GetRegister(i);
}
}
void ArmDynarmic64::SetSvcArguments(std::span<const uint64_t, 8> args) {
Dynarmic::A64::Jit& j = *m_jit;
for (size_t i = 0; i < 8; i++) {
for (size_t i = 0; i < 8; i++)
j.SetRegister(i, args[i]);
}
}
const Kernel::DebugWatchpoint* ArmDynarmic64::HaltedWatchpoint() const {
@ -416,13 +392,14 @@ void ArmDynarmic64::RewindBreakpointInstruction() {
this->SetContext(m_breakpoint_context);
}
ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProcess* process,
DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index)
: ArmInterface{uses_wall_clock}, m_system{system}, m_exclusive_monitor{exclusive_monitor},
m_cb(std::make_unique<DynarmicCallbacks64>(*this, process)), m_core_index{core_index} {
ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProcess* process, DynarmicExclusiveMonitor& exclusive_monitor, std::size_t core_index)
: ArmInterface{uses_wall_clock}, m_system{system}, m_exclusive_monitor{exclusive_monitor}
, m_cb(std::make_optional<DynarmicCallbacks64>(*this, process))
, m_core_index{core_index}
{
auto& page_table = process->GetPageTable().GetBasePageTable();
auto& page_table_impl = page_table.GetImpl();
m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
}
ArmDynarmic64::~ArmDynarmic64() = default;
@ -432,17 +409,14 @@ void ArmDynarmic64::SetTpidrroEl0(u64 value) {
}
void ArmDynarmic64::GetContext(Kernel::Svc::ThreadContext& ctx) const {
Dynarmic::A64::Jit& j = *m_jit;
Dynarmic::A64::Jit const& j = *m_jit;
auto gpr = j.GetRegisters();
auto fpr = j.GetVectors();
// TODO: this is inconvenient
for (size_t i = 0; i < 29; i++) {
for (size_t i = 0; i < 29; i++)
ctx.r[i] = gpr[i];
}
ctx.fp = gpr[29];
ctx.lr = gpr[30];
ctx.sp = j.GetSP();
ctx.pc = j.GetPC();
ctx.pstate = j.GetPstate();
@ -454,16 +428,12 @@ void ArmDynarmic64::GetContext(Kernel::Svc::ThreadContext& ctx) const {
void ArmDynarmic64::SetContext(const Kernel::Svc::ThreadContext& ctx) {
Dynarmic::A64::Jit& j = *m_jit;
// TODO: this is inconvenient
std::array<u64, 31> gpr;
for (size_t i = 0; i < 29; i++) {
for (size_t i = 0; i < 29; i++)
gpr[i] = ctx.r[i];
}
gpr[29] = ctx.fp;
gpr[30] = ctx.lr;
j.SetRegisters(gpr);
j.SetSP(ctx.sp);
j.SetPC(ctx.pc);

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -17,12 +20,57 @@ namespace Core::Memory {
class Memory;
}
namespace Kernel {
enum class DebugWatchpointType : u8;
class KPRocess;
}
namespace Core {
class DynarmicCallbacks64;
class ArmDynarmic64;
class DynarmicExclusiveMonitor;
class System;
class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
public:
explicit DynarmicCallbacks64(ArmDynarmic64& parent, Kernel::KProcess* process);
u8 MemoryRead8(u64 vaddr) override;
u16 MemoryRead16(u64 vaddr) override;
u32 MemoryRead32(u64 vaddr) override;
u64 MemoryRead64(u64 vaddr) override;
Dynarmic::A64::Vector MemoryRead128(u64 vaddr) override;
std::optional<u32> MemoryReadCode(u64 vaddr) override;
void MemoryWrite8(u64 vaddr, u8 value) override;
void MemoryWrite16(u64 vaddr, u16 value) override;
void MemoryWrite32(u64 vaddr, u32 value) override;
void MemoryWrite64(u64 vaddr, u64 value) override;
void MemoryWrite128(u64 vaddr, Dynarmic::A64::Vector value) override;
bool MemoryWriteExclusive8(u64 vaddr, std::uint8_t value, std::uint8_t expected) override;
bool MemoryWriteExclusive16(u64 vaddr, std::uint16_t value, std::uint16_t expected) override;
bool MemoryWriteExclusive32(u64 vaddr, std::uint32_t value, std::uint32_t expected) override;
bool MemoryWriteExclusive64(u64 vaddr, std::uint64_t value, std::uint64_t expected) override;
bool MemoryWriteExclusive128(u64 vaddr, Dynarmic::A64::Vector value, Dynarmic::A64::Vector expected) override;
void InterpreterFallback(u64 pc, std::size_t num_instructions) override;
void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op, u64 value) override;
void ExceptionRaised(u64 pc, Dynarmic::A64::Exception exception) override;
void CallSVC(u32 svc) override;
void AddTicks(u64 ticks) override;
u64 GetTicksRemaining() override;
u64 GetCNTPCT() override;
bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type);
void ReturnException(u64 pc, Dynarmic::HaltReason hr);
ArmDynarmic64& m_parent;
Core::Memory::Memory& m_memory;
u64 m_tpidrro_el0{};
u64 m_tpidr_el0{};
Kernel::KProcess* m_process{};
const bool m_debugger_enabled{};
const bool m_check_memory_access{};
static constexpr u64 MinimumRunCycles = 10000U;
};
class ArmDynarmic64 final : public ArmInterface {
public:
ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProcess* process,
@ -59,12 +107,11 @@ private:
private:
friend class DynarmicCallbacks64;
std::shared_ptr<Dynarmic::A64::Jit> MakeJit(Common::PageTable* page_table,
std::size_t address_space_bits) const;
std::unique_ptr<DynarmicCallbacks64> m_cb{};
void MakeJit(Common::PageTable* page_table, std::size_t address_space_bits);
std::optional<DynarmicCallbacks64> m_cb{};
std::size_t m_core_index{};
std::shared_ptr<Dynarmic::A64::Jit> m_jit{};
std::optional<Dynarmic::A64::Jit> m_jit{};
// SVC callback
u32 m_svc{};