Compare commits
8 commits: 2f04b26392 ... 49148d168f

| Author | SHA1 | Date |
|---|---|---|
|  | 49148d168f |  |
|  | 836c00fc97 |  |
|  | 514bacf9f3 |  |
|  | 6a5c18b55e |  |
|  | 75c5393ab3 |  |
|  | ac99ea96da |  |
|  | d1b7824443 |  |
|  | 34fa39eae8 |  |
Changed image assets:

| File | Diff | Before size | After size |
|---|---|---|---|
| dist/dev.eden_emu.eden.svg (vendored) | 279 | 19 KiB | 13 KiB |
| dist/eden.bmp (vendored) | BIN | 181 KiB | 256 KiB |
| dist/eden.ico (vendored) | BIN | 317 KiB | 335 KiB |
| dist/qt_themes/default/icons/256x256/eden.png (vendored) | BIN | 15 KiB | 35 KiB |
|  | BIN | 57 KiB | 131 KiB |
|  | BIN | 35 KiB | 56 KiB |
|  | BIN | 24 KiB | 51 KiB |
|  | BIN | 5 KiB | 14 KiB |
|  | BIN | 3.3 KiB | 9.4 KiB |
|  | BIN | 9.5 KiB | 24 KiB |
|  | BIN | 15 KiB | 35 KiB |
|  | BIN | 44 KiB | 67 KiB |
@@ -1 +1 @@
-<?xml version='1.0' encoding='utf-8'?><resources><color name='ic_launcher_background'>#43fcfcff</color></resources>
+<?xml version='1.0' encoding='utf-8'?><resources><color name='ic_launcher_background'>#1F143C</color></resources>
@@ -66,8 +66,6 @@ add_library(
     fs/path_util.cpp
     fs/path_util.h
     hash.h
-    heap_tracker.cpp
-    heap_tracker.h
     hex_util.cpp
     hex_util.h
     host_memory.cpp
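heap_tracker.cpp and heap_tracker.h leave the common library here; the full source deletions follow, and the remaining call sites in the core memory code are cleaned up further down.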
@@ -1,282 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <fstream>
-#include "common/heap_tracker.h"
-#include "common/logging.h"
-#include "common/assert.h"
-
-namespace Common {
-
-namespace {
-
-s64 GetMaxPermissibleResidentMapCount() {
-    // Default value.
-    s64 value = 65530;
-
-    // Try to read how many mappings we can make.
-    std::ifstream s("/proc/sys/vm/max_map_count");
-    s >> value;
-
-    // Print, for debug.
-    LOG_INFO(HW_Memory, "Current maximum map count: {}", value);
-
-    // Allow 20000 maps for other code and to account for split inaccuracy.
-    return std::max<s64>(value - 20000, 0);
-}
-
-} // namespace
-
-HeapTracker::HeapTracker(Common::HostMemory& buffer)
-    : m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {}
-HeapTracker::~HeapTracker() = default;
-
-void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
-                      MemoryPermission perm, bool is_separate_heap) {
-    // When mapping other memory, map pages immediately.
-    if (!is_separate_heap) {
-        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
-        return;
-    }
-
-    {
-        // We are mapping part of a separate heap.
-        std::scoped_lock lk{m_lock};
-
-        auto* const map = new SeparateHeapMap{
-            .vaddr = virtual_offset,
-            .paddr = host_offset,
-            .size = length,
-            .tick = m_tick++,
-            .perm = perm,
-            .is_resident = false,
-        };
-
-        // Insert into mappings.
-        m_map_count++;
-        m_mappings.insert(*map);
-    }
-
-    // Finally, map.
-    this->DeferredMapSeparateHeap(virtual_offset);
-}
-
-void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
-    // If this is a separate heap...
-    if (is_separate_heap) {
-        std::scoped_lock lk{m_lock};
-
-        const SeparateHeapMap key{
-            .vaddr = virtual_offset,
-        };
-
-        // Split at the boundaries of the region we are removing.
-        this->SplitHeapMapLocked(virtual_offset);
-        this->SplitHeapMapLocked(virtual_offset + size);
-
-        // Erase all mappings in range.
-        auto it = m_mappings.find(key);
-        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
-            // Get underlying item.
-            auto* const item = std::addressof(*it);
-
-            // If resident, erase from resident map.
-            if (item->is_resident) {
-                ASSERT(--m_resident_map_count >= 0);
-                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
-            }
-
-            // Erase from map.
-            ASSERT(--m_map_count >= 0);
-            it = m_mappings.erase(it);
-
-            // Free the item.
-            delete item;
-        }
-    }
-
-    // Unmap pages.
-    m_buffer.Unmap(virtual_offset, size, false);
-}
-
-void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
-    // Ensure no rebuild occurs while reprotecting.
-    std::shared_lock lk{m_rebuild_lock};
-
-    // Split at the boundaries of the region we are reprotecting.
-    this->SplitHeapMap(virtual_offset, size);
-
-    // Declare tracking variables.
-    const VAddr end = virtual_offset + size;
-    VAddr cur = virtual_offset;
-
-    while (cur < end) {
-        VAddr next = cur;
-        bool should_protect = false;
-
-        {
-            std::scoped_lock lk2{m_lock};
-
-            const SeparateHeapMap key{
-                .vaddr = next,
-            };
-
-            // Try to get the next mapping corresponding to this address.
-            const auto it = m_mappings.nfind(key);
-
-            if (it == m_mappings.end()) {
-                // There are no separate heap mappings remaining.
-                next = end;
-                should_protect = true;
-            } else if (it->vaddr == cur) {
-                // We are in range.
-                // Update permission bits.
-                it->perm = perm;
-
-                // Determine next address and whether we should protect.
-                next = cur + it->size;
-                should_protect = it->is_resident;
-            } else /* if (it->vaddr > cur) */ {
-                // We weren't in range, but there is a block coming up that will be.
-                next = it->vaddr;
-                should_protect = true;
-            }
-        }
-
-        // Clamp to end.
-        next = (std::min)(next, end);
-
-        // Reprotect, if we need to.
-        if (should_protect) {
-            m_buffer.Protect(cur, next - cur, perm);
-        }
-
-        // Advance.
-        cur = next;
-    }
-}
-
-bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
-    if (m_buffer.IsInVirtualRange(fault_address)) {
-        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
-    }
-
-    return false;
-}
-
-bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
-    bool rebuild_required = false;
-
-    {
-        std::scoped_lock lk{m_lock};
-
-        // Check to ensure this was a non-resident separate heap mapping.
-        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
-        if (it == m_mappings.end() || it->is_resident) {
-            return false;
-        }
-
-        // Update tick before possible rebuild.
-        it->tick = m_tick++;
-
-        // Check if we need to rebuild.
-        if (m_resident_map_count > m_max_resident_map_count) {
-            rebuild_required = true;
-        }
-
-        // Map the area.
-        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
-
-        // This map is now resident.
-        it->is_resident = true;
-        m_resident_map_count++;
-        m_resident_mappings.insert(*it);
-    }
-
-    if (rebuild_required) {
-        // A rebuild was required, so perform it now.
-        this->RebuildSeparateHeapAddressSpace();
-    }
-
-    return true;
-}
-
-void HeapTracker::RebuildSeparateHeapAddressSpace() {
-    std::scoped_lock lk{m_rebuild_lock, m_lock};
-
-    ASSERT(!m_resident_mappings.empty());
-
-    // Dump half of the mappings.
-    //
-    // Despite being worse in theory, this has proven to be better in practice than more
-    // regularly dumping a smaller amount, because it significantly reduces average case
-    // lock contention.
-    std::size_t const desired_count = (std::min)(m_resident_map_count, m_max_resident_map_count) / 2;
-    std::size_t const evict_count = m_resident_map_count - desired_count;
-    auto it = m_resident_mappings.begin();
-
-    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
-        // Unmark and unmap.
-        it->is_resident = false;
-        m_buffer.Unmap(it->vaddr, it->size, false);
-
-        // Advance.
-        ASSERT(--m_resident_map_count >= 0);
-        it = m_resident_mappings.erase(it);
-    }
-}
-
-void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
-    std::scoped_lock lk{m_lock};
-
-    this->SplitHeapMapLocked(offset);
-    this->SplitHeapMapLocked(offset + size);
-}
-
-void HeapTracker::SplitHeapMapLocked(VAddr offset) {
-    const auto it = this->GetNearestHeapMapLocked(offset);
-    if (it == m_mappings.end() || it->vaddr == offset) {
-        // Not contained or no split required.
-        return;
-    }
-
-    // Cache the original values.
-    auto* const left = std::addressof(*it);
-    const size_t orig_size = left->size;
-
-    // Adjust the left map.
-    const size_t left_size = offset - left->vaddr;
-    left->size = left_size;
-
-    // Create the new right map.
-    auto* const right = new SeparateHeapMap{
-        .vaddr = left->vaddr + left_size,
-        .paddr = left->paddr + left_size,
-        .size = orig_size - left_size,
-        .tick = left->tick,
-        .perm = left->perm,
-        .is_resident = left->is_resident,
-    };
-
-    // Insert the new right map.
-    m_map_count++;
-    m_mappings.insert(*right);
-
-    // If resident, also insert into resident map.
-    if (right->is_resident) {
-        m_resident_map_count++;
-        m_resident_mappings.insert(*right);
-    }
-}
-
-HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
-    const SeparateHeapMap key{
-        .vaddr = offset,
-    };
-
-    return m_mappings.find(key);
-}
-
-} // namespace Common
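The file removed above implemented deferred fastmem mapping for Android: separate-heap regions were recorded but not actually mapped until the first fault, and a budget derived from vm.max_map_count capped how many could stay resident, with the least recently faulted half evicted on overflow. A standalone sketch of the budget computation it used (hypothetical demo mirroring GetMaxPermissibleResidentMapCount, not project code):

```cpp
#include <algorithm>
#include <cstdint>
#include <fstream>
#include <iostream>

int main() {
    // Default used when /proc is unavailable, mirroring the deleted code.
    std::int64_t max_map_count = 65530;
    std::ifstream s("/proc/sys/vm/max_map_count");
    s >> max_map_count;

    // Reserve headroom for the rest of the process and for split inaccuracy.
    const std::int64_t budget = std::max<std::int64_t>(max_map_count - 20000, 0);
    std::cout << "resident-map budget: " << budget << '\n';
}
```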
@@ -1,98 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <atomic>
-#include <mutex>
-#include <set>
-#include <shared_mutex>
-
-#include "common/host_memory.h"
-#include "common/intrusive_red_black_tree.h"
-
-namespace Common {
-
-struct SeparateHeapMap {
-    Common::IntrusiveRedBlackTreeNode addr_node{};
-    Common::IntrusiveRedBlackTreeNode tick_node{};
-    VAddr vaddr{};
-    PAddr paddr{};
-    size_t size{};
-    size_t tick{};
-    MemoryPermission perm{};
-    bool is_resident{};
-};
-
-struct SeparateHeapMapAddrComparator {
-    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
-        if (lhs.vaddr < rhs.vaddr) {
-            return -1;
-        } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
-            return 0;
-        } else {
-            return 1;
-        }
-    }
-};
-
-struct SeparateHeapMapTickComparator {
-    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
-        if (lhs.tick < rhs.tick) {
-            return -1;
-        } else if (lhs.tick > rhs.tick) {
-            return 1;
-        } else {
-            return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
-        }
-    }
-};
-
-class HeapTracker {
-public:
-    explicit HeapTracker(Common::HostMemory& buffer);
-    ~HeapTracker();
-
-    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
-             bool is_separate_heap);
-    void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
-    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
-    u8* VirtualBasePointer() {
-        return m_buffer.VirtualBasePointer();
-    }
-
-    bool DeferredMapSeparateHeap(u8* fault_address);
-    bool DeferredMapSeparateHeap(size_t virtual_offset);
-
-private:
-    using AddrTreeTraits =
-        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
-    using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
-
-    using TickTreeTraits =
-        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
-    using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
-
-    AddrTree m_mappings{};
-    TickTree m_resident_mappings{};
-
-private:
-    void SplitHeapMap(VAddr offset, size_t size);
-    void SplitHeapMapLocked(VAddr offset);
-
-    AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
-
-    void RebuildSeparateHeapAddressSpace();
-
-private:
-    Common::HostMemory& m_buffer;
-    const s64 m_max_resident_map_count;
-
-    std::shared_mutex m_rebuild_lock{};
-    std::mutex m_lock{};
-    s64 m_map_count{};
-    s64 m_resident_map_count{};
-    size_t m_tick{};
-};
-
-} // namespace Common
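Worth noting in the deleted header: each SeparateHeapMap carries two intrusive tree hooks, so the same allocation is indexed both by address (for lookups and splits) and by last-fault tick (for LRU eviction) without any per-container allocations. A minimal sketch of that dual-index idea using Boost.Intrusive, purely as an illustration; the project used its own intrusive red-black tree:

```cpp
#include <cstdint>
#include <boost/intrusive/set.hpp>

namespace bi = boost::intrusive;

struct Mapping {
    bi::set_member_hook<> addr_hook; // node for the address-ordered tree
    bi::set_member_hook<> tick_hook; // node for the recency-ordered tree
    std::uint64_t vaddr{};
    std::uint64_t tick{};
};

struct ByAddr {
    bool operator()(const Mapping& a, const Mapping& b) const { return a.vaddr < b.vaddr; }
};
struct ByTick {
    bool operator()(const Mapping& a, const Mapping& b) const { return a.tick < b.tick; }
};

using AddrTree = bi::set<Mapping,
    bi::member_hook<Mapping, bi::set_member_hook<>, &Mapping::addr_hook>, bi::compare<ByAddr>>;
using TickTree = bi::set<Mapping,
    bi::member_hook<Mapping, bi::set_member_hook<>, &Mapping::tick_hook>, bi::compare<ByTick>>;

int main() {
    Mapping m{.vaddr = 0x1000, .tick = 7};
    AddrTree by_addr;
    TickTree by_tick;
    by_addr.insert(m); // one object, two indexes, zero extra allocations
    by_tick.insert(m);
    by_addr.clear();   // intrusive containers must be emptied before the hooks die
    by_tick.clear();
}
```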
@@ -572,9 +572,8 @@ public:
         if (True(perms & MemoryPermission::Execute))
             prot_flags |= PROT_EXEC;
 #endif
-        int flags = (fd >= 0 ? MAP_SHARED : MAP_PRIVATE) | MAP_FIXED;
-        void* ret = mmap(virtual_base + virtual_offset, length, prot_flags, flags, fd, host_offset);
-        ASSERT_MSG(ret != MAP_FAILED, "mmap: {} {}", strerror(errno), fd);
+        int ret = mprotect(virtual_base + virtual_offset, length, prot_flags);
+        ASSERT_MSG(ret == 0, "mprotect: {} {}", strerror(errno), fd);
     }

     void Unmap(size_t virtual_offset, size_t length) {

@@ -588,8 +587,8 @@ public:
         auto [merged_pointer, merged_size] =
             free_manager.FreeBlock(virtual_base + virtual_offset, length);

-        void* ret = mmap(merged_pointer, merged_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
-        ASSERT_MSG(ret != MAP_FAILED, "mmap: {}", strerror(errno));
+        int ret = mprotect(merged_pointer, merged_size, PROT_NONE);
+        ASSERT_MSG(ret == 0, "mprotect: {}", strerror(errno));
     }

     void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute) {
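Here Map and Unmap on the POSIX fastmem arena stop creating and destroying fd-backed mappings and instead flip protections on the arena's standing reservation. A standalone sketch of that reserve-then-reprotect pattern (hypothetical demo, not project code):

```cpp
#include <cstddef>
#include <sys/mman.h>

int main() {
    const std::size_t arena_size = 1 << 20;
    const std::size_t page = 4096; // page size assumed for the demo

    // Reserve the whole arena once, with no access.
    void* raw = mmap(nullptr, arena_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (raw == MAP_FAILED) return 1;
    char* base = static_cast<char*>(raw);

    // "Map" a region: flip permissions on the reservation instead of
    // creating a brand-new mapping with mmap(MAP_FIXED).
    if (mprotect(base + page, page, PROT_READ | PROT_WRITE) != 0) return 1;
    base[page] = 42;

    // "Unmap" it again: revoke access but keep the single reservation.
    if (mprotect(base + page, page, PROT_NONE) != 0) return 1;

    munmap(base, arena_size);
    return 0;
}
```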
@@ -13,11 +13,11 @@
 namespace Common {

 template <typename BaseAddr>
-MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_,
-                                                   std::size_t first_level_bits_,
-                                                   std::size_t page_bits_)
-    : address_space_bits{address_space_bits_},
-      first_level_bits{first_level_bits_}, page_bits{page_bits_} {
+MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_, std::size_t first_level_bits_, std::size_t page_bits_)
+    : address_space_bits{address_space_bits_}
+    , first_level_bits{first_level_bits_}
+    , page_bits{page_bits_}
+{
     if (page_bits == 0) {
         return;
     }

@@ -30,12 +30,9 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bit
     void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
 #else
     void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
-
-    if (base == MAP_FAILED) {
+    if (base == MAP_FAILED)
         base = nullptr;
-    }
-
 #endif

     ASSERT(base);
     base_ptr = reinterpret_cast<BaseAddr*>(base);
 }

@@ -56,29 +53,21 @@ template <typename BaseAddr>
 void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
     const u64 new_start = start >> first_level_shift;
     const u64 new_end = (start + size) >> first_level_shift;
-    for (u64 i = new_start; i <= new_end; i++) {
-        if (!first_level_map[i]) {
+    for (u64 i = new_start; i <= new_end; i++)
+        if (!first_level_map[i])
             AllocateLevel(i);
-        }
-    }
 }

 template <typename BaseAddr>
-void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) {
-    void* ptr = reinterpret_cast<char *>(base_ptr) + level * first_level_chunk_size;
+void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 index) {
+    void* ptr = reinterpret_cast<char *>(base_ptr) + index * first_level_chunk_size;
 #ifdef _WIN32
-    void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)};
+    void* base = VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE);
+    ASSERT(base);
 #else
-    void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE,
-                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)};
-
-    if (base == MAP_FAILED) {
-        base = nullptr;
-    }
+    void* base = ptr;
 #endif
-    ASSERT(base);
-
-    first_level_map[level] = base;
-
+    first_level_map[index] = base;
 }

 } // namespace Common
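On the POSIX side the constructor now maps the entire first level read/write up front, so AllocateLevel degenerates to pointer arithmetic; only Windows keeps the explicit reserve-then-commit dance. A standalone sketch contrasting the two strategies (hypothetical demo, not project code):

```cpp
#include <cstddef>

#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif

int main() {
    const std::size_t total = 64 * 1024 * 1024; // whole first level
    const std::size_t chunk = 1024 * 1024;      // one level's slice

#ifdef _WIN32
    // Windows separates address-space reservation from commitment.
    char* base = static_cast<char*>(VirtualAlloc(nullptr, total, MEM_RESERVE, PAGE_READWRITE));
    if (base == nullptr) return 1;
    if (VirtualAlloc(base, chunk, MEM_COMMIT, PAGE_READWRITE) == nullptr) return 1;
#else
    // POSIX anonymous mappings commit lazily: pages materialize on first touch,
    // so one read/write mmap up front costs address space, not RAM.
    void* raw = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (raw == MAP_FAILED) return 1;
    char* base = static_cast<char*>(raw);
#endif

    base[0] = 1; // first touch faults the page in
    return 0;
}
```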
@@ -16,7 +16,6 @@
 #include "common/assert.h"
 #include "common/atomic_ops.h"
 #include "common/common_types.h"
-#include "common/heap_tracker.h"
 #include "common/logging.h"
 #include "common/page_table.h"
 #include "common/scope_exit.h"

@@ -55,37 +54,24 @@ struct Memory::Impl {
         } else {
             current_page_table->fastmem_arena = nullptr;
         }

-#ifdef __ANDROID__
-        heap_tracker.emplace(system.DeviceMemory().buffer);
-        buffer = std::addressof(*heap_tracker);
-#else
         buffer = std::addressof(system.DeviceMemory().buffer);
-#endif
     }

-    void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms,
-                         bool separate_heap) {
+    void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, Common::PhysicalAddress target, Common::MemoryPermission perms, bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
-        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
-                   GetInteger(target));
-        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
-                 Common::PageType::Memory);
+        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", GetInteger(target));
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target, Common::PageType::Memory);

         if (current_page_table->fastmem_arena) {
-            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
-                        separate_heap);
+            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms, separate_heap);
         }
     }

-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                     bool separate_heap) {
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
-        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
-                 Common::PageType::Unmapped);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0, Common::PageType::Unmapped);

         if (current_page_table->fastmem_arena) {
             buffer->Unmap(GetInteger(base), size, separate_heap);

@@ -857,12 +843,7 @@ struct Memory::Impl {
     std::array<Common::ScratchBuffer<u32>, Core::Hardware::NUM_CPU_CORES> scratch_buffers{};
     std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
     std::mutex sys_core_guard;
-#ifdef __ANDROID__
-    std::optional<Common::HeapTracker> heap_tracker;
-    Common::HeapTracker* buffer{};
-#else
     Common::HostMemory* buffer{};
-#endif
 };

 Memory::Memory(Core::System& system_) : system{system_} {

@@ -1055,30 +1036,14 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
     u8* const ptr = impl->GetPointerImpl(
         GetInteger(vaddr),
         [&] {
-            LOG_ERROR(HW_Memory, "Unmapped InvalidateNCE for {} bytes @ {:#x}", size,
-                      GetInteger(vaddr));
+            LOG_ERROR(HW_Memory, "Unmapped InvalidateNCE for {} bytes @ {:#x}", size, GetInteger(vaddr));
             mapped = false;
         },
         [&] { rasterizer = true; });
     if (rasterizer) {
         impl->InvalidateGPUMemory(ptr, size);
     }

-#ifdef __ANDROID__
-    if (!rasterizer && mapped) {
-        impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
-    }
-#endif
-
     return mapped && ptr != nullptr;
 }

-bool Memory::InvalidateSeparateHeap(void* fault_address) {
-#ifdef __ANDROID__
-    return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
-#else
-    return false;
-#endif
-}
-
 } // namespace Core::Memory
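With the tracker gone, the Android-only wiring disappears too: buffer is always the plain Common::HostMemory arena, and the fault hook Memory::InvalidateSeparateHeap that fed page faults back into deferred mapping is deleted. For readers unfamiliar with the removed mechanism, here is a standalone sketch of fault-driven mapping (hypothetical demo, not project code): touch an inaccessible page, catch SIGSEGV, map the page in the handler, and let the faulting instruction retry.

```cpp
#include <csignal>
#include <cstdint>
#include <cstdio>
#include <sys/mman.h>

static void OnFault(int, siginfo_t* info, void*) {
    // Map the faulting page on demand. (mmap is not formally async-signal-safe;
    // real implementations are more careful. A 4096-byte page is assumed.)
    void* page = reinterpret_cast<void*>(
        reinterpret_cast<std::uintptr_t>(info->si_addr) & ~std::uintptr_t{4095});
    mmap(page, 4096, PROT_READ | PROT_WRITE,
         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
}

int main() {
    char* base = static_cast<char*>(
        mmap(nullptr, 1 << 20, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));

    struct sigaction sa{};
    sa.sa_sigaction = OnFault;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, nullptr);

    base[0] = 42; // faults once; the handler maps the page and the write retries
    std::printf("%d\n", base[0]);
}
```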
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later

 // SPDX-FileCopyrightText: 2014 Citra Emulator Project

@@ -490,13 +490,8 @@ public:
      * marked as debug or non-debug.
      */
     void MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug);

     void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);

     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);

-    bool InvalidateSeparateHeap(void* fault_address);
-
 private:
     Core::System& system;
@@ -82,8 +82,6 @@ private:

     std::thread thread;
     mach_port_t server_port;
-
-    void MessagePump();
 };

 MachHandler::MachHandler() {

@@ -97,15 +95,7 @@ MachHandler::MachHandler() {
     KCHECK(mach_port_request_notification(mach_task_self(), server_port, MACH_NOTIFY_PORT_DESTROYED, 0, server_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &prev));
 #undef KCHECK

-    thread = std::thread(&MachHandler::MessagePump, this);
-    thread.detach();
-}
-
-MachHandler::~MachHandler() {
-    mach_port_deallocate(mach_task_self(), server_port);
-}
-
-void MachHandler::MessagePump() {
+    thread = std::thread([this] {
     mach_msg_return_t mr;
     MachMessage request;
     MachMessage reply;

@@ -128,6 +118,12 @@ void MachHandler::MessagePump() {
             return;
         }
     }
+    });
+
+    thread.detach();
+}
+
+MachHandler::~MachHandler() {
+    mach_port_deallocate(mach_task_self(), server_port);
 }

 #if defined(ARCHITECTURE_x86_64)
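A note on the restructuring above: the message pump now lives in a detached lambda owned by the constructor rather than in a named MessagePump member. A standalone sketch of the pattern (hypothetical demo; the real pump blocks in mach_msg rather than polling a flag):

```cpp
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

static std::atomic<bool> g_stop{false};

int main() {
    // Spawn the pump as a detached lambda; in the real handler it runs for the
    // life of the process, which is why detaching (not joining) is viable.
    std::thread([] {
        while (!g_stop.load()) {
            std::puts("pump iteration");
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }).detach();

    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    g_stop.store(true);
    // Give the detached thread a moment to observe the flag before exit.
    std::this_thread::sleep_for(std::chrono::milliseconds(20));
}
```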
@@ -1,4 +1,4 @@
-// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
+// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
 // SPDX-License-Identifier: GPL-3.0-or-later

 /* This file is part of the dynarmic project.

@@ -46,22 +46,18 @@ class SigHandler {
         });
     }
     static void SigAction(int sig, siginfo_t* info, void* raw_context);
-    bool supports_fast_mem = true;
-    void* signal_stack_memory = nullptr;
+    std::vector<u8> signal_stack_memory;
     ankerl::unordered_dense::map<u64, CodeBlockInfo> code_block_infos;
     std::shared_mutex code_block_infos_mutex;
     struct sigaction old_sa_segv;
     struct sigaction old_sa_bus;
-    std::size_t signal_stack_size;
+    bool supports_fast_mem = true;
 public:
     SigHandler() noexcept {
-        signal_stack_size = std::max<size_t>(SIGSTKSZ, 2 * 1024 * 1024);
-        signal_stack_memory = mmap(nullptr, signal_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+        signal_stack_memory.resize(std::max<std::size_t>(SIGSTKSZ, 2 * 1024 * 1024), 0);

         stack_t signal_stack{};
-        signal_stack.ss_sp = signal_stack_memory;
-        signal_stack.ss_size = signal_stack_size;
+        signal_stack.ss_sp = signal_stack_memory.data();
+        signal_stack.ss_size = signal_stack_memory.size();
         signal_stack.ss_flags = 0;
         if (sigaltstack(&signal_stack, nullptr) != 0) {
             fmt::print(stderr, "dynarmic: POSIX SigHandler: init failure at sigaltstack\n");

@@ -88,10 +84,6 @@ public:
 #endif
     }

-    ~SigHandler() noexcept {
-        munmap(signal_stack_memory, signal_stack_size);
-    }
-
     void AddCodeBlock(u64 offset, CodeBlockInfo cbi) noexcept {
         std::unique_lock guard(code_block_infos_mutex);
         code_block_infos.insert_or_assign(offset, cbi);
|
||||||
// output[i] = FPT(FP::FPToFixed<FPT>(fsize, input[i], fbits, unsigned_, fpcr, rounding_mode, fpsr));
|
// output[i] = FPT(FP::FPToFixed<FPT>(fsize, input[i], fbits, unsigned_, fpcr, rounding_mode, fpsr));
|
||||||
// }
|
// }
|
||||||
|
|
||||||
|
template<size_t fsize, bool unsigned_, FP::RoundingMode rounding_mode, size_t fbits>
|
||||||
|
static void EmitFPVectorToFixedThunk(VectorArray<mcl::unsigned_integer_of_size<fsize>>& output, const VectorArray<mcl::unsigned_integer_of_size<fsize>>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
||||||
|
using FPT = mcl::unsigned_integer_of_size<fsize>;
|
||||||
|
for (size_t i = 0; i < output.size(); ++i)
|
||||||
|
output[i] = FPT(FP::FPToFixed<FPT>(fsize, input[i], fbits, unsigned_, fpcr, rounding_mode, fpsr));
|
||||||
|
}
|
||||||
|
|
||||||
template<size_t fsize, bool unsigned_>
|
template<size_t fsize, bool unsigned_>
|
||||||
void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||||
const size_t fbits = inst->GetArg(1).GetU8();
|
const size_t fbits = inst->GetArg(1).GetU8();
|
||||||
|
|
@ -2106,43 +2113,88 @@ void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
||||||
ctx.reg_alloc.DefineValue(code, inst, src);
|
ctx.reg_alloc.DefineValue(code, inst, src);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
auto const fpt_fn = [fbits, rounding]() -> void (*)(VectorArray<mcl::unsigned_integer_of_size<fsize>>& output, const VectorArray<mcl::unsigned_integer_of_size<fsize>>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
||||||
|
#define ROUNDING_MODE_CASE(CASE, N) \
|
||||||
|
if (rounding == FP::RoundingMode::CASE && fsize >= (N) && fbits == (N)) return &EmitFPVectorToFixedThunk<fsize, unsigned_, FP::RoundingMode::CASE, N>;
|
||||||
|
#define ROUNDING_MODE_SWITCH(CASE) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x00) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x01) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x02) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x03) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x04) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x05) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x06) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x07) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x08) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x09) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x0a) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x0b) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x0c) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x0d) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x0e) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x0f) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x10) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x11) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x12) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x13) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x14) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x15) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x16) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x17) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x18) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x19) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x1a) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x1b) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x1c) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x1d) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x1e) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x1f) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x20) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x21) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x22) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x23) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x24) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x25) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x26) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x27) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x28) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x29) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x2a) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x2b) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x2c) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x2d) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x2e) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x2f) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x30) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x31) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x32) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x33) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x34) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x35) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x36) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x37) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x38) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x39) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x3a) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x3b) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x3c) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x3d) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x3e) \
|
||||||
|
ROUNDING_MODE_CASE(CASE, 0x3f)
|
||||||
|
|
||||||
using FPT = mcl::unsigned_integer_of_size<fsize>; // WORKAROUND: For issue 678 on MSVC
|
// FUCK YOU MSVC, FUCKING DEPTH CANT EVEN HANDLE 8+16+32+64 DEPTH OF A ELSE STATMENT YOU FUCKING STUPID
|
||||||
auto const func = [rounding]() -> void(*)(VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
// BURN MSVC BURN IT STUPID COMPILER CAN'T EVEN COMPILE THE MOST BASIC C++
|
||||||
switch (rounding) {
|
ROUNDING_MODE_SWITCH(ToNearest_TieEven)
|
||||||
case FP::RoundingMode::ToNearest_TieEven:
|
ROUNDING_MODE_SWITCH(TowardsPlusInfinity)
|
||||||
return [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
ROUNDING_MODE_SWITCH(TowardsMinusInfinity)
|
||||||
for (size_t i = 0; i < output.size(); ++i)
|
ROUNDING_MODE_SWITCH(TowardsZero)
|
||||||
output[i] = FPT(FP::FPToFixed<FPT>(fsize, input[i], fsize, unsigned_, fpcr, FP::RoundingMode::ToNearest_TieEven, fpsr));
|
ROUNDING_MODE_SWITCH(ToNearest_TieAwayFromZero)
|
||||||
};
|
#undef ROUNDING_MODE_SWITCH
|
||||||
case FP::RoundingMode::TowardsPlusInfinity:
|
#undef ROUNDING_MODE_CASE
|
||||||
return [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
return nullptr;
|
||||||
for (size_t i = 0; i < output.size(); ++i)
|
|
||||||
output[i] = FPT(FP::FPToFixed<FPT>(fsize, input[i], fsize, unsigned_, fpcr, FP::RoundingMode::TowardsPlusInfinity, fpsr));
|
|
||||||
};
|
|
||||||
case FP::RoundingMode::TowardsMinusInfinity:
|
|
||||||
return [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
|
||||||
for (size_t i = 0; i < output.size(); ++i)
|
|
||||||
output[i] = FPT(FP::FPToFixed<FPT>(fsize, input[i], fsize, unsigned_, fpcr, FP::RoundingMode::TowardsMinusInfinity, fpsr));
|
|
||||||
};
|
|
||||||
case FP::RoundingMode::TowardsZero:
|
|
||||||
return [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
|
||||||
for (size_t i = 0; i < output.size(); ++i)
|
|
||||||
output[i] = FPT(FP::FPToFixed<FPT>(fsize, input[i], fsize, unsigned_, fpcr, FP::RoundingMode::TowardsZero, fpsr));
|
|
||||||
};
|
|
||||||
case FP::RoundingMode::ToNearest_TieAwayFromZero:
|
|
||||||
return [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
|
||||||
for (size_t i = 0; i < output.size(); ++i)
|
|
||||||
output[i] = FPT(FP::FPToFixed<FPT>(fsize, input[i], fsize, unsigned_, fpcr, FP::RoundingMode::ToNearest_TieAwayFromZero, fpsr));
|
|
||||||
};
|
|
||||||
case FP::RoundingMode::ToOdd:
|
|
||||||
return [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
|
|
||||||
for (size_t i = 0; i < output.size(); ++i)
|
|
||||||
output[i] = FPT(FP::FPToFixed<FPT>(fsize, input[i], fsize, unsigned_, fpcr, FP::RoundingMode::ToOdd, fpsr));
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}();
|
}();
|
||||||
EmitTwoOpFallback<3>(code, ctx, inst, func);
|
|
||||||
|
EmitTwoOpFallback<3>(code, ctx, inst, fpt_fn);
|
||||||
}
|
}
|
||||||
|
|
||||||
void EmitX64::EmitFPVectorToSignedFixed16(EmitContext& ctx, IR::Inst* inst) {
|
void EmitX64::EmitFPVectorToSignedFixed16(EmitContext& ctx, IR::Inst* inst) {
|
||||||
|
|
|
||||||
|
|
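The rewrite above replaces a deeply nested switch of per-mode lambdas with a macro-expanded chain of flat if statements, each returning a pointer to a fully specialized EmitFPVectorToFixedThunk. That folds the runtime (rounding, fbits) pair into template parameters while keeping every branch shallow enough for MSVC; note also that the removed lambdas passed fsize where FPToFixed expects fbits, whereas the thunk threads the actual fbits through. A standalone sketch of the same runtime-value-to-template-parameter dispatch (hypothetical demo, not dynarmic code):

```cpp
#include <cstdio>

template <int N>
static void Handler() { std::printf("instantiated for N = %d\n", N); }

using Fn = void (*)();

// Expand a bounded if-chain; each hit returns a distinct instantiation.
static Fn Dispatch(int n) {
#define CASE(N) if (n == (N)) return &Handler<(N)>;
    CASE(0) CASE(1) CASE(2) CASE(3)
#undef CASE
    return nullptr; // outside the supported range
}

int main() {
    if (Fn f = Dispatch(2)) {
        f(); // prints "instantiated for N = 2"
    }
}
```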
@@ -2733,8 +2733,10 @@ void TextureCache<P>::PrepareImage(ImageId image_id, bool is_modification, bool
         }
     } else {
         RefreshContents(image, image_id);
-        SynchronizeAliases(image_id);
+        if (!image.aliased_images.empty()) {
+            SynchronizeAliases(image_id);
+        }
     }
     if (is_modification) {
         MarkModification(image);
     }
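The added guard makes alias synchronization conditional: images with no aliases, typically the common case, now skip SynchronizeAliases entirely during PrepareImage instead of calling into it just to find nothing to do.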