mirror of
https://git.eden-emu.dev/eden-emu/eden
synced 2026-04-10 05:28:56 +02:00
no move constructor?
This commit is contained in:
parent
b784ab5b23
commit
76a2581462
3 changed files with 18 additions and 39 deletions
|
|
@ -25,42 +25,33 @@ NvMap::Handle::Handle(u64 size_, Id id_)
|
|||
flags.raw = 0;
|
||||
}
|
||||
|
||||
/// @brief Allocates backing state for this handle.
/// @param pFlags     Allocation flags, stored verbatim on the handle.
/// @param pAlign     Requested alignment; raised to YUZU_PAGESIZE if smaller.
/// @param pKind      Kind byte stored on the handle.
/// @param pAddress   CPU-side address backing the handle (0 = no address).
/// @param pSessionId Owning session, recorded for later per-session cleanup.
/// @return NvResult::AccessDenied if the handle was already allocated,
///         NvResult::Success otherwise.
NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, NvCore::SessionId pSessionId) {
    std::scoped_lock lock(mutex);

    // Handles cannot be allocated twice
    if (allocated) {
        return NvResult::AccessDenied;
    }

    flags = pFlags;
    kind = pKind;
    align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
    session_id = pSessionId;

    // This flag is only applicable for handles with an address passed
    if (pAddress) {
        flags.keep_uncached_after_free.Assign(0);
    } else {
        LOG_CRITICAL(Service_NVDRV, "Mapping nvmap handles without a CPU side address is unimplemented!");
    }

    // `size` was set at construction; page-align it, then compute the
    // alignment-rounded size used when pinning onto the SMMU.
    size = Common::AlignUp(size, YUZU_PAGESIZE);
    aligned_size = Common::AlignUp(size, align);
    address = pAddress;
    allocated = true;

    return NvResult::Success;
}
|
||||
|
||||
NvResult NvMap::Handle::Duplicate(bool internal_session) {
|
||||
std::scoped_lock lock(mutex);
|
||||
// Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
|
||||
if (!allocated) [[unlikely]] {
|
||||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
// If we internally use FromId the duplication tracking of handles won't work accurately due to
|
||||
// us not implementing per-process handle refs.
|
||||
if (internal_session) {
|
||||
|
|
@ -68,7 +59,6 @@ NvResult NvMap::Handle::Duplicate(bool internal_session) {
|
|||
} else {
|
||||
dupes++;
|
||||
}
|
||||
|
||||
return NvResult::Success;
|
||||
}
|
||||
|
||||
|
|
@ -76,7 +66,7 @@ NvMap::NvMap(Container& core_, Tegra::Host1x::Host1x& host1x_) : host1x{host1x_}
|
|||
|
||||
/// @brief Registers a freshly created handle in the owning map.
/// Uses insert_or_assign so a stale entry with the same id is replaced in
/// place rather than silently left behind (emplace would be a no-op then).
void NvMap::AddHandle(Handle&& handle_description) {
    std::scoped_lock l(handles_lock);
    // The id is copied out by value during argument evaluation, before the
    // Handle is actually moved inside insert_or_assign, so this is safe
    // despite the std::move in the same call expression.
    handles.insert_or_assign(handle_description.id, std::move(handle_description));
}
|
||||
|
||||
void NvMap::UnmapHandle(Handle& handle_description) {
|
||||
|
|
@ -137,20 +127,19 @@ NvResult NvMap::CreateHandle(u64 size, Handle::Id& out_handle) {
|
|||
}
|
||||
|
||||
/// @brief Looks up a handle by id.
/// @param handle Id previously produced by CreateHandle.
/// @return A reference to the live Handle entry, or std::nullopt if the id is
///         not (or no longer) registered.
/// The returned reference stays valid only while the entry remains in
/// `handles` — presumably guaranteed by the node-based map; confirm.
/// NOTE(review): this acquires handles_lock internally, yet several callers
/// (PinHandle, UnpinHandle, the ioctl handlers) appear to take handles_lock
/// before calling it; with a non-recursive std::mutex that self-deadlocks —
/// confirm the intended locking protocol.
std::optional<std::reference_wrapper<NvMap::Handle>> NvMap::GetHandle(Handle::Id handle) {
    std::scoped_lock lock(handles_lock);
    if (auto const it = handles.find(handle); it != handles.end())
        return {it->second};
    return std::nullopt;
}
|
||||
|
||||
/// @brief Returns the device address of a registered handle.
/// @param handle Id previously produced by CreateHandle.
/// @return The handle's d_address, or 0 if the id is not registered.
///         Note 0 doubles as the "unmapped" sentinel, so callers cannot
///         distinguish "unknown id" from "known but not mapped".
DAddr NvMap::GetHandleAddress(Handle::Id handle) {
    std::scoped_lock lock(handles_lock);
    if (auto const it = handles.find(handle); it != handles.end())
        return it->second.d_address;
    return 0;
}
|
||||
|
||||
DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
|
||||
std::scoped_lock lock(handles_lock);
|
||||
auto o = GetHandle(handle);
|
||||
if (!o) [[unlikely]] {
|
||||
return 0;
|
||||
|
|
@ -158,7 +147,6 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
|
|||
|
||||
auto handle_description = &o->get();
|
||||
|
||||
std::scoped_lock lock(handle_description->mutex);
|
||||
const auto map_low_area = [&] {
|
||||
if (handle_description->pin_virt_address == 0) {
|
||||
auto& gmmu_allocator = host1x.Allocator();
|
||||
|
|
@ -206,7 +194,6 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
|
|||
if (auto free_handle = handles.find(unmap_queue.front()); free_handle != handles.end()) {
|
||||
// Handles in the unmap queue are guaranteed not to be pinned so don't bother
|
||||
// checking if they are before unmapping
|
||||
std::scoped_lock fl(free_handle->second.mutex);
|
||||
if (handle_description->d_address)
|
||||
UnmapHandle(free_handle->second);
|
||||
} else {
|
||||
|
|
@ -232,9 +219,9 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
|
|||
}
|
||||
|
||||
void NvMap::UnpinHandle(Handle::Id handle) {
|
||||
std::scoped_lock lock(handles_lock);
|
||||
if (auto o = GetHandle(handle); o) {
|
||||
auto handle_description = &o->get();
|
||||
std::scoped_lock lock(handle_description->mutex);
|
||||
if (--handle_description->pins < 0) {
|
||||
LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
|
||||
} else if (!handle_description->pins) {
|
||||
|
|
@ -247,6 +234,7 @@ void NvMap::UnpinHandle(Handle::Id handle) {
|
|||
}
|
||||
|
||||
void NvMap::DuplicateHandle(Handle::Id handle, bool internal_session) {
|
||||
std::scoped_lock lock(handles_lock);
|
||||
auto o = GetHandle(handle);
|
||||
if (!o) {
|
||||
LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
|
||||
|
|
@ -259,11 +247,10 @@ void NvMap::DuplicateHandle(Handle::Id handle, bool internal_session) {
|
|||
}
|
||||
|
||||
std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
|
||||
// We use a weak ptr here so we can tell when the handle has been freed and report that back to
|
||||
// guest
|
||||
// We use a weak ptr here so we can tell when the handle has been freed and report that back to guest
|
||||
std::scoped_lock lock(handles_lock);
|
||||
if (auto o = GetHandle(handle); o) {
|
||||
auto handle_description = &o->get();
|
||||
std::scoped_lock l(handle_description->mutex);
|
||||
if (internal_session) {
|
||||
if (--handle_description->internal_dupes < 0)
|
||||
LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!");
|
||||
|
|
@ -303,19 +290,12 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
|
|||
}
|
||||
|
||||
void NvMap::UnmapAllHandles(NvCore::SessionId session_id) {
|
||||
auto handles_copy = [&] {
|
||||
std::scoped_lock lk{handles_lock};
|
||||
return handles;
|
||||
}();
|
||||
|
||||
for (auto& [id, handle] : handles_copy) {
|
||||
{
|
||||
std::scoped_lock lk{handle.mutex};
|
||||
if (handle.session_id.id != session_id.id || handle.dupes <= 0) {
|
||||
continue;
|
||||
}
|
||||
std::scoped_lock lk{handles_lock};
|
||||
for (auto it = handles.begin(); it != handles.end(); ++it) {
|
||||
if (it->second.session_id.id != session_id.id || it->second.dupes <= 0) {
|
||||
continue;
|
||||
}
|
||||
FreeHandle(id, false);
|
||||
FreeHandle(it->first, false);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -38,7 +38,6 @@ public:
|
|||
/// @brief A handle to a contiguous block of memory in an application's address space
|
||||
struct Handle {
|
||||
using Id = u32;
|
||||
std::mutex mutex;
|
||||
std::optional<typename std::list<Handle::Id>::iterator> unmap_queue_entry{};
|
||||
u64 align{}; //!< The alignment to use when pinning the handle onto the SMMU
|
||||
u64 size; //!< Page-aligned size of the memory the handle refers to
|
||||
|
|
@ -64,7 +63,6 @@ public:
|
|||
bool in_heap : 1 = false;
|
||||
bool is_shared_mem_mapped : 1 = false; //!< If this nvmap has been mapped with the MapSharedMem IPC < call
|
||||
|
||||
Handle() = default;
|
||||
Handle(u64 size, Id id);
|
||||
|
||||
/**
|
||||
|
|
@ -142,15 +140,14 @@ public:
|
|||
|
||||
void UnmapAllHandles(NvCore::SessionId session_id);
|
||||
|
||||
private:
|
||||
std::list<Handle::Id> unmap_queue{};
|
||||
boost::unordered_node_map<Handle::Id, Handle> handles{}; //!< Main owning map of handles
|
||||
std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
|
||||
std::mutex handles_lock; //!< Protects access to `handles`
|
||||
|
||||
static constexpr u32 HandleIdIncrement{4}; //!< Each new handle ID is an increment of 4 from the previous
|
||||
std::atomic<u32> next_handle_id{HandleIdIncrement};
|
||||
Tegra::Host1x::Host1x& host1x;
|
||||
Container& core;
|
||||
|
||||
void AddHandle(Handle&& handle);
|
||||
|
||||
|
|
@ -166,7 +163,5 @@ private:
|
|||
* @return If the handle was removed from the map
|
||||
*/
|
||||
bool TryRemoveHandle(const Handle& handle_description);
|
||||
|
||||
Container& core;
|
||||
};
|
||||
} // namespace Service::Nvidia::NvCore
|
||||
|
|
|
|||
|
|
@ -112,6 +112,7 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
|
|||
params.align = YUZU_PAGESIZE;
|
||||
}
|
||||
|
||||
std::scoped_lock lock(file.handles_lock);
|
||||
auto o = file.GetHandle(params.handle);
|
||||
if (!o) {
|
||||
LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
|
||||
|
|
@ -144,6 +145,7 @@ NvResult nvmap::IocGetId(IocGetIdParams& params) {
|
|||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
std::scoped_lock lock(file.handles_lock);
|
||||
auto o = file.GetHandle(params.handle);
|
||||
if (!o) {
|
||||
LOG_CRITICAL(Service_NVDRV, "Error!");
|
||||
|
|
@ -166,6 +168,7 @@ NvResult nvmap::IocFromId(IocFromIdParams& params) {
|
|||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
std::scoped_lock lock(file.handles_lock);
|
||||
auto o = file.GetHandle(params.id);
|
||||
if (!o) {
|
||||
LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
|
||||
|
|
@ -192,6 +195,7 @@ NvResult nvmap::IocParam(IocParamParams& params) {
|
|||
return NvResult::BadValue;
|
||||
}
|
||||
|
||||
std::scoped_lock lock(file.handles_lock);
|
||||
auto o = file.GetHandle(params.handle);
|
||||
if (!o) {
|
||||
LOG_CRITICAL(Service_NVDRV, "Not registered handle!");
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue