[cmake] enable clang-cl and WoA builds (#348)
Compilation and CMake fixes for both Windows on ARM and clang-cl, meaning Windows can now be built with either MSVC or clang, on both amd64 and aarch64. Compiling with clang is *dramatically* faster, so this should be useful for CI.

Co-authored-by: crueter <crueter@eden-emu.dev>
Co-authored-by: crueter <crueter@crueter.xyz>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/348
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-by: crueter <crueter@eden-emu.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
Parent: 428f136a75
Commit: 9d2681ecc9
276 changed files with 973 additions and 1010 deletions
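Most of the video_core hunks below are one mechanical change: calls to std::min, std::max, and std::numeric_limits<u64>::max are wrapped in parentheses so that the function-style min/max macros defined by the Windows SDK headers (present whenever windows.h is pulled in without NOMINMAX) cannot expand them. A minimal sketch of the clash and of why the parentheses help; the macro definition and the function below are illustrative stand-ins, not code from this repository:

    #include <algorithm>
    #include <cstdint>

    // Stand-in for the function-style macros that <windows.h> defines when NOMINMAX is not set.
    // (Defined after <algorithm> here only to keep the example self-contained and portable.)
    #define min(a, b) (((a) < (b)) ? (a) : (b))
    #define max(a, b) (((a) > (b)) ? (a) : (b))

    std::int64_t mem_threshold(std::int64_t device_local_memory, std::int64_t target_threshold) {
        // return std::min(device_local_memory, target_threshold);
        // ...would be rewritten by the macro into "std::(((...)))" and fail to compile, because
        // the token `min` is immediately followed by '('. Parenthesizing the name breaks that
        // adjacency, so no macro expansion happens and the real std::min overload is called.
        return (std::min)(device_local_memory, target_threshold);
    }

The same spelling works for any callable the SDK macros shadow, which is why the pattern repeats across the buffer cache and the memory trackers below.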
@@ -36,14 +36,14 @@ BufferCache<P>::BufferCache(Tegra::MaxwellDeviceMemoryManager& device_memory_, R
     const s64 device_local_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
     const s64 min_spacing_expected = device_local_memory - 1_GiB;
     const s64 min_spacing_critical = device_local_memory - 512_MiB;
-    const s64 mem_threshold = std::min(device_local_memory, TARGET_THRESHOLD);
+    const s64 mem_threshold = (std::min)(device_local_memory, TARGET_THRESHOLD);
     const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
     const s64 min_vacancy_critical = (2 * mem_threshold) / 10;
     minimum_memory = static_cast<u64>(
-        std::max(std::min(device_local_memory - min_vacancy_expected, min_spacing_expected),
+        (std::max)((std::min)(device_local_memory - min_vacancy_expected, min_spacing_expected),
                  DEFAULT_EXPECTED_MEMORY));
     critical_memory = static_cast<u64>(
-        std::max(std::min(device_local_memory - min_vacancy_critical, min_spacing_critical),
+        (std::max)((std::min)(device_local_memory - min_vacancy_critical, min_spacing_critical),
                  DEFAULT_CRITICAL_MEMORY));
 }

@@ -553,8 +553,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
     ForEachBufferInRange(device_addr, size, [&](BufferId buffer_id, Buffer& buffer) {
         const DAddr buffer_start = buffer.CpuAddr();
         const DAddr buffer_end = buffer_start + buffer.SizeBytes();
-        const DAddr new_start = std::max(buffer_start, device_addr);
-        const DAddr new_end = std::min(buffer_end, device_addr + size);
+        const DAddr new_start = (std::max)(buffer_start, device_addr);
+        const DAddr new_end = (std::min)(buffer_end, device_addr + size);
         memory_tracker.ForEachDownloadRange(
             new_start, new_end - new_start, false,
             [&](u64 device_addr_out, u64 range_size) {
@@ -574,7 +574,7 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
                 constexpr u64 align = 64ULL;
                 constexpr u64 mask = ~(align - 1ULL);
                 total_size_bytes += (new_size + align - 1) & mask;
-                largest_copy = std::max(largest_copy, new_size);
+                largest_copy = (std::max)(largest_copy, new_size);
             };

             gpu_modified_ranges.ForEachInRange(device_addr_out, range_size,
@@ -729,8 +729,8 @@ void BufferCache<P>::BindHostVertexBuffers() {
         }
         flags[Dirty::VertexBuffer0 + index] = false;

-        host_bindings.min_index = std::min(host_bindings.min_index, index);
-        host_bindings.max_index = std::max(host_bindings.max_index, index);
+        host_bindings.min_index = (std::min)(host_bindings.min_index, index);
+        host_bindings.max_index = (std::max)(host_bindings.max_index, index);
         any_valid = true;
     }

@@ -789,7 +789,7 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
                                                    bool needs_bind) {
     const Binding& binding = channel_state->uniform_buffers[stage][index];
     const DAddr device_addr = binding.device_addr;
-    const u32 size = std::min(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]);
+    const u32 size = (std::min)(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]);
     Buffer& buffer = slot_buffers[binding.buffer_id];
     TouchBuffer(buffer, binding.buffer_id);
     const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID &&
@@ -956,7 +956,7 @@ void BufferCache<P>::BindHostComputeUniformBuffers() {
         Buffer& buffer = slot_buffers[binding.buffer_id];
         TouchBuffer(buffer, binding.buffer_id);
         const u32 size =
-            std::min(binding.size, (*channel_state->compute_uniform_buffer_sizes)[index]);
+            (std::min)(binding.size, (*channel_state->compute_uniform_buffer_sizes)[index]);
         SynchronizeBuffer(buffer, binding.device_addr, size);

         const u32 offset = buffer.Offset(binding.device_addr);
@@ -1090,7 +1090,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
     const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
     const u32 draw_size =
         (index_buffer_ref.count + index_buffer_ref.first) * index_buffer_ref.FormatSizeInBytes();
-    const u32 size = std::min(address_size, draw_size);
+    const u32 size = (std::min)(address_size, draw_size);
     if (size == 0 || !device_addr) {
         channel_state->index_buffer = NULL_BINDING;
         return;
@@ -1459,7 +1459,7 @@ bool BufferCache<P>::SynchronizeBuffer(Buffer& buffer, DAddr device_addr, u32 si
             .size = range_size,
         });
         total_size_bytes += range_size;
-        largest_copy = std::max(largest_copy, range_size);
+        largest_copy = (std::max)(largest_copy, range_size);
     });
     if (total_size_bytes == 0) {
         return true;
@@ -1594,7 +1594,7 @@ void BufferCache<P>::DownloadBufferMemory(Buffer& buffer, DAddr device_addr, u64
         constexpr u64 align = 64ULL;
         constexpr u64 mask = ~(align - 1ULL);
         total_size_bytes += (new_size + align - 1) & mask;
-        largest_copy = std::max(largest_copy, new_size);
+        largest_copy = (std::max)(largest_copy, new_size);
     };

     gpu_modified_ranges.ForEachInRange(device_addr_out, range_size, add_download);
@@ -1715,7 +1715,7 @@ Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index,
         // cbufs, which do not store the sizes adjacent to the addresses, so use the fully
         // mapped buffer size for now.
         const u32 memory_layout_size = static_cast<u32>(gpu_memory->GetMemoryLayoutSize(gpu_addr));
-        return std::min(memory_layout_size, static_cast<u32>(8_MiB));
+        return (std::min)(memory_layout_size, static_cast<u32>(8_MiB));
     }();
     // Alignment only applies to the offset of the buffer
     const u32 alignment = runtime.GetStorageBufferAlignment();

(next changed file; name not shown in this view)
@@ -230,7 +230,7 @@ private:
     std::size_t remaining_size{size};
     std::size_t page_index{cpu_address >> HIGHER_PAGE_BITS};
     u64 page_offset{cpu_address & HIGHER_PAGE_MASK};
-    u64 begin = std::numeric_limits<u64>::max();
+    u64 begin = (std::numeric_limits<u64>::max)();
     u64 end = 0;
     while (remaining_size > 0) {
         const std::size_t copy_amount{
@@ -240,8 +240,8 @@ private:
         auto [new_begin, new_end] = func(manager, page_offset, copy_amount);
         if (new_begin != 0 || new_end != 0) {
             const u64 base_address = page_index << HIGHER_PAGE_BITS;
-            begin = std::min(new_begin + base_address, begin);
-            end = std::max(new_end + base_address, end);
+            begin = (std::min)(new_begin + base_address, begin);
+            end = (std::max)(new_end + base_address, end);
         }
     };
     if (manager) {

(next changed file; name not shown in this view)
@@ -181,7 +181,7 @@ public:

     static u64 ExtractBits(u64 word, size_t page_start, size_t page_end) {
         constexpr size_t number_bits = sizeof(u64) * 8;
-        const size_t limit_page_end = number_bits - std::min(page_end, number_bits);
+        const size_t limit_page_end = number_bits - (std::min)(page_end, number_bits);
         u64 bits = (word >> page_start) << page_start;
         bits = (bits << limit_page_end) >> limit_page_end;
         return bits;
@@ -206,11 +206,11 @@ public:
     auto [start_word, start_page] = GetWordPage(start);
     auto [end_word, end_page] = GetWordPage(end + BYTES_PER_PAGE - 1ULL);
     const size_t num_words = NumWords();
-    start_word = std::min(start_word, num_words);
-    end_word = std::min(end_word, num_words);
+    start_word = (std::min)(start_word, num_words);
+    end_word = (std::min)(end_word, num_words);
     const size_t diff = end_word - start_word;
     end_word += (end_page + PAGES_PER_WORD - 1ULL) / PAGES_PER_WORD;
-    end_word = std::min(end_word, num_words);
+    end_word = (std::min)(end_word, num_words);
     end_page += diff * PAGES_PER_WORD;
     constexpr u64 base_mask{~0ULL};
     for (size_t word_index = start_word; word_index < end_word; word_index++) {
@@ -382,7 +382,7 @@ public:
     const std::span<const u64> state_words = words.template Span<type>();
     [[maybe_unused]] const std::span<const u64> untracked_words =
         words.template Span<Type::Untracked>();
-    u64 begin = std::numeric_limits<u64>::max();
+    u64 begin = (std::numeric_limits<u64>::max)();
     u64 end = 0;
     IterateWords(offset, size, [&](size_t index, u64 mask) {
         if constexpr (type == Type::GPU) {
@@ -395,7 +395,7 @@ public:
         const u64 local_page_begin = std::countr_zero(word);
         const u64 local_page_end = PAGES_PER_WORD - std::countl_zero(word);
         const u64 page_index = index * PAGES_PER_WORD;
-        begin = std::min(begin, page_index + local_page_begin);
+        begin = (std::min)(begin, page_index + local_page_begin);
         end = page_index + local_page_end;
     });
     static constexpr std::pair<u64, u64> EMPTY{0, 0};
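The tracker hunks above give std::numeric_limits<u64>::max the same treatment, since the SDK's max macro mangles it in exactly the same way. A minimal sketch under the same stand-in macro assumption as before:

    #include <cstdint>
    #include <limits>

    #define max(a, b) (((a) > (b)) ? (a) : (b))  // stand-in for the windows.h macro, as above

    std::uint64_t initial_begin() {
        // std::numeric_limits<std::uint64_t>::max() would trip the macro (wrong argument count);
        // the extra parentheses stop the expansion and call the real static member function.
        return (std::numeric_limits<std::uint64_t>::max)();
    }

An alternative would be defining NOMINMAX project-wide before any windows.h include; the per-call-site parenthesization does not depend on every include path cooperating, though the commit message does not say which consideration drove the choice here.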