Compare commits

...

68 commits

Author SHA1 Message Date
CamilleLaVey
d06eb3f52f [vulkan] Changed info.color_output_type gate 2026-03-07 06:46:15 +00:00
CamilleLaVey
21c77d5dce [vulkan] Added conservative path for RoundingModeRTZ + instrumentation for shader use 2026-03-07 06:46:15 +00:00
CamilleLaVey
a80e0f10ba Just meow 2026-03-07 06:46:15 +00:00
CamilleLaVey
ccb518dc05 Changed logging context 2026-03-07 06:46:15 +00:00
CamilleLaVey
bb768ad570 fix building 2 2026-03-07 06:46:15 +00:00
CamilleLaVey
c028d925bc fix build 2026-03-07 06:46:15 +00:00
CamilleLaVey
3557ff28b7 [vulkan] Added no depth vs depth compare support 2026-03-07 06:46:15 +00:00
CamilleLaVey
108bb3d28b [debug] Added extra logging/ address for shader info -> FP32Mul Optimize Path 2026-03-07 06:46:15 +00:00
CamilleLaVey
5b12a7725d fix build 2026-03-07 06:46:15 +00:00
CamilleLaVey
15d575aa31 [test] Change forced order for CompareMask + forced refresh/ emit 2026-03-07 06:46:15 +00:00
CamilleLaVey
47fe86be7b [vulkan] Extended 3D image handling for subresource range calculations 2026-03-07 06:46:14 +00:00
CamilleLaVey
3db45f3c46 [vulkan] Implemented active color output tracking in runtime info and update fragment color handling 2026-03-07 06:46:14 +00:00
CamilleLaVey
4481391474 [vulkan] Maintenance9 removal 2026-03-07 06:46:14 +00:00
CamilleLaVey
dee102cf92 [debug] Added extra parameters for histogram track info -> shader info for RZ 2026-03-07 06:46:14 +00:00
CamilleLaVey
ad2f40b0e7 [debug] fix logging entries for histogram 2026-03-07 06:46:14 +00:00
CamilleLaVey
d10080b757 [test] Histogram debug - shader float control -> initial target: Adreno 2026-03-07 06:46:14 +00:00
CamilleLaVey
ce15cf7cd3 [vulkan] Adjusted image view usage flags to ensure compatibility with image format in TextureCache 2026-03-07 06:46:14 +00:00
CamilleLaVey
6b87b0052a [test] shader float control returned to Adreno 2026-03-07 06:46:14 +00:00
CamilleLaVey
9630da580d [vulkan] Removed counter enable for ZPassPixelCount64 in Clear method and added initial layout transition for images in RefreshContents 2026-03-07 06:46:14 +00:00
CamilleLaVey
12fdd88a58 smol fix for query enable 2026-03-07 06:46:14 +00:00
CamilleLaVey
646aea7fbf [vulkan] Removed unused helper in texture pass 2026-03-07 06:46:14 +00:00
CamilleLaVey
186c0b0cc7 [vulkan] Replaced old logic for DescriptorType for a numeric handling per type to avoid mismatches during format binding 2026-03-07 06:46:14 +00:00
CamilleLaVey
ee1ffbaf2e [vulkan] Adjustments to wrong access of image-memory barrier on depth fragments + blending extended enabling method 2026-03-07 06:46:14 +00:00
CamilleLaVey
c931de0570 [vulkan] Adjusted QueryReset's 2026-03-07 06:46:14 +00:00
CamilleLaVey
33f1fb1cf4 fix build 2026-03-07 06:46:14 +00:00
CamilleLaVey
aabc470314 First meow in honor of meowly 2026-03-07 06:46:14 +00:00
CamilleLaVey
d82a6a273d fix build 2026-03-07 06:46:14 +00:00
lizzie
4bb853d52a Merge fix 2026-03-07 06:46:14 +00:00
CamilleLaVey
43ebdb1ffc [vulkan] Adjusted DYNAMIC_STATES setting 2026-03-07 06:46:14 +00:00
CamilleLaVey
e310f0b151 [vulkan] Indirect draw for dstStageMask 2026-03-07 06:46:14 +00:00
CamilleLaVey
9b915c8659 [vulkan] Adjusting re-cast for EDS support when bind happens 2026-03-07 06:46:14 +00:00
CamilleLaVey
35ab33de6a Fix build 2026-03-07 06:46:14 +00:00
CamilleLaVey
5615ea9ced [vulkan] re-cast vkCmdSet for dynamic states during binding 2026-03-07 06:46:14 +00:00
CamilleLaVey
d273fc4ad6 [vulkan] Changed UpdateDynamicState order 2026-03-07 06:46:14 +00:00
CamilleLaVey
e3e880e879 [vulkan] Added flag to detect last mode from provokingVertex 2026-03-07 06:46:14 +00:00
CamilleLaVey
6b8115f27a fix meow 2026-03-07 06:46:14 +00:00
CamilleLaVey
a24e7e8143 [maxwell] Adding storage flags to some surface format 2026-03-07 06:46:14 +00:00
CamilleLaVey
88b9393b44 [vulkan] Adding guards per dynamic states setters 2026-03-07 06:46:14 +00:00
CamilleLaVey
2081d659d2 [vulkan] Dead code removal from VertexInputDynamicState 2026-03-07 06:46:14 +00:00
CamilleLaVey
9eacaf2444 [vulkan] Adjustment for Viewport and Scissor counts within EDS 2026-03-07 06:46:01 +00:00
CamilleLaVey
c9eb764d2a [vulkan] Fixing some incongruences with pipeline keys and dynamic state flags 2026-03-07 06:46:01 +00:00
CamilleLaVey
72ced6b947 [vulkan] Set always vertex strides 2026-03-07 06:46:01 +00:00
CamilleLaVey
ffae2350ca [vulkan] fix custom border color query struct 2026-03-07 06:46:01 +00:00
CamilleLaVey
646542a397 [vulkan] Query custom border color properties based on device report 2026-03-07 06:46:01 +00:00
CamilleLaVey
1ae76d44c1 [vulkan, rasterizer] Filling missing byte count handling when TFB is not available 2026-03-07 06:46:01 +00:00
CamilleLaVey
24d07ab28c fix build 2026-03-07 06:46:01 +00:00
CamilleLaVey
651a999017 [vulkan] Unique representation logic fix. 2026-03-07 06:46:01 +00:00
CamilleLaVey
5c0e12fb0e [vulkan] Rework line rasterization handle 2026-03-07 06:46:01 +00:00
CamilleLaVey
712c505cd1 [vulkan] Extending conservative rasterization detection and handling 2026-03-07 06:46:01 +00:00
CamilleLaVey
051522b54e [vulkan] Fix conditional rendering enable 2026-03-07 06:46:01 +00:00
CamilleLaVey
a1fdbef129 [vulkan] removing dead code for driverID detection under EDS handling/ban 2026-03-07 06:46:01 +00:00
CamilleLaVey
ecd5c751f8 fix license headers 2026-03-07 06:46:01 +00:00
CamilleLaVey
69678d02b6 [android] Removing unneeded setting 2026-03-07 06:46:01 +00:00
CamilleLaVey
8d031532d8 [vulkan] Dead code removal 2026-03-07 06:46:01 +00:00
CamilleLaVey
c8e4818b01 [vulkan] Fixing inconsistencies within VK_EXT_extended_dynamic_state1 handling 2026-03-07 06:46:01 +00:00
CamilleLaVey
2bac9cec32 [vulkan] Implementing layout use for indexing descriptors 2026-03-07 06:46:01 +00:00
CamilleLaVey
6190fcaaef [vulkan] Changing ProvokingVertex enabling nature 2026-03-07 06:46:01 +00:00
CamilleLaVey
3272e1fcb5 [vulkan] adjusting BindVertexBuffer2EXT wrong calling in pipeline 2026-03-07 06:46:00 +00:00
CamilleLaVey
8faeffdc7e [vulkan] removal of EDS3 and VIDS 2026-03-07 06:44:48 +00:00
xbzk
ddac8c8eb5
[vk] fix crash introduced in 9a07bd0570 (#3685)
Some checks are pending
tx-src / sources (push) Waiting to run
Check Strings / check-strings (push) Waiting to run
Fix for current crash on master.
Just reverted only the necessary stuff so that PresentManager can hold a reference to khr and resist death upon application hold/restore.
@Lizzie shall judge.

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3685
Co-authored-by: xbzk <xbzk@eden-emu.dev>
Co-committed-by: xbzk <xbzk@eden-emu.dev>
2026-03-06 19:52:17 +01:00
lizzie
c062931c9b
[qt] add translation table entry for debug_knobs,serial_battery and serial_unit (#3682)
trivial qt change

Signed-off-by: lizzie <lizzie@eden-emu.dev>

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3682
Reviewed-by: DraVee <chimera@dravee.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
2026-03-06 16:38:39 +01:00
crueter
e4122dae1d
[desktop] addons: open mod folder in rc menu (#3662)
also fixed the multiselection being absolutely horrendous

Signed-off-by: crueter <crueter@eden-emu.dev>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3662
2026-03-06 16:38:21 +01:00
lizzie
b75e81af5e
[video_core/engines] implement stub NV01 timer, inline other channel engines (#3640)
Signed-off-by: lizzie <lizzie@eden-emu.dev>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3640
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-by: DraVee <chimera@dravee.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
2026-03-06 15:05:39 +01:00
lizzie
2ed1328c93
[vk] use static_vector instead of small_vector for TFB and other bindings (#3641)
MK8D is a big offender, taking up lots of time memcpy'ing and memmov'ing small_vector<> AND to add salt to the wound it doesn't even do heap allocations (no game does I think) - so basically useless waste of compute time in hot path for NO reason :^)

Signed-off-by: lizzie <lizzie@eden-emu.dev>

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3641
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-by: DraVee <chimera@dravee.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
2026-03-06 15:05:05 +01:00
lizzie
c70b857c4f
[video_core/engines] Macro HLE inline (#3653)
Should slightly boost perf on android, Desktop is mainly unaffected (for now)

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3653
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-by: DraVee <chimera@dravee.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
2026-03-06 15:04:38 +01:00
MaranBr
23566a1f7d
[prepo] Add support for missing PlayReport commands (#3674)
This fixes:

`[ 433.095195] Debug <Critical> core\hle\service\service.cpp:operator ():69: Assertion Failed!
Unknown / unimplemented function '10107': port='prepo:u' cmd_buf={[0]=0x110006, [1]=0x80000014, [2]=0x1, [3]=0x0, [4]=0x0, [5]=0x191080, [6]=0x5A7350F8, [7]=0x112, [8]=0x5A735158}`

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3674
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-by: DraVee <chimera@dravee.dev>
Reviewed-by: Maufeat <sahyno1996@gmail.com>
Co-authored-by: MaranBr <maranbr@outlook.com>
Co-committed-by: MaranBr <maranbr@outlook.com>
2026-03-06 15:02:59 +01:00
xbzk
529b069499
[android,ui] fixed top disalignment between buttons of each column in settings fragment (#3675)
this silly little thing tickles obsessive compulsive disturbed fellas a lot hu3
was shipped along PR 3660, which was rediscussed for other reason, hence this tiny lonely PR.

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/3675
Reviewed-by: DraVee <chimera@dravee.dev>
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Co-authored-by: xbzk <xbzk@eden-emu.dev>
Co-committed-by: xbzk <xbzk@eden-emu.dev>
2026-03-05 13:58:46 +01:00
lizzie
9a07bd0570
[vk] unify VkSurfaceKHR with Android and the rest of platforms; remove technically incorrect nullptr() ctor for handles (#2971)
Removes some odd #ifdef-ing that just can use a shrimple opaque type.

Also removes nullptr() ctor'ing for vulkan handles and such; it's not incorrect per se like how `void *p = 0;` isn't incorrect, just that, y'know, any static analyzer will go "woah". Also there isn't any guarantee that handles `sizeof(Handle) == sizeof(void*)` so may as well :)

Signed-off-by: lizzie lizzie@eden-emu.dev

Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/2971
Reviewed-by: CamilleLaVey <camillelavey99@gmail.com>
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Co-authored-by: lizzie <lizzie@eden-emu.dev>
Co-committed-by: lizzie <lizzie@eden-emu.dev>
2026-03-05 07:32:18 +01:00
86 changed files with 2388 additions and 2245 deletions

View file

@ -1,10 +1,11 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
package org.yuzu.yuzu_emu.features.fetcher
import android.graphics.Rect
import android.view.View
import androidx.recyclerview.widget.GridLayoutManager
import androidx.recyclerview.widget.RecyclerView
class SpacingItemDecoration(private val spacing: Int) : RecyclerView.ItemDecoration() {
@ -15,8 +16,20 @@ class SpacingItemDecoration(private val spacing: Int) : RecyclerView.ItemDecorat
state: RecyclerView.State
) {
outRect.bottom = spacing
if (parent.getChildAdapterPosition(view) == 0) {
val position = parent.getChildAdapterPosition(view)
if (position == RecyclerView.NO_POSITION) return
if (position == 0) {
outRect.top = spacing
return
}
// If the item is in the first row, but NOT in first column add top spacing as well
val layoutManager = parent.layoutManager
if (layoutManager is GridLayoutManager && layoutManager.spanSizeLookup.getSpanGroupIndex(position, layoutManager.spanCount) == 0) {
outRect.top = spacing
return
}
}
}

View file

@ -30,8 +30,6 @@ enum class BooleanSetting(override val key: String) : AbstractBooleanSetting {
BUFFER_REORDER_DISABLE("disable_buffer_reorder"),
RENDERER_DEBUG("debug"),
RENDERER_PATCH_OLD_QCOM_DRIVERS("patch_old_qcom_drivers"),
RENDERER_VERTEX_INPUT_DYNAMIC_STATE("vertex_input_dynamic_state"),
RENDERER_PROVOKING_VERTEX("provoking_vertex"),
RENDERER_DESCRIPTOR_INDEXING("descriptor_indexing"),
RENDERER_SAMPLE_SHADING("sample_shading"),
GPU_UNSWIZZLE_ENABLED("gpu_unswizzle_enabled"),

View file

@ -141,20 +141,6 @@ abstract class SettingsItem(
valuesId = R.array.dynaStateValues
)
)
put(
SwitchSetting(
BooleanSetting.RENDERER_PROVOKING_VERTEX,
titleId = R.string.provoking_vertex,
descriptionId = R.string.provoking_vertex_description
)
)
put(
SwitchSetting(
BooleanSetting.RENDERER_VERTEX_INPUT_DYNAMIC_STATE,
titleId = R.string.vertex_input_dynamic_state,
descriptionId = R.string.vertex_input_dynamic_state_description
)
)
put(
SwitchSetting(
BooleanSetting.RENDERER_DESCRIPTOR_INDEXING,
@ -349,15 +335,6 @@ abstract class SettingsItem(
valuesId = R.array.astcDecodingMethodValues
)
)
put(
SingleChoiceSetting(
IntSetting.RENDERER_ASTC_RECOMPRESSION,
titleId = R.string.astc_recompression,
descriptionId = R.string.astc_recompression_description,
choicesId = R.array.astcRecompressionMethodNames,
valuesId = R.array.astcRecompressionMethodValues
)
)
put(
SingleChoiceSetting(
IntSetting.RENDERER_VRAM_USAGE_MODE,

View file

@ -271,7 +271,6 @@ class SettingsFragmentPresenter(
add(IntSetting.MAX_ANISOTROPY.key)
add(IntSetting.RENDERER_VRAM_USAGE_MODE.key)
add(IntSetting.RENDERER_ASTC_DECODE_METHOD.key)
add(IntSetting.RENDERER_ASTC_RECOMPRESSION.key)
add(BooleanSetting.SYNC_MEMORY_OPERATIONS.key)
add(BooleanSetting.RENDERER_USE_DISK_SHADER_CACHE.key)
@ -291,8 +290,6 @@ class SettingsFragmentPresenter(
add(HeaderSetting(R.string.extensions))
add(IntSetting.RENDERER_DYNA_STATE.key)
add(BooleanSetting.RENDERER_VERTEX_INPUT_DYNAMIC_STATE.key)
add(BooleanSetting.RENDERER_PROVOKING_VERTEX.key)
add(BooleanSetting.RENDERER_DESCRIPTOR_INDEXING.key)
add(IntSetting.RENDERER_SAMPLE_SHADING.key)

View file

@ -506,8 +506,6 @@
<string name="dyna_state">الحالة الديناميكية الموسعة</string>
<string name="dyna_state_description">يتحكم هذا الخيار في عدد الميزات التي يمكن استخدامها في حالة الديناميكية الموسعة. تسمح الأرقام الأعلى بمزيد من الميزات ويمكن أن تزيد من الأداء، ولكنها قد تسبب مشاكل مع بعض برامج التشغيل والأجهزة.</string>
<string name="disabled">معطل</string>
<string name="vertex_input_dynamic_state">حالة ديناميكية لإدخال الرأس</string>
<string name="vertex_input_dynamic_state_description">يتيح ميزة الحالة الديناميكية لإدخال الرأس لتحسين الجودة والأداء.</string>
<string name="provoking_vertex">الرأس المثير</string>
<string name="provoking_vertex_description">يحسن الإضاءة ومعالجة الرؤوس في بعض الألعاب. مدعوم فقط على وحدات معالجة الرسومات Vulkan 1.0+.</string>
<string name="descriptor_indexing">فهرسة الوصف</string>

View file

@ -488,8 +488,6 @@
<string name="dyna_state">Úroveň EDS</string>
<string name="dyna_state_description">Určuje počet funkcí využívaných v rámci rozšířeného dynamického stavu API Vulkan (Extended Dynamic State). Vyšší hodnoty umožňují využít více funkcí a mohou zvýšit výkon, ale u některých ovladačů a výrobců grafických karet mohou způsobovat problémy s kompatibilitou.</string>
<string name="disabled">Vypnuto</string>
<string name="vertex_input_dynamic_state">Dynamický stav vstupu vrcholů (Vertex Input)</string>
<string name="vertex_input_dynamic_state_description">Aktivuje funkci dynamického stavu vstupu vrcholů (Vertex Input Dynamic State) pro lepší kvalitu a výkon.</string>
<string name="provoking_vertex">Určující vrchol</string>
<string name="provoking_vertex_description">Zlepšuje osvětlení a zpracování vrcholů v některých hrách. Podporováno pouze na GPU s API Vulkan 1.0+.</string>
<string name="descriptor_indexing">Indexování deskriptorů</string>

View file

@ -486,8 +486,6 @@ Wird der Handheld-Modus verwendet, verringert es die Auflösung und erhöht die
<string name="dyna_state">Erweiterter dynamischer Status</string>
<string name="dyna_state_description">Steuert die Anzahl der Funktionen, die im \"Vertex Input Dynamic State\" werden können. Höhere Werte ermöglichen mehr Funktionen und können die Leistung steigern, können aber bei einigen Treibern und Anbietern zu Problemen führen.</string>
<string name="disabled">Deaktiviert</string>
<string name="vertex_input_dynamic_state">Vertex Input Dynamic State</string>
<string name="vertex_input_dynamic_state_description">Aktiviert die Funktion \"Vertex Input Dynamic State\" für bessere Qualität und Leistung.</string>
<string name="provoking_vertex">Provokanter Vertex</string>
<string name="provoking_vertex_description">Verbessert die Beleuchtung und die Vertex-Verarbeitung in einigen Spielen. Wird nur von GPUs mit Vulkan 1.0+ unterstützt.</string>
<string name="descriptor_indexing">Deskriptor-Indizierung</string>

View file

@ -436,8 +436,6 @@
<string name="renderer_asynchronous_shaders_description">Compile les shaders de manière asynchrone. Cela peut réduire les saccades mais peut aussi provoquer des problèmes graphiques.</string>
<string name="dyna_state">État dynamique étendu</string>
<string name="disabled">Désactivé</string>
<string name="vertex_input_dynamic_state">État dynamique d\'entrée de sommet</string>
<string name="vertex_input_dynamic_state_description">Active la fonctionnalité d\'état dynamique des entrées de sommets pour une meilleure qualité et de meilleures performances.</string>
<string name="provoking_vertex">Provoque des Vertex</string>
<string name="provoking_vertex_description">Améliore l`éclairage et la gestion des vertex dans certains jeux. Pris en charge uniquement par les GPU Vulkan 1.0+.</string>
<string name="descriptor_indexing">Indexation des descripteurs</string>

View file

@ -488,8 +488,6 @@
<string name="dyna_state">Rozszerzony stan dynamiczny</string>
<string name="dyna_state_description">Kontroluje liczbę funkcji, które mogą być używane w Extended Dynamic State. Wyższe wartości pozwalają na użycie większej liczby funkcji i mogą zwiększyć wydajność, ale mogą powodować problemy z niektórymi sterownikami i u niektórych producentów.</string>
<string name="disabled">Wyłączone</string>
<string name="vertex_input_dynamic_state">Dynamiczny stan wejścia wierzchołków</string>
<string name="vertex_input_dynamic_state_description">Włącza funkcję dynamicznego stanu wejścia wierzchołków, poprawiając jakość i wydajność.</string>
<string name="provoking_vertex">Wierzchołek prowokujący</string>
<string name="provoking_vertex_description">Poprawia oświetlenie i obsługę wierzchołków w niektórych grach. Obsługiwane tylko przez GPU Vulkan 1.0+.</string>
<string name="descriptor_indexing">Indeksowanie deskryptorów</string>

View file

@ -471,8 +471,6 @@
<string name="renderer_asynchronous_shaders_description">Compila shaders de forma assíncrona. Isso pode reduzir engasgos, mas também pode introduzir falhas gráficas.</string>
<string name="dyna_state">Extended Dynamic State</string>
<string name="disabled">Desativado</string>
<string name="vertex_input_dynamic_state">Vertex Input Dynamic State</string>
<string name="vertex_input_dynamic_state_description">Ativa o recurso de vertex input dynamic state para melhor qualidade e desempenho.</string>
<string name="provoking_vertex">Provoking Vertex</string>
<string name="provoking_vertex_description">Vértice Provocante: Melhora a iluminação e o processamento de vértices em certos jogos. Suportado apenas em GPUs com Vulkan 1.0 ou superior.</string>
<string name="descriptor_indexing">Descriptor Indexing</string>

View file

@ -498,8 +498,6 @@
<string name="dyna_state">Расширенное динамическое состояние</string>
<string name="dyna_state_description">Управляет количеством функций, доступных в режиме «Расширенное динамическое состояние». Большее число позволяет задействовать больше функций и может повысить производительность, но способно вызывать проблемы с некоторыми драйверами и графикой.</string>
<string name="disabled">Отключено</string>
<string name="vertex_input_dynamic_state">Динамическое состояние ввода вершин</string>
<string name="vertex_input_dynamic_state_description">Включает функцию динамического состояния ввода вершин для повышения качества и производительности</string>
<string name="provoking_vertex">Определяющая вершина</string>
<string name="provoking_vertex_description">Улучшает освещение и обработку вершин в некоторых играх. Поддерживается только ГПУ с Vulkan 1.0+.</string>
<string name="descriptor_indexing">Индексирование дескрипторов</string>

View file

@ -502,8 +502,6 @@
<string name="dyna_state">Розширений динамічний стан</string>
<string name="dyna_state_description">Керує кількістю функцій, які можна використовувати в «Розширеному динамічному стані». Вище число дозволяє більше функцій і може покращити продуктивність, але може спричинити проблеми з деякими драйверами й виробниками.</string>
<string name="disabled">Вимкнено</string>
<string name="vertex_input_dynamic_state">Динамічний стан введення вершин</string>
<string name="vertex_input_dynamic_state_description">Вмикає можливість динамічного стану введення вершин для кращих якості й продуктивності.</string>
<string name="provoking_vertex">Провокативна вершина</string>
<string name="provoking_vertex_description">Покращує освітлення та взаємодію з вершинами у деяких іграх. Лише для ГП з підтримкою Vulkan 1.0+.</string>
<string name="descriptor_indexing">Індексація дескрипторів</string>

View file

@ -496,8 +496,6 @@
<string name="dyna_state">扩展动态状态</string>
<string name="dyna_state_description">控制在扩展动态状态中可使用的函数数量。更高的数值允许启用更多功能,并可能提升性能,但同时也可能导致额外的图形问题。</string>
<string name="disabled">已禁用</string>
<string name="vertex_input_dynamic_state">顶点输入动态状态</string>
<string name="vertex_input_dynamic_state_description">开启顶点输入动态状态功能来获得更好的质量和性能。</string>
<string name="provoking_vertex">引发顶点</string>
<string name="provoking_vertex_description">改善某些游戏中的光照和顶点处理。仅支持Vulkan 1.0+ GPU。</string>
<string name="descriptor_indexing">描述符索引</string>

View file

@ -467,8 +467,6 @@
<string name="renderer_asynchronous_shaders_description">非同步編譯著色器。這可能會減少卡頓,但也可能導致圖形錯誤。</string>
<string name="dyna_state">擴展動態狀態</string>
<string name="disabled">已停用</string>
<string name="vertex_input_dynamic_state">頂點輸入動態狀態</string>
<string name="vertex_input_dynamic_state_description">啟用頂點輸入動態狀態以取得更佳的品質及性能</string>
<string name="provoking_vertex">引發頂點</string>
<string name="provoking_vertex_description">改善某些遊戲中的光照和頂點處理。僅支援Vulkan 1.0+ GPU。</string>
<string name="descriptor_indexing">描述符索引</string>

View file

@ -632,14 +632,12 @@
<item>@string/disabled</item>
<item>ExtendedDynamicState 1</item>
<item>ExtendedDynamicState 2</item>
<item>ExtendedDynamicState 3</item>
</string-array>
<integer-array name="dynaStateValues">
<item>0</item>
<item>1</item>
<item>2</item>
<item>3</item>
</integer-array>
<string-array name="installKeysResults">

View file

@ -531,8 +531,6 @@
<string name="dyna_state">Extended Dynamic State</string>
<string name="dyna_state_description">Controls the number of features that can be used in Extended Dynamic State. Higher numbers allow for more features and can increase performance, but may cause issues with some drivers and vendors.</string>
<string name="disabled">Disabled</string>
<string name="vertex_input_dynamic_state">Vertex Input Dynamic State</string>
<string name="vertex_input_dynamic_state_description">Enables vertex input dynamic state feature for better quality and performance.</string>
<string name="provoking_vertex">Provoking Vertex</string>
<string name="provoking_vertex_description">Improves lighting and vertex handling in certain games. Only supported on Vulkan 1.0+ GPUs.</string>
<string name="descriptor_indexing">Descriptor Indexing</string>

View file

@ -453,7 +453,7 @@ struct Values {
Category::RendererAdvanced};
SwitchableSetting<AstcDecodeMode, true> accelerate_astc{linkage,
#ifdef ANDROID
AstcDecodeMode::Cpu,
AstcDecodeMode::Gpu,
#else
AstcDecodeMode::Gpu,
#endif
@ -586,7 +586,7 @@ struct Values {
SwitchableSetting<ExtendedDynamicState> dyna_state{linkage,
#if defined (ANDROID) || defined (__APPLE__)
ExtendedDynamicState::Disabled,
ExtendedDynamicState::EDS1,
#else
ExtendedDynamicState::EDS2,
#endif
@ -601,14 +601,6 @@ struct Values {
Category::RendererExtensions,
Specialization::Scalar};
SwitchableSetting<bool> vertex_input_dynamic_state{linkage,
#if defined (ANDROID)
false,
#else
true,
#endif
"vertex_input_dynamic_state", Category::RendererExtensions};
SwitchableSetting<bool> provoking_vertex{linkage, false, "provoking_vertex", Category::RendererExtensions};
SwitchableSetting<bool> descriptor_indexing{linkage, false, "descriptor_indexing", Category::RendererExtensions};
Setting<bool> renderer_debug{linkage, false, "debug", Category::RendererDebug};

View file

@ -154,7 +154,7 @@ ENUM(GpuUnswizzleSize, VerySmall, Small, Normal, Large, VeryLarge)
ENUM(GpuUnswizzle, VeryLow, Low, Normal, Medium, High)
ENUM(GpuUnswizzleChunk, VeryLow, Low, Normal, Medium, High)
ENUM(TemperatureUnits, Celsius, Fahrenheit)
ENUM(ExtendedDynamicState, Disabled, EDS1, EDS2, EDS3);
ENUM(ExtendedDynamicState, Disabled, EDS1, EDS2);
ENUM(GpuLogLevel, Off, Errors, Standard, Verbose, All)
ENUM(GameListMode, TreeView, GridView);
ENUM(SpeedMode, Standard, Turbo, Slow);

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
@ -28,8 +28,10 @@ public:
{10101, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::Old>, "SaveReportWithUserOld"},
{10102, &PlayReport::SaveReport<Core::Reporter::PlayReportType::Old2>, "SaveReportOld2"},
{10103, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::Old2>, "SaveReportWithUserOld2"},
{10104, &PlayReport::SaveReport<Core::Reporter::PlayReportType::New>, "SaveReport"},
{10105, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::New>, "SaveReportWithUser"},
{10104, &PlayReport::SaveReport<Core::Reporter::PlayReportType::Old3>, "SaveReportOld3"},
{10105, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::Old3>, "SaveReportWithUserOld3"},
{10106, &PlayReport::SaveReport<Core::Reporter::PlayReportType::New>, "SaveReport"},
{10107, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::New>, "SaveReportWithUser"},
{10200, &PlayReport::RequestImmediateTransmission, "RequestImmediateTransmission"},
{10300, &PlayReport::GetTransmissionStatus, "GetTransmissionStatus"},
{10400, &PlayReport::GetSystemSessionId, "GetSystemSessionId"},

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -53,6 +56,7 @@ public:
enum class PlayReportType {
Old,
Old2,
Old3,
New,
System,
};

View file

@ -368,17 +368,6 @@ std::unique_ptr<TranslationMap> InitializeTranslations(QObject* parent)
"Higher states allow for more features and can increase performance, but may cause "
"additional graphical issues."));
INSERT(Settings,
vertex_input_dynamic_state,
tr("Vertex Input Dynamic State"),
tr("Enables vertex input dynamic state feature for better quality and performance."));
INSERT(Settings,
provoking_vertex,
tr("Provoking Vertex"),
tr("Improves lighting and vertex handling in some games.\n"
"Only Vulkan 1.0+ devices support this extension."));
INSERT(Settings,
descriptor_indexing,
tr("Descriptor Indexing"),
@ -425,6 +414,9 @@ std::unique_ptr<TranslationMap> InitializeTranslations(QObject* parent)
"their resolution, details and supported controllers and depending on this setting.\n"
"Setting to Handheld can help improve performance for low end systems."));
INSERT(Settings, current_user, QString(), QString());
INSERT(Settings, serial_unit, tr("Unit Serial"), QString());
INSERT(Settings, serial_battery, tr("Battery Serial"), QString());
INSERT(Settings, debug_knobs, tr("Debug knobs"), QString());
// Controls
@ -796,7 +788,6 @@ std::unique_ptr<ComboboxTranslationMap> ComboboxEnumeration(QObject* parent)
PAIR(ExtendedDynamicState, Disabled, tr("Disabled")),
PAIR(ExtendedDynamicState, EDS1, tr("ExtendedDynamicState 1")),
PAIR(ExtendedDynamicState, EDS2, tr("ExtendedDynamicState 2")),
PAIR(ExtendedDynamicState, EDS3, tr("ExtendedDynamicState 3")),
}});
translations->insert({Settings::EnumMetadata<Settings::GameListMode>::Index(),

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
@ -11,15 +11,159 @@
#include <vector>
#include <spirv-tools/optimizer.hpp>
#include "common/logging/log.h"
#include "common/settings.h"
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/program.h"
namespace Shader::Backend::SPIRV {
namespace {
// Short, human-readable label for a shader pipeline stage, used by the
// SPV_RZ debug log lines in this file. Any value outside the enumerated
// stages maps to "Unknown".
[[nodiscard]] constexpr std::string_view StageName(Stage stage) noexcept {
    switch (stage) {
    case Stage::VertexA:
        return "VertexA";
    case Stage::VertexB:
        return "VertexB";
    case Stage::TessellationControl:
        return "TessellationControl";
    case Stage::TessellationEval:
        return "TessellationEval";
    case Stage::Geometry:
        return "Geometry";
    case Stage::Fragment:
        return "Fragment";
    case Stage::Compute:
        return "Compute";
    }
    // Fall-through for out-of-range enum values (e.g. corrupted input);
    // kept outside the switch so all enumerators are still covered above.
    return "Unknown";
}
// Renders the pair of denormal-handling flags as a compact debug label:
// both set -> "Flush+Preserve", exactly one set -> "Flush"/"Preserve",
// neither -> "None".
[[nodiscard]] constexpr std::string_view DenormModeName(bool flush, bool preserve) noexcept {
    if (flush) {
        return preserve ? "Flush+Preserve" : "Flush";
    }
    return preserve ? "Preserve" : "None";
}
// Returns true for IR opcodes whose FP32 result (or FP32 operand handling)
// is sensitive to the active floating-point rounding mode: 32-bit arithmetic,
// explicit rounding/floor/ceil/trunc, ordered/unordered comparisons, and
// conversions out of FP32. Used by CollectFp32RoundingUsage below to decide
// whether a shader's RZ (round-toward-zero) usage matters.
[[nodiscard]] constexpr bool IsFp32RoundingRelevantOpcode(IR::Opcode opcode) noexcept {
switch (opcode) {
// 32-bit arithmetic whose result depends on rounding.
case IR::Opcode::FPAdd32:
case IR::Opcode::FPFma32:
case IR::Opcode::FPMul32:
// Explicit rounding operations.
case IR::Opcode::FPRoundEven32:
case IR::Opcode::FPFloor32:
case IR::Opcode::FPCeil32:
case IR::Opcode::FPTrunc32:
// Ordered/unordered FP32 comparisons.
case IR::Opcode::FPOrdEqual32:
case IR::Opcode::FPUnordEqual32:
case IR::Opcode::FPOrdNotEqual32:
case IR::Opcode::FPUnordNotEqual32:
case IR::Opcode::FPOrdLessThan32:
case IR::Opcode::FPUnordLessThan32:
case IR::Opcode::FPOrdGreaterThan32:
case IR::Opcode::FPUnordGreaterThan32:
case IR::Opcode::FPOrdLessThanEqual32:
case IR::Opcode::FPUnordLessThanEqual32:
case IR::Opcode::FPOrdGreaterThanEqual32:
case IR::Opcode::FPUnordGreaterThanEqual32:
// Narrowing/widening conversions from FP32 round the mantissa.
case IR::Opcode::ConvertF16F32:
case IR::Opcode::ConvertF64F32:
return true;
default:
return false;
}
}
// Aggregate statistics of FP32 rounding-mode usage across one shader program,
// produced by CollectFp32RoundingUsage.
struct Fp32RoundingUsage {
// Number of rounding-relevant instructions that request RZ (round toward zero).
u32 rz_count{};
// True if any rounding-relevant instruction requests RN/RM/RP, i.e. a mode
// that conflicts with applying a module-wide RTZ execution mode.
bool has_conflicting_rounding{};
};
// Scans every instruction of the program (in post order) and tallies how the
// rounding-relevant FP32 opcodes use rounding modes: RZ occurrences are
// counted, while any RN/RM/RP occurrence marks the program as having a
// rounding mode that conflicts with a module-wide RTZ execution mode.
// FpRounding::DontCare affects neither counter.
Fp32RoundingUsage CollectFp32RoundingUsage(const IR::Program& program) {
    Fp32RoundingUsage result{};
    for (const IR::Block* const blk : program.post_order_blocks) {
        for (const IR::Inst& inst : blk->Instructions()) {
            if (!IsFp32RoundingRelevantOpcode(inst.GetOpcode())) {
                continue;
            }
            const auto rounding = inst.Flags<IR::FpControl>().rounding;
            if (rounding == IR::FpRounding::RZ) {
                ++result.rz_count;
            } else if (rounding == IR::FpRounding::RN || rounding == IR::FpRounding::RM ||
                       rounding == IR::FpRounding::RP) {
                result.has_conflicting_rounding = true;
            }
        }
    }
    return result;
}
// Emits a one-line debug summary of the program's RZ rounding usage together
// with the device float-control capabilities that decide how it will be
// emitted. Only active when the renderer debug setting is enabled, and only
// for programs that actually contain RZ-rounded FP32 instructions.
void LogRzBackendSummary(const Profile& profile, const IR::Program& program, bool optimize) {
if (!Settings::values.renderer_debug) {
return;
}
const Fp32RoundingUsage usage{CollectFp32RoundingUsage(program)};
// Nothing to report for shaders that never request RZ rounding.
if (usage.rz_count == 0) {
return;
}
LOG_INFO(Shader_SPIRV,
"SPV_RZ {} start={:#010x} optimize={} support_float_controls={} separate_denorm_behavior={} separate_rounding_mode={} support_fp32_rounding_rtz={} broken_fp16_float_controls={} fp16_denorm={} fp32_denorm={} signed_nan16={} signed_nan32={} signed_nan64={} rz_inst_count={} mixed_fp32_rounding={}",
StageName(program.stage), program.start_address, optimize,
profile.support_float_controls, profile.support_separate_denorm_behavior,
profile.support_separate_rounding_mode, profile.support_fp32_rounding_rtz,
profile.has_broken_fp16_float_controls,
DenormModeName(program.info.uses_fp16_denorms_flush,
program.info.uses_fp16_denorms_preserve),
DenormModeName(program.info.uses_fp32_denorms_flush,
program.info.uses_fp32_denorms_preserve),
profile.support_fp16_signed_zero_nan_preserve,
profile.support_fp32_signed_zero_nan_preserve,
profile.support_fp64_signed_zero_nan_preserve, usage.rz_count,
usage.has_conflicting_rounding);
}
// Conservatively requests the SPIR-V RoundingModeRTZ execution mode (32-bit
// width) for the entry point when the shader uses RZ rounding uniformly.
// Bails out — leaving the implementation-default rounding mode — when the
// shader has no RZ instructions, when it mixes RZ with other explicit
// rounding modes (RTZ would then be wrong for those ops), or when the host
// profile does not support FP32 RTZ. Skips are logged in debug builds.
void SetupRoundingControl(const Profile& profile, const IR::Program& program, EmitContext& ctx,
Id main_func) {
const Fp32RoundingUsage usage{CollectFp32RoundingUsage(program)};
// No RZ usage at all: nothing to set up.
if (usage.rz_count == 0) {
return;
}
// Mixed explicit rounding modes: applying RTZ globally would be incorrect.
if (usage.has_conflicting_rounding) {
if (Settings::values.renderer_debug) {
LOG_INFO(Shader_SPIRV,
"SPV_RZ {} start={:#010x} skipping_fp32_rtz_execution_mode reason=mixed_rounding",
StageName(program.stage), program.start_address);
}
return;
}
// Host cannot honour FP32 RTZ via float controls.
if (!profile.support_fp32_rounding_rtz) {
if (Settings::values.renderer_debug) {
LOG_INFO(Shader_SPIRV,
"SPV_RZ {} start={:#010x} skipping_fp32_rtz_execution_mode reason=unsupported_fp32_rtz",
StageName(program.stage), program.start_address);
}
return;
}
// Safe to apply: declare the capability and the 32-bit RTZ execution mode.
ctx.AddCapability(spv::Capability::RoundingModeRTZ);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::RoundingModeRTZ, 32U);
}
// Primary template for callable-type traits; only specializations (defined
// outside this excerpt) provide members.
template <class Func>
struct FuncTraits {};
// Per-thread spvtools optimizer instance. NOTE(review): creation/reset sites
// are outside this excerpt — presumably cached per thread to avoid rebuilding
// optimizer state for every shader; confirm at the point of use.
thread_local std::unique_ptr<spvtools::Optimizer> thread_optimizer;
@ -503,12 +647,14 @@ void PatchPhiNodes(IR::Program& program, EmitContext& ctx) {
std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program, Bindings& bindings, bool optimize) {
LogRzBackendSummary(profile, program, optimize);
EmitContext ctx{profile, runtime_info, program, bindings};
const Id main{DefineMain(ctx, program)};
DefineEntryPoint(program, ctx, main);
if (profile.support_float_controls) {
ctx.AddExtension("SPV_KHR_float_controls");
SetupDenormControl(profile, program, ctx, main);
SetupRoundingControl(profile, program, ctx, main);
SetupSignedNanCapabilities(profile, program, ctx, main);
}
SetupCapabilities(profile, program.info, ctx);
@ -516,6 +662,12 @@ std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_in
PatchPhiNodes(program, ctx);
if (!optimize) {
if (Settings::values.renderer_debug && ctx.log_rz_fp_controls) {
const std::vector<u32> spirv{ctx.Assemble()};
LOG_INFO(Shader_SPIRV, "SPV_RZ {} start={:#010x} assembled_words={} optimized_words={} validator_run=false",
StageName(program.stage), program.start_address, spirv.size(), spirv.size());
return spirv;
}
return ctx.Assemble();
} else {
std::vector<u32> spirv = ctx.Assemble();
@ -535,6 +687,11 @@ std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_in
"Failed to optimize SPIRV shader output, continuing without optimization");
result = std::move(spirv);
}
if (Settings::values.renderer_debug && ctx.log_rz_fp_controls) {
LOG_INFO(Shader_SPIRV,
"SPV_RZ {} start={:#010x} assembled_words={} optimized_words={} validator_run=false",
StageName(program.stage), program.start_address, spirv.size(), result.size());
}
return result;
}
}

View file

@ -491,6 +491,9 @@ void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value) {
}
void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value) {
if (!ctx.runtime_info.active_color_outputs[index]) {
return;
}
const Id component_id{ctx.Const(component)};
const AttributeType type{ctx.runtime_info.color_output_types[index]};
if (type == AttributeType::Float) {

View file

@ -1,16 +1,60 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/logging/log.h"
#include "common/settings.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
namespace Shader::Backend::SPIRV {
namespace {
// Maps a shader stage to its display name for log output; "Unknown" guards
// against out-of-range values.
[[nodiscard]] constexpr std::string_view StageName(Stage stage) noexcept {
    switch (stage) {
    case Stage::Compute:
        return "Compute";
    case Stage::Fragment:
        return "Fragment";
    case Stage::Geometry:
        return "Geometry";
    case Stage::TessellationEval:
        return "TessellationEval";
    case Stage::TessellationControl:
        return "TessellationControl";
    case Stage::VertexB:
        return "VertexB";
    case Stage::VertexA:
        return "VertexA";
    }
    return "Unknown";
}
// Maps an FMZ (flush-to-zero multiply) mode to its display name for log
// output; "Unknown" guards against out-of-range values.
[[nodiscard]] constexpr std::string_view FmzName(IR::FmzMode fmz_mode) noexcept {
    switch (fmz_mode) {
    case IR::FmzMode::None:
        return "None";
    case IR::FmzMode::FMZ:
        return "FMZ";
    case IR::FmzMode::FTZ:
        return "FTZ";
    case IR::FmzMode::DontCare:
        return "DontCare";
    }
    return "Unknown";
}
Id Decorate(EmitContext& ctx, IR::Inst* inst, Id op) {
const auto flags{inst->Flags<IR::FpControl>()};
if (Settings::values.renderer_debug && ctx.log_rz_fp_controls &&
flags.rounding == IR::FpRounding::RZ) {
LOG_INFO(Shader_SPIRV,
"SPV_RZ_EMIT {} start={:#010x} ir_opcode={} spirv_op=OpFMul no_contraction={} fmz={} float_controls_ext={}",
StageName(ctx.stage), ctx.start_address, inst->GetOpcode(),
flags.no_contraction, FmzName(flags.fmz_mode), ctx.profile.support_float_controls);
}
if (flags.no_contraction) {
ctx.Decorate(op, spv::Decoration::NoContraction);
}

View file

@ -14,6 +14,25 @@
namespace Shader::Backend::SPIRV {
namespace {
// Returns the 4-component SPIR-V result vector type (F32x4 / S32x4 / U32x4)
// matching a texture's numeric type; throws on an unrecognized value.
Id GetResultType(EmitContext& ctx, NumericType numeric_type) {
    if (numeric_type == NumericType::Float) {
        return ctx.F32[4];
    }
    if (numeric_type == NumericType::SignedInt) {
        return ctx.S32[4];
    }
    if (numeric_type == NumericType::UnsignedInt) {
        return ctx.U32[4];
    }
    throw LogicError("Invalid numeric type {}", static_cast<u32>(numeric_type));
}
// Looks up the numeric type recorded for the texture referenced by the
// instruction: buffer textures live in ctx.texture_buffers, all other
// texture types in ctx.textures.
NumericType GetTextureNumericType(EmitContext& ctx, const IR::TextureInstInfo& info) {
    return info.type == TextureType::Buffer
               ? ctx.texture_buffers.at(info.descriptor_index).numeric_type
               : ctx.textures.at(info.descriptor_index).numeric_type;
}
class ImageOperands {
public:
[[maybe_unused]] static constexpr bool ImageSampleOffsetAllowed = false;
@ -201,10 +220,10 @@ Id TextureImage(EmitContext& ctx, IR::TextureInstInfo info, const IR::Value& ind
const TextureBufferDefinition& def{ctx.texture_buffers.at(info.descriptor_index)};
if (def.count > 1) {
const Id idx{index.IsImmediate() ? ctx.Const(index.U32()) : ctx.Def(index)};
const Id ptr{ctx.OpAccessChain(ctx.image_buffer_type, def.id, idx)};
return ctx.OpLoad(ctx.image_buffer_type, ptr);
const Id ptr{ctx.OpAccessChain(def.pointer_type, def.id, idx)};
return ctx.OpLoad(def.image_type, ptr);
}
return ctx.OpLoad(ctx.image_buffer_type, def.id);
return ctx.OpLoad(def.image_type, def.id);
} else {
const TextureDefinition& def{ctx.textures.at(info.descriptor_index)};
if (def.count > 1) {
@ -216,23 +235,24 @@ Id TextureImage(EmitContext& ctx, IR::TextureInstInfo info, const IR::Value& ind
}
}
std::pair<Id, bool> Image(EmitContext& ctx, const IR::Value& index, IR::TextureInstInfo info) {
std::pair<Id, NumericType> Image(EmitContext& ctx, const IR::Value& index,
IR::TextureInstInfo info) {
if (info.type == TextureType::Buffer) {
const ImageBufferDefinition def{ctx.image_buffers.at(info.descriptor_index)};
if (def.count > 1) {
const Id idx{index.IsImmediate() ? ctx.Const(index.U32()) : ctx.Def(index)};
const Id ptr{ctx.OpAccessChain(def.pointer_type, def.id, idx)};
return {ctx.OpLoad(def.image_type, ptr), def.is_integer};
return {ctx.OpLoad(def.image_type, ptr), def.numeric_type};
}
return {ctx.OpLoad(def.image_type, def.id), def.is_integer};
return {ctx.OpLoad(def.image_type, def.id), def.numeric_type};
} else {
const ImageDefinition def{ctx.images.at(info.descriptor_index)};
if (def.count > 1) {
const Id idx{index.IsImmediate() ? ctx.Const(index.U32()) : ctx.Def(index)};
const Id ptr{ctx.OpAccessChain(def.pointer_type, def.id, idx)};
return {ctx.OpLoad(def.image_type, ptr), def.is_integer};
return {ctx.OpLoad(def.image_type, ptr), def.numeric_type};
}
return {ctx.OpLoad(def.image_type, def.id), def.is_integer};
return {ctx.OpLoad(def.image_type, def.id), def.numeric_type};
}
}
@ -461,8 +481,9 @@ Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value&
if (ctx.stage == Stage::Fragment) {
const ImageOperands operands(ctx, info.has_bias != 0, false, info.has_lod_clamp != 0,
bias_lc, offset);
const Id result_type{GetResultType(ctx, GetTextureNumericType(ctx, info))};
return Emit(&EmitContext::OpImageSparseSampleImplicitLod,
&EmitContext::OpImageSampleImplicitLod, ctx, inst, ctx.F32[4],
&EmitContext::OpImageSampleImplicitLod, ctx, inst, result_type,
Texture(ctx, info, index), coords, operands.MaskOptional(), operands.Span());
} else {
// We can't use implicit lods on non-fragment stages on SPIR-V. Maxwell hardware behaves as
@ -470,8 +491,9 @@ Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value&
// derivatives
const Id lod{ctx.Const(0.0f)};
const ImageOperands operands(ctx, false, true, info.has_lod_clamp != 0, lod, offset);
const Id result_type{GetResultType(ctx, GetTextureNumericType(ctx, info))};
return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
&EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
&EmitContext::OpImageSampleExplicitLod, ctx, inst, result_type,
Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
}
}
@ -480,12 +502,14 @@ Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value&
Id lod, const IR::Value& offset) {
const auto info{inst->Flags<IR::TextureInstInfo>()};
const ImageOperands operands(ctx, false, true, false, lod, offset);
const NumericType numeric_type{GetTextureNumericType(ctx, info)};
const Id result_type{GetResultType(ctx, numeric_type)};
Id result = Emit(&EmitContext::OpImageSparseSampleExplicitLod,
&EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
&EmitContext::OpImageSampleExplicitLod, ctx, inst, result_type,
Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
#ifdef ANDROID
if (Settings::values.fix_bloom_effects.GetValue()) {
if (numeric_type == NumericType::Float && Settings::values.fix_bloom_effects.GetValue()) {
result = ctx.OpVectorTimesScalar(ctx.F32[4], result, ctx.Const(0.98f));
}
#endif
@ -529,8 +553,9 @@ Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id
if (ctx.profile.need_gather_subpixel_offset) {
coords = ImageGatherSubpixelOffset(ctx, info, TextureImage(ctx, info, index), coords);
}
const Id result_type{GetResultType(ctx, GetTextureNumericType(ctx, info))};
return Emit(&EmitContext::OpImageSparseGather, &EmitContext::OpImageGather, ctx, inst,
ctx.F32[4], Texture(ctx, info, index), coords, ctx.Const(info.gather_component),
result_type, Texture(ctx, info, index), coords, ctx.Const(info.gather_component),
operands.MaskOptional(), operands.Span());
}
@ -558,8 +583,10 @@ Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id c
lod = Id{};
}
const ImageOperands operands(lod, ms);
return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst, ctx.F32[4],
TextureImage(ctx, info, index), coords, operands.MaskOptional(), operands.Span());
const Id result_type{GetResultType(ctx, GetTextureNumericType(ctx, info))};
return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst,
result_type, TextureImage(ctx, info, index), coords, operands.MaskOptional(),
operands.Span());
}
Id EmitImageQueryDimensions(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id lod,
@ -609,8 +636,9 @@ Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, I
ctx.Def(offset), {}, lod_clamp)
: ImageOperands(ctx, info.has_lod_clamp != 0, derivatives,
info.num_derivatives, offset, lod_clamp);
const Id result_type{GetResultType(ctx, GetTextureNumericType(ctx, info))};
return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
&EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
&EmitContext::OpImageSampleExplicitLod, ctx, inst, result_type,
Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
}
@ -620,11 +648,11 @@ Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id co
LOG_WARNING(Shader_SPIRV, "Typeless image read not supported by host");
return ctx.ConstantNull(ctx.U32[4]);
}
const auto [image, is_integer] = Image(ctx, index, info);
const Id result_type{is_integer ? ctx.U32[4] : ctx.F32[4]};
const auto [image, numeric_type] = Image(ctx, index, info);
const Id result_type{GetResultType(ctx, numeric_type)};
Id color{Emit(&EmitContext::OpImageSparseRead, &EmitContext::OpImageRead, ctx, inst,
result_type, image, coords, std::nullopt, std::span<const Id>{})};
if (!is_integer) {
if (numeric_type == NumericType::Float) {
color = ctx.OpBitcast(ctx.U32[4], color);
}
return color;
@ -632,8 +660,8 @@ Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id co
void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id color) {
const auto info{inst->Flags<IR::TextureInstInfo>()};
const auto [image, is_integer] = Image(ctx, index, info);
if (!is_integer) {
const auto [image, numeric_type] = Image(ctx, index, info);
if (numeric_type == NumericType::Float) {
color = ctx.OpBitcast(ctx.F32[4], color);
}
ctx.OpImageWrite(image, coords, color);

View file

@ -17,6 +17,7 @@
#include "common/div_ceil.h"
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/spirv_emit_context.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
namespace Shader::Backend::SPIRV {
namespace {
@ -28,9 +29,21 @@ enum class Operation {
FPMax,
};
// Returns the scalar SPIR-V sampled type (F32 / S32 / U32) for a descriptor's
// numeric type; throws on an unrecognized value.
Id GetNumericTypeId(EmitContext& ctx, NumericType numeric_type) {
    if (numeric_type == NumericType::Float) {
        return ctx.F32[1];
    }
    if (numeric_type == NumericType::SignedInt) {
        return ctx.S32[1];
    }
    if (numeric_type == NumericType::UnsignedInt) {
        return ctx.U32[1];
    }
    throw InvalidArgument("Invalid numeric type {}", static_cast<u32>(numeric_type));
}
Id ImageType(EmitContext& ctx, const TextureDescriptor& desc) {
const spv::ImageFormat format{spv::ImageFormat::Unknown};
const Id type{ctx.F32[1]};
const Id type{GetNumericTypeId(ctx, desc.numeric_type)};
const bool depth{desc.is_depth};
const bool ms{desc.is_multisample};
switch (desc.type) {
@ -461,7 +474,44 @@ void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_vie
EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_info_,
IR::Program& program, Bindings& bindings)
: Sirit::Module(profile_.supported_spirv), profile{profile_}, runtime_info{runtime_info_},
stage{program.stage}, texture_rescaling_index{bindings.texture_scaling_index},
stage{program.stage}, start_address{program.start_address},
log_rz_fp_controls{std::ranges::any_of(program.post_order_blocks, [](const IR::Block* block) {
return std::ranges::any_of(block->Instructions(), [](const IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::FPAdd16:
case IR::Opcode::FPFma16:
case IR::Opcode::FPMul16:
case IR::Opcode::FPRoundEven16:
case IR::Opcode::FPFloor16:
case IR::Opcode::FPCeil16:
case IR::Opcode::FPTrunc16:
case IR::Opcode::FPAdd32:
case IR::Opcode::FPFma32:
case IR::Opcode::FPMul32:
case IR::Opcode::FPRoundEven32:
case IR::Opcode::FPFloor32:
case IR::Opcode::FPCeil32:
case IR::Opcode::FPTrunc32:
case IR::Opcode::FPOrdEqual32:
case IR::Opcode::FPUnordEqual32:
case IR::Opcode::FPOrdNotEqual32:
case IR::Opcode::FPUnordNotEqual32:
case IR::Opcode::FPOrdLessThan32:
case IR::Opcode::FPUnordLessThan32:
case IR::Opcode::FPOrdGreaterThan32:
case IR::Opcode::FPUnordGreaterThan32:
case IR::Opcode::FPOrdLessThanEqual32:
case IR::Opcode::FPUnordLessThanEqual32:
case IR::Opcode::FPOrdGreaterThanEqual32:
case IR::Opcode::FPUnordGreaterThanEqual32:
case IR::Opcode::ConvertF16F32:
case IR::Opcode::ConvertF64F32:
return inst.Flags<IR::FpControl>().rounding == IR::FpRounding::RZ;
default:
return false;
}
});
})}, texture_rescaling_index{bindings.texture_scaling_index},
image_rescaling_index{bindings.image_scaling_index} {
const bool is_unified{profile.unified_descriptor_binding};
u32& uniform_binding{is_unified ? bindings.unified : bindings.uniform_buffer};
@ -1304,22 +1354,26 @@ void EmitContext::DefineTextureBuffers(const Info& info, u32& binding) {
if (info.texture_buffer_descriptors.empty()) {
return;
}
const spv::ImageFormat format{spv::ImageFormat::Unknown};
image_buffer_type = TypeImage(F32[1], spv::Dim::Buffer, 0U, false, false, 1, format);
const Id type{TypePointer(spv::StorageClass::UniformConstant, image_buffer_type)};
texture_buffers.reserve(info.texture_buffer_descriptors.size());
for (const TextureBufferDescriptor& desc : info.texture_buffer_descriptors) {
if (desc.count != 1) {
throw NotImplementedException("Array of texture buffers");
}
const spv::ImageFormat format{spv::ImageFormat::Unknown};
const Id image_type{
TypeImage(GetNumericTypeId(*this, desc.numeric_type), spv::Dim::Buffer, 0U, false,
false, 1, format)};
const Id type{TypePointer(spv::StorageClass::UniformConstant, image_type)};
const Id id{AddGlobalVariable(type, spv::StorageClass::UniformConstant)};
Decorate(id, spv::Decoration::Binding, binding);
Decorate(id, spv::Decoration::DescriptorSet, 0U);
Name(id, NameOf(stage, desc, "texbuf"));
texture_buffers.push_back({
.id = id,
.image_type = image_type,
.pointer_type = type,
.count = desc.count,
.numeric_type = desc.numeric_type,
});
if (profile.supported_spirv >= 0x00010400) {
interfaces.push_back(id);
@ -1332,7 +1386,7 @@ void EmitContext::DefineImageBuffers(const Info& info, u32& binding) {
image_buffers.reserve(info.image_buffer_descriptors.size());
for (const ImageBufferDescriptor& desc : info.image_buffer_descriptors) {
const spv::ImageFormat format{GetImageFormat(desc.format)};
const Id sampled_type{desc.is_integer ? U32[1] : F32[1]};
const Id sampled_type{GetNumericTypeId(*this, desc.numeric_type)};
const Id image_type{
TypeImage(sampled_type, spv::Dim::Buffer, false, false, false, 2, format)};
const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, image_type)};
@ -1345,7 +1399,7 @@ void EmitContext::DefineImageBuffers(const Info& info, u32& binding) {
.image_type = image_type,
.pointer_type = pointer_type,
.count = desc.count,
.is_integer = desc.is_integer,
.numeric_type = desc.numeric_type,
});
if (profile.supported_spirv >= 0x00010400) {
interfaces.push_back(id);
@ -1372,6 +1426,7 @@ void EmitContext::DefineTextures(const Info& info, u32& binding, u32& scaling_in
.image_type = image_type,
.count = desc.count,
.is_multisample = desc.is_multisample,
.numeric_type = desc.numeric_type,
});
if (profile.supported_spirv >= 0x00010400) {
interfaces.push_back(id);
@ -1387,7 +1442,7 @@ void EmitContext::DefineTextures(const Info& info, u32& binding, u32& scaling_in
void EmitContext::DefineImages(const Info& info, u32& binding, u32& scaling_index) {
images.reserve(info.image_descriptors.size());
for (const ImageDescriptor& desc : info.image_descriptors) {
const Id sampled_type{desc.is_integer ? U32[1] : F32[1]};
const Id sampled_type{GetNumericTypeId(*this, desc.numeric_type)};
const Id image_type{ImageType(*this, desc, sampled_type)};
const Id pointer_type{TypePointer(spv::StorageClass::UniformConstant, image_type)};
const Id id{AddGlobalVariable(pointer_type, spv::StorageClass::UniformConstant)};
@ -1399,7 +1454,7 @@ void EmitContext::DefineImages(const Info& info, u32& binding, u32& scaling_inde
.image_type = image_type,
.pointer_type = pointer_type,
.count = desc.count,
.is_integer = desc.is_integer,
.numeric_type = desc.numeric_type,
});
if (profile.supported_spirv >= 0x00010400) {
interfaces.push_back(id);
@ -1671,8 +1726,10 @@ void EmitContext::DefineOutputs(const IR::Program& program) {
case Stage::Fragment:
for (u32 index = 0; index < 8; ++index) {
const bool need_dual_source = runtime_info.dual_source_blend && index <= 1;
if (!need_dual_source && !info.stores_frag_color[index] &&
!profile.need_declared_frag_colors) {
const bool should_declare = runtime_info.active_color_outputs[index] &&
(info.stores_frag_color[index] ||
profile.need_declared_frag_colors);
if (!need_dual_source && !should_declare) {
continue;
}
const Id type{GetAttributeType(*this, runtime_info.color_output_types[index])};

View file

@ -41,11 +41,15 @@ struct TextureDefinition {
Id image_type;
u32 count;
bool is_multisample;
NumericType numeric_type;
};
struct TextureBufferDefinition {
Id id;
Id image_type;
Id pointer_type;
u32 count;
NumericType numeric_type;
};
struct ImageBufferDefinition {
@ -53,7 +57,7 @@ struct ImageBufferDefinition {
Id image_type;
Id pointer_type;
u32 count;
bool is_integer;
NumericType numeric_type;
};
struct ImageDefinition {
@ -61,7 +65,7 @@ struct ImageDefinition {
Id image_type;
Id pointer_type;
u32 count;
bool is_integer;
NumericType numeric_type;
};
struct UniformDefinitions {
@ -212,6 +216,8 @@ public:
const Profile& profile;
const RuntimeInfo& runtime_info;
Stage stage{};
u32 start_address{};
bool log_rz_fp_controls{};
Id void_id{};
Id U1{};

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -20,6 +23,7 @@ struct Program {
BlockList post_order_blocks;
Info info;
Stage stage{};
u32 start_address{};
std::array<u32, 3> workgroup_size{};
OutputTopology output_topology{};
u32 output_vertices{};

View file

@ -5,10 +5,13 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <array>
#include <memory>
#include <string_view>
#include <vector>
#include <queue>
#include "common/logging/log.h"
#include "common/settings.h"
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/ir/basic_block.h"
@ -22,6 +25,214 @@
namespace Shader::Maxwell {
namespace {
// Per-shader tallies of FP instruction control flags, bucketed by precision:
// outer index 0 = fp16 ops, 1 = fp32 ops (see FpControlBucket). Inner arrays
// are indexed by static_cast of the enum value, so their sizes follow the
// enum cardinalities: 5 IR::FpRounding values and 4 IR::FmzMode values.
struct FpControlHistogram {
// Total relevant instructions per precision bucket.
std::array<u32, 2> total{};
// Instructions with the no_contraction flag set, per bucket.
std::array<u32, 2> no_contraction{};
// Counts per rounding mode, per bucket.
std::array<std::array<u32, 5>, 2> rounding{};
// Counts per FMZ mode, per bucket.
std::array<std::array<u32, 4>, 2> fmz{};
// Joint (rounding, fmz) combination counts, per bucket.
std::array<std::array<std::array<u32, 4>, 5>, 2> combos{};
};
// Maps a shader stage to its display name for log output; "Unknown" guards
// against out-of-range values.
[[nodiscard]] constexpr std::string_view StageName(Stage stage) noexcept {
    switch (stage) {
    case Stage::Compute:
        return "Compute";
    case Stage::Fragment:
        return "Fragment";
    case Stage::Geometry:
        return "Geometry";
    case Stage::TessellationEval:
        return "TessellationEval";
    case Stage::TessellationControl:
        return "TessellationControl";
    case Stage::VertexB:
        return "VertexB";
    case Stage::VertexA:
        return "VertexA";
    }
    return "Unknown";
}
// Maps an FP rounding mode to its display name for log output; "Unknown"
// guards against out-of-range values.
[[nodiscard]] constexpr std::string_view RoundingName(IR::FpRounding rounding) noexcept {
    switch (rounding) {
    case IR::FpRounding::RZ:
        return "RZ";
    case IR::FpRounding::RP:
        return "RP";
    case IR::FpRounding::RM:
        return "RM";
    case IR::FpRounding::RN:
        return "RN";
    case IR::FpRounding::DontCare:
        return "DontCare";
    }
    return "Unknown";
}
// Maps an FMZ (flush-to-zero multiply) mode to its display name for log
// output; "Unknown" guards against out-of-range values.
[[nodiscard]] constexpr std::string_view FmzName(IR::FmzMode fmz_mode) noexcept {
    switch (fmz_mode) {
    case IR::FmzMode::None:
        return "None";
    case IR::FmzMode::FMZ:
        return "FMZ";
    case IR::FmzMode::FTZ:
        return "FTZ";
    case IR::FmzMode::DontCare:
        return "DontCare";
    }
    return "Unknown";
}
// Maps an opcode to its precision bucket for the FP-control histogram:
// 0 for fp16 instructions, 1 for fp32 instructions (including conversions
// with an fp32 operand), std::nullopt for opcodes the histogram ignores.
[[nodiscard]] constexpr std::optional<size_t> FpControlBucket(const IR::Opcode opcode) noexcept {
switch (opcode) {
// fp16 arithmetic and rounding ops.
case IR::Opcode::FPAdd16:
case IR::Opcode::FPFma16:
case IR::Opcode::FPMul16:
case IR::Opcode::FPRoundEven16:
case IR::Opcode::FPFloor16:
case IR::Opcode::FPCeil16:
case IR::Opcode::FPTrunc16:
return 0;
// fp32 arithmetic, rounding, comparison and conversion ops.
case IR::Opcode::FPAdd32:
case IR::Opcode::FPFma32:
case IR::Opcode::FPMul32:
case IR::Opcode::FPRoundEven32:
case IR::Opcode::FPFloor32:
case IR::Opcode::FPCeil32:
case IR::Opcode::FPTrunc32:
case IR::Opcode::FPOrdEqual32:
case IR::Opcode::FPUnordEqual32:
case IR::Opcode::FPOrdNotEqual32:
case IR::Opcode::FPUnordNotEqual32:
case IR::Opcode::FPOrdLessThan32:
case IR::Opcode::FPUnordLessThan32:
case IR::Opcode::FPOrdGreaterThan32:
case IR::Opcode::FPUnordGreaterThan32:
case IR::Opcode::FPOrdLessThanEqual32:
case IR::Opcode::FPUnordLessThanEqual32:
case IR::Opcode::FPOrdGreaterThanEqual32:
case IR::Opcode::FPUnordGreaterThanEqual32:
case IR::Opcode::ConvertF16F32:
case IR::Opcode::ConvertF64F32:
return 1;
default:
return std::nullopt;
}
}
// Builds the FP-control histogram for a program: every instruction with a
// precision bucket contributes to the per-bucket totals, no_contraction
// count, rounding-mode and FMZ-mode tallies, and the joint combination table.
FpControlHistogram CollectFpControlHistogram(const IR::Program& program) {
    FpControlHistogram result{};
    for (const IR::Block* const blk : program.post_order_blocks) {
        for (const IR::Inst& inst : blk->Instructions()) {
            const std::optional<size_t> precision{FpControlBucket(inst.GetOpcode())};
            if (!precision.has_value()) {
                continue;
            }
            const size_t p{*precision};
            const auto control{inst.Flags<IR::FpControl>()};
            // Enum values are used directly as array indices.
            const size_t rounding_index{static_cast<size_t>(control.rounding)};
            const size_t fmz_index{static_cast<size_t>(control.fmz_mode)};
            ++result.total[p];
            if (control.no_contraction) {
                ++result.no_contraction[p];
            }
            ++result.rounding[p][rounding_index];
            ++result.fmz[p][fmz_index];
            ++result.combos[p][rounding_index][fmz_index];
        }
    }
    return result;
}
// Logs a per-instruction "FP_RZ" trace of every round-toward-zero FP
// instruction in the program. Two passes: the first counts RZ instructions
// per precision bucket (returning early when there are none), the second
// logs each RZ instruction with its block order and instruction index.
void LogRzFpControlTrace(Environment& env, const IR::Program& program) {
// First pass: count RZ-flagged instructions per precision bucket.
std::array<u32, 2> totals{};
for (const IR::Block* const block : program.post_order_blocks) {
for (const IR::Inst& inst : block->Instructions()) {
const std::optional<size_t> bucket{FpControlBucket(inst.GetOpcode())};
if (!bucket) {
continue;
}
const auto flags{inst.Flags<IR::FpControl>()};
if (flags.rounding != IR::FpRounding::RZ) {
continue;
}
++totals[*bucket];
}
}
// No RZ usage anywhere: stay silent.
if (totals[0] == 0 && totals[1] == 0) {
return;
}
constexpr std::array<std::string_view, 2> precision_names{"fp16", "fp32"};
// Summary line for the whole shader.
LOG_INFO(Shader,
"FP_RZ {} shader start={:#010x} blocks={} post_order_blocks={} fp16={} fp32={}",
StageName(program.stage), env.StartAddress(), program.blocks.size(),
program.post_order_blocks.size(), totals[0], totals[1]);
// Second pass: one log line per RZ instruction. inst_index counts every
// instruction in the block (including skipped ones) so indices are stable.
for (const IR::Block* const block : program.post_order_blocks) {
u32 inst_index{};
for (const IR::Inst& inst : block->Instructions()) {
const std::optional<size_t> bucket{FpControlBucket(inst.GetOpcode())};
if (!bucket) {
++inst_index;
continue;
}
const auto flags{inst.Flags<IR::FpControl>()};
if (flags.rounding != IR::FpRounding::RZ) {
++inst_index;
continue;
}
LOG_INFO(Shader,
"FP_RZ {} start={:#010x} block_order={} inst_index={} precision={} opcode={} no_contraction={} fmz={}",
StageName(program.stage), env.StartAddress(), block->GetOrder(), inst_index,
precision_names[*bucket], inst.GetOpcode(), flags.no_contraction,
FmzName(flags.fmz_mode));
++inst_index;
}
}
}
// Logs the aggregated "FP_HIST" view of a program's FP control-flag usage:
// per-precision totals, rounding/FMZ breakdowns, and the non-zero entries of
// the joint (rounding, fmz) combination table. Silent when the program has no
// relevant FP instructions.
void LogFpControlHistogram(const IR::Program& program) {
const FpControlHistogram histogram{CollectFpControlHistogram(program)};
// Nothing to report: no bucketed FP instructions at all.
if (histogram.total[0] == 0 && histogram.total[1] == 0) {
return;
}
LOG_INFO(Shader, "FP_HIST {} shader blocks={} post_order_blocks={}",
StageName(program.stage), program.blocks.size(), program.post_order_blocks.size());
constexpr std::array<std::string_view, 2> precision_names{"fp16", "fp32"};
for (size_t bucket = 0; bucket < precision_names.size(); ++bucket) {
// Skip precision buckets with no instructions.
if (histogram.total[bucket] == 0) {
continue;
}
// Per-bucket breakdown, indexed by the enum values themselves.
LOG_INFO(Shader,
"FP_HIST {} total={} no_contraction={} rounding[DontCare={}, RN={}, RM={}, RP={}, RZ={}] fmz[DontCare={}, FTZ={}, FMZ={}, None={}]",
precision_names[bucket], histogram.total[bucket], histogram.no_contraction[bucket],
histogram.rounding[bucket][static_cast<size_t>(IR::FpRounding::DontCare)],
histogram.rounding[bucket][static_cast<size_t>(IR::FpRounding::RN)],
histogram.rounding[bucket][static_cast<size_t>(IR::FpRounding::RM)],
histogram.rounding[bucket][static_cast<size_t>(IR::FpRounding::RP)],
histogram.rounding[bucket][static_cast<size_t>(IR::FpRounding::RZ)],
histogram.fmz[bucket][static_cast<size_t>(IR::FmzMode::DontCare)],
histogram.fmz[bucket][static_cast<size_t>(IR::FmzMode::FTZ)],
histogram.fmz[bucket][static_cast<size_t>(IR::FmzMode::FMZ)],
histogram.fmz[bucket][static_cast<size_t>(IR::FmzMode::None)]);
// Emit only the non-zero joint (rounding, fmz) combinations.
for (size_t rounding = 0; rounding < histogram.combos[bucket].size(); ++rounding) {
for (size_t fmz = 0; fmz < histogram.combos[bucket][rounding].size(); ++fmz) {
const u32 count{histogram.combos[bucket][rounding][fmz]};
if (count == 0) {
continue;
}
LOG_INFO(Shader, "FP_HIST {} combo {} / {} = {}", precision_names[bucket],
RoundingName(static_cast<IR::FpRounding>(rounding)),
FmzName(static_cast<IR::FmzMode>(fmz)), count);
}
}
}
}
IR::BlockList GenerateBlocks(const IR::AbstractSyntaxList& syntax_list) {
size_t num_syntax_blocks{};
for (const auto& node : syntax_list) {
@ -247,6 +458,7 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
program.blocks = GenerateBlocks(program.syntax_list);
program.post_order_blocks = PostOrder(program.syntax_list.front());
program.stage = env.ShaderStage();
program.start_address = env.StartAddress();
program.local_memory_size = env.LocalMemorySize();
switch (program.stage) {
case Stage::TessellationControl: {
@ -315,6 +527,11 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
Optimization::LayerPass(program, host_info);
Optimization::VendorWorkaroundPass(program);
if (Settings::values.renderer_debug) {
LogFpControlHistogram(program);
LogRzFpControlTrace(env, program);
}
CollectInterpolationInfo(env, program);
AddNVNStorageBuffers(program);
return program;
@ -338,6 +555,7 @@ IR::Program MergeDualVertexPrograms(IR::Program& vertex_a, IR::Program& vertex_b
result.post_order_blocks.push_back(block);
}
result.stage = Stage::VertexB;
result.start_address = env_vertex_b.StartAddress();
result.info = vertex_a.info;
result.local_memory_size = (std::max)(vertex_a.local_memory_size, vertex_b.local_memory_size);
result.info.loads.mask |= vertex_b.info.loads.mask;
@ -350,6 +568,10 @@ IR::Program MergeDualVertexPrograms(IR::Program& vertex_a, IR::Program& vertex_b
Optimization::VerificationPass(result);
}
Optimization::CollectShaderInfoPass(env_vertex_b, result);
if (Settings::values.renderer_debug) {
LogFpControlHistogram(result);
LogRzFpControlTrace(env_vertex_b, result);
}
return result;
}

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project

View file

@ -19,6 +19,7 @@
#include "shader_recompiler/host_translate_info.h"
#include "shader_recompiler/ir_opt/passes.h"
#include "shader_recompiler/shader_info.h"
#include "video_core/surface.h"
namespace Shader::Optimization {
namespace {
@ -33,6 +34,16 @@ using TextureInstVector = boost::container::small_vector<TextureInst, 24>;
constexpr u32 DESCRIPTOR_SIZE = 8;
constexpr u32 DESCRIPTOR_SIZE_SHIFT = static_cast<u32>(std::countr_zero(DESCRIPTOR_SIZE));
NumericType GetNumericType(TexturePixelFormat format) {
const auto pixel_format = static_cast<VideoCore::Surface::PixelFormat>(format);
if (!VideoCore::Surface::IsPixelFormatInteger(pixel_format)) {
return NumericType::Float;
}
return VideoCore::Surface::IsPixelFormatSignedInteger(pixel_format)
? NumericType::SignedInt
: NumericType::UnsignedInt;
}
IR::Opcode IndexedInstruction(const IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::BindlessImageSampleImplicitLod:
@ -199,11 +210,6 @@ static inline TexturePixelFormat ReadTexturePixelFormatCached(Environment& env,
const ConstBufferAddr& cbuf) {
return env.ReadTexturePixelFormat(GetTextureHandleCached(env, cbuf));
}
static inline bool IsTexturePixelFormatIntegerCached(Environment& env,
const ConstBufferAddr& cbuf) {
return env.IsTexturePixelFormatInteger(GetTextureHandleCached(env, cbuf));
}
std::optional<ConstBufferAddr> Track(const IR::Value& value, Environment& env);
static inline std::optional<ConstBufferAddr> TrackCached(const IR::Value& v, Environment& env) {
@ -430,7 +436,8 @@ public:
u32 Add(const TextureBufferDescriptor& desc) {
return Add(texture_buffer_descriptors, desc, [&desc](const auto& existing) {
return desc.cbuf_index == existing.cbuf_index &&
return desc.numeric_type == existing.numeric_type &&
desc.cbuf_index == existing.cbuf_index &&
desc.cbuf_offset == existing.cbuf_offset &&
desc.shift_left == existing.shift_left &&
desc.secondary_cbuf_index == existing.secondary_cbuf_index &&
@ -449,13 +456,13 @@ public:
})};
image_buffer_descriptors[index].is_written |= desc.is_written;
image_buffer_descriptors[index].is_read |= desc.is_read;
image_buffer_descriptors[index].is_integer |= desc.is_integer;
return index;
}
u32 Add(const TextureDescriptor& desc) {
const u32 index{Add(texture_descriptors, desc, [&desc](const auto& existing) {
return desc.type == existing.type && desc.is_depth == existing.is_depth &&
desc.numeric_type == existing.numeric_type &&
desc.has_secondary == existing.has_secondary &&
desc.cbuf_index == existing.cbuf_index &&
desc.cbuf_offset == existing.cbuf_offset &&
@ -479,7 +486,6 @@ public:
})};
image_descriptors[index].is_written |= desc.is_written;
image_descriptors[index].is_read |= desc.is_read;
image_descriptors[index].is_integer |= desc.is_integer;
return index;
}
@ -651,13 +657,13 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
}
const bool is_written{inst->GetOpcode() != IR::Opcode::ImageRead};
const bool is_read{inst->GetOpcode() != IR::Opcode::ImageWrite};
const bool is_integer{IsTexturePixelFormatIntegerCached(env, cbuf)};
const NumericType numeric_type{GetNumericType(ReadTexturePixelFormatCached(env, cbuf))};
if (flags.type == TextureType::Buffer) {
index = descriptors.Add(ImageBufferDescriptor{
.format = flags.image_format,
.is_written = is_written,
.is_read = is_read,
.is_integer = is_integer,
.numeric_type = numeric_type,
.cbuf_index = cbuf.index,
.cbuf_offset = cbuf.offset,
.count = cbuf.count,
@ -669,7 +675,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
.format = flags.image_format,
.is_written = is_written,
.is_read = is_read,
.is_integer = is_integer,
.numeric_type = numeric_type,
.cbuf_index = cbuf.index,
.cbuf_offset = cbuf.offset,
.count = cbuf.count,
@ -681,6 +687,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
default:
if (flags.type == TextureType::Buffer) {
index = descriptors.Add(TextureBufferDescriptor{
.numeric_type = GetNumericType(ReadTexturePixelFormatCached(env, cbuf)),
.has_secondary = cbuf.has_secondary,
.cbuf_index = cbuf.index,
.cbuf_offset = cbuf.offset,
@ -696,6 +703,7 @@ void TexturePass(Environment& env, IR::Program& program, const HostTranslateInfo
.type = flags.type,
.is_depth = flags.is_depth != 0,
.is_multisample = is_multisample,
.numeric_type = GetNumericType(ReadTexturePixelFormatCached(env, cbuf)),
.has_secondary = cbuf.has_secondary,
.cbuf_index = cbuf.index,
.cbuf_offset = cbuf.offset,

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -18,6 +21,7 @@ struct Profile {
bool support_float_controls{};
bool support_separate_denorm_behavior{};
bool support_separate_rounding_mode{};
bool support_fp32_rounding_rtz{};
bool support_fp16_denorm_preserve{};
bool support_fp32_denorm_preserve{};
bool support_fp16_denorm_flush{};

View file

@ -111,6 +111,9 @@ struct RuntimeInfo {
/// Output types for each color attachment
std::array<AttributeType, 8> color_output_types{};
/// Fragment color outputs that are active for the current pipeline.
std::array<bool, 8> active_color_outputs{true, true, true, true, true, true, true, true};
/// Dual source blending
bool dual_source_blend{};
};

View file

@ -38,6 +38,12 @@ enum class TextureType : u32 {
};
constexpr u32 NUM_TEXTURE_TYPES = 9;
enum class NumericType : u8 {
Float,
SignedInt,
UnsignedInt,
};
enum class TexturePixelFormat {
A8B8G8R8_UNORM,
A8B8G8R8_SNORM,
@ -177,6 +183,7 @@ struct StorageBufferDescriptor {
};
struct TextureBufferDescriptor {
NumericType numeric_type;
bool has_secondary;
u32 cbuf_index;
u32 cbuf_offset;
@ -195,7 +202,7 @@ struct ImageBufferDescriptor {
ImageFormat format;
bool is_written;
bool is_read;
bool is_integer;
NumericType numeric_type;
u32 cbuf_index;
u32 cbuf_offset;
u32 count;
@ -209,6 +216,7 @@ struct TextureDescriptor {
TextureType type;
bool is_depth;
bool is_multisample;
NumericType numeric_type;
bool has_secondary;
u32 cbuf_index;
u32 cbuf_offset;
@ -228,7 +236,7 @@ struct ImageDescriptor {
ImageFormat format;
bool is_written;
bool is_read;
bool is_integer;
NumericType numeric_type;
u32 cbuf_index;
u32 cbuf_offset;
u32 count;

View file

@ -14,9 +14,12 @@
#include <mutex>
#include <numeric>
#include <span>
#include <ankerl/unordered_dense.h>
#include <vector>
#include <ankerl/unordered_dense.h>
#include <boost/container/static_vector.hpp>
#include <boost/container/small_vector.hpp>
#include "common/common_types.h"
#include "common/div_ceil.h"
#include "common/literals.h"
@ -94,10 +97,10 @@ static constexpr Binding NULL_BINDING{
template <typename Buffer>
struct HostBindings {
boost::container::small_vector<Buffer*, NUM_VERTEX_BUFFERS> buffers;
boost::container::small_vector<u64, NUM_VERTEX_BUFFERS> offsets;
boost::container::small_vector<u64, NUM_VERTEX_BUFFERS> sizes;
boost::container::small_vector<u64, NUM_VERTEX_BUFFERS> strides;
boost::container::static_vector<Buffer*, NUM_VERTEX_BUFFERS> buffers;
boost::container::static_vector<u64, NUM_VERTEX_BUFFERS> offsets;
boost::container::static_vector<u64, NUM_VERTEX_BUFFERS> sizes;
boost::container::static_vector<u64, NUM_VERTEX_BUFFERS> strides;
u32 min_index{NUM_VERTEX_BUFFERS};
u32 max_index{0};
};

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
@ -19,12 +22,12 @@ ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {}
void ChannelState::Init(Core::System& system, GPU& gpu, u64 program_id_) {
ASSERT(memory_manager);
program_id = program_id_;
dma_pusher = std::make_unique<Tegra::DmaPusher>(system, gpu, *memory_manager, *this);
maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, *memory_manager);
fermi_2d = std::make_unique<Engines::Fermi2D>(*memory_manager);
kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager);
maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
dma_pusher.emplace(system, gpu, *memory_manager, *this);
maxwell_3d.emplace(system, *memory_manager);
fermi_2d.emplace(*memory_manager);
kepler_compute.emplace(system, *memory_manager);
maxwell_dma.emplace(system, *memory_manager);
kepler_memory.emplace(system, *memory_manager);
initialized = true;
}

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
@ -6,6 +9,12 @@
#include <memory>
#include "common/common_types.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/engines/kepler_memory.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/dma_pusher.h"
namespace Core {
class System;
@ -18,49 +27,34 @@ class RasterizerInterface;
namespace Tegra {
class GPU;
namespace Engines {
class Puller;
class Fermi2D;
class Maxwell3D;
class MaxwellDMA;
class KeplerCompute;
class KeplerMemory;
} // namespace Engines
class MemoryManager;
class DmaPusher;
namespace Control {
struct ChannelState {
explicit ChannelState(s32 bind_id);
ChannelState(const ChannelState& state) = delete;
ChannelState& operator=(const ChannelState&) = delete;
ChannelState(ChannelState&& other) noexcept = default;
ChannelState& operator=(ChannelState&& other) noexcept = default;
void Init(Core::System& system, GPU& gpu, u64 program_id);
void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
s32 bind_id = -1;
u64 program_id = 0;
/// 3D engine
std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
std::optional<Engines::Maxwell3D> maxwell_3d;
/// 2D engine
std::unique_ptr<Engines::Fermi2D> fermi_2d;
std::optional<Engines::Fermi2D> fermi_2d;
/// Compute engine
std::unique_ptr<Engines::KeplerCompute> kepler_compute;
std::optional<Engines::KeplerCompute> kepler_compute;
/// DMA engine
std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
std::optional<Engines::MaxwellDMA> maxwell_dma;
/// Inline memory engine
std::unique_ptr<Engines::KeplerMemory> kepler_memory;
std::optional<Engines::KeplerMemory> kepler_memory;
/// NV01 Timer
std::optional<Engines::KeplerMemory> nv01_timer;
std::optional<DmaPusher> dma_pusher;
std::shared_ptr<MemoryManager> memory_manager;
std::unique_ptr<DmaPusher> dma_pusher;
s32 bind_id = -1;
u64 program_id = 0;
bool initialized{};
};

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
@ -15,6 +15,7 @@
namespace Tegra::Engines {
enum class EngineTypes : u32 {
Nv01Timer,
KeplerCompute,
Maxwell3D,
Fermi2D,

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
@ -26,8 +26,15 @@ namespace Tegra::Engines {
constexpr u32 MacroRegistersStart = 0xE00;
Maxwell3D::Maxwell3D(Core::System& system_, MemoryManager& memory_manager_)
: draw_manager{std::make_unique<DrawManager>(this)}, system{system_},
memory_manager{memory_manager_}, macro_engine{GetMacroEngine(*this)}, upload_state{memory_manager, regs.upload} {
: draw_manager{std::make_unique<DrawManager>(this)}, system{system_}
, memory_manager{memory_manager_}
#ifdef ARCHITECTURE_x86_64
, macro_engine(bool(Settings::values.disable_macro_jit))
#else
, macro_engine(true)
#endif
, upload_state{memory_manager, regs.upload}
{
dirty.flags.flip();
InitializeRegisterDefaults();
execution_mask.reset();
@ -328,9 +335,9 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
shadow_state.shadow_ram_control = static_cast<Regs::ShadowRamControl>(nonshadow_argument);
return;
case MAXWELL3D_REG_INDEX(load_mme.instruction_ptr):
return macro_engine->ClearCode(regs.load_mme.instruction_ptr);
return macro_engine.ClearCode(regs.load_mme.instruction_ptr);
case MAXWELL3D_REG_INDEX(load_mme.instruction):
return macro_engine->AddCode(regs.load_mme.instruction_ptr, argument);
return macro_engine.AddCode(regs.load_mme.instruction_ptr, argument);
case MAXWELL3D_REG_INDEX(load_mme.start_address):
return ProcessMacroBind(argument);
case MAXWELL3D_REG_INDEX(falcon[4]):
@ -398,7 +405,7 @@ void Maxwell3D::CallMacroMethod(u32 method, const std::vector<u32>& parameters)
((method - MacroRegistersStart) >> 1) % static_cast<u32>(macro_positions.size());
// Execute the current macro.
macro_engine->Execute(macro_positions[entry], parameters);
macro_engine.Execute(*this, macro_positions[entry], parameters);
draw_manager->DrawDeferred();
}
@ -464,7 +471,7 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
}
void Maxwell3D::ProcessMacroUpload(u32 data) {
macro_engine->AddCode(regs.load_mme.instruction_ptr++, data);
macro_engine.AddCode(regs.load_mme.instruction_ptr++, data);
}
void Maxwell3D::ProcessMacroBind(u32 data) {

View file

@ -2258,7 +2258,7 @@ public:
/// Returns whether the vertex array specified by index is supposed to be
/// accessed per instance or not.
bool IsInstancingEnabled(std::size_t index) const {
return is_instanced[index];
return bool(is_instanced[index]); //FUCK YOU MSVC
}
};
@ -3203,7 +3203,7 @@ private:
std::vector<u32> macro_params;
/// Interpreter for the macro codes uploaded to the GPU.
std::optional<MacroEngine> macro_engine;
MacroEngine macro_engine;
Upload::State upload_state;

View file

@ -0,0 +1,52 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <cstddef>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "video_core/engines/engine_interface.h"
#include "video_core/engines/engine_upload.h"
namespace Core {
class System;
}
namespace Tegra {
class MemoryManager;
}
namespace Tegra::Engines {
class Nv01Timer final : public EngineInterface {
public:
explicit Nv01Timer(Core::System& system_, MemoryManager& memory_manager)
: system{system_}
{}
~Nv01Timer() override;
/// Write the value to the register identified by method.
void CallMethod(u32 method, u32 method_argument, bool is_last_call) override {
LOG_DEBUG(HW_GPU, "method={}, argument={}, is_last_call={}", method, method_argument, is_last_call);
}
/// Write multiple values to the register identified by method.
void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) override {
LOG_DEBUG(HW_GPU, "method={}, base_start={}, amount={}, pending={}", method, fmt::ptr(base_start), amount, methods_pending);
}
struct Regs {
// No fucking idea
INSERT_PADDING_BYTES_NOINIT(0x48);
} regs{};
private:
void ConsumeSinkImpl() override {}
Core::System& system;
};
}

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
@ -34,24 +37,22 @@ void Puller::ProcessBindMethod(const MethodCall& method_call) {
bound_engines[method_call.subchannel] = engine_id;
switch (engine_id) {
case EngineID::FERMI_TWOD_A:
dma_pusher.BindSubchannel(channel_state.fermi_2d.get(), method_call.subchannel,
EngineTypes::Fermi2D);
dma_pusher.BindSubchannel(&*channel_state.fermi_2d, method_call.subchannel, EngineTypes::Fermi2D);
break;
case EngineID::MAXWELL_B:
dma_pusher.BindSubchannel(channel_state.maxwell_3d.get(), method_call.subchannel,
EngineTypes::Maxwell3D);
dma_pusher.BindSubchannel(&*channel_state.maxwell_3d, method_call.subchannel, EngineTypes::Maxwell3D);
break;
case EngineID::KEPLER_COMPUTE_B:
dma_pusher.BindSubchannel(channel_state.kepler_compute.get(), method_call.subchannel,
EngineTypes::KeplerCompute);
dma_pusher.BindSubchannel(&*channel_state.kepler_compute, method_call.subchannel, EngineTypes::KeplerCompute);
break;
case EngineID::MAXWELL_DMA_COPY_A:
dma_pusher.BindSubchannel(channel_state.maxwell_dma.get(), method_call.subchannel,
EngineTypes::MaxwellDMA);
dma_pusher.BindSubchannel(&*channel_state.maxwell_dma, method_call.subchannel, EngineTypes::MaxwellDMA);
break;
case EngineID::KEPLER_INLINE_TO_MEMORY_B:
dma_pusher.BindSubchannel(channel_state.kepler_memory.get(), method_call.subchannel,
EngineTypes::KeplerMemory);
dma_pusher.BindSubchannel(&*channel_state.kepler_memory, method_call.subchannel, EngineTypes::KeplerMemory);
break;
case EngineID::NV01_TIMER:
dma_pusher.BindSubchannel(&*channel_state.nv01_timer, method_call.subchannel, EngineTypes::Nv01Timer);
break;
default:
UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
@ -209,24 +210,22 @@ void Puller::CallEngineMethod(const MethodCall& method_call) {
switch (engine) {
case EngineID::FERMI_TWOD_A:
channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument,
method_call.IsLastCall());
channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument, method_call.IsLastCall());
break;
case EngineID::MAXWELL_B:
channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument,
method_call.IsLastCall());
channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument, method_call.IsLastCall());
break;
case EngineID::KEPLER_COMPUTE_B:
channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument,
method_call.IsLastCall());
channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument, method_call.IsLastCall());
break;
case EngineID::MAXWELL_DMA_COPY_A:
channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument,
method_call.IsLastCall());
channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument, method_call.IsLastCall());
break;
case EngineID::KEPLER_INLINE_TO_MEMORY_B:
channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument,
method_call.IsLastCall());
channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument, method_call.IsLastCall());
break;
case EngineID::NV01_TIMER:
channel_state.nv01_timer->CallMethod(method_call.method, method_call.argument, method_call.IsLastCall());
break;
default:
UNIMPLEMENTED_MSG("Unimplemented engine");
@ -255,6 +254,9 @@ void Puller::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_s
case EngineID::KEPLER_INLINE_TO_MEMORY_B:
channel_state.kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
break;
case EngineID::NV01_TIMER:
channel_state.nv01_timer->CallMultiMethod(method, base_start, amount, methods_pending);
break;
default:
UNIMPLEMENTED_MSG("Unimplemented engine");
break;

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
@ -20,6 +23,7 @@ class MemoryManager;
class DmaPusher;
enum class EngineID {
NV01_TIMER = 0x0004,
FERMI_TWOD_A = 0x902D, // 2D Engine
MAXWELL_B = 0xB197, // 3D Engine
KEPLER_COMPUTE_B = 0xB1C0,

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
@ -10,6 +10,7 @@
#include <span>
#include <fstream>
#include <variant>
#ifdef ARCHITECTURE_x86_64
// xbyak hates human beings
#ifdef __GNUC__
@ -73,26 +74,12 @@ bool IsTopologySafe(Maxwell3D::Regs::PrimitiveTopology topology) {
}
}
class HLEMacroImpl : public CachedMacro {
public:
explicit HLEMacroImpl(Maxwell3D& maxwell3d_)
: CachedMacro(maxwell3d_)
{}
};
} // Anonymous namespace
/// @note: these macros have two versions, a normal and extended version, with the extended version
/// also assigning the base vertex/instance.
template <bool extended>
class HLE_DrawArraysIndirect final : public HLEMacroImpl {
public:
explicit HLE_DrawArraysIndirect(Maxwell3D& maxwell3d_)
: HLEMacroImpl(maxwell3d_)
{}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
void HLE_DrawArraysIndirect::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
auto topology = static_cast<Maxwell3D::Regs::PrimitiveTopology>(parameters[0]);
if (!maxwell3d.AnyParametersDirty() || !IsTopologySafe(topology)) {
Fallback(parameters);
Fallback(maxwell3d, parameters);
return;
}
@ -117,10 +104,8 @@ public:
maxwell3d.engine_state = Maxwell3D::EngineHint::None;
maxwell3d.replace_table.clear();
}
}
private:
void Fallback(const std::vector<u32>& parameters) {
}
void HLE_DrawArraysIndirect::Fallback(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters) {
SCOPE_EXIT {
if (extended) {
maxwell3d.engine_state = Maxwell3D::EngineHint::None;
@ -129,52 +114,35 @@ private:
};
maxwell3d.RefreshParameters();
const u32 instance_count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]);
auto topology = static_cast<Maxwell3D::Regs::PrimitiveTopology>(parameters[0]);
auto topology = Maxwell3D::Regs::PrimitiveTopology(parameters[0]);
const u32 vertex_first = parameters[3];
const u32 vertex_count = parameters[1];
if (!IsTopologySafe(topology) && size_t(maxwell3d.GetMaxCurrentVertices()) < size_t(vertex_first) + size_t(vertex_count)) {
ASSERT(false && "Faulty draw!");
return;
}
const u32 base_instance = parameters[4];
if (extended) {
maxwell3d.regs.global_base_instance_index = base_instance;
maxwell3d.engine_state = Maxwell3D::EngineHint::OnHLEMacro;
maxwell3d.SetHLEReplacementAttributeType(
0, 0x640, Maxwell3D::HLEReplacementAttributeType::BaseInstance);
maxwell3d.SetHLEReplacementAttributeType(0, 0x640, Maxwell3D::HLEReplacementAttributeType::BaseInstance);
}
maxwell3d.draw_manager->DrawArray(topology, vertex_first, vertex_count, base_instance,
instance_count);
maxwell3d.draw_manager->DrawArray(topology, vertex_first, vertex_count, base_instance, instance_count);
if (extended) {
maxwell3d.regs.global_base_instance_index = 0;
maxwell3d.engine_state = Maxwell3D::EngineHint::None;
maxwell3d.replace_table.clear();
}
}
};
}
/*
* @note: these macros have two versions, a normal and extended version, with the extended version
* also assigning the base vertex/instance.
*/
template <bool extended>
class HLE_DrawIndexedIndirect final : public HLEMacroImpl {
public:
explicit HLE_DrawIndexedIndirect(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
void HLE_DrawIndexedIndirect::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
auto topology = static_cast<Maxwell3D::Regs::PrimitiveTopology>(parameters[0]);
if (!maxwell3d.AnyParametersDirty() || !IsTopologySafe(topology)) {
Fallback(parameters);
Fallback(maxwell3d, parameters);
return;
}
const u32 estimate = static_cast<u32>(maxwell3d.EstimateIndexBufferSize());
const u32 estimate = u32(maxwell3d.EstimateIndexBufferSize());
const u32 element_base = parameters[4];
const u32 base_instance = parameters[5];
maxwell3d.regs.vertex_id_base = element_base;
@ -204,10 +172,8 @@ public:
maxwell3d.engine_state = Maxwell3D::EngineHint::None;
maxwell3d.replace_table.clear();
}
}
private:
void Fallback(const std::vector<u32>& parameters) {
}
void HLE_DrawIndexedIndirect::Fallback(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters) {
maxwell3d.RefreshParameters();
const u32 instance_count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]);
const u32 element_base = parameters[4];
@ -221,9 +187,7 @@ private:
maxwell3d.SetHLEReplacementAttributeType(0, 0x640, Maxwell3D::HLEReplacementAttributeType::BaseVertex);
maxwell3d.SetHLEReplacementAttributeType(0, 0x644, Maxwell3D::HLEReplacementAttributeType::BaseInstance);
}
maxwell3d.draw_manager->DrawIndex(Tegra::Maxwell3D::Regs::PrimitiveTopology(parameters[0]), parameters[3], parameters[1], element_base, base_instance, instance_count);
maxwell3d.regs.vertex_id_base = 0x0;
maxwell3d.regs.global_base_vertex_index = 0x0;
maxwell3d.regs.global_base_instance_index = 0x0;
@ -231,14 +195,8 @@ private:
maxwell3d.engine_state = Maxwell3D::EngineHint::None;
maxwell3d.replace_table.clear();
}
}
};
class HLE_MultiLayerClear final : public HLEMacroImpl {
public:
explicit HLE_MultiLayerClear(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
void HLE_MultiLayerClear::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
maxwell3d.RefreshParameters();
ASSERT(parameters.size() == 1);
@ -249,17 +207,11 @@ public:
maxwell3d.regs.clear_surface.raw = clear_params.raw;
maxwell3d.draw_manager->Clear(num_layers);
}
};
class HLE_MultiDrawIndexedIndirectCount final : public HLEMacroImpl {
public:
explicit HLE_MultiDrawIndexedIndirectCount(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
void HLE_MultiDrawIndexedIndirectCount::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
const auto topology = Maxwell3D::Regs::PrimitiveTopology(parameters[2]);
if (!IsTopologySafe(topology)) {
Fallback(parameters);
Fallback(maxwell3d, parameters);
return;
}
@ -289,19 +241,14 @@ public:
params.stride = stride;
maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
maxwell3d.engine_state = Maxwell3D::EngineHint::OnHLEMacro;
maxwell3d.SetHLEReplacementAttributeType(
0, 0x640, Maxwell3D::HLEReplacementAttributeType::BaseVertex);
maxwell3d.SetHLEReplacementAttributeType(
0, 0x644, Maxwell3D::HLEReplacementAttributeType::BaseInstance);
maxwell3d.SetHLEReplacementAttributeType(0, 0x648,
Maxwell3D::HLEReplacementAttributeType::DrawID);
maxwell3d.SetHLEReplacementAttributeType(0, 0x640, Maxwell3D::HLEReplacementAttributeType::BaseVertex);
maxwell3d.SetHLEReplacementAttributeType(0, 0x644, Maxwell3D::HLEReplacementAttributeType::BaseInstance);
maxwell3d.SetHLEReplacementAttributeType(0, 0x648, Maxwell3D::HLEReplacementAttributeType::DrawID);
maxwell3d.draw_manager->DrawIndexedIndirect(topology, 0, estimate);
maxwell3d.engine_state = Maxwell3D::EngineHint::None;
maxwell3d.replace_table.clear();
}
private:
void Fallback(const std::vector<u32>& parameters) {
}
void HLE_MultiDrawIndexedIndirectCount::Fallback(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters) {
SCOPE_EXIT {
// Clean everything.
maxwell3d.regs.vertex_id_base = 0x0;
@ -318,41 +265,29 @@ private:
const auto topology = static_cast<Maxwell3D::Regs::PrimitiveTopology>(parameters[2]);
const u32 padding = parameters[3];
const std::size_t max_draws = parameters[4];
const u32 indirect_words = 5 + padding;
const std::size_t first_draw = start_indirect;
const std::size_t effective_draws = end_indirect - start_indirect;
const std::size_t last_draw = start_indirect + (std::min)(effective_draws, max_draws);
for (std::size_t index = first_draw; index < last_draw; index++) {
const std::size_t base = index * indirect_words + 5;
const u32 base_vertex = parameters[base + 3];
const u32 base_instance = parameters[base + 4];
maxwell3d.regs.vertex_id_base = base_vertex;
maxwell3d.engine_state = Maxwell3D::EngineHint::OnHLEMacro;
maxwell3d.SetHLEReplacementAttributeType(
0, 0x640, Maxwell3D::HLEReplacementAttributeType::BaseVertex);
maxwell3d.SetHLEReplacementAttributeType(
0, 0x644, Maxwell3D::HLEReplacementAttributeType::BaseInstance);
maxwell3d.SetHLEReplacementAttributeType(0, 0x640, Maxwell3D::HLEReplacementAttributeType::BaseVertex);
maxwell3d.SetHLEReplacementAttributeType(0, 0x644, Maxwell3D::HLEReplacementAttributeType::BaseInstance);
maxwell3d.CallMethod(0x8e3, 0x648, true);
maxwell3d.CallMethod(0x8e4, static_cast<u32>(index), true);
maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
maxwell3d.draw_manager->DrawIndex(topology, parameters[base + 2], parameters[base],
base_vertex, base_instance, parameters[base + 1]);
maxwell3d.draw_manager->DrawIndex(topology, parameters[base + 2], parameters[base], base_vertex, base_instance, parameters[base + 1]);
}
}
};
class HLE_DrawIndirectByteCount final : public HLEMacroImpl {
public:
explicit HLE_DrawIndirectByteCount(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
void HLE_DrawIndirectByteCount::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
const bool force = maxwell3d.Rasterizer().HasDrawTransformFeedback();
auto topology = static_cast<Maxwell3D::Regs::PrimitiveTopology>(parameters[0] & 0xFFFFU);
auto topology = Maxwell3D::Regs::PrimitiveTopology(parameters[0] & 0xFFFFU);
if (!force && (!maxwell3d.AnyParametersDirty() || !IsTopologySafe(topology))) {
Fallback(parameters);
Fallback(maxwell3d, parameters);
return;
}
auto& params = maxwell3d.draw_manager->GetIndirectParams();
@ -367,12 +302,9 @@ public:
maxwell3d.regs.draw.begin = parameters[0];
maxwell3d.regs.draw_auto_stride = parameters[1];
maxwell3d.regs.draw_auto_byte_count = parameters[2];
maxwell3d.draw_manager->DrawArrayIndirect(topology);
}
private:
void Fallback(const std::vector<u32>& parameters) {
}
void HLE_DrawIndirectByteCount::Fallback(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters) {
maxwell3d.RefreshParameters();
maxwell3d.regs.draw.begin = parameters[0];
@ -382,14 +314,8 @@ private:
maxwell3d.draw_manager->DrawArray(
maxwell3d.regs.draw.topology, 0,
maxwell3d.regs.draw_auto_byte_count / maxwell3d.regs.draw_auto_stride, 0, 1);
}
};
class HLE_C713C83D8F63CCF3 final : public HLEMacroImpl {
public:
explicit HLE_C713C83D8F63CCF3(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
void HLE_C713C83D8F63CCF3::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
maxwell3d.RefreshParameters();
const u32 offset = (parameters[0] & 0x3FFFFFFF) << 2;
const u32 address = maxwell3d.regs.shadow_scratch[24];
@ -398,14 +324,8 @@ public:
const_buffer.address_high = (address >> 24) & 0xFF;
const_buffer.address_low = address << 8;
const_buffer.offset = offset;
}
};
class HLE_D7333D26E0A93EDE final : public HLEMacroImpl {
public:
explicit HLE_D7333D26E0A93EDE(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
void HLE_D7333D26E0A93EDE::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
maxwell3d.RefreshParameters();
const size_t index = parameters[0];
const u32 address = maxwell3d.regs.shadow_scratch[42 + index];
@ -414,14 +334,8 @@ public:
const_buffer.size = size;
const_buffer.address_high = (address >> 24) & 0xFF;
const_buffer.address_low = address << 8;
}
};
class HLE_BindShader final : public HLEMacroImpl {
public:
explicit HLE_BindShader(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
void HLE_BindShader::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
maxwell3d.RefreshParameters();
auto& regs = maxwell3d.regs;
const u32 index = parameters[0];
@ -444,14 +358,8 @@ public:
auto& bind_group = regs.bind_groups[bind_group_id];
bind_group.raw_config = 0x11;
maxwell3d.ProcessCBBind(bind_group_id);
}
};
class HLE_SetRasterBoundingBox final : public HLEMacroImpl {
public:
explicit HLE_SetRasterBoundingBox(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
void HLE_SetRasterBoundingBox::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
maxwell3d.RefreshParameters();
const u32 raster_mode = parameters[0];
auto& regs = maxwell3d.regs;
@ -459,33 +367,19 @@ public:
const u32 scratch_data = maxwell3d.regs.shadow_scratch[52];
regs.raster_bounding_box.raw = raster_mode & 0xFFFFF00F;
regs.raster_bounding_box.pad.Assign(scratch_data & raster_enabled);
}
};
template <size_t base_size>
class HLE_ClearConstBuffer final : public HLEMacroImpl {
public:
explicit HLE_ClearConstBuffer(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
void HLE_ClearConstBuffer::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
static constexpr std::array<u32, 0x7000> zeroes{}; //must be bigger than either 7000 or 5F00
maxwell3d.RefreshParameters();
static constexpr std::array<u32, base_size> zeroes{};
auto& regs = maxwell3d.regs;
regs.const_buffer.size = u32(base_size);
regs.const_buffer.address_high = parameters[0];
regs.const_buffer.address_low = parameters[1];
regs.const_buffer.offset = 0;
maxwell3d.ProcessCBMultiData(zeroes.data(), parameters[2] * 4);
}
};
class HLE_ClearMemory final : public HLEMacroImpl {
public:
explicit HLE_ClearMemory(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
void HLE_ClearMemory::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
maxwell3d.RefreshParameters();
const u32 needed_memory = parameters[2] / sizeof(u32);
if (needed_memory > zero_memory.size()) {
zero_memory.resize(needed_memory, 0);
@ -497,177 +391,94 @@ public:
regs.upload.dest.address_low = parameters[1];
maxwell3d.CallMethod(size_t(MAXWELL3D_REG_INDEX(launch_dma)), 0x1011, true);
maxwell3d.CallMultiMethod(size_t(MAXWELL3D_REG_INDEX(inline_data)), zero_memory.data(), needed_memory, needed_memory);
}
private:
std::vector<u32> zero_memory;
};
class HLE_TransformFeedbackSetup final : public HLEMacroImpl {
public:
explicit HLE_TransformFeedbackSetup(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
}
/// HLE replacement for the transform-feedback setup macro (hash 0xFC0CF27F5FFAA661,
/// see HLE_MACRO_LIST). Enables transform feedback, zeroes every buffer's start
/// offset, then uploads buffer 0's stride to the GPU address in parameters[0]/[1].
void HLE_TransformFeedbackSetup::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method) {
    maxwell3d.RefreshParameters();
    auto& regs = maxwell3d.regs;
    regs.transform_feedback_enabled = 1;
    regs.transform_feedback.buffers[0].start_offset = 0;
    regs.transform_feedback.buffers[1].start_offset = 0;
    regs.transform_feedback.buffers[2].start_offset = 0;
    regs.transform_feedback.buffers[3].start_offset = 0;
    // Set up a single 4-byte inline upload to the destination address supplied by the macro.
    regs.upload.line_length_in = 4;
    regs.upload.line_count = 1;
    regs.upload.dest.address_high = parameters[0];
    regs.upload.dest.address_low = parameters[1];
    // 0x1011 launches the DMA; the following inline_data write is its payload (the stride).
    maxwell3d.CallMethod(size_t(MAXWELL3D_REG_INDEX(launch_dma)), 0x1011, true);
    maxwell3d.CallMethod(size_t(MAXWELL3D_REG_INDEX(inline_data)), regs.transform_feedback.controls[0].stride, true);
    // Tell the rasterizer where the transform-feedback result will land.
    maxwell3d.Rasterizer().RegisterTransformFeedback(regs.upload.dest.Address());
}
};
}
} // Anonymous namespace
#define HLE_MACRO_LIST \
HLE_MACRO_ELEM(0x0D61FC9FAAC9FCADULL, HLE_DrawArraysIndirect, (false)) \
HLE_MACRO_ELEM(0x8A4D173EB99A8603ULL, HLE_DrawArraysIndirect, (true)) \
HLE_MACRO_ELEM(0x771BB18C62444DA0ULL, HLE_DrawIndexedIndirect, (false)) \
HLE_MACRO_ELEM(0x0217920100488FF7ULL, HLE_DrawIndexedIndirect, (true)) \
HLE_MACRO_ELEM(0x3F5E74B9C9A50164ULL, HLE_MultiDrawIndexedIndirectCount, ()) \
HLE_MACRO_ELEM(0xEAD26C3E2109B06BULL, HLE_MultiLayerClear, ()) \
HLE_MACRO_ELEM(0xC713C83D8F63CCF3ULL, HLE_C713C83D8F63CCF3, ()) \
HLE_MACRO_ELEM(0xD7333D26E0A93EDEULL, HLE_D7333D26E0A93EDE, ()) \
HLE_MACRO_ELEM(0xEB29B2A09AA06D38ULL, HLE_BindShader, ()) \
HLE_MACRO_ELEM(0xDB1341DBEB4C8AF7ULL, HLE_SetRasterBoundingBox, ()) \
HLE_MACRO_ELEM(0x6C97861D891EDf7EULL, HLE_ClearConstBuffer, (0x5F00)) \
HLE_MACRO_ELEM(0xD246FDDF3A6173D7ULL, HLE_ClearConstBuffer, (0x7000)) \
HLE_MACRO_ELEM(0xEE4D0004BEC8ECF4ULL, HLE_ClearMemory, ()) \
HLE_MACRO_ELEM(0xFC0CF27F5FFAA661ULL, HLE_TransformFeedbackSetup, ()) \
HLE_MACRO_ELEM(0xB5F74EDB717278ECULL, HLE_DrawIndirectByteCount, ()) \
HLEMacro::HLEMacro(Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {}
HLEMacro::~HLEMacro() = default;
std::unique_ptr<CachedMacro> HLEMacro::GetHLEProgram(u64 hash) const {
// Allocates and returns a cached macro if the hash matches a known function.
[[nodiscard]] inline AnyCachedMacro GetHLEProgram(u64 hash) noexcept {
// The compiler does a great job of turning this switch into an ad-hoc hash table :)
switch (hash) {
case 0x0D61FC9FAAC9FCADULL: return std::make_unique<HLE_DrawArraysIndirect<false>>(maxwell3d);
case 0x8A4D173EB99A8603ULL: return std::make_unique<HLE_DrawArraysIndirect<true>>(maxwell3d);
case 0x771BB18C62444DA0ULL: return std::make_unique<HLE_DrawIndexedIndirect<false>>(maxwell3d);
case 0x0217920100488FF7ULL: return std::make_unique<HLE_DrawIndexedIndirect<true>>(maxwell3d);
case 0x3F5E74B9C9A50164ULL: return std::make_unique<HLE_MultiDrawIndexedIndirectCount>(maxwell3d);
case 0xEAD26C3E2109B06BULL: return std::make_unique<HLE_MultiLayerClear>(maxwell3d);
case 0xC713C83D8F63CCF3ULL: return std::make_unique<HLE_C713C83D8F63CCF3>(maxwell3d);
case 0xD7333D26E0A93EDEULL: return std::make_unique<HLE_D7333D26E0A93EDE>(maxwell3d);
case 0xEB29B2A09AA06D38ULL: return std::make_unique<HLE_BindShader>(maxwell3d);
case 0xDB1341DBEB4C8AF7ULL: return std::make_unique<HLE_SetRasterBoundingBox>(maxwell3d);
case 0x6C97861D891EDf7EULL: return std::make_unique<HLE_ClearConstBuffer<0x5F00>>(maxwell3d);
case 0xD246FDDF3A6173D7ULL: return std::make_unique<HLE_ClearConstBuffer<0x7000>>(maxwell3d);
case 0xEE4D0004BEC8ECF4ULL: return std::make_unique<HLE_ClearMemory>(maxwell3d);
case 0xFC0CF27F5FFAA661ULL: return std::make_unique<HLE_TransformFeedbackSetup>(maxwell3d);
case 0xB5F74EDB717278ECULL: return std::make_unique<HLE_DrawIndirectByteCount>(maxwell3d);
default:
return nullptr;
#define HLE_MACRO_ELEM(HASH, TY, VAL) case HASH: return TY VAL;
HLE_MACRO_LIST
#undef HLE_MACRO_ELEM
default: return std::monostate{};
}
}
/// Returns true when `hash` matches one of the known HLE-able macro hashes in
/// HLE_MACRO_LIST, i.e. when GetHLEProgram would return a non-monostate program.
[[nodiscard]] inline bool CanBeHLEProgram(u64 hash) noexcept {
    switch (hash) {
// Expand the X-macro list into one `case HASH: return true;` per known macro.
#define HLE_MACRO_ELEM(HASH, TY, VAL) case HASH: return true;
        HLE_MACRO_LIST
#undef HLE_MACRO_ELEM
    default: return false;
    }
}
namespace {
class MacroInterpreterImpl final : public CachedMacro {
public:
explicit MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d_, const std::vector<u32>& code_)
: CachedMacro(maxwell3d_)
, code{code_}
{}
void Execute(const std::vector<u32>& params, u32 method) override;
private:
/// Resets the execution engine state, zeroing registers, etc.
void Reset();
/**
* Executes a single macro instruction located at the current program counter. Returns whether
* the interpreter should keep running.
*
* @param is_delay_slot Whether the current step is being executed due to a delay slot in a
* previous instruction.
*/
bool Step(bool is_delay_slot);
/// Calculates the result of an ALU operation. src_a OP src_b;
u32 GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b);
/// Performs the result operation on the input result and stores it in the specified register
/// (if necessary).
void ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result);
/// Evaluates the branch condition and returns whether the branch should be taken or not.
bool EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const;
/// Reads an opcode at the current program counter location.
Macro::Opcode GetOpcode() const;
/// Returns the specified register's value. Register 0 is hardcoded to always return 0.
u32 GetRegister(u32 register_id) const;
/// Sets the register to the input value.
void SetRegister(u32 register_id, u32 value);
/// Sets the method address to use for the next Send instruction.
void SetMethodAddress(u32 address);
/// Calls a GPU Engine method with the input parameter.
void Send(u32 value);
/// Reads a GPU register located at the method address.
u32 Read(u32 method) const;
/// Returns the next parameter in the parameter queue.
u32 FetchParameter();
/// Current program counter
u32 pc{};
/// Program counter to execute at after the delay slot is executed.
std::optional<u32> delayed_pc;
/// General purpose macro registers.
std::array<u32, Macro::NUM_MACRO_REGISTERS> registers = {};
/// Method address to use for the next Send instruction.
Macro::MethodAddress method_address = {};
/// Input parameters of the current macro.
std::unique_ptr<u32[]> parameters;
std::size_t num_parameters = 0;
std::size_t parameters_capacity = 0;
/// Index of the next parameter that will be fetched by the 'parm' instruction.
u32 next_parameter_index = 0;
bool carry_flag = false;
const std::vector<u32>& code;
};
void MacroInterpreterImpl::Execute(const std::vector<u32>& params, u32 method) {
void MacroInterpreterImpl::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> params, u32 method) {
Reset();
registers[1] = params[0];
num_parameters = params.size();
if (num_parameters > parameters_capacity) {
parameters_capacity = num_parameters;
parameters = std::make_unique<u32[]>(num_parameters);
}
std::memcpy(parameters.get(), params.data(), num_parameters * sizeof(u32));
parameters.resize(params.size());
std::memcpy(parameters.data(), params.data(), params.size() * sizeof(u32));
// Execute the code until we hit an exit condition.
bool keep_executing = true;
while (keep_executing) {
keep_executing = Step(false);
keep_executing = Step(maxwell3d, false);
}
// Assert that the macro used all the input parameters
ASSERT(next_parameter_index == num_parameters);
ASSERT(next_parameter_index == parameters.size());
}
/// Resets the execution engine state — zeroes registers, the program counter and
/// control flags — so a fresh macro can run.
void MacroInterpreterImpl::Reset() {
    registers = {};
    pc = 0;
    delayed_pc = {};
    method_address.raw = 0;
    num_parameters = 0;
    // NOTE: the parameter storage intentionally keeps its previous contents/capacity
    // across runs; do not clear it here.
    // The next parameter index starts at 1, because $r1 already has the value of the first
    // parameter.
    next_parameter_index = 1;
    carry_flag = false;
}
bool MacroInterpreterImpl::Step(bool is_delay_slot) {
/// @brief Executes a single macro instruction located at the current program counter. Returns whether
/// the interpreter should keep running.
/// @param is_delay_slot Whether the current step is being executed due to a delay slot in a previous instruction.
bool MacroInterpreterImpl::Step(Engines::Maxwell3D& maxwell3d, bool is_delay_slot) {
u32 base_address = pc;
Macro::Opcode opcode = GetOpcode();
@ -682,14 +493,12 @@ bool MacroInterpreterImpl::Step(bool is_delay_slot) {
switch (opcode.operation) {
case Macro::Operation::ALU: {
u32 result = GetALUResult(opcode.alu_operation, GetRegister(opcode.src_a),
GetRegister(opcode.src_b));
ProcessResult(opcode.result_operation, opcode.dst, result);
u32 result = GetALUResult(opcode.alu_operation, GetRegister(opcode.src_a), GetRegister(opcode.src_b));
ProcessResult(maxwell3d, opcode.result_operation, opcode.dst, result);
break;
}
case Macro::Operation::AddImmediate: {
ProcessResult(opcode.result_operation, opcode.dst,
GetRegister(opcode.src_a) + opcode.immediate);
ProcessResult(maxwell3d, opcode.result_operation, opcode.dst, GetRegister(opcode.src_a) + opcode.immediate);
break;
}
case Macro::Operation::ExtractInsert: {
@ -699,7 +508,7 @@ bool MacroInterpreterImpl::Step(bool is_delay_slot) {
src = (src >> opcode.bf_src_bit) & opcode.GetBitfieldMask();
dst &= ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
dst |= src << opcode.bf_dst_bit;
ProcessResult(opcode.result_operation, opcode.dst, dst);
ProcessResult(maxwell3d, opcode.result_operation, opcode.dst, dst);
break;
}
case Macro::Operation::ExtractShiftLeftImmediate: {
@ -708,7 +517,7 @@ bool MacroInterpreterImpl::Step(bool is_delay_slot) {
u32 result = ((src >> dst) & opcode.GetBitfieldMask()) << opcode.bf_dst_bit;
ProcessResult(opcode.result_operation, opcode.dst, result);
ProcessResult(maxwell3d, opcode.result_operation, opcode.dst, result);
break;
}
case Macro::Operation::ExtractShiftLeftRegister: {
@ -717,12 +526,12 @@ bool MacroInterpreterImpl::Step(bool is_delay_slot) {
u32 result = ((src >> opcode.bf_src_bit) & opcode.GetBitfieldMask()) << dst;
ProcessResult(opcode.result_operation, opcode.dst, result);
ProcessResult(maxwell3d, opcode.result_operation, opcode.dst, result);
break;
}
case Macro::Operation::Read: {
u32 result = Read(GetRegister(opcode.src_a) + opcode.immediate);
ProcessResult(opcode.result_operation, opcode.dst, result);
u32 result = Read(maxwell3d, GetRegister(opcode.src_a) + opcode.immediate);
ProcessResult(maxwell3d, opcode.result_operation, opcode.dst, result);
break;
}
case Macro::Operation::Branch: {
@ -738,7 +547,7 @@ bool MacroInterpreterImpl::Step(bool is_delay_slot) {
delayed_pc = base_address + opcode.GetBranchTarget();
// Execute one more instruction due to the delay slot.
return Step(true);
return Step(maxwell3d, true);
}
break;
}
@ -751,13 +560,13 @@ bool MacroInterpreterImpl::Step(bool is_delay_slot) {
// cause an exit if it's executed inside a delay slot.
if (opcode.is_exit && !is_delay_slot) {
// Exit has a delay slot, execute the next instruction
Step(true);
Step(maxwell3d, true);
return false;
}
return true;
}
/// Calculates the result of an ALU operation. src_a OP src_b;
u32 MacroInterpreterImpl::GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b) {
switch (operation) {
case Macro::ALUOperation::Add: {
@ -797,7 +606,8 @@ u32 MacroInterpreterImpl::GetALUResult(Macro::ALUOperation operation, u32 src_a,
}
}
void MacroInterpreterImpl::ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result) {
/// Performs the result operation on the input result and stores it in the specified register (if necessary).
void MacroInterpreterImpl::ProcessResult(Engines::Maxwell3D& maxwell3d, Macro::ResultOperation operation, u32 reg, u32 result) {
switch (operation) {
case Macro::ResultOperation::IgnoreAndFetch:
// Fetch parameter and ignore result.
@ -815,12 +625,12 @@ void MacroInterpreterImpl::ProcessResult(Macro::ResultOperation operation, u32 r
case Macro::ResultOperation::FetchAndSend:
// Fetch parameter and send result.
SetRegister(reg, FetchParameter());
Send(result);
Send(maxwell3d, result);
break;
case Macro::ResultOperation::MoveAndSend:
// Move and send result.
SetRegister(reg, result);
Send(result);
Send(maxwell3d, result);
break;
case Macro::ResultOperation::FetchAndSetMethod:
// Fetch parameter and use result as Method Address.
@ -831,13 +641,13 @@ void MacroInterpreterImpl::ProcessResult(Macro::ResultOperation operation, u32 r
// Move result and use as Method Address, then fetch and send parameter.
SetRegister(reg, result);
SetMethodAddress(result);
Send(FetchParameter());
Send(maxwell3d, FetchParameter());
break;
case Macro::ResultOperation::MoveAndSetMethodSend:
// Move result and use as Method Address, then send bits 12:17 of result.
SetRegister(reg, result);
SetMethodAddress(result);
Send((result >> 12) & 0b111111);
Send(maxwell3d, (result >> 12) & 0b111111);
break;
default:
UNIMPLEMENTED_MSG("Unimplemented result operation {}", operation);
@ -845,6 +655,7 @@ void MacroInterpreterImpl::ProcessResult(Macro::ResultOperation operation, u32 r
}
}
/// Evaluates the branch condition and returns whether the branch should be taken or not.
bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const {
switch (cond) {
case Macro::BranchCondition::Zero:
@ -855,46 +666,44 @@ bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond,
UNREACHABLE();
}
/// Fetches the instruction word located at the current program counter.
Macro::Opcode MacroInterpreterImpl::GetOpcode() const {
    const u32 word_index = pc / sizeof(u32);
    ASSERT((pc % sizeof(u32)) == 0);
    ASSERT(word_index < code.size());
    return {code[word_index]};
}
/// Returns the specified register's value. Register 0 is hardcoded to always return 0.
u32 MacroInterpreterImpl::GetRegister(u32 register_id) const {
return registers.at(register_id);
return registers[register_id];
}
/// Sets the register to the input value.
void MacroInterpreterImpl::SetRegister(u32 register_id, u32 value) {
// Register 0 is hardwired as the zero register.
// Ensure no writes to it actually occur.
if (register_id == 0) {
if (register_id == 0)
return;
}
registers.at(register_id) = value;
registers[register_id] = value;
}
/// Sets the raw method address (address + increment bitfields) consumed by
/// subsequent Send calls.
void MacroInterpreterImpl::SetMethodAddress(u32 address) {
    method_address.raw = address;
}
void MacroInterpreterImpl::Send(u32 value) {
/// Calls a GPU Engine method with the input parameter.
void MacroInterpreterImpl::Send(Engines::Maxwell3D& maxwell3d, u32 value) {
maxwell3d.CallMethod(method_address.address, value, true);
// Increment the method address by the method increment.
method_address.address.Assign(method_address.address.Value() +
method_address.increment.Value());
method_address.address.Assign(method_address.address.Value() + method_address.increment.Value());
}
u32 MacroInterpreterImpl::Read(u32 method) const {
/// Reads a GPU register located at the method address.
u32 MacroInterpreterImpl::Read(Engines::Maxwell3D& maxwell3d, u32 method) const {
return maxwell3d.GetRegisterValue(method);
}
/// Returns the next parameter in the parameter queue.
u32 MacroInterpreterImpl::FetchParameter() {
ASSERT(next_parameter_index < num_parameters);
ASSERT(next_parameter_index < parameters.size());
return parameters[next_parameter_index++];
}
} // Anonymous namespace
#ifdef ARCHITECTURE_x86_64
namespace {
@ -930,17 +739,15 @@ static const auto default_cg_mode = Xbyak::DontSetProtectRWE;
static const auto default_cg_mode = nullptr; //Allow RWE
#endif
class MacroJITx64Impl final : public Xbyak::CodeGenerator, public CachedMacro {
public:
explicit MacroJITx64Impl(Engines::Maxwell3D& maxwell3d_, const std::vector<u32>& code_)
struct MacroJITx64Impl final : public Xbyak::CodeGenerator, public DynamicCachedMacro {
explicit MacroJITx64Impl(std::span<const u32> code_)
: Xbyak::CodeGenerator(MAX_CODE_SIZE, default_cg_mode)
, CachedMacro(maxwell3d_)
, code{code_}
{
Compile();
}
void Execute(const std::vector<u32>& parameters, u32 method) override;
void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, u32 method) override;
void Compile_ALU(Macro::Opcode opcode);
void Compile_AddImmediate(Macro::Opcode opcode);
@ -950,18 +757,13 @@ public:
void Compile_Read(Macro::Opcode opcode);
void Compile_Branch(Macro::Opcode opcode);
private:
void Optimizer_ScanFlags();
void Compile();
bool Compile_NextInstruction();
Xbyak::Reg32 Compile_FetchParameter();
Xbyak::Reg32 Compile_GetRegister(u32 index, Xbyak::Reg32 dst);
void Compile_ProcessResult(Macro::ResultOperation operation, u32 reg);
void Compile_Send(Xbyak::Reg32 value);
Macro::Opcode GetOpCode() const;
struct JITState {
@ -981,21 +783,17 @@ private:
bool enable_asserts{};
};
OptimizerState optimizer{};
std::optional<Macro::Opcode> next_opcode{};
ProgramType program{nullptr};
std::array<Xbyak::Label, MAX_CODE_SIZE> labels;
std::array<Xbyak::Label, MAX_CODE_SIZE> delay_skip;
Xbyak::Label end_of_code{};
bool is_delay_slot{};
u32 pc{};
const std::vector<u32>& code;
std::span<const u32> code;
};
void MacroJITx64Impl::Execute(const std::vector<u32>& parameters, u32 method) {
void MacroJITx64Impl::Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, u32 method) {
ASSERT_OR_EXECUTE(program != nullptr, { return; });
JITState state{};
state.maxwell3d = &maxwell3d;
@ -1231,7 +1029,7 @@ void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) {
Compile_ProcessResult(opcode.result_operation, opcode.dst);
}
void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
/// Thunk invoked from JIT-generated code: forwards a macro `Send` to the GPU engine.
/// The JIT marshals the arguments into the platform ABI registers before the call.
static void MacroJIT_SendThunk(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
    maxwell3d->CallMethod(method_address.address, value, true);
}
@ -1240,7 +1038,7 @@ void MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) {
mov(Common::X64::ABI_PARAM1, qword[STATE]);
mov(Common::X64::ABI_PARAM2.cvt32(), METHOD_ADDRESS);
mov(Common::X64::ABI_PARAM3.cvt32(), value);
Common::X64::CallFarFunction(*this, &Send);
Common::X64::CallFarFunction(*this, &MacroJIT_SendThunk);
Common::X64::ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
Xbyak::Label dont_process{};
@ -1452,10 +1250,8 @@ bool MacroJITx64Impl::Compile_NextInstruction() {
return true;
}
static void WarnInvalidParameter(uintptr_t parameter, uintptr_t max_parameter) {
LOG_CRITICAL(HW_GPU,
"Macro JIT: invalid parameter access 0x{:x} (0x{:x} is the last parameter)",
parameter, max_parameter - sizeof(u32));
/// Thunk invoked from JIT-generated code when a macro reads past its parameter
/// buffer; logs the offending access so execution can continue with a dummy value.
static void MacroJIT_ErrorThunk(uintptr_t parameter, uintptr_t max_parameter) {
    LOG_CRITICAL(HW_GPU, "Macro JIT: invalid parameter access 0x{:x} (0x{:x} is the last parameter)", parameter, max_parameter - sizeof(u32));
}
Xbyak::Reg32 MacroJITx64Impl::Compile_FetchParameter() {
@ -1465,7 +1261,7 @@ Xbyak::Reg32 MacroJITx64Impl::Compile_FetchParameter() {
Common::X64::ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
mov(Common::X64::ABI_PARAM1, PARAMETERS);
mov(Common::X64::ABI_PARAM2, MAX_PARAMETER);
Common::X64::CallFarFunction(*this, &WarnInvalidParameter);
Common::X64::CallFarFunction(*this, &MacroJIT_ErrorThunk);
Common::X64::ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
L(parameter_ok);
mov(eax, dword[PARAMETERS]);
@ -1574,33 +1370,42 @@ static void Dump(u64 hash, std::span<const u32> code, bool decompiled = false) {
macro_file.write(reinterpret_cast<const char*>(code.data()), code.size_bytes());
}
MacroEngine::MacroEngine(Engines::Maxwell3D& maxwell3d_, bool is_interpreted_)
: hle_macros{std::make_optional<Tegra::HLEMacro>(maxwell3d_)}
, maxwell3d{maxwell3d_}
, is_interpreted{is_interpreted_}
{}
MacroEngine::~MacroEngine() = default;
void MacroEngine::AddCode(u32 method, u32 data) {
uploaded_macro_code[method].push_back(data);
}
void MacroEngine::ClearCode(u32 method) {
macro_cache.erase(method);
uploaded_macro_code.erase(method);
}
void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
auto compiled_macro = macro_cache.find(method);
if (compiled_macro != macro_cache.end()) {
const auto& cache_info = compiled_macro->second;
if (cache_info.has_hle_program) {
cache_info.hle_program->Execute(parameters, method);
} else {
maxwell3d.RefreshParameters();
cache_info.lle_program->Execute(parameters, method);
}
void MacroEngine::Execute(Engines::Maxwell3D& maxwell3d, u32 method, std::span<const u32> parameters) {
auto const execute_variant = [&maxwell3d, &parameters, method](AnyCachedMacro& acm) {
if (auto a = std::get_if<HLE_DrawArraysIndirect>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_DrawIndexedIndirect>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_MultiDrawIndexedIndirectCount>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_MultiLayerClear>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_C713C83D8F63CCF3>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_D7333D26E0A93EDE>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_BindShader>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_SetRasterBoundingBox>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_ClearConstBuffer>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_ClearMemory>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_TransformFeedbackSetup>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<HLE_DrawIndirectByteCount>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<MacroInterpreterImpl>(&acm))
a->Execute(maxwell3d, parameters, method);
if (auto a = std::get_if<std::unique_ptr<DynamicCachedMacro>>(&acm))
a->get()->Execute(maxwell3d, parameters, method);
};
if (auto const it = macro_cache.find(method); it != macro_cache.end()) {
auto& ci = it->second;
if (!CanBeHLEProgram(ci.hash) || Settings::values.disable_macro_hle)
maxwell3d.RefreshParameters(); //LLE must reload parameters
execute_variant(ci.program);
} else {
// Macro not compiled, check if it's uploaded and if so, compile it
std::optional<u32> mid_method;
@ -1617,51 +1422,37 @@ void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
return;
}
}
auto& cache_info = macro_cache[method];
if (!mid_method.has_value()) {
cache_info.lle_program = Compile(macro_code->second);
cache_info.hash = Common::HashValue(macro_code->second);
} else {
auto& ci = macro_cache[method];
if (mid_method) {
const auto& macro_cached = uploaded_macro_code[mid_method.value()];
const auto rebased_method = method - mid_method.value();
auto& code = uploaded_macro_code[method];
code.resize(macro_cached.size() - rebased_method);
std::memcpy(code.data(), macro_cached.data() + rebased_method, code.size() * sizeof(u32));
cache_info.hash = Common::HashValue(code);
cache_info.lle_program = Compile(code);
}
auto hle_program = hle_macros->GetHLEProgram(cache_info.hash);
if (!hle_program || Settings::values.disable_macro_hle) {
maxwell3d.RefreshParameters();
cache_info.lle_program->Execute(parameters, method);
ci.hash = Common::HashValue(code);
ci.program = Compile(maxwell3d, code);
} else {
cache_info.has_hle_program = true;
cache_info.hle_program = std::move(hle_program);
cache_info.hle_program->Execute(parameters, method);
ci.program = Compile(maxwell3d, macro_code->second);
ci.hash = Common::HashValue(macro_code->second);
}
if (CanBeHLEProgram(ci.hash) && !Settings::values.disable_macro_hle) {
ci.program = GetHLEProgram(ci.hash);
} else {
maxwell3d.RefreshParameters();
}
execute_variant(ci.program);
if (Settings::values.dump_macros) {
Dump(cache_info.hash, macro_code->second, cache_info.has_hle_program);
Dump(ci.hash, macro_code->second, !std::holds_alternative<std::monostate>(ci.program));
}
}
}
std::unique_ptr<CachedMacro> MacroEngine::Compile(const std::vector<u32>& code) {
AnyCachedMacro MacroEngine::Compile(Engines::Maxwell3D& maxwell3d, std::span<const u32> code) {
#ifdef ARCHITECTURE_x86_64
if (!is_interpreted)
return std::make_unique<MacroJITx64Impl>(maxwell3d, code);
#endif
return std::make_unique<MacroInterpreterImpl>(maxwell3d, code);
}
std::optional<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
#ifdef ARCHITECTURE_x86_64
return std::make_optional<MacroEngine>(maxwell3d, bool(Settings::values.disable_macro_jit));
#else
return std::make_optional<MacroEngine>(maxwell3d, true);
return std::make_unique<MacroJITx64Impl>(code);
#endif
return MacroInterpreterImpl(code);
}
} // namespace Tegra

View file

@ -7,8 +7,10 @@
#pragma once
#include <memory>
#include <ankerl/unordered_dense.h>
#include <span>
#include <variant>
#include <vector>
#include <ankerl/unordered_dense.h>
#include "common/bit_field.h"
#include "common/common_types.h"
@ -98,62 +100,142 @@ union MethodAddress {
} // namespace Macro
class CachedMacro {
public:
CachedMacro(Engines::Maxwell3D& maxwell3d_)
: maxwell3d{maxwell3d_}
{}
virtual ~CachedMacro() = default;
/// Empty tag type kept as an AnyCachedMacro alternative.
/// NOTE(review): appears to be a leftover placeholder — MacroEngine::Execute's
/// dispatcher has no case for it; confirm whether it can be removed.
struct HLEMacro {
};
/// @note: these macros have two versions, a normal and extended version, with the extended version
/// also assigning the base vertex/instance.
struct HLE_DrawArraysIndirect final {
    /// @param extended_ Selects the extended variant, which also assigns base vertex/instance.
    /// `explicit` for consistency with HLE_DrawIndexedIndirect and to block implicit bool conversions.
    explicit HLE_DrawArraysIndirect(bool extended_) noexcept : extended{extended_} {}
    /// Entry point invoked by MacroEngine::Execute for hashes 0x0D61FC9FAAC9FCAD / 0x8A4D173EB99A8603.
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
    /// NOTE(review): presumably replays the draw without the HLE fast path — confirm in the .cpp.
    void Fallback(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters);
    bool extended;
};
/// @note: these macros have two versions, a normal and extended version, with the extended version
/// also assigning the base vertex/instance.
struct HLE_DrawIndexedIndirect final {
    explicit HLE_DrawIndexedIndirect(bool extended_) noexcept : extended{extended_} {}
    /// Entry point invoked by MacroEngine::Execute for hashes 0x771BB18C62444DA0 / 0x0217920100488FF7.
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
    /// NOTE(review): presumably replays the draw without the HLE fast path — confirm in the .cpp.
    void Fallback(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters);
    /// When true, the extended variant also assigns base vertex/instance (see note above the struct).
    bool extended;
};
/// HLE replacement for the multi-layer clear macro (hash 0xEAD26C3E2109B06B, see HLE_MACRO_LIST).
struct HLE_MultiLayerClear final {
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
};
/// HLE replacement for the multi-draw indexed-indirect-count macro (hash 0x3F5E74B9C9A50164).
struct HLE_MultiDrawIndexedIndirectCount final {
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
    /// NOTE(review): presumably the slow path taken when HLE preconditions fail — confirm in the .cpp.
    void Fallback(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters);
};
/// HLE replacement for the draw-indirect-byte-count macro (hash 0xB5F74EDB717278EC).
struct HLE_DrawIndirectByteCount final {
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
    /// NOTE(review): presumably the slow path taken when HLE preconditions fail — confirm in the .cpp.
    void Fallback(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters);
};
/// HLE replacement for the macro with hash 0xC713C83D8F63CCF3; named only by its
/// hash because its original purpose is not identified upstream.
struct HLE_C713C83D8F63CCF3 final {
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
};
/// HLE replacement for the macro with hash 0xD7333D26E0A93EDE; reads an address from
/// shadow_scratch[42 + index] and rewrites the bound const-buffer address (see the .cpp).
struct HLE_D7333D26E0A93EDE final {
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
};
/// HLE replacement for the shader-bind macro (hash 0xEB29B2A09AA06D38); configures a
/// bind group and calls ProcessCBBind (see the .cpp).
struct HLE_BindShader final {
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
};
/// HLE replacement for the raster-bounding-box macro (hash 0xDB1341DBEB4C8AF7);
/// derives the register value from parameters[0] and shadow_scratch[52] (see the .cpp).
struct HLE_SetRasterBoundingBox final {
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
};
/// HLE replacement for the const-buffer clear macros
/// (hashes 0x6C97861D891EDf7E and 0xD246FDDF3A6173D7, see HLE_MACRO_LIST).
struct HLE_ClearConstBuffer final {
    /// @param base_size_ Size of the const buffer to clear (0x5F00 or 0x7000 per HLE_MACRO_LIST).
    /// `explicit` blocks accidental implicit size_t -> HLE_ClearConstBuffer conversions,
    /// matching the other single-argument HLE constructors.
    explicit HLE_ClearConstBuffer(size_t base_size_) noexcept : base_size{base_size_} {}
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
    size_t base_size;
};
/// HLE replacement for the memory-clear macro (hash 0xEE4D0004BEC8ECF4).
struct HLE_ClearMemory final {
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
    /// Scratch buffer of zeroes, grown on demand and reused across invocations.
    std::vector<u32> zero_memory;
};
/// HLE replacement for the transform-feedback setup macro (hash 0xFC0CF27F5FFAA661).
struct HLE_TransformFeedbackSetup final {
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, [[maybe_unused]] u32 method);
};
/// Software interpreter for Maxwell macro programs (the LLE path). Holds the
/// machine state (registers, pc, carry flag, parameter queue) between Steps.
struct MacroInterpreterImpl final {
    MacroInterpreterImpl() = default;
    explicit MacroInterpreterImpl(std::span<const u32> code_) : code{code_} {}
    /// Runs the macro to completion with the given parameters; `method` is the calling method id.
    void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> params, u32 method);
    /// Resets registers, pc and flags so a fresh macro can run.
    void Reset();
    /// Executes one instruction; returns whether execution should continue.
    /// @param is_delay_slot True when this step executes a branch delay slot.
    bool Step(Engines::Maxwell3D& maxwell3d, bool is_delay_slot);
    /// Calculates `src_a OP src_b` for an ALU instruction.
    u32 GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b);
    /// Applies the result operation (register write, send and/or method-address update).
    void ProcessResult(Engines::Maxwell3D& maxwell3d, Macro::ResultOperation operation, u32 reg, u32 result);
    /// Evaluates the branch condition against `value`.
    bool EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const;
    /// Reads the opcode at the current program counter.
    Macro::Opcode GetOpcode() const;
    /// Returns a register's value; register 0 is hardwired to 0.
    u32 GetRegister(u32 register_id) const;
    /// Writes a register; writes to register 0 are ignored.
    void SetRegister(u32 register_id, u32 value);
    /// Sets the method address to use for the next Send instruction.
    /// ([[nodiscard]] removed: it is meaningless on a void return; `inline` is implied in-class.)
    void SetMethodAddress(u32 address) noexcept {
        method_address.raw = address;
    }
    /// Calls a GPU engine method with `value`, then advances the method address by its increment.
    void Send(Engines::Maxwell3D& maxwell3d, u32 value);
    /// Reads the GPU register located at `method`.
    u32 Read(Engines::Maxwell3D& maxwell3d, u32 method) const;
    /// Returns the next parameter in the parameter queue.
    u32 FetchParameter();
    /// General purpose macro registers.
    std::array<u32, Macro::NUM_MACRO_REGISTERS> registers = {};
    /// Input parameters of the current macro.
    std::vector<u32> parameters;
    /// Non-owning view of the uploaded macro program words.
    std::span<const u32> code;
    /// Program counter to execute at after the delay slot is executed.
    std::optional<u32> delayed_pc;
    /// Method address to use for the next Send instruction.
    Macro::MethodAddress method_address = {};
    /// Current program counter.
    u32 pc{};
    /// Index of the next parameter that will be fetched by the 'parm' instruction.
    u32 next_parameter_index = 0;
    bool carry_flag = false;
};
struct DynamicCachedMacro {
virtual ~DynamicCachedMacro() = default;
/// Executes the macro code with the specified input parameters.
/// @param parameters The parameters of the macro
/// @param method The method to execute
virtual void Execute(const std::vector<u32>& parameters, u32 method) = 0;
Engines::Maxwell3D& maxwell3d;
virtual void Execute(Engines::Maxwell3D& maxwell3d, std::span<const u32> parameters, u32 method) = 0;
};
class HLEMacro {
public:
explicit HLEMacro(Engines::Maxwell3D& maxwell3d_);
~HLEMacro();
// Allocates and returns a cached macro if the hash matches a known function.
// Returns nullptr otherwise.
[[nodiscard]] std::unique_ptr<CachedMacro> GetHLEProgram(u64 hash) const;
private:
Engines::Maxwell3D& maxwell3d;
};
class MacroEngine {
public:
explicit MacroEngine(Engines::Maxwell3D& maxwell3d, bool is_interpreted);
~MacroEngine();
/// Tagged union of every compiled-macro form. std::monostate marks "no program"
/// (unknown hash, see GetHLEProgram's default case); the unique_ptr alternative
/// type-erases dynamically generated programs.
using AnyCachedMacro = std::variant<
    std::monostate,
    HLEMacro,
    HLE_DrawArraysIndirect,
    HLE_DrawIndexedIndirect,
    HLE_MultiDrawIndexedIndirectCount,
    HLE_MultiLayerClear,
    HLE_C713C83D8F63CCF3,
    HLE_D7333D26E0A93EDE,
    HLE_BindShader,
    HLE_SetRasterBoundingBox,
    HLE_ClearConstBuffer,
    HLE_ClearMemory,
    HLE_TransformFeedbackSetup,
    HLE_DrawIndirectByteCount,
    MacroInterpreterImpl,
    // Type-erased program produced by the x86-64 JIT backend.
    std::unique_ptr<DynamicCachedMacro>
>;
struct MacroEngine {
MacroEngine(bool is_interpreted_) noexcept : is_interpreted{is_interpreted_} {}
// Store the uploaded macro code to compile them when they're called.
void AddCode(u32 method, u32 data);
inline void AddCode(u32 method, u32 data) noexcept {
uploaded_macro_code[method].push_back(data);
}
// Clear the code associated with a method.
void ClearCode(u32 method);
inline void ClearCode(u32 method) noexcept {
macro_cache.erase(method);
uploaded_macro_code.erase(method);
}
// Compiles the macro if its not in the cache, and executes the compiled macro
void Execute(u32 method, const std::vector<u32>& parameters);
protected:
std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code);
private:
void Execute(Engines::Maxwell3D& maxwell3d, u32 method, std::span<const u32> parameters);
AnyCachedMacro Compile(Engines::Maxwell3D& maxwell3d, std::span<const u32> code);
struct CacheInfo {
std::unique_ptr<CachedMacro> lle_program{};
std::unique_ptr<CachedMacro> hle_program{};
AnyCachedMacro program;
u64 hash{};
bool has_hle_program{};
};
ankerl::unordered_dense::map<u32, CacheInfo> macro_cache;
ankerl::unordered_dense::map<u32, std::vector<u32>> uploaded_macro_code;
std::optional<HLEMacro> hle_macros;
Engines::Maxwell3D& maxwell3d;
bool is_interpreted;
};
std::optional<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d);
} // namespace Tegra

View file

@ -194,6 +194,7 @@ ShaderCache::ShaderCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
.support_float_controls = false,
.support_separate_denorm_behavior = false,
.support_separate_rounding_mode = false,
.support_fp32_rounding_rtz = false,
.support_fp16_denorm_preserve = false,
.support_fp32_denorm_preserve = false,
.support_fp16_denorm_flush = false,

View file

@ -56,16 +56,23 @@ namespace {
[[nodiscard]] VkImageSubresourceRange SubresourceRangeFromView(const ImageView& image_view) {
auto range = image_view.range;
const bool is_3d_image = image_view.type == VideoCommon::ImageViewType::e3D ||
(image_view.flags & VideoCommon::ImageViewFlagBits::Slice) !=
VideoCommon::ImageViewFlagBits{};
if ((image_view.flags & VideoCommon::ImageViewFlagBits::Slice) != VideoCommon::ImageViewFlagBits{}) {
range.base.layer = 0;
range.extent.layers = 1;
}
u32 layer_count = static_cast<u32>(range.extent.layers);
if (is_3d_image && layer_count == 1) {
layer_count = VK_REMAINING_ARRAY_LAYERS;
}
return VkImageSubresourceRange{
.aspectMask = AspectMaskFromFormat(image_view.format),
.baseMipLevel = static_cast<u32>(range.base.level),
.levelCount = static_cast<u32>(range.extent.levels),
.baseArrayLayer = static_cast<u32>(range.base.layer),
.layerCount = static_cast<u32>(range.extent.layers),
.layerCount = layer_count,
};
}
@ -170,8 +177,14 @@ constexpr VkPipelineMultisampleStateCreateInfo PIPELINE_MULTISAMPLE_STATE_CREATE
.alphaToCoverageEnable = VK_FALSE,
.alphaToOneEnable = VK_FALSE,
};
constexpr std::array DYNAMIC_STATES{VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
VK_DYNAMIC_STATE_BLEND_CONSTANTS};
constexpr std::array DYNAMIC_STATES{
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_SCISSOR,
VK_DYNAMIC_STATE_BLEND_CONSTANTS,
VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
VK_DYNAMIC_STATE_STENCIL_REFERENCE,
};
constexpr VkPipelineDynamicStateCreateInfo PIPELINE_DYNAMIC_STATE_CREATE_INFO{
.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.pNext = nullptr,
@ -1032,7 +1045,7 @@ void BlitImageHelper::ConvertDepthToColorPipeline(vk::Pipeline& pipeline, VkRend
VkShaderModule frag_shader = *convert_float_to_depth_frag;
const std::array stages = MakeStages(*full_screen_vert, frag_shader);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
pipeline = device.GetLogical().CreateGraphicsPipeline({
pipeline = device.GetLogical().CreateGraphicsPipeline(VkGraphicsPipelineCreateInfo{
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -1062,7 +1075,7 @@ void BlitImageHelper::ConvertColorToDepthPipeline(vk::Pipeline& pipeline, VkRend
VkShaderModule frag_shader = *convert_depth_to_float_frag;
const std::array stages = MakeStages(*full_screen_vert, frag_shader);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
pipeline = device.GetLogical().CreateGraphicsPipeline({
pipeline = device.GetLogical().CreateGraphicsPipeline(VkGraphicsPipelineCreateInfo{
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -1093,7 +1106,7 @@ void BlitImageHelper::ConvertPipelineEx(vk::Pipeline& pipeline, VkRenderPass ren
}
const std::array stages = MakeStages(*full_screen_vert, *module);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
pipeline = device.GetLogical().CreateGraphicsPipeline({
pipeline = device.GetLogical().CreateGraphicsPipeline(VkGraphicsPipelineCreateInfo{
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -1135,7 +1148,7 @@ void BlitImageHelper::ConvertPipeline(vk::Pipeline& pipeline, VkRenderPass rende
is_target_depth ? *convert_float_to_depth_frag : *convert_depth_to_float_frag;
const std::array stages = MakeStages(*full_screen_vert, frag_shader);
const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci = GetPipelineInputAssemblyStateCreateInfo(device);
pipeline = device.GetLogical().CreateGraphicsPipeline({
pipeline = device.GetLogical().CreateGraphicsPipeline(VkGraphicsPipelineCreateInfo{
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
@ -50,6 +50,38 @@ void RefreshXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell&
});
state.varyings = regs.stream_out_layout;
}
Maxwell::PrimitiveTopology NormalizeDynamicTopologyClass(Maxwell::PrimitiveTopology topology) {
switch (topology) {
case Maxwell::PrimitiveTopology::Points:
return Maxwell::PrimitiveTopology::Points;
case Maxwell::PrimitiveTopology::Lines:
case Maxwell::PrimitiveTopology::LineStrip:
return Maxwell::PrimitiveTopology::Lines;
case Maxwell::PrimitiveTopology::Triangles:
case Maxwell::PrimitiveTopology::TriangleStrip:
case Maxwell::PrimitiveTopology::TriangleFan:
case Maxwell::PrimitiveTopology::Quads:
case Maxwell::PrimitiveTopology::QuadStrip:
case Maxwell::PrimitiveTopology::Polygon:
case Maxwell::PrimitiveTopology::LineLoop:
return Maxwell::PrimitiveTopology::Triangles;
case Maxwell::PrimitiveTopology::LinesAdjacency:
case Maxwell::PrimitiveTopology::LineStripAdjacency:
return Maxwell::PrimitiveTopology::LinesAdjacency;
case Maxwell::PrimitiveTopology::TrianglesAdjacency:
case Maxwell::PrimitiveTopology::TriangleStripAdjacency:
return Maxwell::PrimitiveTopology::TrianglesAdjacency;
case Maxwell::PrimitiveTopology::Patches:
return Maxwell::PrimitiveTopology::Patches;
}
return topology;
}
} // Anonymous namespace
void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, DynamicFeatures& features) {
@ -60,9 +92,9 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, DynamicFe
extended_dynamic_state.Assign(features.has_extended_dynamic_state ? 1 : 0);
extended_dynamic_state_2.Assign(features.has_extended_dynamic_state_2 ? 1 : 0);
extended_dynamic_state_2_logic_op.Assign(features.has_extended_dynamic_state_2_logic_op ? 1 : 0);
extended_dynamic_state_3_blend.Assign(features.has_extended_dynamic_state_3_blend ? 1 : 0);
extended_dynamic_state_3_enables.Assign(features.has_extended_dynamic_state_3_enables ? 1 : 0);
dynamic_vertex_input.Assign(features.has_dynamic_vertex_input ? 1 : 0);
reserved_dynamic_state_3_blend.Assign(0);
reserved_dynamic_state_3_enables.Assign(0);
reserved_bit_5.Assign(0);
xfb_enabled.Assign(regs.transform_feedback_enabled != 0);
ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0);
polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front));
@ -71,7 +103,9 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, DynamicFe
tessellation_clockwise.Assign(regs.tessellation.params.output_primitives.Value() ==
Maxwell::Tessellation::OutputPrimitives::Triangles_CW);
patch_control_points_minus_one.Assign(regs.patch_vertices - 1);
topology.Assign(topology_);
const bool can_normalize_topology =
features.has_extended_dynamic_state && features.has_extended_dynamic_state_2;
topology.Assign(can_normalize_topology ? NormalizeDynamicTopologyClass(topology_) : topology_);
msaa_mode.Assign(regs.anti_alias_samples_mode);
raw2 = 0;
@ -103,26 +137,6 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, DynamicFe
point_size = std::bit_cast<u32>(regs.point_size);
if (maxwell3d.dirty.flags[Dirty::VertexInput]) {
if (features.has_dynamic_vertex_input) {
// Dirty flag will be reset by the command buffer update
static constexpr std::array LUT{
0u, // Invalid
1u, // SignedNorm
1u, // UnsignedNorm
2u, // SignedInt
3u, // UnsignedInt
1u, // UnsignedScaled
1u, // SignedScaled
1u, // Float
};
const auto& attrs = regs.vertex_attrib_format;
attribute_types = 0;
for (size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
const u32 mask = attrs[i].constant != 0 ? 0 : 3;
const u32 type = LUT[static_cast<size_t>(attrs[i].type.Value())];
attribute_types |= static_cast<u64>(type & mask) << (i * 2);
}
} else {
maxwell3d.dirty.flags[Dirty::VertexInput] = false;
enabled_divisors = 0;
for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
@ -141,7 +155,6 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, DynamicFe
attribute.size.Assign(static_cast<u32>(input.size.Value()));
}
}
}
if (maxwell3d.dirty.flags[Dirty::ViewportSwizzles]) {
maxwell3d.dirty.flags[Dirty::ViewportSwizzles] = false;
const auto& transform = regs.viewport_transform;
@ -153,24 +166,20 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d, DynamicFe
dynamic_state.raw2 = 0;
if (!extended_dynamic_state) {
dynamic_state.Refresh(regs);
}
std::ranges::transform(regs.vertex_streams, vertex_strides.begin(), [](const auto& array) {
return static_cast<u16>(array.stride.Value());
});
}
if (!extended_dynamic_state_2_logic_op) {
if (!extended_dynamic_state_2) {
dynamic_state.Refresh2(regs, topology_, extended_dynamic_state_2);
}
if (!extended_dynamic_state_3_blend) {
if (maxwell3d.dirty.flags[Dirty::Blending]) {
maxwell3d.dirty.flags[Dirty::Blending] = false;
for (size_t index = 0; index < attachments.size(); ++index) {
attachments[index].Refresh(regs, index);
}
}
}
if (!extended_dynamic_state_3_enables) {
dynamic_state.Refresh3(regs);
}
if (xfb_enabled) {
RefreshXfbState(xfb_state, regs);
}

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
@ -25,9 +25,6 @@ struct DynamicFeatures {
bool has_extended_dynamic_state_2;
bool has_extended_dynamic_state_2_logic_op;
bool has_extended_dynamic_state_2_patch_control_points;
bool has_extended_dynamic_state_3_blend;
bool has_extended_dynamic_state_3_enables;
bool has_dynamic_vertex_input;
};
struct FixedPipelineState {
@ -191,9 +188,9 @@ struct FixedPipelineState {
BitField<0, 1, u32> extended_dynamic_state;
BitField<1, 1, u32> extended_dynamic_state_2;
BitField<2, 1, u32> extended_dynamic_state_2_logic_op;
BitField<3, 1, u32> extended_dynamic_state_3_blend;
BitField<4, 1, u32> extended_dynamic_state_3_enables;
BitField<5, 1, u32> dynamic_vertex_input;
BitField<3, 1, u32> reserved_dynamic_state_3_blend;
BitField<4, 1, u32> reserved_dynamic_state_3_enables;
BitField<5, 1, u32> reserved_bit_5;
BitField<6, 1, u32> xfb_enabled;
BitField<7, 1, u32> ndc_minus_one_to_one;
BitField<8, 2, u32> polygon_mode;
@ -225,10 +222,7 @@ struct FixedPipelineState {
u32 point_size;
std::array<u16, Maxwell::NumViewports> viewport_swizzles;
union {
u64 attribute_types; // Used with VK_EXT_vertex_input_dynamic_state
u64 enabled_divisors;
};
DynamicState dynamic_state;
std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments;
@ -260,14 +254,6 @@ struct FixedPipelineState {
// When transform feedback is enabled, use the whole struct
return sizeof(*this);
}
if (dynamic_vertex_input && extended_dynamic_state_3_blend) {
// Exclude dynamic state and attributes
return offsetof(FixedPipelineState, dynamic_state);
}
if (dynamic_vertex_input) {
// Exclude dynamic state
return offsetof(FixedPipelineState, attributes);
}
if (extended_dynamic_state) {
// Exclude dynamic state
return offsetof(FixedPipelineState, vertex_strides);
@ -275,10 +261,6 @@ struct FixedPipelineState {
// Default
return offsetof(FixedPipelineState, xfb_state);
}
u32 DynamicAttributeType(size_t index) const noexcept {
return (attribute_types >> (index * 2)) & 0b11;
}
};
static_assert(std::has_unique_object_representations_v<FixedPipelineState>);
static_assert(std::is_trivially_copyable_v<FixedPipelineState>);

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project

View file

@ -15,6 +15,7 @@
#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/types.h"
#include "video_core/vulkan_common/vulkan_device.h"
@ -31,22 +32,57 @@ public:
num_descriptors <= device->MaxPushDescriptors();
}
// TODO(crueter): utilize layout binding flags
vk::DescriptorSetLayout CreateDescriptorSetLayout(bool use_push_descriptor) const {
if (bindings.empty()) {
return nullptr;
}
variable_descriptor_count = 0;
binding_flags.clear();
VkDescriptorSetLayoutBindingFlagsCreateInfo binding_flags_ci{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
.pNext = nullptr,
.bindingCount = 0,
.pBindingFlags = nullptr,
};
const bool use_descriptor_indexing =
!use_push_descriptor && device->isExtDescriptorIndexingSupported();
const void* layout_next = nullptr;
if (use_descriptor_indexing) {
binding_flags.assign(bindings.size(), 0);
for (size_t i = 0; i < bindings.size(); ++i) {
if (bindings[i].descriptorCount > 1) {
binding_flags[i] |= VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT;
}
}
if (bindings.back().descriptorCount > 1) {
binding_flags.back() |= VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT;
variable_descriptor_count = bindings.back().descriptorCount;
}
binding_flags_ci.bindingCount = static_cast<u32>(binding_flags.size());
binding_flags_ci.pBindingFlags = binding_flags.data();
layout_next = &binding_flags_ci;
}
const VkDescriptorSetLayoutCreateFlags flags =
use_push_descriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
return device->GetLogical().CreateDescriptorSetLayout({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.pNext = layout_next,
.flags = flags,
.bindingCount = static_cast<u32>(bindings.size()),
.pBindings = bindings.data(),
});
}
u32 VariableDescriptorCount() const noexcept {
return variable_descriptor_count;
}
vk::DescriptorUpdateTemplate CreateTemplate(VkDescriptorSetLayout descriptor_set_layout,
VkPipelineLayout pipeline_layout,
bool use_push_descriptor) const {
@ -134,8 +170,10 @@ private:
bool is_compute{};
boost::container::small_vector<VkDescriptorSetLayoutBinding, 32> bindings;
boost::container::small_vector<VkDescriptorUpdateTemplateEntry, 32> entries;
mutable boost::container::small_vector<VkDescriptorBindingFlags, 32> binding_flags;
u32 binding{};
u32 num_descriptors{};
mutable u32 variable_descriptor_count{};
size_t offset{};
};
@ -195,10 +233,16 @@ inline void PushImageDescriptors(TextureCache& texture_cache,
ImageView& image_view{texture_cache.GetImageView(image_view_id)};
const VkImageView vk_image_view{image_view.Handle(desc.type)};
const Sampler& sampler{texture_cache.GetSampler(sampler_id)};
const auto surface_type{VideoCore::Surface::GetFormatType(image_view.format)};
const bool allow_depth_compare =
desc.is_depth && (surface_type == VideoCore::Surface::SurfaceType::Depth ||
surface_type == VideoCore::Surface::SurfaceType::DepthStencil);
const bool use_fallback_sampler{sampler.HasAddedAnisotropy() &&
!image_view.SupportsAnisotropy()};
const VkSampler vk_sampler{use_fallback_sampler ? sampler.HandleWithDefaultAnisotropy()
: sampler.Handle()};
const VkSampler vk_sampler{use_fallback_sampler
? sampler.HandleWithDefaultAnisotropy(
allow_depth_compare)
: sampler.Handle(allow_depth_compare)};
guest_descriptor_queue.AddSampledImage(vk_image_view, vk_sampler);
rescaling.PushTexture(texture_cache.IsRescaling(image_view));
}

View file

@ -461,6 +461,9 @@ static vk::Pipeline CreateWrappedPipelineImpl(
constexpr std::array dynamic_states{
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_SCISSOR,
VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
VK_DYNAMIC_STATE_STENCIL_REFERENCE,
};
const VkPipelineDynamicStateCreateInfo dynamic_state_ci{

View file

@ -137,14 +137,8 @@ try
memory_allocator,
scheduler,
swapchain,
#ifdef ANDROID
surface)
,
#else
*surface)
,
#endif
blit_swapchain(device_memory,
, blit_swapchain(device_memory,
device,
memory_allocator,
present_manager,

View file

@ -10,6 +10,7 @@
#include <span>
#include <vector>
#include "video_core/buffer_cache/buffer_cache_base.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
@ -449,8 +450,16 @@ void BufferCacheRuntime::CopyBuffer(VkBuffer dst_buffer, VkBuffer src_buffer,
}
cmdbuf.CopyBuffer(src_buffer, dst_buffer, VideoCommon::FixSmallVectorADL(vk_copies));
if (barrier) {
// Buffer reads can go to vertex input, shaders, or compute
const VkPipelineStageFlags dst_stages =
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, WRITE_BARRIER);
dst_stages, 0, WRITE_BARRIER);
}
});
}
@ -478,7 +487,14 @@ void BufferCacheRuntime::PostCopyBarrier() {
};
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([](vk::CommandBuffer cmdbuf) {
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
const VkPipelineStageFlags dst_stages =
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stages,
0, WRITE_BARRIER);
});
}
@ -505,7 +521,15 @@ void BufferCacheRuntime::ClearBuffer(VkBuffer dest_buffer, u32 offset, size_t si
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
0, READ_BARRIER);
cmdbuf.FillBuffer(dest_buffer, offset, size, value);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
// Buffer reads can go to vertex input, shaders, or compute
const VkPipelineStageFlags dst_stages_clear =
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, dst_stages_clear,
0, WRITE_BARRIER);
});
}
@ -563,14 +587,6 @@ void BufferCacheRuntime::BindVertexBuffer(u32 index, VkBuffer buffer, u32 offset
if (index >= device.GetMaxVertexInputBindings()) {
return;
}
if (device.IsExtExtendedDynamicStateSupported()) {
scheduler.Record([index, buffer, offset, size, stride](vk::CommandBuffer cmdbuf) {
const VkDeviceSize vk_offset = buffer != VK_NULL_HANDLE ? offset : 0;
const VkDeviceSize vk_size = buffer != VK_NULL_HANDLE ? size : VK_WHOLE_SIZE;
const VkDeviceSize vk_stride = stride;
cmdbuf.BindVertexBuffers2EXT(index, 1, &buffer, &vk_offset, &vk_size, &vk_stride);
});
} else {
if (!device.HasNullDescriptor() && buffer == VK_NULL_HANDLE) {
ReserveNullBuffer();
buffer = *null_buffer;
@ -579,22 +595,21 @@ void BufferCacheRuntime::BindVertexBuffer(u32 index, VkBuffer buffer, u32 offset
scheduler.Record([index, buffer, offset](vk::CommandBuffer cmdbuf) {
cmdbuf.BindVertexBuffer(index, buffer, offset);
});
}
}
void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bindings) {
boost::container::small_vector<VkBuffer, 32> buffer_handles;
for (u32 index = 0; index < bindings.buffers.size(); ++index) {
auto handle = bindings.buffers[index]->Handle();
boost::container::static_vector<VkBuffer, VideoCommon::NUM_VERTEX_BUFFERS> buffer_handles(bindings.buffers.size());
for (u32 i = 0; i < bindings.buffers.size(); ++i) {
auto handle = bindings.buffers[i]->Handle();
if (handle == VK_NULL_HANDLE) {
bindings.offsets[index] = 0;
bindings.sizes[index] = VK_WHOLE_SIZE;
bindings.offsets[i] = 0;
bindings.sizes[i] = VK_WHOLE_SIZE;
if (!device.HasNullDescriptor()) {
ReserveNullBuffer();
handle = *null_buffer;
}
}
buffer_handles.push_back(handle);
buffer_handles[i] = handle;
}
const u32 device_max = device.GetMaxVertexInputBindings();
const u32 min_binding = (std::min)(bindings.min_index, device_max);
@ -603,22 +618,12 @@ void BufferCacheRuntime::BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bi
if (binding_count == 0) {
return;
}
if (device.IsExtExtendedDynamicStateSupported()) {
scheduler.Record([bindings_ = std::move(bindings),
buffer_handles_ = std::move(buffer_handles),
binding_count](vk::CommandBuffer cmdbuf) {
cmdbuf.BindVertexBuffers2EXT(bindings_.min_index, binding_count, buffer_handles_.data(),
bindings_.offsets.data(), bindings_.sizes.data(),
bindings_.strides.data());
});
} else {
scheduler.Record([bindings_ = std::move(bindings),
buffer_handles_ = std::move(buffer_handles),
binding_count](vk::CommandBuffer cmdbuf) {
cmdbuf.BindVertexBuffers(bindings_.min_index, binding_count, buffer_handles_.data(),
bindings_.offsets.data());
});
}
}
void BufferCacheRuntime::BindTransformFeedbackBuffer(u32 index, VkBuffer buffer, u32 offset,
@ -647,15 +652,21 @@ void BufferCacheRuntime::BindTransformFeedbackBuffers(VideoCommon::HostBindings<
// Already logged in the rasterizer
return;
}
boost::container::small_vector<VkBuffer, 4> buffer_handles;
for (u32 index = 0; index < bindings.buffers.size(); ++index) {
buffer_handles.push_back(bindings.buffers[index]->Handle());
boost::container::static_vector<VkBuffer, VideoCommon::NUM_VERTEX_BUFFERS> buffer_handles(bindings.buffers.size());
for (u32 i = 0; i < bindings.buffers.size(); ++i) {
auto handle = bindings.buffers[i]->Handle();
if (handle == VK_NULL_HANDLE) {
bindings.offsets[i] = 0;
bindings.sizes[i] = VK_WHOLE_SIZE;
if (!device.HasNullDescriptor()) {
ReserveNullBuffer();
handle = *null_buffer;
}
scheduler.Record([bindings_ = std::move(bindings),
buffer_handles_ = std::move(buffer_handles)](vk::CommandBuffer cmdbuf) {
cmdbuf.BindTransformFeedbackBuffersEXT(0, static_cast<u32>(buffer_handles_.size()),
buffer_handles_.data(), bindings_.offsets.data(),
bindings_.sizes.data());
}
buffer_handles[i] = handle;
}
scheduler.Record([bindings_ = std::move(bindings), buffer_handles_ = std::move(buffer_handles)](vk::CommandBuffer cmdbuf) {
cmdbuf.BindTransformFeedbackBuffersEXT(0, u32(buffer_handles_.size()), buffer_handles_.data(), bindings_.offsets.data(), bindings_.sizes.data());
});
}

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
@ -127,6 +127,7 @@ public:
void BindVertexBuffers(VideoCommon::HostBindings<Buffer>& bindings);
void BindTransformFeedbackBuffer(u32 index, VkBuffer buffer, u32 offset, u32 size);
void BindTransformFeedbackBuffers(VideoCommon::HostBindings<Buffer>& bindings);

View file

@ -9,6 +9,7 @@
#include <numeric>
#include <optional>
#include <utility>
#include <vector>
#include "video_core/renderer_vulkan/vk_texture_cache.h"
@ -237,9 +238,37 @@ ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code,
std::optional<u32> optional_subgroup_size)
: device{device_} {
u32 variable_descriptor_count{};
std::vector<VkDescriptorBindingFlags> binding_flags;
VkDescriptorSetLayoutBindingFlagsCreateInfo binding_flags_ci{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
.pNext = nullptr,
.bindingCount = 0,
.pBindingFlags = nullptr,
};
const bool use_descriptor_indexing = device.isExtDescriptorIndexingSupported();
const void* layout_next = nullptr;
if (use_descriptor_indexing && !bindings.empty()) {
binding_flags.assign(bindings.size(), 0);
for (size_t i = 0; i < bindings.size(); ++i) {
if (bindings[i].descriptorCount > 1) {
binding_flags[i] |= VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT;
}
}
const size_t last_binding = bindings.size() - 1;
if (bindings[last_binding].descriptorCount > 1) {
binding_flags[last_binding] |= VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT;
variable_descriptor_count = bindings[last_binding].descriptorCount;
}
binding_flags_ci.bindingCount = static_cast<u32>(binding_flags.size());
binding_flags_ci.pBindingFlags = binding_flags.data();
layout_next = &binding_flags_ci;
}
descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.pNext = layout_next,
.flags = 0,
.bindingCount = bindings.size(),
.pBindings = bindings.data(),
@ -266,7 +295,8 @@ ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
.pipelineLayout = *layout,
.set = 0,
});
descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, bank_info);
descriptor_allocator =
descriptor_pool.Allocator(*descriptor_set_layout, bank_info, variable_descriptor_count);
}
if (code.empty()) {
return;
@ -285,7 +315,7 @@ ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
.requiredSubgroupSize = optional_subgroup_size ? *optional_subgroup_size : 32U,
};
bool use_setup_size = device.IsExtSubgroupSizeControlSupported() && optional_subgroup_size;
pipeline = device.GetLogical().CreateComputePipeline({
pipeline = device.GetLogical().CreateComputePipeline(VkComputePipelineCreateInfo{
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -299,7 +329,7 @@ ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
.pSpecializationInfo = nullptr,
},
.layout = *layout,
.basePipelineHandle = nullptr,
.basePipelineHandle = {},
.basePipelineIndex = 0,
});
}
@ -944,7 +974,7 @@ MSAACopyPass::MSAACopyPass(const Device& device_, Scheduler& scheduler_,
.codeSize = static_cast<u32>(code.size_bytes()),
.pCode = code.data(),
});
pipelines[i] = device.GetLogical().CreateComputePipeline({
pipelines[i] = device.GetLogical().CreateComputePipeline(VkComputePipelineCreateInfo{
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -958,7 +988,7 @@ MSAACopyPass::MSAACopyPass(const Device& device_, Scheduler& scheduler_,
.pSpecializationInfo = nullptr,
},
.layout = *layout,
.basePipelineHandle = nullptr,
.basePipelineHandle = {},
.basePipelineIndex = 0,
});
};

View file

@ -67,8 +67,7 @@ ComputePipeline::ComputePipeline(const Device& device_, vk::PipelineCache& pipel
if (device.IsKhrPipelineExecutablePropertiesEnabled() && Settings::values.renderer_debug.GetValue()) {
flags |= VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR;
}
pipeline = device.GetLogical().CreateComputePipeline(
{
pipeline = device.GetLogical().CreateComputePipeline(VkComputePipelineCreateInfo{
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = flags,
@ -85,8 +84,7 @@ ComputePipeline::ComputePipeline(const Device& device_, vk::PipelineCache& pipel
.layout = *pipeline_layout,
.basePipelineHandle = 0,
.basePipelineIndex = 0,
},
*pipeline_cache);
}, *pipeline_cache);
// Log compute pipeline creation
if (Settings::values.gpu_logging_enabled.GetValue()) {

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
@ -88,9 +88,10 @@ static void AllocatePool(const Device& device, DescriptorBank& bank) {
}
DescriptorAllocator::DescriptorAllocator(const Device& device_, MasterSemaphore& master_semaphore_,
DescriptorBank& bank_, VkDescriptorSetLayout layout_)
DescriptorBank& bank_, VkDescriptorSetLayout layout_,
u32 variable_descriptor_count_)
: ResourcePool(master_semaphore_, SETS_GROW_RATE), device{&device_}, bank{&bank_},
layout{layout_} {}
layout{layout_}, variable_descriptor_count{variable_descriptor_count_} {}
VkDescriptorSet DescriptorAllocator::Commit() {
const size_t index = CommitResource();
@ -103,9 +104,25 @@ void DescriptorAllocator::Allocate(size_t begin, size_t end) {
vk::DescriptorSets DescriptorAllocator::AllocateDescriptors(size_t count) {
const std::vector<VkDescriptorSetLayout> layouts(count, layout);
std::vector<u32> variable_descriptor_counts;
VkDescriptorSetVariableDescriptorCountAllocateInfo variable_descriptor_count_info{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO,
.pNext = nullptr,
.descriptorSetCount = 0,
.pDescriptorCounts = nullptr,
};
const void* allocate_next = nullptr;
if (variable_descriptor_count != 0) {
variable_descriptor_counts.assign(count, variable_descriptor_count);
variable_descriptor_count_info.descriptorSetCount = static_cast<u32>(count);
variable_descriptor_count_info.pDescriptorCounts = variable_descriptor_counts.data();
allocate_next = &variable_descriptor_count_info;
}
VkDescriptorSetAllocateInfo allocate_info{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = nullptr,
.pNext = allocate_next,
.descriptorPool = *bank->pools.back(),
.descriptorSetCount = static_cast<u32>(count),
.pSetLayouts = layouts.data(),
@ -131,18 +148,22 @@ DescriptorPool::DescriptorPool(const Device& device_, Scheduler& scheduler)
DescriptorPool::~DescriptorPool() = default;
DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
std::span<const Shader::Info> infos) {
return Allocator(layout, MakeBankInfo(infos));
std::span<const Shader::Info> infos,
u32 variable_descriptor_count) {
return Allocator(layout, MakeBankInfo(infos), variable_descriptor_count);
}
DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
const Shader::Info& info) {
return Allocator(layout, MakeBankInfo(std::array{info}));
const Shader::Info& info,
u32 variable_descriptor_count) {
return Allocator(layout, MakeBankInfo(std::array{info}), variable_descriptor_count);
}
DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
const DescriptorBankInfo& info) {
return DescriptorAllocator(device, master_semaphore, Bank(info), layout);
const DescriptorBankInfo& info,
u32 variable_descriptor_count) {
return DescriptorAllocator(device, master_semaphore, Bank(info), layout,
variable_descriptor_count);
}
DescriptorBank& DescriptorPool::Bank(const DescriptorBankInfo& reqs) {

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
@ -47,7 +50,8 @@ public:
private:
explicit DescriptorAllocator(const Device& device_, MasterSemaphore& master_semaphore_,
DescriptorBank& bank_, VkDescriptorSetLayout layout_);
DescriptorBank& bank_, VkDescriptorSetLayout layout_,
u32 variable_descriptor_count_);
void Allocate(size_t begin, size_t end) override;
@ -56,6 +60,7 @@ private:
const Device* device{};
DescriptorBank* bank{};
VkDescriptorSetLayout layout{};
u32 variable_descriptor_count{};
std::vector<vk::DescriptorSets> sets;
};
@ -69,9 +74,12 @@ public:
DescriptorPool(const DescriptorPool&) = delete;
DescriptorAllocator Allocator(VkDescriptorSetLayout layout,
std::span<const Shader::Info> infos);
DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const Shader::Info& info);
DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const DescriptorBankInfo& info);
std::span<const Shader::Info> infos,
u32 variable_descriptor_count = 0);
DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const Shader::Info& info,
u32 variable_descriptor_count = 0);
DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const DescriptorBankInfo& info,
u32 variable_descriptor_count = 0);
private:
DescriptorBank& Bank(const DescriptorBankInfo& reqs);

View file

@ -23,6 +23,7 @@
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/surface.h"
#include "video_core/shader_notify.h"
#include "video_core/texture_cache/texture_cache.h"
#include "video_core/vulkan_common/vulkan_device.h"
@ -273,7 +274,8 @@ GraphicsPipeline::GraphicsPipeline(
descriptor_set_layout = builder.CreateDescriptorSetLayout(uses_push_descriptor);
if (!uses_push_descriptor) {
descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, stage_infos);
descriptor_allocator = descriptor_pool.Allocator(
*descriptor_set_layout, stage_infos, builder.VariableDescriptorCount());
}
const VkDescriptorSetLayout set_layout{*descriptor_set_layout};
@ -529,6 +531,12 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling,
}
const void* const descriptor_data{guest_descriptor_queue.UpdateData()};
FixedPipelineState::DynamicState dynamic_state{};
if (!key.state.extended_dynamic_state) {
dynamic_state = key.state.dynamic_state;
} else {
dynamic_state.raw1 = key.state.dynamic_state.raw1;
}
scheduler.Record([this, descriptor_data, bind_pipeline, rescaling_data = rescaling.Data(),
is_rescaling, update_rescaling,
uses_render_area = render_area.uses_render_area,
@ -577,13 +585,11 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
static_vector<VkVertexInputBindingDescription, 32> vertex_bindings;
static_vector<VkVertexInputBindingDivisorDescriptionEXT, 32> vertex_binding_divisors;
static_vector<VkVertexInputAttributeDescription, 32> vertex_attributes;
if (!key.state.dynamic_vertex_input) {
const size_t num_vertex_arrays = (std::min)(
Maxwell::NumVertexArrays, static_cast<size_t>(device.GetMaxVertexInputBindings()));
const size_t num_vertex_arrays =
(std::min)(Maxwell::NumVertexArrays, static_cast<size_t>(device.GetMaxVertexInputBindings()));
for (size_t index = 0; index < num_vertex_arrays; ++index) {
const bool instanced = key.state.binding_divisors[index] != 0;
const auto rate =
instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
vertex_bindings.push_back({
.binding = static_cast<u32>(index),
.stride = key.state.vertex_strides[index],
@ -608,7 +614,6 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.offset = attribute.offset,
});
}
}
ASSERT(vertex_attributes.size() <= device.GetMaxVertexInputAttributes());
VkPipelineVertexInputStateCreateInfo vertex_input_ci{
@ -687,9 +692,9 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.viewportCount = num_viewports,
.viewportCount = key.state.extended_dynamic_state ? 0u : num_viewports,
.pViewports = nullptr,
.scissorCount = num_viewports,
.scissorCount = key.state.extended_dynamic_state ? 0u : num_viewports,
.pScissors = nullptr,
};
if (device.IsNvViewportSwizzleSupported()) {
@ -716,20 +721,62 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.depthBiasClamp = 0.0f,
.depthBiasSlopeFactor = 0.0f,
.lineWidth = 1.0f,
// TODO(alekpop): Transfer from regs
};
const bool smooth_lines_supported =
device.IsExtLineRasterizationSupported() && device.SupportsSmoothLines();
const bool stippled_lines_supported =
device.IsExtLineRasterizationSupported() && device.SupportsStippledRectangularLines();
const bool line_rasterization_supported = device.IsExtLineRasterizationSupported();
const bool any_stippled_lines_supported =
line_rasterization_supported &&
(device.SupportsStippledRectangularLines() || device.SupportsStippledBresenhamLines() ||
device.SupportsStippledSmoothLines());
const bool line_stipple_dynamic_state_supported =
IsLine(input_assembly_topology) && any_stippled_lines_supported;
const bool supports_rectangular_lines =
line_rasterization_supported && device.SupportsRectangularLines();
const bool supports_bresenham_lines =
line_rasterization_supported && device.SupportsBresenhamLines();
const bool supports_smooth_lines = line_rasterization_supported && device.SupportsSmoothLines();
VkLineRasterizationModeEXT line_rasterization_mode = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT;
if (line_rasterization_supported) {
if (key.state.smooth_lines != 0) {
if (supports_smooth_lines) {
line_rasterization_mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
} else if (supports_rectangular_lines) {
line_rasterization_mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT;
} else if (supports_bresenham_lines) {
line_rasterization_mode = VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
}
} else {
if (supports_rectangular_lines) {
line_rasterization_mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT;
} else if (supports_bresenham_lines) {
line_rasterization_mode = VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
} else if (supports_smooth_lines) {
line_rasterization_mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
}
}
}
const bool stippled_lines_supported = [&]() {
if (!line_rasterization_supported || !dynamic.line_stipple_enable) {
return false;
}
switch (line_rasterization_mode) {
case VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT:
return device.SupportsStippledRectangularLines();
case VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT:
return device.SupportsStippledBresenhamLines();
case VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT:
return device.SupportsStippledSmoothLines();
default:
return false;
}
}();
VkPipelineRasterizationLineStateCreateInfoEXT line_state{
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT,
.pNext = nullptr,
.lineRasterizationMode = key.state.smooth_lines != 0 && smooth_lines_supported
? VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT
: VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT,
.stippledLineEnable =
(dynamic.line_stipple_enable && stippled_lines_supported) ? VK_TRUE : VK_FALSE,
.lineRasterizationMode = line_rasterization_mode,
.stippledLineEnable = stippled_lines_supported ? VK_TRUE : VK_FALSE,
.lineStippleFactor = key.state.line_stipple_factor,
.lineStipplePattern = static_cast<uint16_t>(key.state.line_stipple_pattern),
};
@ -737,15 +784,31 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT,
.pNext = nullptr,
.flags = 0,
.conservativeRasterizationMode = key.state.conservative_raster_enable != 0
? VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT
: VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT,
.conservativeRasterizationMode = VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT,
.extraPrimitiveOverestimationSize = 0.0f,
};
const bool conservative_requested = key.state.conservative_raster_enable != 0;
if (conservative_requested) {
const bool is_point_topology = input_assembly_topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
const bool is_line_topology = IsLine(input_assembly_topology);
const bool needs_point_or_line_support = is_point_topology || is_line_topology;
const bool supports_requested_topology =
!needs_point_or_line_support || device.SupportsConservativePointAndLineRasterization();
conservative_raster.conservativeRasterizationMode =
supports_requested_topology ? VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT
: VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT;
}
const bool preserve_provoking_vertex_for_xfb =
!key.state.xfb_enabled || device.IsTransformFeedbackProvokingVertexPreserved();
const bool use_last_provoking_vertex =
key.state.provoking_vertex_last != 0 && preserve_provoking_vertex_for_xfb &&
device.IsProvokingVertexLastSupported();
VkPipelineRasterizationProvokingVertexStateCreateInfoEXT provoking_vertex{
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT,
.pNext = nullptr,
.provokingVertexMode = key.state.provoking_vertex_last != 0
.provokingVertexMode = use_last_provoking_vertex
? VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT
: VK_PROVOKING_VERTEX_MODE_FIRST_VERTEX_EXT,
};
@ -756,7 +819,7 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
if (device.IsExtConservativeRasterizationSupported()) {
conservative_raster.pNext = std::exchange(rasterization_ci.pNext, &conservative_raster);
}
if (device.IsExtProvokingVertexSupported() && Settings::values.provoking_vertex.GetValue()) {
if (device.IsExtProvokingVertexSupported()) {
provoking_vertex.pNext = std::exchange(rasterization_ci.pNext, &provoking_vertex);
}
@ -804,13 +867,15 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
VK_COLOR_COMPONENT_A_BIT,
};
const auto& blend{key.state.attachments[index]};
const PixelFormat color_format{DecodeFormat(key.state.color_formats[index])};
const bool supports_blending = !VideoCore::Surface::IsPixelFormatInteger(color_format);
const std::array mask{blend.Mask()};
VkColorComponentFlags write_mask{};
for (size_t i = 0; i < mask_table.size(); ++i) {
write_mask |= mask[i] ? mask_table[i] : 0;
}
cb_attachments.push_back({
.blendEnable = blend.enable != 0,
.blendEnable = supports_blending && blend.enable != 0,
.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.SourceRGBFactor()),
.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.DestRGBFactor()),
.colorBlendOp = MaxwellToVK::BlendEquation(blend.EquationRGB()),
@ -830,17 +895,22 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.pAttachments = cb_attachments.data(),
.blendConstants = {}
};
static_vector<VkDynamicState, 34> dynamic_states{
VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
static_vector<VkDynamicState, 35> dynamic_states{
VK_DYNAMIC_STATE_DEPTH_BIAS, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
VK_DYNAMIC_STATE_LINE_WIDTH,
};
if (line_stipple_dynamic_state_supported) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LINE_STIPPLE_EXT);
}
if (key.state.extended_dynamic_state) {
static constexpr std::array extended{
VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT,
VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT,
VK_DYNAMIC_STATE_CULL_MODE_EXT,
VK_DYNAMIC_STATE_FRONT_FACE_EXT,
VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT,
VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT,
@ -849,18 +919,9 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
VK_DYNAMIC_STATE_STENCIL_OP_EXT,
};
dynamic_states.insert(dynamic_states.end(), extended.begin(), extended.end());
// VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT is part of EDS1
// Only use it if VIDS is not active (VIDS replaces it with full vertex input control)
if (!key.state.dynamic_vertex_input) {
dynamic_states.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT);
}
}
// VK_DYNAMIC_STATE_VERTEX_INPUT_EXT (VIDS) - Independent from EDS
// Provides full dynamic vertex input control, replaces VERTEX_INPUT_BINDING_STRIDE
if (key.state.dynamic_vertex_input) {
dynamic_states.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_EXT);
} else {
dynamic_states.push_back(VK_DYNAMIC_STATE_VIEWPORT);
dynamic_states.push_back(VK_DYNAMIC_STATE_SCISSOR);
}
// EDS2 - Core (3 states)
@ -878,41 +939,6 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LOGIC_OP_EXT);
}
// EDS3 - Blending (composite: 3 states)
if (key.state.extended_dynamic_state_3_blend) {
static constexpr std::array extended3{
VK_DYNAMIC_STATE_COLOR_BLEND_ENABLE_EXT,
VK_DYNAMIC_STATE_COLOR_BLEND_EQUATION_EXT,
VK_DYNAMIC_STATE_COLOR_WRITE_MASK_EXT,
};
dynamic_states.insert(dynamic_states.end(), extended3.begin(), extended3.end());
}
// EDS3 - Enables (composite: per-feature)
if (key.state.extended_dynamic_state_3_enables) {
if (device.SupportsDynamicState3DepthClampEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_DEPTH_CLAMP_ENABLE_EXT);
}
if (device.SupportsDynamicState3LogicOpEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LOGIC_OP_ENABLE_EXT);
}
if (device.SupportsDynamicState3LineRasterizationMode()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LINE_RASTERIZATION_MODE_EXT);
}
if (device.SupportsDynamicState3ConservativeRasterizationMode()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_CONSERVATIVE_RASTERIZATION_MODE_EXT);
}
if (device.SupportsDynamicState3LineStippleEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_LINE_STIPPLE_ENABLE_EXT);
}
if (device.SupportsDynamicState3AlphaToCoverageEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_ALPHA_TO_COVERAGE_ENABLE_EXT);
}
if (device.SupportsDynamicState3AlphaToOneEnable()) {
dynamic_states.push_back(VK_DYNAMIC_STATE_ALPHA_TO_ONE_ENABLE_EXT);
}
}
const VkPipelineDynamicStateCreateInfo dynamic_state_ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.pNext = nullptr,
@ -946,8 +972,7 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
flags |= VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR;
}
pipeline = device.GetLogical().CreateGraphicsPipeline(
{
pipeline = device.GetLogical().CreateGraphicsPipeline({
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = flags,
@ -967,8 +992,7 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
.subpass = 0,
.basePipelineHandle = nullptr,
.basePipelineIndex = 0,
},
*pipeline_cache);
}, *pipeline_cache);
// Log graphics pipeline creation
if (Settings::values.gpu_logging_enabled.GetValue()) {

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
@ -81,7 +81,6 @@ public:
const GraphicsPipelineCacheKey& key, std::array<vk::ShaderModule, NUM_STAGES> stages,
const std::array<const Shader::Info*, NUM_STAGES>& infos);
bool HasDynamicVertexInput() const noexcept { return key.state.dynamic_vertex_input; }
bool SupportsAlphaToCoverage() const noexcept {
return fragment_has_color0_output;
}
@ -93,6 +92,12 @@ public:
bool UsesExtendedDynamicState() const noexcept {
return key.state.extended_dynamic_state != 0;
}
bool UsesExtendedDynamicState2() const noexcept {
return key.state.extended_dynamic_state_2 != 0;
}
bool UsesExtendedDynamicState2LogicOp() const noexcept {
return key.state.extended_dynamic_state_2_logic_op != 0;
}
GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;

View file

@ -58,7 +58,7 @@ using VideoCommon::FileEnvironment;
using VideoCommon::GenericEnvironment;
using VideoCommon::GraphicsEnvironment;
constexpr u32 CACHE_VERSION = 16;
constexpr u32 CACHE_VERSION = 17;
constexpr std::array<char, 8> VULKAN_CACHE_MAGIC_NUMBER{'y', 'u', 'z', 'u', 'v', 'k', 'c', 'h'};
template <typename Container>
@ -132,20 +132,6 @@ Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexAttribut
return Shader::AttributeType::Float;
}
Shader::AttributeType AttributeType(const FixedPipelineState& state, size_t index) {
switch (state.DynamicAttributeType(index)) {
case 0:
return Shader::AttributeType::Disabled;
case 1:
return Shader::AttributeType::Float;
case 2:
return Shader::AttributeType::SignedInt;
case 3:
return Shader::AttributeType::UnsignedInt;
}
return Shader::AttributeType::Disabled;
}
Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> programs,
const GraphicsPipelineCacheKey& key,
const Shader::IR::Program& program,
@ -183,14 +169,8 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
}
info.convert_depth_mode = gl_ndc;
}
if (key.state.dynamic_vertex_input) {
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
info.generic_input_types[index] = AttributeType(key.state, index);
}
} else {
std::ranges::transform(key.state.attributes, info.generic_input_types.begin(),
&CastAttributeType);
}
break;
case Shader::Stage::TessellationEval:
info.tess_clockwise = key.state.tessellation_clockwise != 0;
@ -269,7 +249,17 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
dst_a == F::Source1Alpha_GL || dst_a == F::OneMinusSource1Alpha_GL;
}
if (device.IsMoltenVK()) {
for (size_t i = 0; i < info.active_color_outputs.size(); ++i) {
const auto format = static_cast<Tegra::RenderTargetFormat>(key.state.color_formats[i]);
info.active_color_outputs[i] = format != Tegra::RenderTargetFormat::NONE;
}
if (info.dual_source_blend && info.active_color_outputs[0]) {
info.active_color_outputs[1] = true;
}
if (info.alpha_test_func && *info.alpha_test_func != Shader::CompareFunction::Always) {
info.active_color_outputs[0] = true;
}
for (size_t i = 0; i < 8; ++i) {
const auto format = static_cast<Tegra::RenderTargetFormat>(key.state.color_formats[i]);
const auto pixel_format = VideoCore::Surface::PixelFormatFromRenderTargetFormat(format);
@ -283,7 +273,6 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
info.color_output_types[i] = Shader::AttributeType::Float;
}
}
}
break;
}
default:
@ -388,6 +377,7 @@ PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
float_control.denormBehaviorIndependence == VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL,
.support_separate_rounding_mode =
float_control.roundingModeIndependence == VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL,
.support_fp32_rounding_rtz = float_control.shaderRoundingModeRTZFloat32 != VK_FALSE,
.support_fp16_denorm_preserve = float_control.shaderDenormPreserveFloat16 != VK_FALSE,
.support_fp32_denorm_preserve = float_control.shaderDenormPreserveFloat32 != VK_FALSE,
.support_fp16_denorm_flush = float_control.shaderDenormFlushToZeroFloat16 != VK_FALSE,
@ -469,7 +459,7 @@ PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
// Level 0: Core Dynamic States only
// Level 1: Core + EDS1
// Level 2: Core + EDS1 + EDS2 (accumulative)
// Level 3: Core + EDS1 + EDS2 + EDS3 (accumulative)
// Level 2: Core + EDS1 + EDS2 (accumulative)
// Here we only verify if extensions were successfully loaded by the device
dynamic_features.has_extended_dynamic_state =
@ -480,16 +470,6 @@ PipelineCache::PipelineCache(Tegra::MaxwellDeviceMemoryManager& device_memory_,
dynamic_features.has_extended_dynamic_state_2_logic_op =
device.IsExtExtendedDynamicState2ExtrasSupported();
dynamic_features.has_extended_dynamic_state_2_patch_control_points = false;
dynamic_features.has_extended_dynamic_state_3_blend =
device.IsExtExtendedDynamicState3BlendingSupported();
dynamic_features.has_extended_dynamic_state_3_enables =
device.IsExtExtendedDynamicState3EnablesSupported();
// VIDS: Independent toggle (not affected by dyna_state levels)
dynamic_features.has_dynamic_vertex_input =
device.IsExtVertexInputDynamicStateSupported() &&
Settings::values.vertex_input_dynamic_state.GetValue();
}
PipelineCache::~PipelineCache() {
@ -595,12 +575,7 @@ void PipelineCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading
(key.state.extended_dynamic_state_2 != 0) !=
dynamic_features.has_extended_dynamic_state_2 ||
(key.state.extended_dynamic_state_2_logic_op != 0) !=
dynamic_features.has_extended_dynamic_state_2_logic_op ||
(key.state.extended_dynamic_state_3_blend != 0) !=
dynamic_features.has_extended_dynamic_state_3_blend ||
(key.state.extended_dynamic_state_3_enables != 0) !=
dynamic_features.has_extended_dynamic_state_3_enables ||
(key.state.dynamic_vertex_input != 0) != dynamic_features.has_dynamic_vertex_input) {
dynamic_features.has_extended_dynamic_state_2_logic_op) {
return;
}
workers.QueueWork([this, key, envs_ = std::move(envs), &state, &callback]() mutable {

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
@ -101,22 +101,14 @@ PresentManager::PresentManager(const vk::Instance& instance_,
MemoryAllocator& memory_allocator_,
Scheduler& scheduler_,
Swapchain& swapchain_,
#ifdef ANDROID
vk::SurfaceKHR& surface_)
#else
VkSurfaceKHR_T* surface_handle_)
#endif
: instance{instance_}
, render_window{render_window_}
, device{device_}
, memory_allocator{memory_allocator_}
, scheduler{scheduler_}
, swapchain{swapchain_}
#ifdef ANDROID
, surface{surface_}
#else
, surface_handle{surface_handle_}
#endif
, blit_supported{CanBlitToSwapchain(device.GetPhysical(), swapchain.GetImageViewFormat())}
, use_present_thread{Settings::values.async_presentation.GetValue()}
{
@ -299,11 +291,7 @@ void PresentManager::PresentThread(std::stop_token token) {
}
void PresentManager::RecreateSwapchain(Frame* frame) {
#ifndef ANDROID
swapchain.Create(surface_handle, frame->width, frame->height); // Pass raw pointer
#else
swapchain.Create(*surface, frame->width, frame->height); // Pass raw pointer
#endif
SetImageCount();
}

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
@ -15,8 +15,6 @@
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
struct VkSurfaceKHR_T;
namespace Core::Frontend {
class EmuWindow;
} // namespace Core::Frontend
@ -46,11 +44,7 @@ public:
MemoryAllocator& memory_allocator,
Scheduler& scheduler,
Swapchain& swapchain,
#ifdef ANDROID
vk::SurfaceKHR& surface);
#else
VkSurfaceKHR_T* surface_handle);
#endif
~PresentManager();
/// Returns the last used presentation frame
@ -84,11 +78,7 @@ private:
MemoryAllocator& memory_allocator;
Scheduler& scheduler;
Swapchain& swapchain;
#ifdef ANDROID
vk::SurfaceKHR& surface;
#else
VkSurfaceKHR_T* surface_handle;
#endif
vk::CommandPool cmdpool;
std::vector<Frame> frames;
boost::container::deque<Frame*> present_queue;

View file

@ -60,8 +60,6 @@ public:
void Reset() override {
ASSERT(references == 0);
VideoCommon::BankBase::Reset();
const auto& dev = device.GetLogical();
dev.ResetQueryPool(*query_pool, 0, BANK_SIZE);
host_results.fill(0ULL);
next_bank = 0;
}
@ -145,6 +143,7 @@ public:
scheduler.Record([buffer = *accumulation_buffer](vk::CommandBuffer cmdbuf) {
cmdbuf.FillBuffer(buffer, 0, 8, 0);
});
ReserveBank();
}
~SamplesStreamer() = default;
@ -441,6 +440,10 @@ private:
}
current_bank = &bank_pool.GetBank(current_bank_id);
current_query_pool = current_bank->GetInnerPool();
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([query_pool = current_query_pool](vk::CommandBuffer cmdbuf) {
cmdbuf.ResetQueryPool(query_pool, 0, SamplesQueryBank::BANK_SIZE);
});
}
size_t ReserveBankSlot() {
@ -1205,13 +1208,18 @@ struct QueryCacheRuntimeImpl {
conditional_resolve_pass = std::make_unique<ConditionalRenderingResolvePass>(
device, scheduler, descriptor_pool, compute_pass_descriptor_queue);
VkBufferUsageFlags conditional_usage =
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
if (device.IsExtConditionalRendering()) {
conditional_usage |= VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT;
}
const VkBufferCreateInfo buffer_ci = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = sizeof(u32),
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT,
.usage = conditional_usage,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
@ -1280,7 +1288,7 @@ void QueryCacheRuntime::EndHostConditionalRendering() {
PauseHostConditionalRendering();
impl->hcr_is_set = false;
impl->is_hcr_running = false;
impl->hcr_buffer = nullptr;
impl->hcr_buffer = VkBuffer{};
impl->hcr_offset = 0;
}

View file

@ -1,3 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
@ -35,7 +38,7 @@ public:
~QueryCacheRuntime();
template <typename SyncValuesType>
void SyncValues(std::span<SyncValuesType> values, VkBuffer base_src_buffer = nullptr);
void SyncValues(std::span<SyncValuesType> values, VkBuffer base_src_buffer = VkBuffer{});
void Barriers(bool is_prebarrier);

View file

@ -173,6 +173,55 @@ DrawParams MakeDrawParams(const MaxwellDrawState& draw_state, u32 num_instances,
}
return params;
}
bool IsLineRasterizationTopology(const Device& device, Maxwell::PrimitiveTopology topology) {
const VkPrimitiveTopology vk_topology = MaxwellToVK::PrimitiveTopology(device, topology);
return vk_topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
vk_topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
}
VkLineRasterizationModeEXT SelectLineRasterizationMode(const Device& device, bool smooth_lines) {
const bool supports_rectangular_lines = device.SupportsRectangularLines();
const bool supports_bresenham_lines = device.SupportsBresenhamLines();
const bool supports_smooth_lines = device.SupportsSmoothLines();
if (smooth_lines) {
if (supports_smooth_lines) {
return VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
}
if (supports_rectangular_lines) {
return VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT;
}
if (supports_bresenham_lines) {
return VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
}
} else {
if (supports_rectangular_lines) {
return VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT;
}
if (supports_bresenham_lines) {
return VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
}
if (supports_smooth_lines) {
return VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
}
}
return VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT;
}
bool SupportsStippleForMode(const Device& device, VkLineRasterizationModeEXT mode) {
switch (mode) {
case VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT:
return device.SupportsStippledRectangularLines();
case VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT:
return device.SupportsStippledBresenhamLines();
case VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT:
return device.SupportsStippledSmoothLines();
default:
return false;
}
}
} // Anonymous namespace
RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
@ -228,12 +277,16 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
if (!pipeline->Configure(is_indexed))
return;
UpdateDynamicStates();
if (pipeline->UsesExtendedDynamicState()) {
state_tracker.InvalidateStateEnableFlag();
}
HandleTransformFeedback();
query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64,
maxwell3d->regs.zpass_pixel_count_enable);
UpdateDynamicStates();
scheduler.RequestRenderpass(texture_cache.GetFramebuffer());
draw_func();
}
@ -305,11 +358,18 @@ void RasterizerVulkan::DrawIndirect() {
const auto& buffer = indirect_buffer.first;
const auto& offset = indirect_buffer.second;
if (params.is_byte_count) {
if (!device.IsExtTransformFeedbackSupported()) {
scheduler.Record([buffer_obj = buffer->Handle(), offset,
stride = params.stride](vk::CommandBuffer cmdbuf) {
cmdbuf.DrawIndirect(buffer_obj, offset, 1, static_cast<u32>(stride));
});
} else {
scheduler.Record([buffer_obj = buffer->Handle(), offset,
stride = params.stride](vk::CommandBuffer cmdbuf) {
cmdbuf.DrawIndirectByteCountEXT(1, 0, buffer_obj, offset, 0,
static_cast<u32>(stride));
});
}
return;
}
if (params.include_count) {
@ -422,7 +482,6 @@ void RasterizerVulkan::Clear(u32 layer_count) {
scheduler.RequestRenderpass(framebuffer);
query_cache.NotifySegment(true);
query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64, maxwell3d->regs.zpass_pixel_count_enable);
u32 up_scale = 1;
u32 down_shift = 0;
if (texture_cache.IsRescaling()) {
@ -1008,6 +1067,7 @@ bool AccelerateDMA::BufferToImage(const Tegra::DMA::ImageCopy& copy_info,
void RasterizerVulkan::UpdateDynamicStates() {
auto& regs = maxwell3d->regs;
GraphicsPipeline* pipeline = pipeline_cache.CurrentGraphicsPipeline();
// Core Dynamic States (Vulkan 1.0) - Always active regardless of dyna_state setting
UpdateViewportsState(regs);
@ -1015,14 +1075,15 @@ void RasterizerVulkan::UpdateDynamicStates() {
UpdateDepthBias(regs);
UpdateBlendConstants(regs);
UpdateDepthBounds(regs);
UpdateStencilFaces(regs);
UpdateLineWidth(regs);
UpdateLineStipple(regs);
// EDS1: CullMode, DepthCompare, FrontFace, StencilOp, DepthBoundsTest, DepthTest, DepthWrite, StencilTest
if (device.IsExtExtendedDynamicStateSupported()) {
if (device.IsExtExtendedDynamicStateSupported() && pipeline && pipeline->UsesExtendedDynamicState()) {
UpdateCullMode(regs);
UpdateDepthCompareOp(regs);
UpdateFrontFace(regs);
UpdatePrimitiveTopology(regs);
UpdateStencilOp(regs);
if (state_tracker.TouchStateEnable()) {
UpdateDepthBoundsTestEnable(regs);
@ -1032,54 +1093,20 @@ void RasterizerVulkan::UpdateDynamicStates() {
}
}
UpdateStencilFaces(regs);
// EDS2: PrimitiveRestart, RasterizerDiscard, DepthBias enable/disable
if (device.IsExtExtendedDynamicState2Supported()) {
if (device.IsExtExtendedDynamicState2Supported() && pipeline && pipeline->UsesExtendedDynamicState2()) {
UpdatePrimitiveRestartEnable(regs);
UpdateRasterizerDiscardEnable(regs);
UpdateDepthBiasEnable(regs);
}
// EDS2 Extras: LogicOp operation selection
if (device.IsExtExtendedDynamicState2ExtrasSupported()) {
if (device.IsExtExtendedDynamicState2ExtrasSupported() && pipeline && pipeline->UsesExtendedDynamicState2LogicOp()) {
UpdateLogicOp(regs);
}
// EDS3 Enables: LogicOpEnable, DepthClamp, LineStipple, ConservativeRaster
if (device.IsExtExtendedDynamicState3EnablesSupported()) {
using namespace Tegra::Engines;
// AMD Workaround: LogicOp incompatible with float render targets
if (device.GetDriverID() == VkDriverIdKHR::VK_DRIVER_ID_AMD_OPEN_SOURCE ||
device.GetDriverID() == VkDriverIdKHR::VK_DRIVER_ID_AMD_PROPRIETARY) {
const auto has_float = std::any_of(
regs.vertex_attrib_format.begin(), regs.vertex_attrib_format.end(),
[](const auto& attrib) {
return attrib.type == Maxwell3D::Regs::VertexAttribute::Type::Float;
}
);
if (regs.logic_op.enable) {
regs.logic_op.enable = static_cast<u32>(!has_float);
}
}
UpdateLogicOpEnable(regs);
UpdateDepthClampEnable(regs);
UpdateLineRasterizationMode(regs);
UpdateLineStippleEnable(regs);
UpdateConservativeRasterizationMode(regs);
UpdateAlphaToCoverageEnable(regs);
UpdateAlphaToOneEnable(regs);
}
// EDS3 Blending: ColorBlendEnable, ColorBlendEquation, ColorWriteMask
if (device.IsExtExtendedDynamicState3BlendingSupported()) {
UpdateBlending(regs);
}
// Vertex Input Dynamic State: Independent from EDS levels
if (device.IsExtVertexInputDynamicStateSupported()) {
if (auto* gp = pipeline_cache.CurrentGraphicsPipeline(); gp && gp->HasDynamicVertexInput()) {
UpdateVertexInput(regs);
}
}
}
void RasterizerVulkan::HandleTransformFeedback() {
@ -1133,8 +1160,16 @@ void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& reg
.minDepth = 0.0f,
.maxDepth = 1.0f,
};
scheduler.Record([viewport](vk::CommandBuffer cmdbuf) {
GraphicsPipeline* pipeline = pipeline_cache.CurrentGraphicsPipeline();
const bool use_viewport_with_count = device.IsExtExtendedDynamicStateSupported() &&
(!pipeline || pipeline->UsesExtendedDynamicState());
scheduler.Record([viewport, use_viewport_with_count](vk::CommandBuffer cmdbuf) {
if (use_viewport_with_count) {
std::array viewports{viewport};
cmdbuf.SetViewportWithCountEXT(viewports);
} else {
cmdbuf.SetViewport(0, viewport);
}
});
return;
}
@ -1150,10 +1185,17 @@ void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& reg
GetViewportState(device, regs, 12, scale), GetViewportState(device, regs, 13, scale),
GetViewportState(device, regs, 14, scale), GetViewportState(device, regs, 15, scale),
};
scheduler.Record([this, viewport_list](vk::CommandBuffer cmdbuf) {
GraphicsPipeline* pipeline = pipeline_cache.CurrentGraphicsPipeline();
const bool use_viewport_with_count = device.IsExtExtendedDynamicStateSupported() &&
(!pipeline || pipeline->UsesExtendedDynamicState());
scheduler.Record([this, viewport_list, use_viewport_with_count](vk::CommandBuffer cmdbuf) {
const u32 num_viewports = std::min<u32>(device.GetMaxViewports(), Maxwell::NumViewports);
const vk::Span<VkViewport> viewports(viewport_list.data(), num_viewports);
if (use_viewport_with_count) {
cmdbuf.SetViewportWithCountEXT(viewports);
} else {
cmdbuf.SetViewport(0, viewports);
}
});
}
@ -1174,8 +1216,16 @@ void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs
scissor.offset.y = static_cast<int32_t>(y);
scissor.extent.width = width;
scissor.extent.height = height;
scheduler.Record([scissor](vk::CommandBuffer cmdbuf) {
GraphicsPipeline* pipeline = pipeline_cache.CurrentGraphicsPipeline();
const bool use_scissor_with_count = device.IsExtExtendedDynamicStateSupported() &&
(!pipeline || pipeline->UsesExtendedDynamicState());
scheduler.Record([scissor, use_scissor_with_count](vk::CommandBuffer cmdbuf) {
if (use_scissor_with_count) {
std::array scissors{scissor};
cmdbuf.SetScissorWithCountEXT(scissors);
} else {
cmdbuf.SetScissor(0, scissor);
}
});
return;
}
@ -1203,10 +1253,17 @@ void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs
GetScissorState(regs, 14, up_scale, down_shift),
GetScissorState(regs, 15, up_scale, down_shift),
};
scheduler.Record([this, scissor_list](vk::CommandBuffer cmdbuf) {
GraphicsPipeline* pipeline = pipeline_cache.CurrentGraphicsPipeline();
const bool use_scissor_with_count = device.IsExtExtendedDynamicStateSupported() &&
(!pipeline || pipeline->UsesExtendedDynamicState());
scheduler.Record([this, scissor_list, use_scissor_with_count](vk::CommandBuffer cmdbuf) {
const u32 num_scissors = std::min<u32>(device.GetMaxViewports(), Maxwell::NumViewports);
const vk::Span<VkRect2D> scissors(scissor_list.data(), num_scissors);
if (use_scissor_with_count) {
cmdbuf.SetScissorWithCountEXT(scissors);
} else {
cmdbuf.SetScissor(0, scissors);
}
});
}
@ -1374,6 +1431,33 @@ void RasterizerVulkan::UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs) {
scheduler.Record([width](vk::CommandBuffer cmdbuf) { cmdbuf.SetLineWidth(width); });
}
void RasterizerVulkan::UpdateLineStipple(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchLineStipple()) {
return;
}
if (!device.IsExtLineRasterizationSupported()) {
return;
}
const auto topology = maxwell3d->draw_manager->GetDrawState().topology;
if (!IsLineRasterizationTopology(device, topology)) {
return;
}
const VkLineRasterizationModeEXT mode =
SelectLineRasterizationMode(device, regs.line_anti_alias_enable != 0);
if (regs.line_stipple_enable == 0 || !SupportsStippleForMode(device, mode)) {
return;
}
scheduler.Record(
[factor = regs.line_stipple_params.factor,
pattern = static_cast<u16>(regs.line_stipple_params.pattern)](vk::CommandBuffer cmdbuf) {
cmdbuf.SetLineStippleEXT(factor, pattern);
});
}
void RasterizerVulkan::UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchCullMode()) {
return;
@ -1434,73 +1518,6 @@ void RasterizerVulkan::UpdateRasterizerDiscardEnable(Tegra::Engines::Maxwell3D::
});
}
void RasterizerVulkan::UpdateConservativeRasterizationMode(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchConservativeRasterizationMode()) {
return;
}
if (!device.SupportsDynamicState3ConservativeRasterizationMode()) {
return;
}
scheduler.Record([enable = regs.conservative_raster_enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetConservativeRasterizationModeEXT(
enable ? VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT
: VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT);
});
}
void RasterizerVulkan::UpdateLineStippleEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchLineStippleEnable()) {
return;
}
if (!device.SupportsDynamicState3LineStippleEnable()) {
return;
}
scheduler.Record([enable = regs.line_stipple_enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetLineStippleEnableEXT(enable);
});
}
void RasterizerVulkan::UpdateLineRasterizationMode(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!device.IsExtLineRasterizationSupported()) {
return;
}
if (!state_tracker.TouchLineRasterizationMode()) {
return;
}
if (!device.SupportsDynamicState3LineRasterizationMode()) {
static std::once_flag warn_missing_rect;
std::call_once(warn_missing_rect, [] {
LOG_WARNING(Render_Vulkan,
"Driver lacks rectangular line rasterization support; skipping dynamic "
"line state updates");
});
return;
}
const bool wants_smooth = regs.line_anti_alias_enable != 0;
VkLineRasterizationModeEXT mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT;
if (wants_smooth) {
if (device.SupportsSmoothLines()) {
mode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
} else {
static std::once_flag warn_missing_smooth;
std::call_once(warn_missing_smooth, [] {
LOG_WARNING(Render_Vulkan,
"Line anti-aliasing requested but smoothLines feature unavailable; "
"using rectangular rasterization");
});
}
}
scheduler.Record([mode](vk::CommandBuffer cmdbuf) {
cmdbuf.SetLineRasterizationModeEXT(mode);
});
}
void RasterizerVulkan::UpdateDepthBiasEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchDepthBiasEnable()) {
return;
@ -1536,70 +1553,6 @@ void RasterizerVulkan::UpdateDepthBiasEnable(Tegra::Engines::Maxwell3D::Regs& re
[enable](vk::CommandBuffer cmdbuf) { cmdbuf.SetDepthBiasEnableEXT(enable != 0); });
}
void RasterizerVulkan::UpdateLogicOpEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchLogicOpEnable()) {
return;
}
if (!device.SupportsDynamicState3LogicOpEnable()) {
return;
}
scheduler.Record([enable = regs.logic_op.enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetLogicOpEnableEXT(enable != 0);
});
}
void RasterizerVulkan::UpdateDepthClampEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchDepthClampEnable()) {
return;
}
if (!device.SupportsDynamicState3DepthClampEnable()) {
return;
}
bool is_enabled = !(regs.viewport_clip_control.geometry_clip ==
Maxwell::ViewportClipControl::GeometryClip::Passthrough ||
regs.viewport_clip_control.geometry_clip ==
Maxwell::ViewportClipControl::GeometryClip::FrustumXYZ ||
regs.viewport_clip_control.geometry_clip ==
Maxwell::ViewportClipControl::GeometryClip::FrustumZ);
scheduler.Record(
[is_enabled](vk::CommandBuffer cmdbuf) { cmdbuf.SetDepthClampEnableEXT(is_enabled); });
}
void RasterizerVulkan::UpdateAlphaToCoverageEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchAlphaToCoverageEnable()) {
return;
}
if (!device.SupportsDynamicState3AlphaToCoverageEnable()) {
return;
}
GraphicsPipeline* const pipeline = pipeline_cache.CurrentGraphicsPipeline();
const bool enable = pipeline != nullptr && pipeline->SupportsAlphaToCoverage() &&
regs.anti_alias_alpha_control.alpha_to_coverage != 0;
scheduler.Record([enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetAlphaToCoverageEnableEXT(enable ? VK_TRUE : VK_FALSE);
});
}
void RasterizerVulkan::UpdateAlphaToOneEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchAlphaToOneEnable()) {
return;
}
if (!device.SupportsDynamicState3AlphaToOneEnable()) {
static std::once_flag warn_alpha_to_one;
std::call_once(warn_alpha_to_one, [] {
LOG_WARNING(Render_Vulkan,
"Alpha-to-one is not supported on this device; forcing it disabled");
});
return;
}
GraphicsPipeline* const pipeline = pipeline_cache.CurrentGraphicsPipeline();
const bool enable = pipeline != nullptr && pipeline->SupportsAlphaToOne() &&
regs.anti_alias_alpha_control.alpha_to_one != 0;
scheduler.Record([enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetAlphaToOneEnableEXT(enable ? VK_TRUE : VK_FALSE);
});
}
void RasterizerVulkan::UpdateDepthCompareOp(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchDepthCompareOp()) {
return;
@ -1609,6 +1562,17 @@ void RasterizerVulkan::UpdateDepthCompareOp(Tegra::Engines::Maxwell3D::Regs& reg
});
}
void RasterizerVulkan::UpdatePrimitiveTopology([[maybe_unused]] Tegra::Engines::Maxwell3D::Regs& regs) {
const auto topology = maxwell3d->draw_manager->GetDrawState().topology;
if (!state_tracker.ChangePrimitiveTopology(topology)) {
return;
}
const auto vk_topology = MaxwellToVK::PrimitiveTopology(device, topology);
scheduler.Record([vk_topology](vk::CommandBuffer cmdbuf) {
cmdbuf.SetPrimitiveTopologyEXT(vk_topology);
});
}
void RasterizerVulkan::UpdateFrontFace(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchFrontFace()) {
return;
@ -1671,87 +1635,15 @@ void RasterizerVulkan::UpdateBlending(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchBlending()) {
return;
}
if (state_tracker.TouchColorMask()) {
std::array<VkColorComponentFlags, Maxwell::NumRenderTargets> setup_masks{};
for (size_t index = 0; index < Maxwell::NumRenderTargets; index++) {
const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : index];
auto& current = setup_masks[index];
if (mask.R) {
current |= VK_COLOR_COMPONENT_R_BIT;
}
if (mask.G) {
current |= VK_COLOR_COMPONENT_G_BIT;
}
if (mask.B) {
current |= VK_COLOR_COMPONENT_B_BIT;
}
if (mask.A) {
current |= VK_COLOR_COMPONENT_A_BIT;
}
}
scheduler.Record([setup_masks](vk::CommandBuffer cmdbuf) {
cmdbuf.SetColorWriteMaskEXT(0, setup_masks);
});
}
if (state_tracker.TouchBlendEnable()) {
std::array<VkBool32, Maxwell::NumRenderTargets> setup_enables{};
std::ranges::transform(
regs.blend.enable, setup_enables.begin(),
[&](const auto& is_enabled) { return is_enabled != 0 ? VK_TRUE : VK_FALSE; });
scheduler.Record([setup_enables](vk::CommandBuffer cmdbuf) {
cmdbuf.SetColorBlendEnableEXT(0, setup_enables);
});
}
if (state_tracker.TouchBlendEquations()) {
std::array<VkColorBlendEquationEXT, Maxwell::NumRenderTargets> setup_blends{};
const auto blend_setup = [&](auto& host_blend, const auto& guest_blend) {
host_blend.srcColorBlendFactor = MaxwellToVK::BlendFactor(guest_blend.color_source);
host_blend.dstColorBlendFactor = MaxwellToVK::BlendFactor(guest_blend.color_dest);
host_blend.colorBlendOp = MaxwellToVK::BlendEquation(guest_blend.color_op);
host_blend.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(guest_blend.alpha_source);
host_blend.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(guest_blend.alpha_dest);
host_blend.alphaBlendOp = MaxwellToVK::BlendEquation(guest_blend.alpha_op);
};
// Single blend equation for all targets
if (!regs.blend_per_target_enabled) {
// Temporary workaround for games that use iterated blending
if (regs.iterated_blend.enable && Settings::values.use_squashed_iterated_blend) {
setup_blends[0].srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
setup_blends[0].dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
setup_blends[0].colorBlendOp = VK_BLEND_OP_ADD;
setup_blends[0].srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
setup_blends[0].dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
setup_blends[0].alphaBlendOp = VK_BLEND_OP_ADD;
} else {
blend_setup(setup_blends[0], regs.blend);
}
// Copy first blend state to all other targets
for (size_t index = 1; index < Maxwell::NumRenderTargets; index++) {
setup_blends[index] = setup_blends[0];
}
} else {
// Per-target blending
for (size_t index = 0; index < Maxwell::NumRenderTargets; index++) {
blend_setup(setup_blends[index], regs.blend_per_target[index]);
}
}
scheduler.Record([setup_blends](vk::CommandBuffer cmdbuf) {
cmdbuf.SetColorBlendEquationEXT(0, setup_blends);
});
}
}
void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& regs) {
if (!state_tracker.TouchStencilTestEnable()) {
return;
}
if (regs.stencil_enable != 0) {
state_tracker.ResetStencilState();
}
scheduler.Record([enable = regs.stencil_enable](vk::CommandBuffer cmdbuf) {
cmdbuf.SetStencilTestEnableEXT(enable);
});
@ -1763,56 +1655,6 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
return;
}
dirty[Dirty::VertexInput] = false;
boost::container::static_vector<VkVertexInputBindingDescription2EXT, 32> bindings;
boost::container::static_vector<VkVertexInputAttributeDescription2EXT, 32> attributes;
// There seems to be a bug on Nvidia's driver where updating only higher attributes ends up
// generating dirty state. Track the highest dirty attribute and update all attributes until
// that one.
size_t highest_dirty_attr{};
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
if (dirty[Dirty::VertexAttribute0 + index]) {
highest_dirty_attr = index;
}
}
for (size_t index = 0; index < highest_dirty_attr; ++index) {
const Maxwell::VertexAttribute attribute{regs.vertex_attrib_format[index]};
const u32 binding{attribute.buffer};
dirty[Dirty::VertexAttribute0 + index] = false;
dirty[Dirty::VertexBinding0 + static_cast<size_t>(binding)] = true;
if (!attribute.constant) {
attributes.push_back({
.sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT,
.pNext = nullptr,
.location = static_cast<u32>(index),
.binding = binding,
.format = MaxwellToVK::VertexFormat(device, attribute.type, attribute.size),
.offset = attribute.offset,
});
}
}
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
if (!dirty[Dirty::VertexBinding0 + index]) {
continue;
}
dirty[Dirty::VertexBinding0 + index] = false;
const u32 binding{static_cast<u32>(index)};
const auto& input_binding{regs.vertex_streams[binding]};
const bool is_instanced{regs.vertex_stream_instances.IsInstancingEnabled(binding)};
bindings.push_back({
.sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT,
.pNext = nullptr,
.binding = binding,
.stride = input_binding.stride,
.inputRate = is_instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX,
.divisor = is_instanced ? input_binding.frequency : 1,
});
}
scheduler.Record([bindings, attributes](vk::CommandBuffer cmdbuf) {
cmdbuf.SetVertexInputEXT(bindings, attributes);
});
}
void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
@ -168,23 +168,17 @@ private:
void UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateLineStipple(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthBoundsTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthWriteEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthCompareOp(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdatePrimitiveTopology(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdatePrimitiveRestartEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateRasterizerDiscardEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateConservativeRasterizationMode(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateLineStippleEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateLineStipple(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateLineRasterizationMode(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthBiasEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateLogicOpEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateDepthClampEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateAlphaToCoverageEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateAlphaToOneEnable(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateFrontFace(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilOp(Tegra::Engines::Maxwell3D::Regs& regs);
void UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);

View file

@ -337,6 +337,13 @@ void Scheduler::EndRenderPass()
images = renderpass_images,
ranges = renderpass_image_ranges](vk::CommandBuffer cmdbuf) {
std::array<VkImageMemoryBarrier, 9> barriers;
constexpr VkPipelineStageFlags src_stages =
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
constexpr VkPipelineStageFlags dst_stages =
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
for (size_t i = 0; i < num_images; ++i) {
const VkImageSubresourceRange& range = ranges[i];
const bool is_color = (range.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != 0;
@ -372,12 +379,11 @@ void Scheduler::EndRenderPass()
};
}
cmdbuf.EndRenderPass();
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, nullptr, nullptr, vk::Span(barriers.data(), num_images));
cmdbuf.PipelineBarrier(src_stages, dst_stages, 0, nullptr, nullptr,
vk::Span(barriers.data(), num_images));
});
state.renderpass = nullptr;
state.renderpass = VkRenderPass{};
num_renderpass_images = 0;
}

View file

@ -44,10 +44,10 @@ public:
~Scheduler();
/// Sends the current execution context to the GPU.
u64 Flush(VkSemaphore signal_semaphore = nullptr, VkSemaphore wait_semaphore = nullptr);
u64 Flush(VkSemaphore signal_semaphore = {}, VkSemaphore wait_semaphore = {});
/// Sends the current execution context to the GPU and waits for it to complete.
void Finish(VkSemaphore signal_semaphore = nullptr, VkSemaphore wait_semaphore = nullptr);
void Finish(VkSemaphore signal_semaphore = {}, VkSemaphore wait_semaphore = {});
/// Waits for the worker thread to finish executing everything. After this function returns it's
/// safe to touch worker resources.
@ -237,8 +237,8 @@ private:
};
struct State {
VkRenderPass renderpass = nullptr;
VkFramebuffer framebuffer = nullptr;
VkRenderPass renderpass{};
VkFramebuffer framebuffer{};
VkExtent2D render_area = {0, 0};
GraphicsPipeline* graphics_pipeline = nullptr;
bool is_rescaling = false;

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
@ -40,6 +40,7 @@ Flags MakeInvalidationFlags() {
StencilWriteMask,
StencilCompare,
LineWidth,
LineStipple,
CullMode,
DepthBoundsEnable,
DepthTestEnable,
@ -54,19 +55,11 @@ Flags MakeInvalidationFlags() {
StateEnable,
PrimitiveRestartEnable,
DepthBiasEnable,
LogicOpEnable,
DepthClampEnable,
AlphaToCoverageEnable,
AlphaToOneEnable,
LineRasterizationMode,
LogicOp,
Blending,
ColorMask,
BlendEquations,
BlendEnable,
ConservativeRasterizationMode,
LineStippleEnable,
LineStippleParams,
};
Flags flags{};
for (const int flag : INVALIDATION_FLAGS) {
@ -127,6 +120,13 @@ void SetupDirtyStencilProperties(Tables& tables) {
void SetupDirtyLineWidth(Tables& tables) {
tables[0][OFF(line_width_smooth)] = LineWidth;
tables[0][OFF(line_width_aliased)] = LineWidth;
tables[0][OFF(line_anti_alias_enable)] = LineWidth;
}
void SetupDirtyLineStipple(Tables& tables) {
tables[0][OFF(line_stipple_enable)] = LineStipple;
FillBlock(tables[0], OFF(line_stipple_params), NUM(line_stipple_params), LineStipple);
tables[1][OFF(line_anti_alias_enable)] = LineStipple;
}
void SetupDirtyCullMode(Tables& tables) {
@ -149,11 +149,6 @@ void SetupDirtyStateEnable(Tables& tables) {
setup(OFF(polygon_offset_point_enable), DepthBiasEnable);
setup(OFF(polygon_offset_line_enable), DepthBiasEnable);
setup(OFF(polygon_offset_fill_enable), DepthBiasEnable);
setup(OFF(logic_op.enable), LogicOpEnable);
setup(OFF(viewport_clip_control.geometry_clip), DepthClampEnable);
setup(OFF(line_stipple_enable), LineStippleEnable);
setup(OFF(anti_alias_alpha_control.alpha_to_coverage), AlphaToCoverageEnable);
setup(OFF(anti_alias_alpha_control.alpha_to_one), AlphaToOneEnable);
}
void SetupDirtyDepthCompareOp(Tables& tables) {
@ -227,13 +222,6 @@ void SetupDirtyVertexBindings(Tables& tables) {
}
}
void SetupRasterModes(Tables &tables) {
auto& table = tables[0];
table[OFF(line_stipple_params)] = LineStippleParams;
table[OFF(conservative_raster_enable)] = ConservativeRasterizationMode;
table[OFF(line_anti_alias_enable)] = LineRasterizationMode;
}
} // Anonymous namespace
void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
@ -246,6 +234,7 @@ void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
SetupDirtyDepthBounds(tables);
SetupDirtyStencilProperties(tables);
SetupDirtyLineWidth(tables);
SetupDirtyLineStipple(tables);
SetupDirtyCullMode(tables);
SetupDirtyStateEnable(tables);
SetupDirtyDepthCompareOp(tables);
@ -256,7 +245,6 @@ void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
SetupDirtyVertexAttributes(tables);
SetupDirtyVertexBindings(tables);
SetupDirtySpecialOps(tables);
SetupRasterModes(tables);
}
void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) {

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
@ -42,6 +42,7 @@ enum : u8 {
StencilWriteMask,
StencilCompare,
LineWidth,
LineStipple,
CullMode,
DepthBoundsEnable,
@ -53,17 +54,9 @@ enum : u8 {
StencilTestEnable,
PrimitiveRestartEnable,
RasterizerDiscardEnable,
ConservativeRasterizationMode,
LineRasterizationMode,
LineStippleEnable,
LineStippleParams,
DepthBiasEnable,
StateEnable,
LogicOp,
LogicOpEnable,
DepthClampEnable,
AlphaToCoverageEnable,
AlphaToOneEnable,
Blending,
BlendEnable,
@ -177,6 +170,10 @@ public:
return ExchangeCheck(back.compare_mask, new_value) || stencil_reset;
}
void ResetStencilState() {
stencil_reset = true;
}
void ClearStencilReset() {
stencil_reset = false;
}
@ -185,6 +182,10 @@ public:
return Exchange(Dirty::LineWidth, false);
}
bool TouchLineStipple() const {
return Exchange(Dirty::LineStipple, false);
}
bool TouchCullMode() {
return Exchange(Dirty::CullMode, false);
}
@ -213,33 +214,8 @@ public:
return Exchange(Dirty::RasterizerDiscardEnable, false);
}
bool TouchConservativeRasterizationMode()
{
return Exchange(Dirty::ConservativeRasterizationMode, false);
}
bool TouchLineStippleEnable() { return Exchange(Dirty::LineStippleEnable, false); }
bool TouchLineStipple() { return Exchange(Dirty::LineStippleParams, false); }
bool TouchDepthBiasEnable() { return Exchange(Dirty::DepthBiasEnable, false); }
bool TouchLogicOpEnable() {
return Exchange(Dirty::LogicOpEnable, false);
}
bool TouchDepthClampEnable() {
return Exchange(Dirty::DepthClampEnable, false);
}
bool TouchAlphaToCoverageEnable() {
return Exchange(Dirty::AlphaToCoverageEnable, false);
}
bool TouchAlphaToOneEnable() {
return Exchange(Dirty::AlphaToOneEnable, false);
}
bool TouchDepthCompareOp() {
return Exchange(Dirty::DepthCompareOp, false);
}
@ -276,10 +252,6 @@ public:
return Exchange(Dirty::LogicOp, false);
}
bool TouchLineRasterizationMode() {
return Exchange(Dirty::LineRasterizationMode, false);
}
bool ChangePrimitiveTopology(Maxwell::PrimitiveTopology new_topology) {
const bool has_changed = current_topology != new_topology;
current_topology = new_topology;

View file

@ -109,38 +109,22 @@ VkCompositeAlphaFlagBitsKHR ChooseAlphaFlags(const VkSurfaceCapabilitiesKHR& cap
} // Anonymous namespace
Swapchain::Swapchain(
#ifdef ANDROID
VkSurfaceKHR surface_,
#else
VkSurfaceKHR_T* surface_handle_,
#endif
VkSurfaceKHR_T* surface_,
const Device& device_,
Scheduler& scheduler_,
u32 width_,
u32 height_)
#ifdef ANDROID
: surface(surface_)
#else
: surface_handle{surface_handle_}
#endif
, device{device_}
, scheduler{scheduler_}
{
#ifdef ANDROID
Create(surface, width_, height_);
#else
Create(surface_handle, width_, height_);
#endif
}
Swapchain::~Swapchain() = default;
void Swapchain::Create(
#ifdef ANDROID
VkSurfaceKHR surface_,
#else
VkSurfaceKHR_T* surface_handle_,
#endif
VkSurfaceKHR_T* surface_,
u32 width_,
u32 height_)
{
@ -148,18 +132,10 @@ void Swapchain::Create(
is_suboptimal = false;
width = width_;
height = height_;
#ifdef ANDROID
surface = surface_;
#else
surface_handle = surface_handle_;
#endif
const auto physical_device = device.GetPhysical();
#ifdef ANDROID
const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(surface)};
#else
const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(surface_handle)};
#endif
const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(VkSurfaceKHR(surface))};
if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) {
return;
}
@ -254,14 +230,8 @@ void Swapchain::Present(VkSemaphore render_semaphore) {
void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities) {
const auto physical_device{device.GetPhysical()};
#ifdef ANDROID
const auto formats{physical_device.GetSurfaceFormatsKHR(surface)};
const auto present_modes = physical_device.GetSurfacePresentModesKHR(surface);
#else
const auto formats{physical_device.GetSurfaceFormatsKHR(surface_handle)};
const auto present_modes = physical_device.GetSurfacePresentModesKHR(surface_handle);
#endif
const auto formats{physical_device.GetSurfaceFormatsKHR(VkSurfaceKHR(surface))};
const auto present_modes = physical_device.GetSurfacePresentModesKHR(VkSurfaceKHR(surface));
has_mailbox = std::find(present_modes.begin(), present_modes.end(), VK_PRESENT_MODE_MAILBOX_KHR)
!= present_modes.end();
@ -290,11 +260,7 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities) {
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
.pNext = nullptr,
.flags = 0,
#ifdef ANDROID
.surface = surface,
#else
.surface = surface_handle,
#endif
.surface = VkSurfaceKHR(surface),
.minImageCount = requested_image_count,
.imageFormat = surface_format.format,
.imageColorSpace = surface_format.colorSpace,
@ -313,7 +279,7 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities) {
.compositeAlpha = alpha_flags,
.presentMode = present_mode,
.clipped = VK_FALSE,
.oldSwapchain = nullptr,
.oldSwapchain = VkSwapchainKHR{},
};
const u32 graphics_family{device.GetGraphicsFamily()};
const u32 present_family{device.GetPresentFamily()};
@ -345,11 +311,7 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities) {
swapchain_ci.flags |= VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR;
}
// Request the size again to reduce the possibility of a TOCTOU race condition.
#ifdef ANDROID
const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface);
#else
const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface_handle);
#endif
const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(VkSurfaceKHR(surface));
swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height);
// Don't add code within this and the swapchain creation.
swapchain = device.GetLogical().CreateSwapchainKHR(swapchain_ci);

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
@ -11,8 +11,6 @@
#include "common/common_types.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
struct VkSurfaceKHR_T;
namespace Layout {
struct FramebufferLayout;
}
@ -25,11 +23,7 @@ class Scheduler;
class Swapchain {
public:
explicit Swapchain(
#ifdef ANDROID
VkSurfaceKHR surface,
#else
VkSurfaceKHR_T* surface_handle,
#endif
VkSurfaceKHR_T* surface,
const Device& device,
Scheduler& scheduler,
u32 width,
@ -38,11 +32,7 @@ public:
/// Creates (or recreates) the swapchain with a given size.
void Create(
#ifdef ANDROID
VkSurfaceKHR surface,
#else
VkSurfaceKHR_T* surface_handle,
#endif
VkSurfaceKHR_T* surface,
u32 width,
u32 height);
@ -128,11 +118,7 @@ private:
bool NeedsPresentModeUpdate() const;
#ifdef ANDROID
VkSurfaceKHR surface;
#else
VkSurfaceKHR_T* surface_handle;
#endif
VkSurfaceKHR_T* surface;
const Device& device;
Scheduler& scheduler;

View file

@ -446,15 +446,24 @@ TransformBufferCopies(std::span<const VideoCommon::BufferCopy> copies, size_t bu
};
}
[[nodiscard]] VkImageSubresourceRange MakeBarrierSubresourceRange(
VkImageAspectFlags aspect_mask, const SubresourceRange& range, bool is_3d_image) {
VkImageSubresourceRange subresource_range = MakeSubresourceRange(aspect_mask, range);
if (is_3d_image && subresource_range.layerCount == 1) {
subresource_range.layerCount = VK_REMAINING_ARRAY_LAYERS;
}
return subresource_range;
}
[[nodiscard]] VkImageSubresourceRange MakeSubresourceRange(const ImageView* image_view) {
SubresourceRange range = image_view->range;
const bool is_3d_image = image_view->type == VideoCommon::ImageViewType::e3D ||
True(image_view->flags & VideoCommon::ImageViewFlagBits::Slice);
if (True(image_view->flags & VideoCommon::ImageViewFlagBits::Slice)) {
// Slice image views always affect a single layer, but their subresource range corresponds
// to the slice. Override the value to affect a single layer.
range.base.layer = 0;
range.extent.layers = 1;
}
return MakeSubresourceRange(ImageAspectMask(image_view->format), range);
return MakeBarrierSubresourceRange(ImageAspectMask(image_view->format), range, is_3d_image);
}
[[nodiscard]] VkImageSubresourceLayers MakeSubresourceLayers(const ImageView* image_view) {
@ -524,18 +533,23 @@ struct RangedBarrierRange {
max_layer = (std::max)(max_layer, layers.baseArrayLayer + layers.layerCount);
}
VkImageSubresourceRange SubresourceRange(VkImageAspectFlags aspect_mask) const noexcept {
return VkImageSubresourceRange{
.aspectMask = aspect_mask,
.baseMipLevel = min_mip,
.levelCount = max_mip - min_mip,
.baseArrayLayer = min_layer,
.layerCount = max_layer - min_layer,
VkImageSubresourceRange SubresourceRange(VkImageAspectFlags aspect_mask,
bool is_3d_image) const noexcept {
const VideoCommon::SubresourceRange range{
.base = {
.level = static_cast<s32>(min_mip),
.layer = static_cast<s32>(min_layer),
},
.extent = {
.levels = static_cast<s32>(max_mip - min_mip),
.layers = static_cast<s32>(max_layer - min_layer),
},
};
return MakeBarrierSubresourceRange(aspect_mask, range, is_3d_image);
}
};
void CopyBufferToImage(vk::CommandBuffer cmdbuf, VkBuffer src_buffer, VkImage image,
VkImageAspectFlags aspect_mask, bool is_initialized,
VkImageAspectFlags aspect_mask, bool is_initialized, bool is_3d_image,
std::span<const VkBufferImageCopy> copies) {
static constexpr VkAccessFlags WRITE_ACCESS_FLAGS =
VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
@ -549,7 +563,8 @@ void CopyBufferToImage(vk::CommandBuffer cmdbuf, VkBuffer src_buffer, VkImage im
for (const auto& region : copies) {
range.AddLayers(region.imageSubresource);
}
const VkImageSubresourceRange subresource_range = range.SubresourceRange(aspect_mask);
const VkImageSubresourceRange subresource_range =
range.SubresourceRange(aspect_mask, is_3d_image);
const VkImageMemoryBarrier read_barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
@ -1006,9 +1021,12 @@ void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
const VkBuffer copy_buffer = GetTemporaryBuffer(total_size);
const VkImage dst_image = dst.Handle();
const VkImage src_image = src.Handle();
const bool dst_is_3d = dst.info.type == ImageType::e3D;
const bool src_is_3d = src.info.type == ImageType::e3D;
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([dst_image, src_image, copy_buffer, src_aspect_mask, dst_aspect_mask,
vk_in_copies, vk_out_copies](vk::CommandBuffer cmdbuf) {
dst_is_3d, src_is_3d, vk_in_copies,
vk_out_copies](vk::CommandBuffer cmdbuf) {
RangedBarrierRange dst_range;
RangedBarrierRange src_range;
for (const VkBufferImageCopy& copy : vk_in_copies) {
@ -1042,7 +1060,7 @@ void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = src_image,
.subresourceRange = src_range.SubresourceRange(src_aspect_mask),
.subresourceRange = src_range.SubresourceRange(src_aspect_mask, src_is_3d),
},
};
const std::array middle_in_barrier{
@ -1056,7 +1074,7 @@ void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = src_image,
.subresourceRange = src_range.SubresourceRange(src_aspect_mask),
.subresourceRange = src_range.SubresourceRange(src_aspect_mask, src_is_3d),
},
};
const std::array middle_out_barrier{
@ -1072,7 +1090,7 @@ void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst_image,
.subresourceRange = dst_range.SubresourceRange(dst_aspect_mask),
.subresourceRange = dst_range.SubresourceRange(dst_aspect_mask, dst_is_3d),
},
};
const std::array post_barriers{
@ -1091,7 +1109,7 @@ void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst_image,
.subresourceRange = dst_range.SubresourceRange(dst_aspect_mask),
.subresourceRange = dst_range.SubresourceRange(dst_aspect_mask, dst_is_3d),
},
};
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
@ -1440,6 +1458,8 @@ void TextureCacheRuntime::CopyImage(Image& dst, Image& src,
boost::container::small_vector<VkImageCopy, 16> vk_copies(copies.size());
const VkImageAspectFlags aspect_mask = dst.AspectMask();
ASSERT(aspect_mask == src.AspectMask());
const bool dst_is_3d = dst.info.type == ImageType::e3D;
const bool src_is_3d = src.info.type == ImageType::e3D;
std::ranges::transform(copies, vk_copies.begin(), [aspect_mask](const auto& copy) {
return MakeImageCopy(copy, aspect_mask);
@ -1447,7 +1467,8 @@ void TextureCacheRuntime::CopyImage(Image& dst, Image& src,
const VkImage dst_image = dst.Handle();
const VkImage src_image = src.Handle();
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([dst_image, src_image, aspect_mask, vk_copies](vk::CommandBuffer cmdbuf) {
scheduler.Record([dst_image, src_image, aspect_mask, dst_is_3d, src_is_3d,
vk_copies](vk::CommandBuffer cmdbuf) {
RangedBarrierRange dst_range;
RangedBarrierRange src_range;
for (const VkImageCopy& copy : vk_copies) {
@ -1467,7 +1488,7 @@ void TextureCacheRuntime::CopyImage(Image& dst, Image& src,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = src_image,
.subresourceRange = src_range.SubresourceRange(aspect_mask),
.subresourceRange = src_range.SubresourceRange(aspect_mask, src_is_3d),
},
VkImageMemoryBarrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
@ -1481,7 +1502,7 @@ void TextureCacheRuntime::CopyImage(Image& dst, Image& src,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst_image,
.subresourceRange = dst_range.SubresourceRange(aspect_mask),
.subresourceRange = dst_range.SubresourceRange(aspect_mask, dst_is_3d),
},
};
const std::array post_barriers{
@ -1495,7 +1516,7 @@ void TextureCacheRuntime::CopyImage(Image& dst, Image& src,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = src_image,
.subresourceRange = src_range.SubresourceRange(aspect_mask),
.subresourceRange = src_range.SubresourceRange(aspect_mask, src_is_3d),
},
VkImageMemoryBarrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
@ -1512,7 +1533,7 @@ void TextureCacheRuntime::CopyImage(Image& dst, Image& src,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst_image,
.subresourceRange = dst_range.SubresourceRange(aspect_mask),
.subresourceRange = dst_range.SubresourceRange(aspect_mask, dst_is_3d),
},
};
cmdbuf.PipelineBarrier(
@ -1691,10 +1712,12 @@ void Image::UploadMemory(VkBuffer buffer, VkDeviceSize offset,
const VkBuffer src_buffer = buffer;
const VkImage temp_vk_image = *temp_wrapper->original_image;
const VkImageAspectFlags vk_aspect_mask = temp_wrapper->aspect_mask;
const bool temp_is_3d = temp_info.type == ImageType::e3D;
scheduler->Record([src_buffer, temp_vk_image, vk_aspect_mask, vk_copies,
scheduler->Record([src_buffer, temp_vk_image, vk_aspect_mask, temp_is_3d, vk_copies,
keep = temp_wrapper](vk::CommandBuffer cmdbuf) {
CopyBufferToImage(cmdbuf, src_buffer, temp_vk_image, vk_aspect_mask, false, VideoCommon::FixSmallVectorADL(vk_copies));
CopyBufferToImage(cmdbuf, src_buffer, temp_vk_image, vk_aspect_mask, false,
temp_is_3d, VideoCommon::FixSmallVectorADL(vk_copies));
});
// Use MSAACopyPass to convert from non-MSAA to MSAA
@ -1730,10 +1753,12 @@ void Image::UploadMemory(VkBuffer buffer, VkDeviceSize offset,
const VkImage vk_image = *original_image;
const VkImageAspectFlags vk_aspect_mask = aspect_mask;
const bool was_initialized = std::exchange(initialized, true);
const bool is_3d_image = info.type == ImageType::e3D;
scheduler->Record([src_buffer, vk_image, vk_aspect_mask, was_initialized,
scheduler->Record([src_buffer, vk_image, vk_aspect_mask, was_initialized, is_3d_image,
vk_copies](vk::CommandBuffer cmdbuf) {
CopyBufferToImage(cmdbuf, src_buffer, vk_image, vk_aspect_mask, was_initialized, VideoCommon::FixSmallVectorADL(vk_copies));
CopyBufferToImage(cmdbuf, src_buffer, vk_image, vk_aspect_mask, was_initialized,
is_3d_image, VideoCommon::FixSmallVectorADL(vk_copies));
});
if (is_rescaled) {
@ -2112,7 +2137,10 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
}
}
const auto format_info = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format);
if (ImageUsageFlags(format_info, format) != image.UsageFlags()) {
const VkImageUsageFlags desired_view_usage = ImageUsageFlags(format_info, format);
const VkImageUsageFlags image_usage = image.UsageFlags();
const VkImageUsageFlags view_usage = desired_view_usage & image_usage;
if (desired_view_usage != image_usage) {
LOG_WARNING(Render_Vulkan,
"Image view format {} has different usage flags than image format {}", format,
image.info.format);
@ -2120,7 +2148,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
const VkImageViewUsageCreateInfo image_view_usage{
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
.pNext = nullptr,
.usage = ImageUsageFlags(format_info, format),
.usage = view_usage,
};
const VkImageViewCreateInfo create_info{
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
@ -2286,6 +2314,7 @@ vk::ImageView ImageView::MakeView(VkFormat vk_format, VkImageAspectFlags aspect_
Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& tsc) {
const auto& device = runtime.device;
has_depth_compare = tsc.depth_compare_enabled != 0;
// Check if custom border colors are supported
const bool has_custom_border_colors = runtime.device.IsCustomBorderColorsSupported();
const bool has_format_undefined = runtime.device.IsCustomBorderColorWithoutFormatSupported();
@ -2326,7 +2355,7 @@ Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& t
// Some games have samplers with garbage. Sanitize them here.
const f32 max_anisotropy = std::clamp(tsc.MaxAnisotropy(), 1.0f, 16.0f);
const auto create_sampler = [&](const f32 anisotropy) {
const auto create_sampler = [&](const f32 anisotropy, bool enable_depth_compare) {
return device.GetLogical().CreateSampler(VkSamplerCreateInfo{
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = pnext,
@ -2340,7 +2369,7 @@ Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& t
.mipLodBias = tsc.LodBias(),
.anisotropyEnable = static_cast<VkBool32>(anisotropy > 1.0f ? VK_TRUE : VK_FALSE),
.maxAnisotropy = anisotropy,
.compareEnable = tsc.depth_compare_enabled,
.compareEnable = enable_depth_compare,
.compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func),
.minLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.0f : tsc.MinLod(),
.maxLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.25f : tsc.MaxLod(),
@ -2350,11 +2379,18 @@ Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& t
});
};
sampler = create_sampler(max_anisotropy);
sampler = create_sampler(max_anisotropy, has_depth_compare);
if (has_depth_compare) {
sampler_no_compare = create_sampler(max_anisotropy, false);
}
const f32 max_anisotropy_default = static_cast<f32>(1U << tsc.max_anisotropy);
if (max_anisotropy > max_anisotropy_default) {
sampler_default_anisotropy = create_sampler(max_anisotropy_default);
sampler_default_anisotropy = create_sampler(max_anisotropy_default, has_depth_compare);
if (has_depth_compare) {
sampler_default_anisotropy_no_compare =
create_sampler(max_anisotropy_default, false);
}
}
}

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
@ -396,11 +396,18 @@ class Sampler {
public:
explicit Sampler(TextureCacheRuntime&, const Tegra::Texture::TSCEntry&);
[[nodiscard]] VkSampler Handle() const noexcept {
[[nodiscard]] VkSampler Handle(bool enable_depth_compare = true) const noexcept {
if (!enable_depth_compare && sampler_no_compare) {
return *sampler_no_compare;
}
return *sampler;
}
[[nodiscard]] VkSampler HandleWithDefaultAnisotropy() const noexcept {
[[nodiscard]] VkSampler HandleWithDefaultAnisotropy(
bool enable_depth_compare = true) const noexcept {
if (!enable_depth_compare && sampler_default_anisotropy_no_compare) {
return *sampler_default_anisotropy_no_compare;
}
return *sampler_default_anisotropy;
}
@ -408,9 +415,16 @@ public:
return static_cast<bool>(sampler_default_anisotropy);
}
// Whether the TSC entry this sampler was created from had depth compare enabled
// (mirrors tsc.depth_compare_enabled captured in the constructor).
[[nodiscard]] bool HasDepthCompareEnabled() const noexcept {
return has_depth_compare;
}
private:
vk::Sampler sampler;
vk::Sampler sampler_no_compare;
vk::Sampler sampler_default_anisotropy;
vk::Sampler sampler_default_anisotropy_no_compare;
bool has_depth_compare = false;
};
struct TextureCacheParams {

View file

@ -1191,7 +1191,7 @@ void TextureCache<P>::DownloadImageIntoBuffer(typename TextureCache<P>::Image* i
template <class P>
void TextureCache<P>::RefreshContents(Image& image, ImageId image_id) {
if (False(image.flags & ImageFlagBits::CpuModified)) {
// Only upload modified images
runtime.TransitionImageLayout(image);
return;
}

View file

@ -1,4 +1,4 @@
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-FileCopyrightText: Copyright 2026 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
@ -22,16 +22,13 @@
#include <vulkan/vulkan.h>
// Define maintenance 7-9 extension names (not yet in official Vulkan headers)
// Define maintenance 7-8 extension names
#ifndef VK_KHR_MAINTENANCE_7_EXTENSION_NAME
#define VK_KHR_MAINTENANCE_7_EXTENSION_NAME "VK_KHR_maintenance7"
#endif
#ifndef VK_KHR_MAINTENANCE_8_EXTENSION_NAME
#define VK_KHR_MAINTENANCE_8_EXTENSION_NAME "VK_KHR_maintenance8"
#endif
#ifndef VK_KHR_MAINTENANCE_9_EXTENSION_NAME
#define VK_KHR_MAINTENANCE_9_EXTENSION_NAME "VK_KHR_maintenance9"
#endif
// Sanitize macros
#undef CreateEvent
@ -40,3 +37,6 @@
#undef False
#undef None
#undef True
// "Catch-all" forward declaration of the surface handle, covering both Android and all other platforms
struct VkSurfaceKHR_T;

View file

@ -43,19 +43,9 @@ VkBool32 DebugUtilCallback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
case 0xdff2e5c1u: // VUID-vkCmdSetRasterizerDiscardEnable-None-04871
case 0x0cc85f41u: // VUID-vkCmdSetPrimitiveRestartEnable-None-04866
case 0x01257b492: // VUID-vkCmdSetLogicOpEXT-None-0486
// The below are due to incorrect reporting of vertexInputDynamicState
case 0x398e0dabu: // VUID-vkCmdSetVertexInputEXT-None-04790
// The below are due to incorrect reporting of extendedDynamicState3
case 0x970c11a5u: // VUID-vkCmdSetColorWriteMaskEXT-extendedDynamicState3ColorWriteMask-07364
case 0x6b453f78u: // VUID-vkCmdSetColorBlendEnableEXT-extendedDynamicState3ColorBlendEnable-07355
case 0xf66469d0u: // VUID-vkCmdSetColorBlendEquationEXT-extendedDynamicState3ColorBlendEquation-07356
case 0x1d43405eu: // VUID-vkCmdSetLogicOpEnableEXT-extendedDynamicState3LogicOpEnable-07365
case 0x638462e8u: // VUID-vkCmdSetDepthClampEnableEXT-extendedDynamicState3DepthClampEnable-07448
// Misc
case 0xe0a2da61u: // VUID-vkCmdDrawIndexed-format-07753
#else
case 0x682a878au: // VUID-vkCmdBindVertexBuffers2EXT-pBuffers-parameter
case 0x99fb7dfdu: // UNASSIGNED-RequiredParameter (vkCmdBindVertexBuffers2EXT pBuffers[0])
case 0xe8616bf2u: // Bound VkDescriptorSet 0x0[] was destroyed. Likely push_descriptor related
case 0x1608dec0u: // Image layout in vkUpdateDescriptorSet doesn't match descriptor use
case 0x55362756u: // Descriptor binding and framebuffer attachment overlap

View file

@ -419,7 +419,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
: instance{instance_}, dld{dld_}, physical{physical_},
format_properties(GetFormatProperties(physical)) {
// Get suitability and device properties.
const bool is_suitable = GetSuitability(surface != nullptr);
const bool is_suitable = GetSuitability(surface != VkSurfaceKHR{});
const VkDriverId driver_id = properties.driver.driverID;
@ -475,6 +475,9 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
if (extensions.descriptor_indexing && Settings::values.descriptor_indexing.GetValue()) {
first_next = &descriptor_indexing;
} else {
RemoveExtension(extensions.descriptor_indexing,
VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
}
is_blit_depth24_stencil8_supported = TestDepthStencilBlits(VK_FORMAT_D24_UNORM_S8_UINT);
@ -498,14 +501,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
CollectToolingInfo();
if (is_qualcomm) {
// Qualcomm Adreno GPUs don't handle scaled vertex attributes; keep emulation enabled
must_emulate_scaled_formats = true;
LOG_WARNING(Render_Vulkan,
"Qualcomm drivers require scaled vertex format emulation; forcing fallback");
LOG_WARNING(Render_Vulkan,
"Disabling shader float controls and 64-bit integer features on Qualcomm proprietary drivers");
RemoveExtension(extensions.shader_float_controls, VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME);
RemoveExtensionFeature(extensions.shader_atomic_int64, features.shader_atomic_int64,
VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME);
features.shader_atomic_int64.shaderBufferInt64Atomics = false;
@ -523,9 +519,6 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
bool should_patch_bcn = api_level >= 28;
const bool bcn_debug_override = Settings::values.patch_old_qcom_drivers.GetValue();
if (bcn_debug_override != should_patch_bcn) {
LOG_WARNING(Render_Vulkan,
"BCn patch debug override active: {} (auto-detected: {})",
bcn_debug_override, should_patch_bcn);
should_patch_bcn = bcn_debug_override;
}
@ -540,11 +533,6 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
} else {
LOG_ERROR(Render_Vulkan, "BCn patch failed! Driver code may now crash");
}
} else {
LOG_WARNING(Render_Vulkan,
"BCn texture patching skipped for stability (Android API {} < 28). "
"Driver version {}.{} would support patching, but may crash on older Android.",
api_level, major, minor);
}
} else if (patch_status == ADRENOTOOLS_BCN_BLOB) {
LOG_INFO(Render_Vulkan, "Adreno driver supports BCn textures natively (no patch needed)");
@ -563,7 +551,14 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
features.shader_float16_int8.shaderFloat16 = false;
}
// Mali/ NVIDIA proprietary drivers: Shader stencil export not supported
// NVIDIA proprietary drivers: Shader stencil export not supported
if (properties.properties.driverVersion >= VK_MAKE_API_VERSION(510, 0, 0, 0)) {
LOG_WARNING(Render_Vulkan,
"NVIDIA Drivers >= 510 do not support MSAA->MSAA image blits. "
"MSAA scaling will use 3D helpers. MSAA resolves work normally.");
cant_blit_msaa = true;
}
// Use hardware depth/stencil blits instead when available
if (!extensions.shader_stencil_export) {
LOG_INFO(Render_Vulkan,
@ -610,10 +605,6 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
const size_t derived_budget =
(std::max)(MIN_SAMPLER_BUDGET, sampler_limit - reserved);
sampler_heap_budget = derived_budget;
LOG_WARNING(Render_Vulkan,
"Qualcomm driver reports max {} samplers; reserving {} (25%) and "
"allowing Eden to use {} (75%) to avoid heap exhaustion",
sampler_limit, reserved, sampler_heap_budget);
}
}
@ -660,53 +651,25 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
const auto dyna_state = Settings::values.dyna_state.GetValue();
// Base dynamic states (VIEWPORT, SCISSOR, DEPTH_BIAS, etc.) are ALWAYS active in vk_graphics_pipeline.cpp
// This slider controls EXTENDED dynamic states with accumulative levels per Vulkan specs:
// Level 0 = Core Dynamic States only (Vulkan 1.0)
// Level 1 = Core + VK_EXT_extended_dynamic_state
// Level 2 = Core + VK_EXT_extended_dynamic_state + VK_EXT_extended_dynamic_state2
// Level 3 = Core + VK_EXT_extended_dynamic_state + VK_EXT_extended_dynamic_state2 + VK_EXT_extended_dynamic_state3
switch (dyna_state) {
case Settings::ExtendedDynamicState::Disabled:
// Level 0: Disable all extended dynamic state extensions
// Level 0: Disable all configured extended dynamic state extensions
RemoveExtensionFeature(extensions.extended_dynamic_state, features.extended_dynamic_state,
VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
RemoveExtensionFeature(extensions.extended_dynamic_state2, features.extended_dynamic_state2,
VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);
RemoveExtensionFeature(extensions.extended_dynamic_state3, features.extended_dynamic_state3,
VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
dynamic_state3_blending = false;
dynamic_state3_enables = false;
break;
case Settings::ExtendedDynamicState::EDS1:
// Level 1: Enable EDS1, disable EDS2 and EDS3
// Level 1: Enable EDS1, disable EDS2
RemoveExtensionFeature(extensions.extended_dynamic_state2, features.extended_dynamic_state2,
VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);
RemoveExtensionFeature(extensions.extended_dynamic_state3, features.extended_dynamic_state3,
VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
dynamic_state3_blending = false;
dynamic_state3_enables = false;
break;
case Settings::ExtendedDynamicState::EDS2:
// Level 2: Enable EDS1 + EDS2, disable EDS3
RemoveExtensionFeature(extensions.extended_dynamic_state3, features.extended_dynamic_state3,
VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
dynamic_state3_blending = false;
dynamic_state3_enables = false;
break;
case Settings::ExtendedDynamicState::EDS3:
default:
// Level 3: Enable all (EDS1 + EDS2 + EDS3)
// Level 2: Enable EDS1 + EDS2
break;
}
// VK_EXT_vertex_input_dynamic_state is independent from EDS
// It can be enabled even without extended_dynamic_state
if (!Settings::values.vertex_input_dynamic_state.GetValue()) {
RemoveExtensionFeature(extensions.vertex_input_dynamic_state, features.vertex_input_dynamic_state, VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME);
}
logical = vk::Device::Create(physical, queue_cis, ExtensionListForVulkan(loaded_extensions), first_next, dld);
graphics_queue = logical.GetQueue(graphics_family);
@ -1063,6 +1026,11 @@ bool Device::GetSuitability(bool requires_swapchain) {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR;
SetNext(next, properties.push_descriptor);
}
if (extensions.conservative_rasterization) {
properties.conservative_rasterization.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT;
SetNext(next, properties.conservative_rasterization);
}
if (extensions.subgroup_size_control || features.subgroup_size_control.subgroupSizeControl) {
properties.subgroup_size_control.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES;
@ -1093,6 +1061,22 @@ bool Device::GetSuitability(bool requires_swapchain) {
// Unload extensions if feature support is insufficient.
RemoveUnsuitableExtensions();
// Query VK_EXT_custom_border_color properties if the extension is enabled.
if (extensions.custom_border_color) {
auto proc = dld.vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceCustomBorderColorPropertiesEXT");
if (proc != nullptr) {
auto vkGetPhysicalDeviceCustomBorderColorPropertiesEXT =
reinterpret_cast<void(*)(VkPhysicalDevice, VkPhysicalDeviceCustomBorderColorPropertiesEXT*)>(
proc);
custom_border_color_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT;
custom_border_color_properties.pNext = nullptr;
vkGetPhysicalDeviceCustomBorderColorPropertiesEXT(physical, &custom_border_color_properties);
has_custom_border_color_properties = true;
} else {
has_custom_border_color_properties = false;
}
}
// Check limits.
struct Limit {
u32 minimum;
@ -1117,46 +1101,14 @@ bool Device::GetSuitability(bool requires_swapchain) {
// VK_DYNAMIC_STATE
// Driver detection variables for workarounds in GetSuitability
const VkDriverId driver_id = properties.driver.driverID;
// VK_EXT_extended_dynamic_state below this will appear drivers that need workarounds.
// VK_EXT_extended_dynamic_state2 below this will appear drivers that need workarounds.
// VK_EXT_extended_dynamic_state3 below this will appear drivers that need workarounds.
// Samsung: Broken extendedDynamicState3ColorBlendEquation
// Disable blend equation dynamic state, force static pipeline state
if (extensions.extended_dynamic_state3 &&
(driver_id == VK_DRIVER_ID_SAMSUNG_PROPRIETARY)) {
LOG_WARNING(Render_Vulkan,
"Samsung: Disabling broken extendedDynamicState3ColorBlendEquation");
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable = false;
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation = false;
}
// Intel Windows < 27.20.100.0: Broken VertexInputDynamicState
// Same for NVIDIA Proprietary < 580.119.02, unknown when VIDS was first NOT broken
// Disable VertexInputDynamicState on old Intel Windows drivers
if (extensions.vertex_input_dynamic_state) {
const u32 version = (properties.properties.driverVersion << 3) >> 3;
if ((driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS && version < VK_MAKE_API_VERSION(27, 20, 100, 0))
|| (driver_id == VK_DRIVER_ID_NVIDIA_PROPRIETARY && version < VK_MAKE_API_VERSION(580, 119, 02, 0))) {
LOG_WARNING(Render_Vulkan, "Disabling broken VK_EXT_vertex_input_dynamic_state");
RemoveExtensionFeature(extensions.vertex_input_dynamic_state, features.vertex_input_dynamic_state, VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME);
}
}
if (u32(Settings::values.dyna_state.GetValue()) == 0) {
LOG_INFO(Render_Vulkan, "Extended Dynamic State disabled by user setting, clearing all EDS features");
features.custom_border_color.customBorderColors = false;
features.custom_border_color.customBorderColorWithoutFormat = false;
features.extended_dynamic_state.extendedDynamicState = false;
features.extended_dynamic_state2.extendedDynamicState2 = false;
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable = false;
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation = false;
features.extended_dynamic_state3.extendedDynamicState3ColorWriteMask = false;
features.extended_dynamic_state3.extendedDynamicState3DepthClampEnable = false;
features.extended_dynamic_state3.extendedDynamicState3LogicOpEnable = false;
}
// Return whether we were suitable.
@ -1165,15 +1117,9 @@ bool Device::GetSuitability(bool requires_swapchain) {
void Device::RemoveUnsuitableExtensions() {
// VK_EXT_custom_border_color
// Enable extension if driver supports it, then check individual features
// - customBorderColors: Required to use VK_BORDER_COLOR_FLOAT_CUSTOM_EXT
// - customBorderColorWithoutFormat: Optional, allows VK_FORMAT_UNDEFINED
// If only customBorderColors is available, we must provide a specific format
if (extensions.custom_border_color) {
// Verify that at least customBorderColors is available
if (!features.custom_border_color.customBorderColors) {
LOG_WARNING(Render_Vulkan,
"VK_EXT_custom_border_color reported but customBorderColors feature not available, disabling");
extensions.custom_border_color = false;
}
}
@ -1196,7 +1142,7 @@ void Device::RemoveUnsuitableExtensions() {
RemoveExtensionFeatureIfUnsuitable(extensions.depth_clip_control, features.depth_clip_control,
VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME);
/* */ // VK_EXT_extended_dynamic_state
// VK_EXT_extended_dynamic_state
extensions.extended_dynamic_state = features.extended_dynamic_state.extendedDynamicState;
RemoveExtensionFeatureIfUnsuitable(extensions.extended_dynamic_state,
features.extended_dynamic_state,
@ -1208,67 +1154,7 @@ void Device::RemoveUnsuitableExtensions() {
features.extended_dynamic_state2,
VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME);
// VK_EXT_extended_dynamic_state3
const bool supports_color_blend_enable =
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable;
const bool supports_color_blend_equation =
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation;
const bool supports_color_write_mask =
features.extended_dynamic_state3.extendedDynamicState3ColorWriteMask;
dynamic_state3_blending = supports_color_blend_enable && supports_color_blend_equation &&
supports_color_write_mask;
const bool supports_depth_clamp_enable =
features.extended_dynamic_state3.extendedDynamicState3DepthClampEnable;
const bool supports_logic_op_enable =
features.extended_dynamic_state3.extendedDynamicState3LogicOpEnable;
const bool supports_line_raster_mode =
features.extended_dynamic_state3.extendedDynamicState3LineRasterizationMode &&
extensions.line_rasterization && features.line_rasterization.rectangularLines;
const bool supports_conservative_raster_mode =
features.extended_dynamic_state3.extendedDynamicState3ConservativeRasterizationMode &&
extensions.conservative_rasterization;
const bool supports_line_stipple_enable =
features.extended_dynamic_state3.extendedDynamicState3LineStippleEnable &&
extensions.line_rasterization && features.line_rasterization.stippledRectangularLines;
const bool supports_alpha_to_coverage =
features.extended_dynamic_state3.extendedDynamicState3AlphaToCoverageEnable;
const bool supports_alpha_to_one =
features.extended_dynamic_state3.extendedDynamicState3AlphaToOneEnable &&
features.features.alphaToOne;
dynamic_state3_depth_clamp_enable = supports_depth_clamp_enable;
dynamic_state3_logic_op_enable = supports_logic_op_enable;
dynamic_state3_line_raster_mode = supports_line_raster_mode;
dynamic_state3_conservative_raster_mode = supports_conservative_raster_mode;
dynamic_state3_line_stipple_enable = supports_line_stipple_enable;
dynamic_state3_alpha_to_coverage = supports_alpha_to_coverage;
dynamic_state3_alpha_to_one = supports_alpha_to_one;
dynamic_state3_enables = dynamic_state3_depth_clamp_enable || dynamic_state3_logic_op_enable ||
dynamic_state3_line_raster_mode ||
dynamic_state3_conservative_raster_mode ||
dynamic_state3_line_stipple_enable ||
dynamic_state3_alpha_to_coverage || dynamic_state3_alpha_to_one;
extensions.extended_dynamic_state3 = dynamic_state3_blending || dynamic_state3_enables;
if (!extensions.extended_dynamic_state3) {
dynamic_state3_blending = false;
dynamic_state3_enables = false;
dynamic_state3_depth_clamp_enable = false;
dynamic_state3_logic_op_enable = false;
dynamic_state3_line_raster_mode = false;
dynamic_state3_conservative_raster_mode = false;
dynamic_state3_line_stipple_enable = false;
dynamic_state3_alpha_to_coverage = false;
dynamic_state3_alpha_to_one = false;
}
RemoveExtensionFeatureIfUnsuitable(extensions.extended_dynamic_state3,
features.extended_dynamic_state3,
VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME);
// VK_EXT_robustness2
// Enable if at least one robustness2 feature is available
extensions.robustness_2 = features.robustness2.robustBufferAccess2 ||
features.robustness2.robustImageAccess2 ||
features.robustness2.nullDescriptor;
@ -1277,25 +1163,10 @@ void Device::RemoveUnsuitableExtensions() {
VK_EXT_ROBUSTNESS_2_EXTENSION_NAME);
// VK_EXT_image_robustness
// Enable if robustImageAccess is available
extensions.image_robustness = features.image_robustness.robustImageAccess;
RemoveExtensionFeatureIfUnsuitable(extensions.image_robustness, features.image_robustness,
VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME);
// VK_EXT_provoking_vertex
if (Settings::values.provoking_vertex.GetValue()) {
extensions.provoking_vertex = features.provoking_vertex.provokingVertexLast
&& features.provoking_vertex
.transformFeedbackPreservesProvokingVertex;
RemoveExtensionFeatureIfUnsuitable(extensions.provoking_vertex,
features.provoking_vertex,
VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME);
} else {
RemoveExtensionFeature(extensions.provoking_vertex,
features.provoking_vertex,
VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME);
}
// VK_KHR_shader_atomic_int64
extensions.shader_atomic_int64 = features.shader_atomic_int64.shaderBufferInt64Atomics &&
features.shader_atomic_int64.shaderSharedInt64Atomics;
@ -1319,28 +1190,32 @@ void Device::RemoveUnsuitableExtensions() {
VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME);
// VK_EXT_transform_feedback
// We only require the basic transformFeedback feature and at least
// one transform feedback buffer. We keep transformFeedbackQueries as it's used by
// the streaming byte count implementation. GeometryStreams and multiple streams
// are not strictly required since we currently support only stream 0.
extensions.transform_feedback =
features.transform_feedback.transformFeedback &&
properties.transform_feedback.maxTransformFeedbackBuffers > 0 &&
properties.transform_feedback.transformFeedbackQueries;
RemoveExtensionFeatureIfUnsuitable(extensions.transform_feedback, features.transform_feedback,
VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME);
if (extensions.transform_feedback) {
LOG_INFO(Render_Vulkan, "VK_EXT_transform_feedback enabled (buffers={}, queries={})",
properties.transform_feedback.maxTransformFeedbackBuffers,
properties.transform_feedback.transformFeedbackQueries);
}
// VK_EXT_vertex_input_dynamic_state
extensions.vertex_input_dynamic_state =
features.vertex_input_dynamic_state.vertexInputDynamicState;
RemoveExtensionFeatureIfUnsuitable(extensions.vertex_input_dynamic_state,
features.vertex_input_dynamic_state,
VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME);
// VK_EXT_provoking_vertex
extensions.provoking_vertex = features.provoking_vertex.provokingVertexLast;
RemoveExtensionFeatureIfUnsuitable(extensions.provoking_vertex,
features.provoking_vertex,
VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME);
// VK_EXT_line_rasterization
extensions.line_rasterization = features.line_rasterization.rectangularLines ||
features.line_rasterization.bresenhamLines ||
features.line_rasterization.smoothLines;
RemoveExtensionFeatureIfUnsuitable(extensions.line_rasterization,
features.line_rasterization,
VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME);
// VK_EXT_conditional_rendering
extensions.conditional_rendering = features.conditional_rendering.conditionalRendering;
RemoveExtensionFeatureIfUnsuitable(extensions.conditional_rendering,
features.conditional_rendering,
VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME);
// VK_EXT_multi_draw
extensions.multi_draw = features.multi_draw.multiDraw;
@ -1377,35 +1252,15 @@ void Device::RemoveUnsuitableExtensions() {
features.workgroup_memory_explicit_layout,
VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME);
// VK_EXT_swapchain_maintenance1 (extension only, has features)
// Requires VK_EXT_surface_maintenance1 instance extension
extensions.swapchain_maintenance1 = features.swapchain_maintenance1.swapchainMaintenance1;
if (extensions.swapchain_maintenance1) {
// Check if VK_EXT_surface_maintenance1 instance extension is available
const auto instance_extensions = vk::EnumerateInstanceExtensionProperties(dld);
const bool has_surface_maintenance1 = instance_extensions && std::ranges::any_of(*instance_extensions,
[](const VkExtensionProperties& prop) {
return std::strcmp(prop.extensionName, VK_EXT_SURFACE_MAINTENANCE_1_EXTENSION_NAME) == 0;
});
if (!has_surface_maintenance1) {
LOG_WARNING(Render_Vulkan,
"VK_EXT_swapchain_maintenance1 requires VK_EXT_surface_maintenance1, disabling");
extensions.swapchain_maintenance1 = false;
features.swapchain_maintenance1.swapchainMaintenance1 = false;
}
}
RemoveExtensionFeatureIfUnsuitable(extensions.swapchain_maintenance1, features.swapchain_maintenance1,
VK_EXT_SWAPCHAIN_MAINTENANCE_1_EXTENSION_NAME);
// VK_KHR_maintenance1 (core in Vulkan 1.1, no features)
// VK_KHR_maintenance1
extensions.maintenance1 = loaded_extensions.contains(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance1, VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
// VK_KHR_maintenance2 (core in Vulkan 1.1, no features)
// VK_KHR_maintenance2
extensions.maintenance2 = loaded_extensions.contains(VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance2, VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
// VK_KHR_maintenance3 (core in Vulkan 1.1, no features)
// VK_KHR_maintenance3
extensions.maintenance3 = loaded_extensions.contains(VK_KHR_MAINTENANCE_3_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance3, VK_KHR_MAINTENANCE_3_EXTENSION_NAME);
@ -1416,17 +1271,6 @@ void Device::RemoveUnsuitableExtensions() {
// VK_KHR_maintenance5
extensions.maintenance5 = features.maintenance5.maintenance5;
if (extensions.maintenance5) {
LOG_INFO(Render_Vulkan, "VK_KHR_maintenance5 properties: polygonModePointSize={} "
"depthStencilSwizzleOne={} earlyFragmentTests={} nonStrictWideLines={}",
properties.maintenance5.polygonModePointSize,
properties.maintenance5.depthStencilSwizzleOneSupport,
properties.maintenance5.earlyFragmentMultisampleCoverageAfterSampleCounting &&
properties.maintenance5.earlyFragmentSampleMaskTestBeforeSampleCounting,
properties.maintenance5.nonStrictWideLinesUseParallelogram);
}
RemoveExtensionFeatureIfUnsuitable(extensions.maintenance5, features.maintenance5,
VK_KHR_MAINTENANCE_5_EXTENSION_NAME);
@ -1435,17 +1279,13 @@ void Device::RemoveUnsuitableExtensions() {
RemoveExtensionFeatureIfUnsuitable(extensions.maintenance6, features.maintenance6,
VK_KHR_MAINTENANCE_6_EXTENSION_NAME);
// VK_KHR_maintenance7 (proposed for Vulkan 1.4, no features)
// VK_KHR_maintenance7
extensions.maintenance7 = loaded_extensions.contains(VK_KHR_MAINTENANCE_7_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance7, VK_KHR_MAINTENANCE_7_EXTENSION_NAME);
// VK_KHR_maintenance8 (proposed for Vulkan 1.4, no features)
// VK_KHR_maintenance8
extensions.maintenance8 = loaded_extensions.contains(VK_KHR_MAINTENANCE_8_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance8, VK_KHR_MAINTENANCE_8_EXTENSION_NAME);
// VK_KHR_maintenance9 (proposed for Vulkan 1.4, no features)
extensions.maintenance9 = loaded_extensions.contains(VK_KHR_MAINTENANCE_9_EXTENSION_NAME);
RemoveExtensionIfUnsuitable(extensions.maintenance9, VK_KHR_MAINTENANCE_9_EXTENSION_NAME);
}
void Device::SetupFamilies(VkSurfaceKHR surface) {

View file

@ -48,12 +48,12 @@ VK_DEFINE_HANDLE(VmaAllocator)
// Define all features which may be used by the implementation and require an extension here.
#define FOR_EACH_VK_FEATURE_EXT(FEATURE) \
FEATURE(EXT, ConditionalRendering, CONDITIONAL_RENDERING, conditional_rendering) \
FEATURE(EXT, CustomBorderColor, CUSTOM_BORDER_COLOR, custom_border_color) \
FEATURE(EXT, DepthBiasControl, DEPTH_BIAS_CONTROL, depth_bias_control) \
FEATURE(EXT, DepthClipControl, DEPTH_CLIP_CONTROL, depth_clip_control) \
FEATURE(EXT, ExtendedDynamicState, EXTENDED_DYNAMIC_STATE, extended_dynamic_state) \
FEATURE(EXT, ExtendedDynamicState2, EXTENDED_DYNAMIC_STATE_2, extended_dynamic_state2) \
FEATURE(EXT, ExtendedDynamicState3, EXTENDED_DYNAMIC_STATE_3, extended_dynamic_state3) \
FEATURE(EXT, 4444Formats, 4444_FORMATS, format_a4b4g4r4) \
FEATURE(EXT, IndexTypeUint8, INDEX_TYPE_UINT8, index_type_uint8) \
FEATURE(EXT, LineRasterization, LINE_RASTERIZATION, line_rasterization) \
@ -63,8 +63,6 @@ VK_DEFINE_HANDLE(VmaAllocator)
FEATURE(EXT, ProvokingVertex, PROVOKING_VERTEX, provoking_vertex) \
FEATURE(EXT, Robustness2, ROBUSTNESS_2, robustness2) \
FEATURE(EXT, TransformFeedback, TRANSFORM_FEEDBACK, transform_feedback) \
FEATURE(EXT, VertexInputDynamicState, VERTEX_INPUT_DYNAMIC_STATE, vertex_input_dynamic_state) \
FEATURE(EXT, SwapchainMaintenance1, SWAPCHAIN_MAINTENANCE_1, swapchain_maintenance1) \
FEATURE(KHR, Maintenance5, MAINTENANCE_5, maintenance5) \
FEATURE(KHR, Maintenance6, MAINTENANCE_6, maintenance6) \
FEATURE(KHR, PipelineExecutableProperties, PIPELINE_EXECUTABLE_PROPERTIES, \
@ -76,7 +74,6 @@ VK_DEFINE_HANDLE(VmaAllocator)
// Define miscellaneous extensions which may be used by the implementation here.
#define FOR_EACH_VK_EXTENSION(EXTENSION) \
EXTENSION(EXT, CONDITIONAL_RENDERING, conditional_rendering) \
EXTENSION(EXT, CONSERVATIVE_RASTERIZATION, conservative_rasterization) \
EXTENSION(EXT, DEPTH_RANGE_UNRESTRICTED, depth_range_unrestricted) \
EXTENSION(EXT, MEMORY_BUDGET, memory_budget) \
@ -100,7 +97,6 @@ VK_DEFINE_HANDLE(VmaAllocator)
EXTENSION(KHR, MAINTENANCE_3, maintenance3) \
EXTENSION(KHR, MAINTENANCE_7, maintenance7) \
EXTENSION(KHR, MAINTENANCE_8, maintenance8) \
EXTENSION(KHR, MAINTENANCE_9, maintenance9) \
EXTENSION(NV, DEVICE_DIAGNOSTICS_CONFIG, device_diagnostics_config) \
EXTENSION(NV, GEOMETRY_SHADER_PASSTHROUGH, geometry_shader_passthrough) \
EXTENSION(NV, VIEWPORT_ARRAY2, viewport_array2) \
@ -125,13 +121,11 @@ VK_DEFINE_HANDLE(VmaAllocator)
EXTENSION_NAME(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_4444_FORMATS_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_ROBUSTNESS_2_EXTENSION_NAME) \
EXTENSION_NAME(VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME) \
EXTENSION_NAME(VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME) \
EXTENSION_NAME(VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME) \
EXTENSION_NAME(VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME)
@ -189,8 +183,7 @@ VK_DEFINE_HANDLE(VmaAllocator)
FEATURE_NAME(shader_float16_int8, shaderInt8) \
FEATURE_NAME(timeline_semaphore, timelineSemaphore) \
FEATURE_NAME(transform_feedback, transformFeedback) \
FEATURE_NAME(uniform_buffer_standard_layout, uniformBufferStandardLayout) \
FEATURE_NAME(vertex_input_dynamic_state, vertexInputDynamicState)
FEATURE_NAME(uniform_buffer_standard_layout, uniformBufferStandardLayout)
// These features are not required but can be helpful for drivers that can use it.
#define FOR_EACH_VK_OPTIONAL_FEATURE(FEATURE_NAME) \
@ -448,7 +441,7 @@ public:
return extensions.viewport_array2;
}
/// Returns true if the device supporst VK_EXT_DESCRIPTOR_INDEXING
/// Returns true if the device supporst VK_EXT_descriptor_indexing.
bool isExtDescriptorIndexingSupported() const {
return extensions.descriptor_indexing;
}
@ -478,11 +471,6 @@ public:
return extensions.swapchain_mutable_format;
}
/// Returns true if VK_EXT_swapchain_maintenance1 is enabled.
bool IsExtSwapchainMaintenance1Enabled() const {
return extensions.swapchain_maintenance1;
}
/// Returns true if VK_KHR_shader_float_controls is enabled.
bool IsKhrShaderFloatControlsSupported() const {
return extensions.shader_float_controls;
@ -519,13 +507,11 @@ public:
}
/// Returns true if the device supports VK_EXT_shader_stencil_export.
/// Note: Most Mali/NVIDIA drivers don't support this. Use hardware blits as fallback.
bool IsExtShaderStencilExportSupported() const {
return extensions.shader_stencil_export;
}
/// Returns true if depth/stencil operations can be performed efficiently.
/// Either through shader export or hardware blits.
/// Returns true if depth/stencil operations through shader export or hardware blits.
bool CanPerformDepthStencilOperations() const {
return extensions.shader_stencil_export || is_blit_depth24_stencil8_supported ||
is_blit_depth32_stencil8_supported;
@ -561,11 +547,14 @@ public:
return extensions.transform_feedback;
}
/// Returns true if the device supports VK_EXT_transform_feedback properly.
bool AreTransformFeedbackGeometryStreamsSupported() const {
return features.transform_feedback.geometryStreams;
}
bool IsTransformFeedbackProvokingVertexPreserved() const {
return features.provoking_vertex.transformFeedbackPreservesProvokingVertex;
}
/// Returns true if the device supports VK_EXT_custom_border_color.
bool IsExtCustomBorderColorSupported() const {
return extensions.custom_border_color;
@ -611,6 +600,16 @@ public:
return features.custom_border_color.customBorderColorWithoutFormat;
}
/// Returns true if physical device custom border color properties were queried.
bool HasCustomBorderColorProperties() const {
return has_custom_border_color_properties;
}
/// Returns the queried VkPhysicalDeviceCustomBorderColorPropertiesEXT.
const VkPhysicalDeviceCustomBorderColorPropertiesEXT& GetCustomBorderColorProperties() const {
return custom_border_color_properties;
}
/// Returns true if the device supports VK_EXT_extended_dynamic_state.
bool IsExtExtendedDynamicStateSupported() const {
return extensions.extended_dynamic_state;
@ -625,32 +624,21 @@ public:
return features.extended_dynamic_state2.extendedDynamicState2LogicOp;
}
/// Returns true if the device supports VK_EXT_extended_dynamic_state3.
bool IsExtExtendedDynamicState3Supported() const {
return extensions.extended_dynamic_state3;
}
/// Returns true if the device supports VK_EXT_4444_formats.
bool IsExt4444FormatsSupported() const {
return features.format_a4b4g4r4.formatA4B4G4R4;
}
/// Returns true if the device supports VK_EXT_extended_dynamic_state3.
bool IsExtExtendedDynamicState3BlendingSupported() const {
return dynamic_state3_blending;
}
/// Returns true if the device supports VK_EXT_extended_dynamic_state3.
bool IsExtExtendedDynamicState3EnablesSupported() const {
return dynamic_state3_enables;
}
/// Returns true if the device supports VK_EXT_filter_cubic
/// Returns true if the device supports VK_EXT_filter_cubic.
bool IsExtFilterCubicSupported() const {
return extensions.filter_cubic;
}
/// Returns true if the device supports VK_QCOM_filter_cubic_weights
/// Custom border color properties retrieved from the physical device (if available).
VkPhysicalDeviceCustomBorderColorPropertiesEXT custom_border_color_properties{};
bool has_custom_border_color_properties = false;
/// Returns true if the device supports VK_QCOM_filter_cubic_weights.
bool IsQcomFilterCubicWeightsSupported() const {
return extensions.filter_cubic_weights;
}
@ -661,55 +649,36 @@ public:
}
bool SupportsRectangularLines() const {
return features.line_rasterization.rectangularLines != VK_FALSE;
return features.line_rasterization.rectangularLines;
}
bool SupportsBresenhamLines() const {
return features.line_rasterization.bresenhamLines;
}
bool SupportsSmoothLines() const {
return features.line_rasterization.smoothLines != VK_FALSE;
return features.line_rasterization.smoothLines;
}
bool SupportsStippledRectangularLines() const {
return features.line_rasterization.stippledRectangularLines != VK_FALSE;
return features.line_rasterization.stippledRectangularLines;
}
bool SupportsStippledBresenhamLines() const {
return features.line_rasterization.stippledBresenhamLines;
}
bool SupportsStippledSmoothLines() const {
return features.line_rasterization.stippledSmoothLines;
}
/// Returns true if the device supports AlphaToOne.
bool SupportsAlphaToOne() const {
return features.features.alphaToOne != VK_FALSE;
return features.features.alphaToOne;
}
bool SupportsDynamicState3DepthClampEnable() const {
return dynamic_state3_depth_clamp_enable;
}
bool SupportsDynamicState3LogicOpEnable() const {
return dynamic_state3_logic_op_enable;
}
bool SupportsDynamicState3LineRasterizationMode() const {
return dynamic_state3_line_raster_mode;
}
bool SupportsDynamicState3ConservativeRasterizationMode() const {
return dynamic_state3_conservative_raster_mode;
}
bool SupportsDynamicState3LineStippleEnable() const {
return dynamic_state3_line_stipple_enable;
}
bool SupportsDynamicState3AlphaToCoverageEnable() const {
return dynamic_state3_alpha_to_coverage;
}
bool SupportsDynamicState3AlphaToOneEnable() const {
return dynamic_state3_alpha_to_one;
}
/// Returns true if the device supports VK_EXT_vertex_input_dynamic_state.
bool IsExtVertexInputDynamicStateSupported() const {
return extensions.vertex_input_dynamic_state;
}
/// Returns true if the device supports VK_EXT_shader_demote_to_helper_invocation
/// Returns true if the device supports VK_EXT_shader_demote_to_helper_invocation.
bool IsExtShaderDemoteToHelperInvocationSupported() const {
return extensions.shader_demote_to_helper_invocation;
}
@ -719,20 +688,36 @@ public:
return extensions.conservative_rasterization;
}
/// Returns true if the device supports conservative rasterization for points and lines.
bool SupportsConservativePointAndLineRasterization() const {
return extensions.conservative_rasterization &&
properties.conservative_rasterization.conservativePointAndLineRasterization;
}
/// Returns true if the device supports VK_EXT_provoking_vertex.
bool IsExtProvokingVertexSupported() const {
return extensions.provoking_vertex;
}
/// Returns true if the device supports provoking-vertex LAST mode.
bool IsProvokingVertexLastSupported() const {
return features.provoking_vertex.provokingVertexLast;
}
/// Returns true if the device supports VK_KHR_shader_atomic_int64.
bool IsExtShaderAtomicInt64Supported() const {
return extensions.shader_atomic_int64;
}
bool IsExtConditionalRendering() const {
/// Returns true if the device supports VK_EXT_conditional_rendering.
bool IsExtConditionalRenderingSupported() const {
return extensions.conditional_rendering;
}
bool IsExtConditionalRendering() const {
return IsExtConditionalRenderingSupported();
}
bool HasTimelineSemaphore() const;
/// Returns the minimum supported version of SPIR-V.
@ -751,7 +736,7 @@ public:
return has_renderdoc || has_nsight_graphics || has_radeon_gpu_profiler;
}
/// @returns True if compute pipelines can cause crashing.
/// Returns true if compute pipelines can cause crashing.
bool HasBrokenCompute() const {
return has_broken_compute;
}
@ -898,11 +883,6 @@ public:
return extensions.maintenance8;
}
/// Returns true if the device supports VK_KHR_maintenance9.
bool IsKhrMaintenance9Supported() const {
return extensions.maintenance9;
}
/// Returns true if the device supports UINT8 index buffer conversion via compute shader.
bool SupportsUint8Indices() const {
return features.bit8_storage.storageBuffer8BitAccess &&
@ -1027,6 +1007,7 @@ private:
VkPhysicalDeviceSubgroupProperties subgroup_properties{};
VkPhysicalDeviceFloatControlsProperties float_controls{};
VkPhysicalDevicePushDescriptorPropertiesKHR push_descriptor{};
VkPhysicalDeviceConservativeRasterizationPropertiesEXT conservative_rasterization{};
VkPhysicalDeviceSubgroupSizeControlProperties subgroup_size_control{};
VkPhysicalDeviceTransformFeedbackPropertiesEXT transform_feedback{};
VkPhysicalDeviceMaintenance5PropertiesKHR maintenance5{};
@ -1059,15 +1040,6 @@ private:
bool supports_d24_depth{}; ///< Supports D24 depth buffers.
bool cant_blit_msaa{}; ///< Does not support MSAA<->MSAA blitting.
bool must_emulate_scaled_formats{}; ///< Requires scaled vertex format emulation
bool dynamic_state3_blending{}; ///< Has blending features of dynamic_state3.
bool dynamic_state3_enables{}; ///< Has at least one enable feature of dynamic_state3.
bool dynamic_state3_depth_clamp_enable{};
bool dynamic_state3_logic_op_enable{};
bool dynamic_state3_line_raster_mode{};
bool dynamic_state3_conservative_raster_mode{};
bool dynamic_state3_line_stipple_enable{};
bool dynamic_state3_alpha_to_coverage{};
bool dynamic_state3_alpha_to_one{};
bool supports_conditional_barriers{}; ///< Allows barriers in conditional control flow.
size_t sampler_heap_budget{}; ///< Sampler budget for buggy drivers (0 = unlimited).
u64 device_access_memory{}; ///< Total size of device local memory in bytes.

View file

@ -81,14 +81,6 @@ namespace {
#endif
if (enable_validation && AreExtensionsSupported(dld, *properties, std::array{VK_EXT_DEBUG_UTILS_EXTENSION_NAME}))
extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
// VK_EXT_surface_maintenance1 is required for VK_EXT_swapchain_maintenance1
if (window_type != Core::Frontend::WindowSystemType::Headless && AreExtensionsSupported(dld, *properties, std::array{VK_EXT_SURFACE_MAINTENANCE_1_EXTENSION_NAME})) {
extensions.push_back(VK_EXT_SURFACE_MAINTENANCE_1_EXTENSION_NAME);
// Some(which?) drivers dont like being told to load this extension(why?)
// NVIDIA on FreeBSD is totally fine with this through
if (AreExtensionsSupported(dld, *properties, std::array{VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME}))
extensions.push_back(VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME);
}
}
return extensions;
}

View file

@ -15,7 +15,7 @@ vk::SurfaceKHR CreateSurface(
const vk::Instance& instance,
[[maybe_unused]] const Core::Frontend::EmuWindow::WindowSystemInfo& window_info) {
[[maybe_unused]] const vk::InstanceDispatch& dld = instance.Dispatch();
VkSurfaceKHR unsafe_surface = nullptr;
VkSurfaceKHR unsafe_surface = VkSurfaceKHR{};
#ifdef _WIN32
if (window_info.type == Core::Frontend::WindowSystemType::Windows) {

View file

@ -127,18 +127,20 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkCmdPipelineBarrier);
X(vkCmdPushConstants);
X(vkCmdPushDescriptorSetWithTemplateKHR);
X(vkCmdResetQueryPool);
X(vkCmdSetBlendConstants);
X(vkCmdSetDepthBias);
X(vkCmdSetDepthBias2EXT);
X(vkCmdSetDepthBounds);
X(vkCmdSetEvent);
X(vkCmdSetScissor);
X(vkCmdSetScissorWithCountEXT);
X(vkCmdSetStencilCompareMask);
X(vkCmdSetStencilReference);
X(vkCmdSetStencilWriteMask);
X(vkCmdSetViewport);
X(vkCmdSetViewportWithCountEXT);
X(vkCmdWaitEvents);
X(vkCmdBindVertexBuffers2EXT);
X(vkCmdSetCullModeEXT);
X(vkCmdSetDepthBoundsTestEnableEXT);
X(vkCmdSetDepthCompareOpEXT);
@ -146,25 +148,15 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
X(vkCmdSetDepthWriteEnableEXT);
X(vkCmdSetPrimitiveRestartEnableEXT);
X(vkCmdSetRasterizerDiscardEnableEXT);
X(vkCmdSetAlphaToCoverageEnableEXT);
X(vkCmdSetAlphaToOneEnableEXT);
X(vkCmdSetConservativeRasterizationModeEXT);
X(vkCmdSetLineRasterizationModeEXT);
X(vkCmdSetLineStippleEnableEXT);
X(vkCmdSetDepthBiasEnableEXT);
X(vkCmdSetLogicOpEnableEXT);
X(vkCmdSetDepthClampEnableEXT);
X(vkCmdSetFrontFaceEXT);
X(vkCmdSetLogicOpEXT);
X(vkCmdSetPatchControlPointsEXT);
X(vkCmdSetLineStippleEXT);
X(vkCmdSetLineWidth);
X(vkCmdSetPrimitiveTopologyEXT);
X(vkCmdSetStencilOpEXT);
X(vkCmdSetStencilTestEnableEXT);
X(vkCmdSetVertexInputEXT);
X(vkCmdSetColorWriteMaskEXT);
X(vkCmdSetColorBlendEnableEXT);
X(vkCmdSetColorBlendEquationEXT);
X(vkCmdResolveImage);
X(vkCreateBuffer);
X(vkCreateBufferView);
@ -254,6 +246,15 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
Proc(dld.vkCmdDrawIndirectCount, dld, "vkCmdDrawIndirectCountKHR", device);
Proc(dld.vkCmdDrawIndexedIndirectCount, dld, "vkCmdDrawIndexedIndirectCountKHR", device);
}
if (!dld.vkCmdSetPrimitiveTopologyEXT) {
Proc(dld.vkCmdSetPrimitiveTopologyEXT, dld, "vkCmdSetPrimitiveTopology", device);
}
if (!dld.vkCmdSetViewportWithCountEXT) {
Proc(dld.vkCmdSetViewportWithCountEXT, dld, "vkCmdSetViewportWithCount", device);
}
if (!dld.vkCmdSetScissorWithCountEXT) {
Proc(dld.vkCmdSetScissorWithCountEXT, dld, "vkCmdSetScissorWithCount", device);
}
#undef X
}

View file

@ -200,7 +200,6 @@ struct DeviceDispatch : InstanceDispatch {
PFN_vkCmdBindPipeline vkCmdBindPipeline{};
PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT{};
PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers{};
PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT{};
PFN_vkCmdBlitImage vkCmdBlitImage{};
PFN_vkCmdClearAttachments vkCmdClearAttachments{};
PFN_vkCmdClearColorImage vkCmdClearColorImage{};
@ -229,6 +228,7 @@ struct DeviceDispatch : InstanceDispatch {
PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier{};
PFN_vkCmdPushConstants vkCmdPushConstants{};
PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR{};
PFN_vkCmdResetQueryPool vkCmdResetQueryPool{};
PFN_vkCmdResolveImage vkCmdResolveImage{};
PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants{};
PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT{};
@ -241,15 +241,8 @@ struct DeviceDispatch : InstanceDispatch {
PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT{};
PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT{};
PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT{};
PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT{};
PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT{};
PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT{};
PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT{};
PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT{};
PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT{};
PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT{};
PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT{};
PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT{};
PFN_vkCmdSetEvent vkCmdSetEvent{};
PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT{};
PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT{};
@ -257,16 +250,14 @@ struct DeviceDispatch : InstanceDispatch {
PFN_vkCmdSetLineWidth vkCmdSetLineWidth{};
PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT{};
PFN_vkCmdSetScissor vkCmdSetScissor{};
PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT{};
PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask{};
PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT{};
PFN_vkCmdSetStencilReference vkCmdSetStencilReference{};
PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT{};
PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask{};
PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT{};
PFN_vkCmdSetViewport vkCmdSetViewport{};
PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT{};
PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT{};
PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT{};
PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT{};
PFN_vkCmdWaitEvents vkCmdWaitEvents{};
PFN_vkCreateBuffer vkCreateBuffer{};
PFN_vkCreateBufferView vkCreateBufferView{};
@ -404,13 +395,13 @@ public:
/// Construct a handle transferring the ownership from another handle.
Handle(Handle&& rhs) noexcept
: handle{std::exchange(rhs.handle, nullptr)}, owner{rhs.owner}, dld{rhs.dld} {}
: handle{std::exchange(rhs.handle, Type{})}, owner{rhs.owner}, dld{rhs.dld} {}
/// Assign the current handle transferring the ownership from another handle.
/// Destroys any previously held object.
Handle& operator=(Handle&& rhs) noexcept {
Release();
handle = std::exchange(rhs.handle, nullptr);
handle = std::exchange(rhs.handle, Type{});
owner = rhs.owner;
dld = rhs.dld;
return *this;
@ -424,7 +415,7 @@ public:
/// Destroys any held object.
void reset() noexcept {
Release();
handle = nullptr;
handle = Type{};
}
/// Returns the address of the held object.
@ -440,7 +431,7 @@ public:
/// Returns true when there's a held object.
explicit operator bool() const noexcept {
return handle != nullptr;
return handle != Type{};
}
#ifndef ANDROID
@ -455,7 +446,7 @@ public:
#endif
protected:
Type handle = nullptr;
Type handle{};
OwnerType owner = nullptr;
const Dispatch* dld = nullptr;
@ -463,7 +454,7 @@ private:
/// Destroys the held object if it exists.
void Release() noexcept {
if (handle) {
Destroy(owner, handle, *dld);
Destroy(OwnerType(owner), Type(handle), *dld);
}
}
};
@ -506,7 +497,7 @@ public:
/// Destroys any held object.
void reset() noexcept {
Release();
handle = nullptr;
handle = {};
}
/// Returns the address of the held object.
@ -522,7 +513,7 @@ public:
/// Returns true when there's a held object.
explicit operator bool() const noexcept {
return handle != nullptr;
return handle != Type{};
}
#ifndef ANDROID
@ -537,7 +528,7 @@ public:
#endif
protected:
Type handle = nullptr;
Type handle{};
const Dispatch* dld = nullptr;
private:
@ -607,7 +598,7 @@ private:
std::unique_ptr<AllocationType[]> allocations;
std::size_t num = 0;
VkDevice device = nullptr;
PoolType pool = nullptr;
PoolType pool{};
const DeviceDispatch* dld = nullptr;
};
@ -669,12 +660,12 @@ public:
Image& operator=(const Image&) = delete;
Image(Image&& rhs) noexcept
: handle{std::exchange(rhs.handle, nullptr)}, usage{rhs.usage}, owner{rhs.owner},
: handle{std::exchange(rhs.handle, VkImage{})}, usage{rhs.usage}, owner{rhs.owner},
allocator{rhs.allocator}, allocation{rhs.allocation}, dld{rhs.dld} {}
Image& operator=(Image&& rhs) noexcept {
Release();
handle = std::exchange(rhs.handle, nullptr);
handle = std::exchange(rhs.handle, VkImage{});
usage = rhs.usage;
owner = rhs.owner;
allocator = rhs.allocator;
@ -693,11 +684,11 @@ public:
void reset() noexcept {
Release();
handle = nullptr;
handle = VkImage{};
}
explicit operator bool() const noexcept {
return handle != nullptr;
return handle != VkImage{};
}
void SetObjectNameEXT(const char* name) const;
@ -709,7 +700,7 @@ public:
private:
void Release() const noexcept;
VkImage handle = nullptr;
VkImage handle{};
VkImageUsageFlags usage{};
VkDevice owner = nullptr;
VmaAllocator allocator = nullptr;
@ -730,13 +721,13 @@ public:
Buffer& operator=(const Buffer&) = delete;
Buffer(Buffer&& rhs) noexcept
: handle{std::exchange(rhs.handle, nullptr)}, owner{rhs.owner}, allocator{rhs.allocator},
: handle{std::exchange(rhs.handle, VkBuffer{})}, owner{rhs.owner}, allocator{rhs.allocator},
allocation{rhs.allocation}, mapped{rhs.mapped},
is_coherent{rhs.is_coherent}, dld{rhs.dld} {}
Buffer& operator=(Buffer&& rhs) noexcept {
Release();
handle = std::exchange(rhs.handle, nullptr);
handle = std::exchange(rhs.handle, VkBuffer{});
owner = rhs.owner;
allocator = rhs.allocator;
allocation = rhs.allocation;
@ -756,11 +747,11 @@ public:
void reset() noexcept {
Release();
handle = nullptr;
handle = VkBuffer{};
}
explicit operator bool() const noexcept {
return handle != nullptr;
return handle != VkBuffer{};
}
/// Returns the host mapped memory, an empty span otherwise.
@ -786,7 +777,7 @@ public:
private:
void Release() const noexcept;
VkBuffer handle = nullptr;
VkBuffer handle{};
VkDevice owner = nullptr;
VmaAllocator allocator = nullptr;
VmaAllocation allocation = nullptr;
@ -1020,10 +1011,10 @@ public:
[[nodiscard]] PipelineLayout CreatePipelineLayout(const VkPipelineLayoutCreateInfo& ci) const;
[[nodiscard]] Pipeline CreateGraphicsPipeline(const VkGraphicsPipelineCreateInfo& ci,
VkPipelineCache cache = nullptr) const;
VkPipelineCache cache = {}) const;
[[nodiscard]] Pipeline CreateComputePipeline(const VkComputePipelineCreateInfo& ci,
VkPipelineCache cache = nullptr) const;
VkPipelineCache cache = {}) const;
[[nodiscard]] Sampler CreateSampler(const VkSamplerCreateInfo& ci) const;
@ -1182,6 +1173,10 @@ public:
dld->vkCmdPushDescriptorSetWithTemplateKHR(handle, update_template, layout, set, data);
}
void ResetQueryPool(VkQueryPool query_pool, u32 first, u32 count) const noexcept {
dld->vkCmdResetQueryPool(handle, query_pool, first, count);
}
void BindPipeline(VkPipelineBindPoint bind_point, VkPipeline pipeline) const noexcept {
dld->vkCmdBindPipeline(handle, bind_point, pipeline);
}
@ -1374,6 +1369,18 @@ public:
dld->vkCmdSetScissor(handle, first, scissors.size(), scissors.data());
}
void SetViewportWithCountEXT(Span<VkViewport> viewports) const noexcept {
if (dld && dld->vkCmdSetViewportWithCountEXT) {
dld->vkCmdSetViewportWithCountEXT(handle, viewports.size(), viewports.data());
}
}
void SetScissorWithCountEXT(Span<VkRect2D> scissors) const noexcept {
if (dld && dld->vkCmdSetScissorWithCountEXT) {
dld->vkCmdSetScissorWithCountEXT(handle, scissors.size(), scissors.data());
}
}
void SetBlendConstants(const float blend_constants[4]) const noexcept {
dld->vkCmdSetBlendConstants(handle, blend_constants);
}
@ -1403,8 +1410,10 @@ public:
.depthBiasClamp = clamp,
.depthBiasSlopeFactor = slope_factor,
};
if (dld && dld->vkCmdSetDepthBias2EXT) {
dld->vkCmdSetDepthBias2EXT(handle, &info);
}
}
void SetDepthBounds(float min_depth_bounds, float max_depth_bounds) const noexcept {
dld->vkCmdSetDepthBounds(handle, min_depth_bounds, max_depth_bounds);
@ -1423,104 +1432,76 @@ public:
buffer_barriers.data(), image_barriers.size(), image_barriers.data());
}
void BindVertexBuffers2EXT(u32 first_binding, u32 binding_count, const VkBuffer* buffers,
const VkDeviceSize* offsets, const VkDeviceSize* sizes,
const VkDeviceSize* strides) const noexcept {
dld->vkCmdBindVertexBuffers2EXT(handle, first_binding, binding_count, buffers, offsets,
sizes, strides);
}
void SetCullModeEXT(VkCullModeFlags cull_mode) const noexcept {
if (dld && dld->vkCmdSetCullModeEXT) {
dld->vkCmdSetCullModeEXT(handle, cull_mode);
}
}
void SetDepthBoundsTestEnableEXT(bool enable) const noexcept {
if (dld && dld->vkCmdSetDepthBoundsTestEnableEXT) {
dld->vkCmdSetDepthBoundsTestEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
}
void SetDepthCompareOpEXT(VkCompareOp compare_op) const noexcept {
if (dld && dld->vkCmdSetDepthCompareOpEXT) {
dld->vkCmdSetDepthCompareOpEXT(handle, compare_op);
}
}
void SetDepthTestEnableEXT(bool enable) const noexcept {
if (dld && dld->vkCmdSetDepthTestEnableEXT) {
dld->vkCmdSetDepthTestEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
}
void SetDepthWriteEnableEXT(bool enable) const noexcept {
if (dld && dld->vkCmdSetDepthWriteEnableEXT) {
dld->vkCmdSetDepthWriteEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
}
void SetPrimitiveRestartEnableEXT(bool enable) const noexcept {
if (dld && dld->vkCmdSetPrimitiveRestartEnableEXT) {
dld->vkCmdSetPrimitiveRestartEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
}
void SetRasterizerDiscardEnableEXT(bool enable) const noexcept {
if (dld && dld->vkCmdSetRasterizerDiscardEnableEXT) {
dld->vkCmdSetRasterizerDiscardEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
void SetConservativeRasterizationModeEXT(VkConservativeRasterizationModeEXT mode) const noexcept
{
dld->vkCmdSetConservativeRasterizationModeEXT(handle, mode);
}
void SetLineRasterizationModeEXT(VkLineRasterizationModeEXT mode) const noexcept {
    // Dynamic line-rasterization mode setter. Guarded for consistency with the
    // other *EXT setters: calling through a null pointer would crash when the
    // extension entry point was not loaded.
    if (dld && dld->vkCmdSetLineRasterizationModeEXT) {
        dld->vkCmdSetLineRasterizationModeEXT(handle, mode);
    }
}
void SetLineStippleEnableEXT(bool enable) const noexcept {
    // Toggles line stippling dynamically. Guarded like the sibling setters to
    // avoid calling through a null pointer when the extension is absent.
    if (dld && dld->vkCmdSetLineStippleEnableEXT) {
        dld->vkCmdSetLineStippleEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
    }
}
// NOTE(review): the merged diff left a duplicated, dangling copy of this
// signature (old + new lines); only the single guarded definition is kept.
void SetLineStippleEXT(u32 factor, u16 pattern) const noexcept {
    // Sets the line-stipple factor/pattern; no-op without the entry point.
    if (dld && dld->vkCmdSetLineStippleEXT) {
        dld->vkCmdSetLineStippleEXT(handle, factor, pattern);
    }
}
void SetDepthBiasEnableEXT(bool enable) const noexcept {
if (dld && dld->vkCmdSetDepthBiasEnableEXT) {
dld->vkCmdSetDepthBiasEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
void SetLogicOpEnableEXT(bool enable) const noexcept {
dld->vkCmdSetLogicOpEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
}
void SetAlphaToCoverageEnableEXT(bool enable) const noexcept {
    // Toggles alpha-to-coverage dynamically. Guarded for consistency with the
    // other *EXT setters; a null entry point would otherwise crash.
    if (dld && dld->vkCmdSetAlphaToCoverageEnableEXT) {
        dld->vkCmdSetAlphaToCoverageEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
    }
}
void SetAlphaToOneEnableEXT(bool enable) const noexcept {
    // Toggles alpha-to-one dynamically. Guarded for consistency with the
    // other *EXT setters; a null entry point would otherwise crash.
    if (dld && dld->vkCmdSetAlphaToOneEnableEXT) {
        dld->vkCmdSetAlphaToOneEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
    }
}
void SetDepthClampEnableEXT(bool enable) const noexcept {
    // Toggles depth clamping dynamically. Guarded for consistency with the
    // other *EXT setters; a null entry point would otherwise crash.
    if (dld && dld->vkCmdSetDepthClampEnableEXT) {
        dld->vkCmdSetDepthClampEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
    }
}
void SetFrontFaceEXT(VkFrontFace front_face) const noexcept {
    // Dynamic front-face setter; no-op when the entry point was not loaded.
    if (dld == nullptr || dld->vkCmdSetFrontFaceEXT == nullptr) {
        return;
    }
    dld->vkCmdSetFrontFaceEXT(handle, front_face);
}
void SetLogicOpEXT(VkLogicOp logic_op) const noexcept {
    // Dynamic logic-op setter; no-op when the entry point was not loaded.
    if (dld == nullptr || dld->vkCmdSetLogicOpEXT == nullptr) {
        return;
    }
    dld->vkCmdSetLogicOpEXT(handle, logic_op);
}
void SetPatchControlPointsEXT(uint32_t patch_control_points) const noexcept {
if (dld && dld->vkCmdSetPatchControlPointsEXT) {
dld->vkCmdSetPatchControlPointsEXT(handle, patch_control_points);
}
void SetColorWriteMaskEXT(u32 first, Span<VkColorComponentFlags> masks) const noexcept {
dld->vkCmdSetColorWriteMaskEXT(handle, first, masks.size(), masks.data());
}
void SetColorBlendEnableEXT(u32 first, Span<VkBool32> enables) const noexcept {
    // Per-attachment dynamic blend enables. Guarded for consistency with the
    // other *EXT setters; a null entry point would otherwise crash.
    if (dld && dld->vkCmdSetColorBlendEnableEXT) {
        dld->vkCmdSetColorBlendEnableEXT(handle, first, enables.size(), enables.data());
    }
}
void SetColorBlendEquationEXT(u32 first,
                              Span<VkColorBlendEquationEXT> equations) const noexcept {
    // Per-attachment dynamic blend equations. Guarded for consistency with the
    // other *EXT setters; a null entry point would otherwise crash.
    if (dld && dld->vkCmdSetColorBlendEquationEXT) {
        dld->vkCmdSetColorBlendEquationEXT(handle, first, equations.size(), equations.data());
    }
}
void SetLineWidth(float line_width) const noexcept {
@ -1528,53 +1509,62 @@ public:
}
void SetPrimitiveTopologyEXT(VkPrimitiveTopology primitive_topology) const noexcept {
    // Dynamic primitive-topology setter; no-op without the loaded entry point.
    if (dld == nullptr || dld->vkCmdSetPrimitiveTopologyEXT == nullptr) {
        return;
    }
    dld->vkCmdSetPrimitiveTopologyEXT(handle, primitive_topology);
}
void SetStencilOpEXT(VkStencilFaceFlags face_mask, VkStencilOp fail_op, VkStencilOp pass_op,
                     VkStencilOp depth_fail_op, VkCompareOp compare_op) const noexcept {
    // Dynamic stencil-op setter for the given faces; no-op without the
    // loaded entry point.
    if (dld == nullptr || dld->vkCmdSetStencilOpEXT == nullptr) {
        return;
    }
    dld->vkCmdSetStencilOpEXT(handle, face_mask, fail_op, pass_op, depth_fail_op, compare_op);
}
void SetStencilTestEnableEXT(bool enable) const noexcept {
    // Toggles stencil testing dynamically; no-op without the entry point.
    if (dld && dld->vkCmdSetStencilTestEnableEXT) {
        dld->vkCmdSetStencilTestEnableEXT(handle, enable ? VK_TRUE : VK_FALSE);
    }
}
// NOTE(review): the closing brace of SetStencilTestEnableEXT was missing in the
// merged diff; restored so the next method is not nested.
void SetVertexInputEXT(
    vk::Span<VkVertexInputBindingDescription2EXT> bindings,
    vk::Span<VkVertexInputAttributeDescription2EXT> attributes) const noexcept {
    // Dynamic vertex-input state. Guarded like the sibling setters against a
    // missing extension entry point.
    if (dld && dld->vkCmdSetVertexInputEXT) {
        dld->vkCmdSetVertexInputEXT(handle, bindings.size(), bindings.data(), attributes.size(),
                                    attributes.data());
    }
}
void BindTransformFeedbackBuffersEXT(u32 first, u32 count, const VkBuffer* buffers,
                                     const VkDeviceSize* offsets,
                                     const VkDeviceSize* sizes) const noexcept {
    // Binds transform-feedback buffers (VK_EXT_transform_feedback); no-op when
    // the entry point was not loaded.
    if (dld == nullptr || dld->vkCmdBindTransformFeedbackBuffersEXT == nullptr) {
        return;
    }
    dld->vkCmdBindTransformFeedbackBuffersEXT(handle, first, count, buffers, offsets, sizes);
}
void BeginTransformFeedbackEXT(u32 first_counter_buffer, u32 counter_buffers_count,
                               const VkBuffer* counter_buffers,
                               const VkDeviceSize* counter_buffer_offsets) const noexcept {
    // Starts transform feedback; no-op when the entry point was not loaded.
    if (dld == nullptr || dld->vkCmdBeginTransformFeedbackEXT == nullptr) {
        return;
    }
    dld->vkCmdBeginTransformFeedbackEXT(handle, first_counter_buffer, counter_buffers_count,
                                        counter_buffers, counter_buffer_offsets);
}
void EndTransformFeedbackEXT(u32 first_counter_buffer, u32 counter_buffers_count,
                             const VkBuffer* counter_buffers,
                             const VkDeviceSize* counter_buffer_offsets) const noexcept {
    // Ends transform feedback; no-op when the entry point was not loaded.
    if (dld == nullptr || dld->vkCmdEndTransformFeedbackEXT == nullptr) {
        return;
    }
    dld->vkCmdEndTransformFeedbackEXT(handle, first_counter_buffer, counter_buffers_count,
                                      counter_buffers, counter_buffer_offsets);
}
void BeginConditionalRenderingEXT(
    const VkConditionalRenderingBeginInfoEXT& info) const noexcept {
    // Starts conditional rendering; no-op when the entry point was not loaded.
    if (dld == nullptr || dld->vkCmdBeginConditionalRenderingEXT == nullptr) {
        return;
    }
    dld->vkCmdBeginConditionalRenderingEXT(handle, &info);
}
void EndConditionalRenderingEXT() const noexcept {
    // Ends conditional rendering; no-op when the entry point was not loaded.
    if (dld == nullptr || dld->vkCmdEndConditionalRenderingEXT == nullptr) {
        return;
    }
    dld->vkCmdEndConditionalRenderingEXT(handle);
}
void BeginDebugUtilsLabelEXT(const char* label, std::span<float, 4> color) const noexcept {
const VkDebugUtilsLabelEXT label_info{
@ -1583,12 +1573,16 @@ public:
.pLabelName = label,
.color{color[0], color[1], color[2], color[3]},
};
if (dld && dld->vkCmdBeginDebugUtilsLabelEXT) {
dld->vkCmdBeginDebugUtilsLabelEXT(handle, &label_info);
}
}
void EndDebugUtilsLabelEXT() const noexcept {
    // Closes the current debug-utils label region; no-op when the debug-utils
    // entry point was not loaded (e.g. validation layers disabled).
    if (dld == nullptr || dld->vkCmdEndDebugUtilsLabelEXT == nullptr) {
        return;
    }
    dld->vkCmdEndDebugUtilsLabelEXT(handle);
}
private:
VkCommandBuffer handle;

View file

@ -10,13 +10,14 @@
#include <fmt/format.h>
#include <QDesktopServices>
#include <QHeaderView>
#include <QMenu>
#include <QStandardItemModel>
#include <QStandardPaths>
#include <QString>
#include <QTimer>
#include <QTreeView>
#include <QStandardPaths>
#include "common/common_types.h"
#include "common/fs/fs.h"
@ -42,7 +43,7 @@ ConfigurePerGameAddons::ConfigurePerGameAddons(Core::System& system_, QWidget* p
item_model = new QStandardItemModel(tree_view);
tree_view->setModel(item_model);
tree_view->setAlternatingRowColors(true);
tree_view->setSelectionMode(QHeaderView::MultiSelection);
tree_view->setSelectionMode(QHeaderView::ExtendedSelection);
tree_view->setSelectionBehavior(QHeaderView::SelectRows);
tree_view->setVerticalScrollMode(QHeaderView::ScrollPerPixel);
tree_view->setHorizontalScrollMode(QHeaderView::ScrollPerPixel);
@ -248,8 +249,11 @@ void ConfigurePerGameAddons::AddonDeleteRequested(QList<QModelIndex> selected) {
void ConfigurePerGameAddons::showContextMenu(const QPoint& pos) {
const QModelIndex index = tree_view->indexAt(pos);
auto selected = tree_view->selectionModel()->selectedIndexes();
if (index.isValid() && selected.empty()) selected = {index};
auto selected = tree_view->selectionModel()->selectedRows();
if (index.isValid() && selected.empty()) {
QModelIndex idx = item_model->index(index.row(), 0);
if (idx.isValid()) selected << idx;
}
if (selected.empty()) return;
@ -260,6 +264,15 @@ void ConfigurePerGameAddons::showContextMenu(const QPoint& pos) {
AddonDeleteRequested(selected);
});
if (selected.length() == 1) {
auto loc = selected.at(0).data(PATCH_LOCATION).toString();
if (QFileInfo::exists(loc)) {
QAction* open = menu.addAction(tr("&Open in File Manager"));
connect(open, &QAction::triggered, this,
[selected, loc]() { QDesktopServices::openUrl(QUrl::fromLocalFile(loc)); });
}
}
menu.exec(tree_view->viewport()->mapToGlobal(pos));
}