Merge pull request #10057 from liamwhite/its-not-in-the-timeline
vulkan: use plain fences when timeline semaphores are not available
This commit is contained in: commit 0cfeb2e8d7
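The patch below makes MasterSemaphore fall back to a plain VkFence plus an atomic tick counter when the device lacks timeline semaphore support: the fence path submits with a fence, waits for it on the host, stores the completed tick into gpu_tick, and notify_all()s any thread blocked in Wait(). The following is a minimal, Vulkan-free sketch of that host-side signalling pattern, assuming C++20 std::atomic wait/notify; the fake_gpu thread and the timings are illustrative only and are not part of the patch.

    // Minimal model of the fence-fallback tick protocol (C++20, no Vulkan).
    // The "GPU" thread finishes ticks in order, publishes each one, and wakes waiters,
    // mirroring what SubmitQueueFence does after fence.Wait()/fence.Reset().
    #include <atomic>
    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <thread>

    int main() {
        std::atomic<std::uint64_t> gpu_tick{0}; // last tick known to be finished

        // Pretend GPU: completes ticks 1..5, publishing each one.
        std::thread fake_gpu([&] {
            for (std::uint64_t tick = 1; tick <= 5; ++tick) {
                std::this_thread::sleep_for(std::chrono::milliseconds(10)); // "GPU work"
                gpu_tick.store(tick, std::memory_order_release);
                gpu_tick.notify_all(); // wake host threads blocked in wait()
            }
        });

        // Host-side Wait(tick): the same loop shape the patch uses when no timeline
        // semaphore is available.
        const std::uint64_t wanted = 4;
        while (true) {
            const std::uint64_t current = gpu_tick.load(std::memory_order_acquire);
            if (current >= wanted) {
                break;
            }
            gpu_tick.wait(current); // blocks until the value changes from `current`
        }
        std::cout << "tick " << wanted << " reached\n";

        fake_gpu.join();
    }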
@@ -10,7 +10,14 @@
 namespace Vulkan {
 
-MasterSemaphore::MasterSemaphore(const Device& device) {
+MasterSemaphore::MasterSemaphore(const Device& device_) : device(device_) {
+    if (!device.HasTimelineSemaphore()) {
+        static constexpr VkFenceCreateInfo fence_ci{
+            .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = nullptr, .flags = 0};
+        fence = device.GetLogical().CreateFence(fence_ci);
+        return;
+    }
+
     static constexpr VkSemaphoreTypeCreateInfo semaphore_type_ci{
         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
         .pNext = nullptr,
@@ -42,4 +49,134 @@ MasterSemaphore::MasterSemaphore(const Device& device) {
 MasterSemaphore::~MasterSemaphore() = default;
 
+void MasterSemaphore::Refresh() {
+    if (!semaphore) {
+        // If we don't support timeline semaphores, there's nothing to refresh
+        return;
+    }
+
+    u64 this_tick{};
+    u64 counter{};
+    do {
+        this_tick = gpu_tick.load(std::memory_order_acquire);
+        counter = semaphore.GetCounter();
+        if (counter < this_tick) {
+            return;
+        }
+    } while (!gpu_tick.compare_exchange_weak(this_tick, counter, std::memory_order_release,
+                                             std::memory_order_relaxed));
+}
+
+void MasterSemaphore::Wait(u64 tick) {
+    if (!semaphore) {
+        // If we don't support timeline semaphores, use an atomic wait
+        while (true) {
+            u64 current_value = gpu_tick.load(std::memory_order_relaxed);
+            if (current_value >= tick) {
+                return;
+            }
+            gpu_tick.wait(current_value);
+        }
+
+        return;
+    }
+
+    // No need to wait if the GPU is ahead of the tick
+    if (IsFree(tick)) {
+        return;
+    }
+
+    // Update the GPU tick and try again
+    Refresh();
+
+    if (IsFree(tick)) {
+        return;
+    }
+
+    // If none of the above is hit, fallback to a regular wait
+    while (!semaphore.Wait(tick)) {
+    }
+
+    Refresh();
+}
+
+VkResult MasterSemaphore::SubmitQueue(vk::CommandBuffer& cmdbuf, VkSemaphore signal_semaphore,
+                                      VkSemaphore wait_semaphore, u64 host_tick) {
+    if (semaphore) {
+        return SubmitQueueTimeline(cmdbuf, signal_semaphore, wait_semaphore, host_tick);
+    } else {
+        return SubmitQueueFence(cmdbuf, signal_semaphore, wait_semaphore, host_tick);
+    }
+}
+
+static constexpr std::array<VkPipelineStageFlags, 2> wait_stage_masks{
+    VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+};
+
+VkResult MasterSemaphore::SubmitQueueTimeline(vk::CommandBuffer& cmdbuf,
+                                              VkSemaphore signal_semaphore,
+                                              VkSemaphore wait_semaphore, u64 host_tick) {
+    const VkSemaphore timeline_semaphore = *semaphore;
+
+    const u32 num_signal_semaphores = signal_semaphore ? 2 : 1;
+    const std::array signal_values{host_tick, u64(0)};
+    const std::array signal_semaphores{timeline_semaphore, signal_semaphore};
+
+    const u32 num_wait_semaphores = wait_semaphore ? 2 : 1;
+    const std::array wait_values{host_tick - 1, u64(1)};
+    const std::array wait_semaphores{timeline_semaphore, wait_semaphore};
+
+    const VkTimelineSemaphoreSubmitInfo timeline_si{
+        .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
+        .pNext = nullptr,
+        .waitSemaphoreValueCount = num_wait_semaphores,
+        .pWaitSemaphoreValues = wait_values.data(),
+        .signalSemaphoreValueCount = num_signal_semaphores,
+        .pSignalSemaphoreValues = signal_values.data(),
+    };
+    const VkSubmitInfo submit_info{
+        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+        .pNext = &timeline_si,
+        .waitSemaphoreCount = num_wait_semaphores,
+        .pWaitSemaphores = wait_semaphores.data(),
+        .pWaitDstStageMask = wait_stage_masks.data(),
+        .commandBufferCount = 1,
+        .pCommandBuffers = cmdbuf.address(),
+        .signalSemaphoreCount = num_signal_semaphores,
+        .pSignalSemaphores = signal_semaphores.data(),
+    };
+
+    return device.GetGraphicsQueue().Submit(submit_info);
+}
+
+VkResult MasterSemaphore::SubmitQueueFence(vk::CommandBuffer& cmdbuf, VkSemaphore signal_semaphore,
+                                           VkSemaphore wait_semaphore, u64 host_tick) {
+    const u32 num_signal_semaphores = signal_semaphore ? 1 : 0;
+    const u32 num_wait_semaphores = wait_semaphore ? 1 : 0;
+
+    const VkSubmitInfo submit_info{
+        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+        .pNext = nullptr,
+        .waitSemaphoreCount = num_wait_semaphores,
+        .pWaitSemaphores = &wait_semaphore,
+        .pWaitDstStageMask = wait_stage_masks.data(),
+        .commandBufferCount = 1,
+        .pCommandBuffers = cmdbuf.address(),
+        .signalSemaphoreCount = num_signal_semaphores,
+        .pSignalSemaphores = &signal_semaphore,
+    };
+
+    auto result = device.GetGraphicsQueue().Submit(submit_info, *fence);
+
+    if (result == VK_SUCCESS) {
+        fence.Wait();
+        fence.Reset();
+        gpu_tick.store(host_tick);
+        gpu_tick.notify_all();
+    }
+
+    return result;
+}
+
 } // namespace Vulkan
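Refresh() above advances the cached gpu_tick to the semaphore's counter with a compare_exchange_weak loop, so the cached value only ever moves forward even when several threads refresh at once. Here is a hedged, standalone sketch of that same "monotonic advance" pattern in isolation; AdvanceTo is a hypothetical helper name, not a function from this codebase.

    // Raise an atomic to a newly observed value, but never move it backwards,
    // even with racing updaters (the pattern Refresh() relies on).
    #include <atomic>
    #include <cstdint>
    #include <iostream>

    // Hypothetical helper: returns true if `tick` was advanced to `observed`.
    bool AdvanceTo(std::atomic<std::uint64_t>& tick, std::uint64_t observed) {
        std::uint64_t current = tick.load(std::memory_order_acquire);
        do {
            if (observed <= current) {
                return false; // someone else already published an equal or newer value
            }
            // On failure, compare_exchange_weak reloads `current` and we retry.
        } while (!tick.compare_exchange_weak(current, observed, std::memory_order_release,
                                             std::memory_order_relaxed));
        return true;
    }

    int main() {
        std::atomic<std::uint64_t> gpu_tick{10};
        AdvanceTo(gpu_tick, 12); // advances: 10 -> 12
        AdvanceTo(gpu_tick, 11); // ignored: would move the counter backwards
        std::cout << gpu_tick.load() << '\n'; // prints 12
    }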
@@ -4,6 +4,8 @@
 #pragma once
 
 #include <atomic>
+#include <condition_variable>
+#include <mutex>
 #include <thread>
 
 #include "common/common_types.h"
@@ -29,11 +31,6 @@ public:
         return gpu_tick.load(std::memory_order_acquire);
     }
 
-    /// Returns the timeline semaphore handle.
-    [[nodiscard]] VkSemaphore Handle() const noexcept {
-        return *semaphore;
-    }
-
     /// Returns true when a tick has been hit by the GPU.
     [[nodiscard]] bool IsFree(u64 tick) const noexcept {
         return KnownGpuTick() >= tick;
@@ -45,37 +42,24 @@ public:
     }
 
     /// Refresh the known GPU tick
-    void Refresh() {
-        u64 this_tick{};
-        u64 counter{};
-        do {
-            this_tick = gpu_tick.load(std::memory_order_acquire);
-            counter = semaphore.GetCounter();
-            if (counter < this_tick) {
-                return;
-            }
-        } while (!gpu_tick.compare_exchange_weak(this_tick, counter, std::memory_order_release,
-                                                 std::memory_order_relaxed));
-    }
+    void Refresh();
 
     /// Waits for a tick to be hit on the GPU
-    void Wait(u64 tick) {
-        // No need to wait if the GPU is ahead of the tick
-        if (IsFree(tick)) {
-            return;
-        }
-        // Update the GPU tick and try again
-        Refresh();
-        if (IsFree(tick)) {
-            return;
-        }
-        // If none of the above is hit, fallback to a regular wait
-        while (!semaphore.Wait(tick)) {
-        }
-        Refresh();
-    }
+    void Wait(u64 tick);
+
+    /// Submits the device graphics queue, updating the tick as necessary
+    VkResult SubmitQueue(vk::CommandBuffer& cmdbuf, VkSemaphore signal_semaphore,
+                         VkSemaphore wait_semaphore, u64 host_tick);
+
+private:
+    VkResult SubmitQueueTimeline(vk::CommandBuffer& cmdbuf, VkSemaphore signal_semaphore,
+                                 VkSemaphore wait_semaphore, u64 host_tick);
+    VkResult SubmitQueueFence(vk::CommandBuffer& cmdbuf, VkSemaphore signal_semaphore,
+                              VkSemaphore wait_semaphore, u64 host_tick);
 
 private:
+    const Device& device;             ///< Device.
+    vk::Fence fence;                  ///< Fence.
     vk::Semaphore semaphore;          ///< Timeline semaphore.
     std::atomic<u64> gpu_tick{0};     ///< Current known GPU tick.
     std::atomic<u64> current_tick{1}; ///< Current logical tick.
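The header now keeps a fence alongside the timeline semaphore and tracks two counters: current_tick, the next tick the host will hand out, and gpu_tick, the newest tick the GPU is known to have reached. A toy sketch of that two-counter bookkeeping follows, assuming NextTick() hands out monotonically increasing host ticks (as suggested by the scheduler calling master_semaphore->NextTick() for its signal value); TickTracker is a made-up stand-in for illustration, not the real class.

    // Toy model of the gpu_tick / current_tick relationship.
    #include <atomic>
    #include <cstdint>
    #include <iostream>

    class TickTracker {
    public:
        // Reserve a tick for the submission being built (NextTick()-style).
        std::uint64_t NextTick() noexcept {
            return current_tick.fetch_add(1, std::memory_order_release);
        }

        // Record that the GPU finished everything up to `tick`.
        void SignalCompleted(std::uint64_t tick) noexcept {
            gpu_tick.store(tick, std::memory_order_release);
        }

        // True once the GPU has reached `tick` (IsFree()-style).
        bool IsFree(std::uint64_t tick) const noexcept {
            return gpu_tick.load(std::memory_order_acquire) >= tick;
        }

    private:
        std::atomic<std::uint64_t> gpu_tick{0};     // last known finished tick
        std::atomic<std::uint64_t> current_tick{1}; // next tick to hand out
    };

    int main() {
        TickTracker tracker;
        const std::uint64_t submit_tick = tracker.NextTick();               // reserves tick 1
        std::cout << std::boolalpha << tracker.IsFree(submit_tick) << '\n'; // false
        tracker.SignalCompleted(submit_tick);                               // GPU caught up
        std::cout << tracker.IsFree(submit_tick) << '\n';                   // true
    }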
@@ -212,45 +212,13 @@ void Scheduler::SubmitExecution(VkSemaphore signal_semaphore, VkSemaphore wait_s
     const u64 signal_value = master_semaphore->NextTick();
     Record([signal_semaphore, wait_semaphore, signal_value, this](vk::CommandBuffer cmdbuf) {
         cmdbuf.End();
-        const VkSemaphore timeline_semaphore = master_semaphore->Handle();
-
-        const u32 num_signal_semaphores = signal_semaphore ? 2U : 1U;
-        const std::array signal_values{signal_value, u64(0)};
-        const std::array signal_semaphores{timeline_semaphore, signal_semaphore};
-
-        const u32 num_wait_semaphores = wait_semaphore ? 2U : 1U;
-        const std::array wait_values{signal_value - 1, u64(1)};
-        const std::array wait_semaphores{timeline_semaphore, wait_semaphore};
-        static constexpr std::array<VkPipelineStageFlags, 2> wait_stage_masks{
-            VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
-            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
-        };
-
-        const VkTimelineSemaphoreSubmitInfo timeline_si{
-            .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
-            .pNext = nullptr,
-            .waitSemaphoreValueCount = num_wait_semaphores,
-            .pWaitSemaphoreValues = wait_values.data(),
-            .signalSemaphoreValueCount = num_signal_semaphores,
-            .pSignalSemaphoreValues = signal_values.data(),
-        };
-        const VkSubmitInfo submit_info{
-            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
-            .pNext = &timeline_si,
-            .waitSemaphoreCount = num_wait_semaphores,
-            .pWaitSemaphores = wait_semaphores.data(),
-            .pWaitDstStageMask = wait_stage_masks.data(),
-            .commandBufferCount = 1,
-            .pCommandBuffers = cmdbuf.address(),
-            .signalSemaphoreCount = num_signal_semaphores,
-            .pSignalSemaphores = signal_semaphores.data(),
-        };
-
         if (on_submit) {
             on_submit();
         }
 
-        switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
+        switch (const VkResult result = master_semaphore->SubmitQueue(
+                    cmdbuf, signal_semaphore, wait_semaphore, signal_value)) {
         case VK_SUCCESS:
             break;
         case VK_ERROR_DEVICE_LOST:
@@ -145,7 +145,6 @@
     FEATURE_NAME(robustness2, robustImageAccess2) \
     FEATURE_NAME(shader_demote_to_helper_invocation, shaderDemoteToHelperInvocation) \
     FEATURE_NAME(shader_draw_parameters, shaderDrawParameters) \
-    FEATURE_NAME(timeline_semaphore, timelineSemaphore) \
     FEATURE_NAME(variable_pointer, variablePointers) \
     FEATURE_NAME(variable_pointer, variablePointersStorageBuffer)
 
@@ -158,6 +157,7 @@
     FEATURE_NAME(provoking_vertex, provokingVertexLast) \
     FEATURE_NAME(shader_float16_int8, shaderFloat16) \
     FEATURE_NAME(shader_float16_int8, shaderInt8) \
+    FEATURE_NAME(timeline_semaphore, timelineSemaphore) \
     FEATURE_NAME(transform_feedback, transformFeedback) \
     FEATURE_NAME(uniform_buffer_standard_layout, uniformBufferStandardLayout) \
     FEATURE_NAME(vertex_input_dynamic_state, vertexInputDynamicState)
@@ -493,6 +493,10 @@ public:
         return extensions.shader_atomic_int64;
     }
 
+    bool HasTimelineSemaphore() const {
+        return features.timeline_semaphore.timelineSemaphore;
+    }
+
     /// Returns the minimum supported version of SPIR-V.
     u32 SupportedSpirvVersion() const {
         if (instance_version >= VK_API_VERSION_1_3) {
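The device-layer change moves FEATURE_NAME(timeline_semaphore, timelineSemaphore) from one feature list to another (presumably from the required set to the optional set, given the PR's purpose) and adds HasTimelineSemaphore(), so the renderer can keep running on drivers that never report the feature. For reference, here is a hedged, standalone sketch of the underlying Vulkan query such a check ultimately boils down to, assuming a Vulkan 1.1+ instance (the feature struct is Vulkan 1.2 core / VK_KHR_timeline_semaphore); SupportsTimelineSemaphores is a hypothetical helper, not this project's FEATURE_NAME machinery.

    // Generic vkGetPhysicalDeviceFeatures2 query for timeline semaphore support.
    #include <vulkan/vulkan.h>

    bool SupportsTimelineSemaphores(VkPhysicalDevice physical_device) {
        VkPhysicalDeviceTimelineSemaphoreFeatures timeline_features{};
        timeline_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES;

        VkPhysicalDeviceFeatures2 features2{};
        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        features2.pNext = &timeline_features; // chain the feature struct we care about

        vkGetPhysicalDeviceFeatures2(physical_device, &features2);
        return timeline_features.timelineSemaphore == VK_TRUE;
    }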