Merge pull request #3753 from ReinUsesLisp/ac-vulkan
{gl,vk}_rasterizer: Add lazy default buffer maker and use it for empty buffers
Commit: 7e38dd580f
@@ -1259,7 +1259,8 @@ public:
 
            GPUVAddr LimitAddress() const {
                return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) |
-                                             limit_low);
+                                             limit_low) +
+                       1;
            }
        } vertex_array_limit[NumVertexArrays];
 
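The register change above turns LimitAddress() from "last valid byte" (inclusive) into "one past the end" (exclusive). That is what lets the rasterizer hunks below compute sizes as a plain end - start and treat end == start as a legitimately empty vertex array instead of asserting. A minimal sketch of the before/after arithmetic; the helper names are illustrative, not taken from the codebase:

    #include <cassert>
    #include <cstdint>

    using GPUVAddr = std::uint64_t;

    // Old scheme: the limit register named the last valid byte, so the size was
    // limit - start + 1 and an empty array could not be expressed.
    std::uint64_t SizeInclusive(GPUVAddr start, GPUVAddr last_byte) {
        assert(last_byte >= start);
        return last_byte - start + 1;
    }

    // New scheme: LimitAddress() already adds 1, so `end` is one past the end and
    // the size is simply end - start, where zero means "no vertex data bound".
    std::uint64_t SizeExclusive(GPUVAddr start, GPUVAddr end) {
        assert(end >= start); // mirrors ASSERT(end >= start) in the hunks below
        return end - start;   // may legitimately be zero
    }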
@@ -186,8 +186,12 @@ void RasterizerOpenGL::SetupVertexBuffer() {
         const GPUVAddr start = vertex_array.StartAddress();
         const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
 
-        ASSERT(end > start);
-        const u64 size = end - start + 1;
+        ASSERT(end >= start);
+        const u64 size = end - start;
+        if (size == 0) {
+            glBindVertexBuffer(static_cast<GLuint>(index), 0, 0, vertex_array.stride);
+            continue;
+        }
         const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size);
         glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset,
                            vertex_array.stride);
@@ -311,8 +315,8 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
         const GPUVAddr start = regs.vertex_array[index].StartAddress();
         const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
 
-        ASSERT(end > start);
-        size += end - start + 1;
+        ASSERT(end >= start);
+        size += end - start;
     }
 
     return size;
@@ -877,8 +877,12 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex
         const GPUVAddr start{vertex_array.StartAddress()};
         const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
 
-        ASSERT(end > start);
-        const std::size_t size{end - start + 1};
+        ASSERT(end >= start);
+        const std::size_t size{end - start};
+        if (size == 0) {
+            buffer_bindings.AddVertexBinding(DefaultBuffer(), 0);
+            continue;
+        }
         const auto [buffer, offset] = buffer_cache.UploadMemory(start, size);
         buffer_bindings.AddVertexBinding(buffer, offset);
     }
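The OpenGL hunk above can pass buffer object 0 to glBindVertexBuffer to leave a binding point without a backing buffer, but core Vulkan has no equivalent: unless VK_EXT_robustness2's nullDescriptor feature is enabled, every handle handed to vkCmdBindVertexBuffers must be a valid VkBuffer. That is why an empty vertex array binds the lazily created, zero-filled DefaultBuffer() instead. A sketch of that fallback at the raw API level; the function and parameter names are illustrative, not from the diff:

    #include <vulkan/vulkan.h>

    // Bind either the real vertex buffer or a valid placeholder; VK_NULL_HANDLE is
    // not accepted by vkCmdBindVertexBuffers in core Vulkan.
    void BindVertexBufferOrDefault(VkCommandBuffer cmdbuf, uint32_t binding, VkBuffer buffer,
                                   VkDeviceSize offset, VkBuffer default_buffer) {
        const bool empty = buffer == VK_NULL_HANDLE;
        const VkBuffer handle = empty ? default_buffer : buffer;
        const VkDeviceSize bind_offset = empty ? 0 : offset;
        vkCmdBindVertexBuffers(cmdbuf, binding, 1, &handle, &bind_offset);
    }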
@@ -1033,8 +1037,7 @@ void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry,
                                         const Tegra::Engines::ConstBufferInfo& buffer) {
     if (!buffer.enabled) {
         // Set values to zero to unbind buffers
-        update_descriptor_queue.AddBuffer(buffer_cache.GetEmptyBuffer(sizeof(float)), 0,
-                                          sizeof(float));
+        update_descriptor_queue.AddBuffer(DefaultBuffer(), 0, DEFAULT_BUFFER_SIZE);
         return;
     }
 
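DEFAULT_BUFFER_SIZE is declared in the header hunk below as 4 * sizeof(float), i.e. 16 bytes, one zero-filled vec4, so a shader reading a disabled const buffer simply sees zeros. A quick check of that arithmetic; the static_assert is only illustrative, not part of the change:

    #include <cstddef>

    // Mirrors the constant added to the rasterizer header further down.
    constexpr std::size_t DEFAULT_BUFFER_SIZE = 4 * sizeof(float);
    static_assert(DEFAULT_BUFFER_SIZE == 16, "one zero-filled vec4 (4 x 4 bytes)");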
@@ -1057,7 +1060,9 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
     if (size == 0) {
         // Sometimes global memory pointers don't have a proper size. Upload a dummy entry
         // because Vulkan doesn't like empty buffers.
-        constexpr std::size_t dummy_size = 4;
+        // Note: Do *not* use DefaultBuffer() here, storage buffers can be written breaking the
+        // default buffer.
+        static constexpr std::size_t dummy_size = 4;
         const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
         update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
         return;
@@ -1222,7 +1227,7 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
         const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
         DEBUG_ASSERT(end >= start);
 
-        size += (end - start + 1) * regs.vertex_array[index].enable;
+        size += (end - start) * regs.vertex_array[index].enable;
     }
     return size;
 }
@@ -1269,4 +1274,29 @@ RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions)
     return renderpass_params;
 }
 
+VkBuffer RasterizerVulkan::DefaultBuffer() {
+    if (default_buffer) {
+        return *default_buffer;
+    }
+
+    VkBufferCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.size = DEFAULT_BUFFER_SIZE;
+    ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
+               VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    ci.queueFamilyIndexCount = 0;
+    ci.pQueueFamilyIndices = nullptr;
+    default_buffer = device.GetLogical().CreateBuffer(ci);
+    default_buffer_commit = memory_manager.Commit(default_buffer, false);
+
+    scheduler.RequestOutsideRenderPassOperationContext();
+    scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) {
+        cmdbuf.FillBuffer(buffer, 0, DEFAULT_BUFFER_SIZE, 0);
+    });
+    return *default_buffer;
+}
+
 } // namespace Vulkan
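DefaultBuffer() above is created on first use, committed to device memory, and then cleared with a fill recorded outside of any render pass (vkCmdFillBuffer is not allowed inside one, hence RequestOutsideRenderPassOperationContext()). A rough sketch of what the wrapped calls amount to at the raw Vulkan level, with memory allocation/binding elided and illustrative names:

    #include <vulkan/vulkan.h>

    // Assumes `device` is a valid VkDevice and `cmdbuf` is a command buffer in the
    // recording state, outside of a render pass. Real code must also allocate and
    // bind device memory (the diff does this via memory_manager.Commit) before the fill.
    VkBuffer CreateZeroFilledBuffer(VkDevice device, VkCommandBuffer cmdbuf, VkDeviceSize size) {
        VkBufferCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        ci.size = size;
        // TRANSFER_DST is required for vkCmdFillBuffer; VERTEX/UNIFORM cover the ways
        // the default buffer is later bound.
        ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
                   VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
        ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

        VkBuffer buffer = VK_NULL_HANDLE;
        vkCreateBuffer(device, &ci, nullptr, &buffer);

        // Zero the whole buffer so empty vertex arrays and disabled const buffers read 0.
        vkCmdFillBuffer(cmdbuf, buffer, 0, VK_WHOLE_SIZE, 0);
        return buffer;
    }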
@@ -155,6 +155,7 @@ private:
     using Texceptions = std::bitset<Maxwell::NumRenderTargets + 1>;
 
     static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8;
+    static constexpr VkDeviceSize DEFAULT_BUFFER_SIZE = 4 * sizeof(float);
 
     void FlushWork();
 
@@ -247,6 +248,8 @@ private:
 
     RenderPassParams GetRenderPassParams(Texceptions texceptions) const;
 
+    VkBuffer DefaultBuffer();
+
     Core::System& system;
     Core::Frontend::EmuWindow& render_window;
     VKScreenInfo& screen_info;
@@ -271,6 +274,9 @@ private:
     VKFenceManager fence_manager;
     VKQueryCache query_cache;
 
+    vk::Buffer default_buffer;
+    VKMemoryCommit default_buffer_commit;
+
     std::array<View, Maxwell::NumRenderTargets> color_attachments;
     View zeta_attachment;
 
@@ -81,7 +81,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
     ci.size = 1ULL << log2;
     ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
                VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
-               VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+               VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
     ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
     ci.queueFamilyIndexCount = 0;
     ci.pQueueFamilyIndices = nullptr;
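Buffers from the staging pool can now end up bound as vertex buffers as well (presumably the reason for this hunk), and Vulkan only allows a buffer to be bound in ways declared in its create-time usage mask, hence the extra VK_BUFFER_USAGE_VERTEX_BUFFER_BIT. A minimal illustration of that rule; the constant name is made up for the example:

    #include <vulkan/vulkan.h>

    // Usage flags are fixed at creation: binding one of these buffers as a vertex
    // buffer later requires VERTEX_BUFFER_BIT to have been set here, otherwise the
    // validation layers reject the vkCmdBindVertexBuffers call.
    constexpr VkBufferUsageFlags kStagingUsage =
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
        VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
        VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;

    static_assert((kStagingUsage & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) != 0);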