Mirror of https://git.h3cjp.net/H3cJP/citra.git (synced 2024-12-28 14:16:57 +00:00)

Clang format and address feedback

Commit e4dc73f61e (parent e29ced29fa)
@@ -6,6 +6,7 @@
 #include <algorithm>
 #include <array>
 #include <cstring>
+#include <functional>
 #include <iterator>
 #include <list>
 #include <memory>
@@ -101,10 +102,10 @@ class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::Chann
 public:
     explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
                             Core::Memory::Memory& cpu_memory_)
-        : rasterizer{rasterizer_}, cpu_memory{cpu_memory_}, streams{
-              {CounterStream{static_cast<QueryCache&>(*this),
-                             VideoCore::QueryType::SamplesPassed}}} {
-        (void) slot_async_jobs.insert(); // Null value
+        : rasterizer{rasterizer_},
+          cpu_memory{cpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
+                                                          VideoCore::QueryType::SamplesPassed}}} {
+        (void)slot_async_jobs.insert(); // Null value
     }
 
     void InvalidateRegion(VAddr addr, std::size_t size) {
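
Note on the `(void)slot_async_jobs.insert(); // Null value` line kept above: the constructor burns one slot so that index 0 of the async-job pool stays permanently reserved, letting a default (zero) AsyncJobId mean "no async job attached". The sketch below illustrates that sentinel-slot idea with a plain std::vector-backed pool; JobPool and its members are illustrative stand-ins, not yuzu's SlotVector API.

// Minimal sketch (not yuzu's SlotVector): reserve slot 0 as a null entry so a
// default-constructed id of 0 can safely mean "no async job".
#include <cstddef>
#include <iostream>
#include <vector>

struct AsyncJob {
    bool collected = false;
};

class JobPool {
public:
    JobPool() {
        (void)insert(); // Null value: slot 0 is never handed out as a real job
    }

    std::size_t insert() {
        jobs.emplace_back();
        return jobs.size() - 1;
    }

    AsyncJob& operator[](std::size_t id) {
        return jobs[id];
    }

private:
    std::vector<AsyncJob> jobs;
};

int main() {
    JobPool pool;
    constexpr std::size_t no_job = 0;           // the reserved null id
    const std::size_t first_real = pool.insert();
    std::cout << "null id: " << no_job << ", first real id: " << first_real << '\n';
}
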
@@ -136,7 +137,7 @@ public:
             query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
         }
 
-        auto result = query->BindCounter(Stream(type).Current());
+        auto result = query->BindCounter(Stream(type).Current(), timestamp);
         if (result) {
             auto async_job_id = query->GetAsyncJob();
             auto& async_job = slot_async_jobs[async_job_id];
@@ -294,6 +295,7 @@ private:
     void AsyncFlushQuery(CachedQuery* query, std::optional<u64> timestamp,
                          std::unique_lock<std::recursive_mutex>& lock) {
         const AsyncJobId new_async_job_id = slot_async_jobs.insert();
+        {
             AsyncJob& async_job = slot_async_jobs[new_async_job_id];
             query->SetAsyncJob(new_async_job_id);
             async_job.query_location = query->GetCpuAddr();
@@ -303,6 +305,7 @@ private:
                 uncommitted_flushes = std::make_shared<std::vector<AsyncJobId>>();
             }
             uncommitted_flushes->push_back(new_async_job_id);
+        }
         lock.unlock();
         std::function<void()> operation([this, new_async_job_id, timestamp] {
             std::unique_lock local_lock{mutex};
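
The two hunks above show AsyncFlushQuery filling in the job entry inside its own scope while the caller's lock is held, unlocking, and only then building the std::function that will later re-lock the cache mutex (which is also why <functional> is now included). Below is a minimal sketch of that pattern; JobQueue, QueueFlush, and worker_queue are invented names for illustration, not yuzu's real types.

// Sketch: mutate shared state under the caller's lock, unlock, then queue work
// that re-acquires the mutex before touching shared state again.
#include <functional>
#include <iostream>
#include <mutex>
#include <vector>

class JobQueue {
public:
    void QueueFlush(int job_id, std::unique_lock<std::recursive_mutex>& lock) {
        {
            pending.push_back(job_id); // shared state touched only while locked
        }
        lock.unlock(); // do not hold the cache lock while scheduling the flush
        std::function<void()> operation([this, job_id] {
            std::unique_lock local_lock{mutex}; // re-acquire before reading state
            std::cout << "flushing job " << job_id << '\n';
        });
        worker_queue.push_back(std::move(operation));
    }

    void RunAll() {
        for (auto& op : worker_queue) {
            op();
        }
        worker_queue.clear();
    }

    std::recursive_mutex mutex;

private:
    std::vector<int> pending;
    std::vector<std::function<void()>> worker_queue;
};

int main() {
    JobQueue queue;
    std::unique_lock lock{queue.mutex};
    queue.QueueFlush(1, lock);
    queue.RunAll();
}
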
@@ -408,11 +411,20 @@ public:
         // When counter is nullptr it means that it's just been reset. We are supposed to write a
         // zero in these cases.
         const u64 value = counter ? counter->Query(async) : 0;
+        if (async) {
+            return value;
+        }
+        std::memcpy(host_ptr, &value, sizeof(u64));
+
+        if (timestamp) {
+            std::memcpy(host_ptr + TIMESTAMP_OFFSET, &*timestamp, sizeof(u64));
+        }
         return value;
     }
 
     /// Binds a counter to this query.
-    std::optional<u64> BindCounter(std::shared_ptr<HostCounter> counter_) {
+    std::optional<u64> BindCounter(std::shared_ptr<HostCounter> counter_,
+                                   std::optional<u64> timestamp_) {
         std::optional<u64> result{};
         if (counter) {
             // If there's an old counter set it means the query is being rewritten by the game.
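
The new Flush path above returns early for asynchronous queries and otherwise writes the result into guest memory: a u64 value at host_ptr and, for timestamped queries, the timestamp at TIMESTAMP_OFFSET. Below is a standalone sketch of that layout; treating TIMESTAMP_OFFSET as 8 bytes is an assumption made for illustration, not something taken from this diff.

// Sketch of the result layout: u64 value at offset 0, optional u64 timestamp after it.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <optional>

constexpr std::size_t TIMESTAMP_OFFSET = 8; // assumed offset, for illustration only

std::uint64_t Flush(std::uint8_t* host_ptr, std::uint64_t value,
                    std::optional<std::uint64_t> timestamp, bool async) {
    if (async) {
        return value; // async path: guest memory is written later by the flush job
    }
    std::memcpy(host_ptr, &value, sizeof(std::uint64_t));
    if (timestamp) {
        std::memcpy(host_ptr + TIMESTAMP_OFFSET, &*timestamp, sizeof(std::uint64_t));
    }
    return value;
}

int main() {
    std::uint8_t guest[16]{};
    Flush(guest, 42, std::uint64_t{123456}, false);
    std::uint64_t value{};
    std::uint64_t stamp{};
    std::memcpy(&value, guest, sizeof(value));
    std::memcpy(&stamp, guest + TIMESTAMP_OFFSET, sizeof(stamp));
    std::cout << "value=" << value << " timestamp=" << stamp << '\n';
}
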
@@ -420,6 +432,7 @@ public:
             result = std::make_optional(Flush());
         }
         counter = std::move(counter_);
+        timestamp = timestamp_;
         return result;
     }
 
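
Together with the signature change above, BindCounter now records the timestamp the query was bound with, while rebinding still flushes the previous counter so its value is not lost. A simplified, self-contained sketch with stand-in types (Counter instead of HostCounter):

// Sketch: rebinding flushes the old counter and stores the new timestamp.
#include <cstdint>
#include <iostream>
#include <memory>
#include <optional>

struct Counter {
    std::uint64_t value;
    std::uint64_t Flush() const {
        return value;
    }
};

class CachedQuery {
public:
    std::optional<std::uint64_t> BindCounter(std::shared_ptr<Counter> counter_,
                                             std::optional<std::uint64_t> timestamp_) {
        std::optional<std::uint64_t> result{};
        if (counter) {
            // An old counter means the query is being rewritten; flush it first.
            result = std::make_optional(counter->Flush());
        }
        counter = std::move(counter_);
        timestamp = timestamp_;
        return result;
    }

private:
    std::shared_ptr<Counter> counter;
    std::optional<std::uint64_t> timestamp;
};

int main() {
    CachedQuery query;
    query.BindCounter(std::make_shared<Counter>(Counter{7}), std::nullopt);
    const auto old = query.BindCounter(std::make_shared<Counter>(Counter{9}), 100);
    std::cout << "flushed old value: " << old.value_or(0) << '\n';
}
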
@@ -10,7 +10,6 @@
 #include "video_core/renderer_vulkan/vk_texture_cache.h"
 #include "video_core/vulkan_common/vulkan_device.h"
 
-
 namespace Vulkan {
 
 InnerFence::InnerFence(Scheduler& scheduler_, bool is_stubbed_)
@@ -172,7 +172,8 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
       pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
                      render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
-      query_cache{*this, cpu_memory_, device, scheduler}, accelerate_dma(buffer_cache, texture_cache, scheduler),
+      query_cache{*this, cpu_memory_, device, scheduler},
+      accelerate_dma(buffer_cache, texture_cache, scheduler),
       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
       wfi_event(device.GetLogical().CreateEvent()) {
     scheduler.SetQueryCache(query_cache);
@@ -675,7 +676,8 @@ bool RasterizerVulkan::AccelerateConditionalRendering() {
     const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()};
     Maxwell::ReportSemaphore::Compare cmp;
     if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp),
-                                  VideoCommon::CacheType::BufferCache | VideoCommon::CacheType::QueryCache)) {
+                                  VideoCommon::CacheType::BufferCache |
+                                      VideoCommon::CacheType::QueryCache)) {
         return true;
     }
     return false;
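
The reflowed call above relies on VideoCommon::CacheType being a flag-style enum, so BufferCache and QueryCache can be combined with bitwise OR into a single mask. A minimal sketch of that idea; the enum values and the IsMemoryDirty signature here are illustrative, not the engine's actual definitions.

// Sketch of a flag-style enum combinable with operator|.
#include <cstdint>
#include <iostream>

enum class CacheType : std::uint32_t {
    None = 0,
    BufferCache = 1u << 0,
    QueryCache = 1u << 1,
};

constexpr CacheType operator|(CacheType lhs, CacheType rhs) {
    return static_cast<CacheType>(static_cast<std::uint32_t>(lhs) |
                                  static_cast<std::uint32_t>(rhs));
}

// Hypothetical check: the region is dirty if any requested cache currently marks it dirty.
bool IsMemoryDirty(CacheType which, CacheType dirty_caches) {
    return (static_cast<std::uint32_t>(which) & static_cast<std::uint32_t>(dirty_caches)) != 0;
}

int main() {
    const CacheType dirty_caches = CacheType::QueryCache; // pretend only queries are dirty
    const bool dirty =
        IsMemoryDirty(CacheType::BufferCache | CacheType::QueryCache, dirty_caches);
    std::cout << std::boolalpha << dirty << '\n';
}
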