core_timing: Make use of std::chrono with ScheduleEvent
This commit is contained in:
parent 263200f982
commit 8b50c660df
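The change is mechanical but touches every caller: CoreTiming::ScheduleEvent now takes its delay as std::chrono::nanoseconds instead of a raw s64 nanosecond count, so call sites wrap the value in a typed duration. The hunks below cover the audio stream, core timing itself, the kernel preemption timer, HID polling, NVFlinger composition, the cheat engine, the memory freezer, and the core timing tests. As a minimal standalone sketch of what the new parameter type buys callers (illustrative code, not part of the diff):

    #include <chrono>
    #include <cstdio>

    // Stand-in with the same delay parameter type as the new
    // CoreTiming::ScheduleEvent; the real function also takes the event and
    // optional userdata.
    void ScheduleAfter(std::chrono::nanoseconds ns_into_future) {
        std::printf("schedule %lld ns ahead\n",
                    static_cast<long long>(ns_into_future.count()));
    }

    int main() {
        using namespace std::chrono_literals;
        ScheduleAfter(std::chrono::nanoseconds{10}); // explicit count, as in the GPU interrupt hunk
        ScheduleAfter(10ms); // coarser units convert implicitly (and losslessly) to nanoseconds
        return 0;
    }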
@@ -59,11 +59,9 @@ Stream::State Stream::GetState() const {
     return state;
 }
 
-s64 Stream::GetBufferReleaseNS(const Buffer& buffer) const {
+std::chrono::nanoseconds Stream::GetBufferReleaseNS(const Buffer& buffer) const {
     const std::size_t num_samples{buffer.GetSamples().size() / GetNumChannels()};
-    const auto ns =
-        std::chrono::nanoseconds((static_cast<u64>(num_samples) * 1000000000ULL) / sample_rate);
-    return ns.count();
+    return std::chrono::nanoseconds((static_cast<u64>(num_samples) * 1000000000ULL) / sample_rate);
 }
 
 static void VolumeAdjustSamples(std::vector<s16>& samples, float game_volume) {
@@ -105,10 +103,10 @@ void Stream::PlayNextBuffer(s64 cycles_late) {
 
     sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples());
 
-    core_timing.ScheduleEvent(
-        GetBufferReleaseNS(*active_buffer) -
-            (Settings::values.enable_audio_stretching.GetValue() ? 0 : cycles_late),
-        release_event, {});
+    const auto time_stretch_delta = std::chrono::nanoseconds{
+        Settings::values.enable_audio_stretching.GetValue() ? 0 : cycles_late};
+    const auto future_time = GetBufferReleaseNS(*active_buffer) - time_stretch_delta;
+    core_timing.ScheduleEvent(future_time, release_event, {});
 }
 
 void Stream::ReleaseActiveBuffer(s64 cycles_late) {
@@ -4,6 +4,7 @@
 
 #pragma once
 
+#include <chrono>
 #include <functional>
 #include <memory>
 #include <string>
@@ -96,10 +97,7 @@ private:
     void ReleaseActiveBuffer(s64 cycles_late = 0);
 
     /// Gets the number of core cycles when the specified buffer will be released
-    s64 GetBufferReleaseNS(const Buffer& buffer) const;
-
-    /// Gets the number of core cycles when the specified buffer will be released
-    s64 GetBufferReleaseNSHostTiming(const Buffer& buffer) const;
+    std::chrono::nanoseconds GetBufferReleaseNS(const Buffer& buffer) const;
 
     u32 sample_rate;  ///< Sample rate of the stream
     Format format;    ///< Format of the stream
@@ -53,7 +53,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
     instance.ThreadLoop();
 }
 
-void CoreTiming::Initialize(std::function<void(void)>&& on_thread_init_) {
+void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     on_thread_init = std::move(on_thread_init_);
     event_fifo_id = 0;
     shutting_down = false;
@@ -106,11 +106,11 @@ bool CoreTiming::HasPendingEvents() const {
     return !(wait_set && event_queue.empty());
 }
 
-void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
-                               u64 userdata) {
+void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
+                               const std::shared_ptr<EventType>& event_type, u64 userdata) {
     {
         std::scoped_lock scope{basic_lock};
-        const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);
+        const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());
 
         event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
 
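With the typed parameter, the implementation above adds two durations and takes a single .count() at the end, instead of adding a raw s64 to a count. Both forms produce the same integer; a small self-contained check with stand-in values (not yuzu code):

    #include <cassert>
    #include <chrono>
    #include <cstdint>

    int main() {
        const auto now = std::chrono::nanoseconds{1'000'000}; // stand-in for GetGlobalTimeNs()
        const auto ns_into_future = std::chrono::nanoseconds{2'500};

        // Old form: add a raw s64 to a raw count.
        const auto a = static_cast<std::uint64_t>(now.count() + ns_into_future.count());
        // New form: duration arithmetic first, one count() at the end.
        const auto b = static_cast<std::uint64_t>((now + ns_into_future).count());

        assert(a == b); // identical result; the units just stay typed for longer
        return 0;
    }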
@@ -62,7 +62,7 @@ public:
 
     /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
     /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
-    void Initialize(std::function<void(void)>&& on_thread_init_);
+    void Initialize(std::function<void()>&& on_thread_init_);
 
     /// Tears down all timing related functionality.
     void Shutdown();
@@ -95,8 +95,8 @@ public:
     bool HasPendingEvents() const;
 
     /// Schedules an event in core timing
-    void ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
-                       u64 userdata = 0);
+    void ScheduleEvent(std::chrono::nanoseconds ns_into_future,
+                       const std::shared_ptr<EventType>& event_type, u64 userdata = 0);
 
     void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);
 
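Taking the duration by value, as the new declaration does, costs the same as the old s64 parameter: std::chrono::nanoseconds is a trivially copyable wrapper around a signed tick count of at least 64 bits. A quick compile-time check of those assumptions (illustrative, not part of the diff):

    #include <chrono>
    #include <cstdint>
    #include <type_traits>

    // nanoseconds::rep must be a signed integer type of at least 64 bits, and the
    // duration wrapper is trivially copyable, so by-value passing is cheap.
    static_assert(sizeof(std::chrono::nanoseconds::rep) >= sizeof(std::int64_t));
    static_assert(std::is_trivially_copyable_v<std::chrono::nanoseconds>);

    int main() {
        return 0;
    }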
@@ -161,7 +161,7 @@ private:
     std::atomic<bool> wait_set{};
     std::atomic<bool> shutting_down{};
     std::atomic<bool> has_started{};
-    std::function<void(void)> on_thread_init{};
+    std::function<void()> on_thread_init{};
 
     bool is_multicore{};
 
@@ -23,7 +23,7 @@ InterruptManager::~InterruptManager() = default;
 
 void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
     const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value;
-    system.CoreTiming().ScheduleEvent(10, gpu_interrupt_event, msg);
+    system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{10}, gpu_interrupt_event, msg);
 }
 
 } // namespace Core::Hardware
@@ -149,11 +149,13 @@ struct KernelCore::Impl {
                 SchedulerLock lock(kernel);
                 global_scheduler.PreemptThreads();
             }
-            s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+            const auto time_interval = std::chrono::nanoseconds{
+                Core::Timing::msToCycles(std::chrono::milliseconds(10))};
             system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
         });
 
-        s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+        const auto time_interval =
+            std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
         system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
     }
 
@@ -184,8 +184,8 @@ ResultCode ServerSession::CompleteSyncRequest() {
 
 ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
                                             Core::Memory::Memory& memory) {
-    ResultCode result = QueueSyncRequest(std::move(thread), memory);
-    const u64 delay = kernel.IsMulticore() ? 0U : 20000U;
+    const ResultCode result = QueueSyncRequest(std::move(thread), memory);
+    const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000};
     Core::System::GetInstance().CoreTiming().ScheduleEvent(delay, request_event, {});
     return result;
 }
@@ -34,7 +34,8 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64
         ASSERT(timetask);
         ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
         ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
-        system.CoreTiming().ScheduleEvent(nanoseconds, time_manager_event_type, event_handle);
+        system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
+                                          time_manager_event_type, event_handle);
     } else {
         event_handle = InvalidHandle;
     }
@@ -39,9 +39,10 @@ namespace Service::HID {
 
 // Updating period for each HID device.
 // TODO(ogniK): Find actual polling rate of hid
-constexpr s64 pad_update_ticks = static_cast<s64>(1000000000 / 66);
-[[maybe_unused]] constexpr s64 accelerometer_update_ticks = static_cast<s64>(1000000000 / 100);
-[[maybe_unused]] constexpr s64 gyroscope_update_ticks = static_cast<s64>(1000000000 / 100);
+constexpr auto pad_update_ns = std::chrono::nanoseconds{1000000000 / 66};
+[[maybe_unused]] constexpr auto accelerometer_update_ns =
+    std::chrono::nanoseconds{1000000000 / 100};
+[[maybe_unused]] constexpr auto gyroscope_update_ticks = std::chrono::nanoseconds{1000000000 / 100};
 constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;
 
 IAppletResource::IAppletResource(Core::System& system)
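The polling periods above keep the 1000000000 / rate spelling, now wrapped in nanoseconds. An alternative (hypothetical) spelling lets chrono supply the conversion factor instead of the literal billion; a compile-time check that the two agree:

    #include <chrono>

    int main() {
        using namespace std::chrono;

        // As written in the diff.
        constexpr auto pad_update_ns = nanoseconds{1000000000 / 66};

        // Alternative spelling: one second, expressed in nanoseconds, divided by
        // the polling rate. Integer division truncates identically in both forms.
        constexpr auto pad_update_alt = duration_cast<nanoseconds>(seconds{1}) / 66;

        static_assert(pad_update_ns == pad_update_alt);
        return 0;
    }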
@@ -82,7 +83,7 @@ IAppletResource::IAppletResource(Core::System& system)
 
     // TODO(shinyquagsire23): Other update callbacks? (accel, gyro?)
 
-    system.CoreTiming().ScheduleEvent(pad_update_ticks, pad_update_event);
+    system.CoreTiming().ScheduleEvent(pad_update_ns, pad_update_event);
 
     ReloadInputDevices();
 }
@@ -118,7 +119,8 @@ void IAppletResource::UpdateControllers(u64 userdata, s64 ns_late) {
         controller->OnUpdate(core_timing, shared_mem->GetPointer(), SHARED_MEMORY_SIZE);
     }
 
-    core_timing.ScheduleEvent(pad_update_ticks - ns_late, pad_update_event);
+    const auto future_ns = pad_update_ns - std::chrono::nanoseconds{ns_late};
+    core_timing.ScheduleEvent(future_ns, pad_update_event);
 }
 
 class IActiveVibrationDeviceList final : public ServiceFramework<IActiveVibrationDeviceList> {
@@ -28,8 +28,7 @@
 
 namespace Service::NVFlinger {
 
-constexpr s64 frame_ticks = static_cast<s64>(1000000000 / 60);
-constexpr s64 frame_ticks_30fps = static_cast<s64>(1000000000 / 30);
+constexpr auto frame_ns = std::chrono::nanoseconds{1000000000 / 60};
 
 void NVFlinger::VSyncThread(NVFlinger& nv_flinger) {
     nv_flinger.SplitVSync();
@@ -71,16 +70,20 @@ NVFlinger::NVFlinger(Core::System& system) : system(system) {
         Core::Timing::CreateEvent("ScreenComposition", [this](u64 userdata, s64 ns_late) {
             Lock();
             Compose();
-            const auto ticks = GetNextTicks();
-            this->system.CoreTiming().ScheduleEvent(std::max<s64>(0LL, ticks - ns_late),
-                                                    composition_event);
+
+            const auto ticks = std::chrono::nanoseconds{GetNextTicks()};
+            const auto ticks_delta = ticks - std::chrono::nanoseconds{ns_late};
+            const auto future_ns = std::max(std::chrono::nanoseconds::zero(), ticks_delta);
+
+            this->system.CoreTiming().ScheduleEvent(future_ns, composition_event);
         });
 
     if (system.IsMulticore()) {
         is_running = true;
         wait_event = std::make_unique<Common::Event>();
         vsync_thread = std::make_unique<std::thread>(VSyncThread, std::ref(*this));
     } else {
-        system.CoreTiming().ScheduleEvent(frame_ticks, composition_event);
+        system.CoreTiming().ScheduleEvent(frame_ns, composition_event);
     }
 }
 
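The composition callback above is the one place in this commit that clamps the reschedule delay at zero, using std::max over durations rather than std::max<s64>(0LL, ...). A standalone sketch of that clamp with made-up numbers:

    #include <algorithm>
    #include <cassert>
    #include <chrono>

    int main() {
        using std::chrono::nanoseconds;

        const auto frame = nanoseconds{1000000000 / 60}; // same value as frame_ns
        const auto ns_late = nanoseconds{20'000'000};    // pretend the callback ran 20 ms late

        // Same pattern as the new NVFlinger code: never schedule into the past.
        const auto future_ns = std::max(nanoseconds::zero(), frame - ns_late);

        assert(future_ns == nanoseconds::zero());
        return 0;
    }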
@@ -20,7 +20,7 @@
 
 namespace Core::Memory {
 
-constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(1000000000 / 12);
+constexpr auto CHEAT_ENGINE_NS = std::chrono::nanoseconds{1000000000 / 12};
 constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF;
 
 StandardVmCallbacks::StandardVmCallbacks(Core::System& system, const CheatProcessMetadata& metadata)
@@ -191,7 +191,7 @@ void CheatEngine::Initialize() {
     event = Core::Timing::CreateEvent(
         "CheatEngine::FrameCallback::" + Common::HexToString(metadata.main_nso_build_id),
        [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
-    core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS, event);
+    core_timing.ScheduleEvent(CHEAT_ENGINE_NS, event);
 
     metadata.process_id = system.CurrentProcess()->GetProcessID();
     metadata.title_id = system.CurrentProcess()->GetTitleID();
@@ -230,7 +230,8 @@ void CheatEngine::FrameCallback(u64 userdata, s64 ns_late) {
 
     vm.Execute(metadata);
 
-    core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - ns_late, event);
+    const auto future_ns = CHEAT_ENGINE_NS - std::chrono::nanoseconds{ns_late};
+    core_timing.ScheduleEvent(future_ns, event);
 }
 
 } // namespace Core::Memory
@@ -14,7 +14,7 @@
 namespace Tools {
 namespace {
 
-constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(1000000000 / 60);
+constexpr auto memory_freezer_ns = std::chrono::nanoseconds{1000000000 / 60};
 
 u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) {
     switch (width) {
@@ -58,7 +58,7 @@ Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& m
     event = Core::Timing::CreateEvent(
         "MemoryFreezer::FrameCallback",
         [this](u64 userdata, s64 ns_late) { FrameCallback(userdata, ns_late); });
-    core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);
+    core_timing.ScheduleEvent(memory_freezer_ns, event);
 }
 
 Freezer::~Freezer() {
@@ -68,7 +68,7 @@ Freezer::~Freezer() {
 void Freezer::SetActive(bool active) {
     if (!this->active.exchange(active)) {
         FillEntryReads();
-        core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS, event);
+        core_timing.ScheduleEvent(memory_freezer_ns, event);
         LOG_DEBUG(Common_Memory, "Memory freezer activated!");
     } else {
         LOG_DEBUG(Common_Memory, "Memory freezer deactivated!");
@@ -173,7 +173,8 @@ void Freezer::FrameCallback(u64 userdata, s64 ns_late) {
         MemoryWriteWidth(memory, entry.width, entry.address, entry.value);
     }
 
-    core_timing.ScheduleEvent(MEMORY_FREEZER_TICKS - ns_late, event);
+    const auto future_ns = memory_freezer_ns - std::chrono::nanoseconds{ns_late};
+    core_timing.ScheduleEvent(future_ns, event);
 }
 
 void Freezer::FillEntryReads() {
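The frame callbacks here, in the HID hunk, and in the cheat engine subtract ns_late without a clamp, just as the old s64 arithmetic did. That remains well defined: nanoseconds uses a signed representation, so an over-late callback simply yields a negative duration. A small check of that equivalence with stand-in values:

    #include <cassert>
    #include <chrono>

    int main() {
        using std::chrono::nanoseconds;

        const auto period = nanoseconds{1000000000 / 60}; // same value as memory_freezer_ns
        const auto ns_late = nanoseconds{20'000'000};     // lateness larger than the period

        const auto future_ns = period - ns_late;

        // The signed tick count goes negative exactly as the old s64 maths did.
        assert(future_ns < nanoseconds::zero());
        assert(future_ns.count() == period.count() - ns_late.count());
        return 0;
    }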
@@ -116,13 +116,16 @@ TEST_CASE("CoreTiming[BasicOrderNoPausing]", "[core]") {
 
     expected_callback = 0;
 
-    u64 start = core_timing.GetGlobalTimeNs().count();
-    u64 one_micro = 1000U;
+    const u64 start = core_timing.GetGlobalTimeNs().count();
+    const u64 one_micro = 1000U;
+
     for (std::size_t i = 0; i < events.size(); i++) {
-        u64 order = calls_order[i];
-        core_timing.ScheduleEvent(i * one_micro + 100U, events[order], CB_IDS[order]);
+        const u64 order = calls_order[i];
+        const auto future_ns = std::chrono::nanoseconds{static_cast<s64>(i * one_micro + 100)};
+        core_timing.ScheduleEvent(future_ns, events[order], CB_IDS[order]);
     }
-    u64 end = core_timing.GetGlobalTimeNs().count();
+
+    const u64 end = core_timing.GetGlobalTimeNs().count();
     const double scheduling_time = static_cast<double>(end - start);
     const double timer_time = static_cast<double>(TestTimerSpeed(core_timing));
 