Mirror of https://git.h3cjp.net/H3cJP/citra.git (synced 2024-11-25 12:43:25 +00:00)
timing: wait for completion on unregister
This commit is contained in:
parent 0e8f98a441
commit 0953cdd271
@@ -142,16 +142,24 @@ void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
 }
 
 void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
-                                 std::uintptr_t user_data) {
-    std::scoped_lock scope{basic_lock};
-    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
-        return e.type.lock().get() == event_type.get() && e.user_data == user_data;
-    });
+                                 std::uintptr_t user_data, bool wait) {
+    {
+        std::scoped_lock lk{basic_lock};
+        const auto itr =
+            std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
+                return e.type.lock().get() == event_type.get() && e.user_data == user_data;
+            });
 
-    // Removing random items breaks the invariant so we have to re-establish it.
-    if (itr != event_queue.end()) {
-        event_queue.erase(itr, event_queue.end());
-        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+        // Removing random items breaks the invariant so we have to re-establish it.
+        if (itr != event_queue.end()) {
+            event_queue.erase(itr, event_queue.end());
+            std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+        }
+    }
+
+    // Force any in-progress events to finish
+    if (wait) {
+        std::scoped_lock lk{advance_lock};
     }
 }
 
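The new tail of UnscheduleEvent is what implements the commit title: when wait is true, the function briefly acquires advance_lock, the same mutex Advance() takes (visible in the context of the next hunk), and releases it immediately. Returning from that acquire therefore means any event callback that was already running has finished. Below is a minimal, self-contained sketch of this lock-as-completion-barrier idiom; the names mirror the diff but the types are illustrative stand-ins, not the real CoreTiming code.

// Minimal sketch of the lock-as-completion-barrier idiom used above.
// advance_lock, RunCallback and Unschedule are illustrative stand-ins.
#include <chrono>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex advance_lock; // held while a callback is executing

void RunCallback() {
    std::scoped_lock lk{advance_lock};
    // Simulate a long-running scheduled callback.
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    std::cout << "callback finished\n";
}

void Unschedule(bool wait) {
    // ...remove the event from the queue under its own lock (omitted)...
    if (wait) {
        // Blocks until RunCallback() releases advance_lock, i.e. until any
        // in-progress callback has completed; the lock is dropped right away.
        std::scoped_lock lk{advance_lock};
    }
    std::cout << "unschedule returned\n";
}

int main() {
    std::thread worker{RunCallback};
    std::this_thread::sleep_for(std::chrono::milliseconds(10)); // let the callback start
    Unschedule(/*wait=*/true); // prints only after "callback finished"
    worker.join();
}

The scoped_lock in the wait branch is dropped as soon as it is obtained; its only purpose is to order the return of the unschedule call after the end of any in-flight callback.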
@@ -190,20 +198,6 @@ u64 CoreTiming::GetClockTicks() const {
     return CpuCyclesToClockCycles(ticks);
 }
 
-void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
-    std::scoped_lock lock{basic_lock};
-
-    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
-        return e.type.lock().get() == event_type.get();
-    });
-
-    // Removing random items breaks the invariant so we have to re-establish it.
-    if (itr != event_queue.end()) {
-        event_queue.erase(itr, event_queue.end());
-        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
-    }
-}
-
 std::optional<s64> CoreTiming::Advance() {
     std::scoped_lock lock{advance_lock, basic_lock};
     global_timer = GetGlobalTimeNs().count();
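Both the removed RemoveEvent and the surviving UnscheduleEvent delete arbitrary entries from event_queue the same way: std::remove_if shuffles the matching events to the end, erase drops them, and std::make_heap with std::greater<> rebuilds the ordering, since plucking elements out of the middle of a binary heap breaks its invariant. A small stand-alone illustration of that pattern follows; the Event struct here is a placeholder (the real one also carries a weak_ptr to its EventType and a user_data value, as the lambdas above show).

// Sketch of the remove_if / erase / make_heap pattern from the hunks above.
#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

// Placeholder event type ordered by its scheduled time.
struct Event {
    long long time;
    int id;
};

bool operator>(const Event& lhs, const Event& rhs) {
    return lhs.time > rhs.time;
}

int main() {
    std::vector<Event> event_queue{{30, 1}, {10, 2}, {20, 3}, {40, 2}};
    // std::greater<> makes this a min-heap: the earliest event sits at front().
    std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());

    // Unschedule everything registered with id == 2, wherever it sits in the heap.
    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
                                    [](const Event& e) { return e.id == 2; });
    if (itr != event_queue.end()) {
        event_queue.erase(itr, event_queue.end());
        // Removing random items breaks the invariant so we have to re-establish it.
        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    }

    std::cout << "next event at t=" << event_queue.front().time << '\n'; // t=20
}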
@@ -98,10 +98,13 @@ public:
                              const std::shared_ptr<EventType>& event_type,
                              std::uintptr_t user_data = 0, bool absolute_time = false);
 
-    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data);
+    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, std::uintptr_t user_data,
+                         bool wait = true);
 
-    /// We only permit one event of each type in the queue at a time.
-    void RemoveEvent(const std::shared_ptr<EventType>& event_type);
+    void UnscheduleEventWithoutWait(const std::shared_ptr<EventType>& event_type,
+                                    std::uintptr_t user_data) {
+        UnscheduleEvent(event_type, user_data, false);
+    }
 
     void AddTicks(u64 ticks_to_add);
 
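The header change keeps the old call sites working while making the new behaviour the default: wait defaults to true, so every existing UnscheduleEvent caller now blocks until a running callback completes, and UnscheduleEventWithoutWait is a thin inline wrapper for the places that must not block. A hypothetical stand-in showing the call-site effect (the real declarations are the ones in the hunk above):

// Hypothetical stand-in for the new API shape; not the real class.
#include <cstdint>
#include <iostream>

class CoreTiming {
public:
    void UnscheduleEvent(int event_type, std::uintptr_t user_data, bool wait = true) {
        std::cout << "unschedule event " << event_type << " (user_data=" << user_data << ")"
                  << (wait ? ", waits for any in-flight callback\n" : ", returns immediately\n");
    }

    // Thin forwarding helper, mirroring UnscheduleEventWithoutWait in the diff.
    void UnscheduleEventWithoutWait(int event_type, std::uintptr_t user_data) {
        UnscheduleEvent(event_type, user_data, false);
    }
};

int main() {
    CoreTiming timing;
    timing.UnscheduleEvent(1, 0);            // existing callers pick up waiting by default
    timing.UnscheduleEventWithoutWait(2, 0); // explicit opt-out where blocking is unsafe
}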
@@ -18,7 +18,8 @@ void KHardwareTimer::Initialize() {
 }
 
 void KHardwareTimer::Finalize() {
     this->DisableInterrupt();
+    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type, reinterpret_cast<uintptr_t>(this));
     m_wakeup_time = std::numeric_limits<s64>::max();
     m_event_type.reset();
 }
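Both KHardwareTimer call sites pass reinterpret_cast<uintptr_t>(this) as user_data, so unscheduling removes only the events this particular timer queued, matching the e.user_data == user_data test in UnscheduleEvent. A sketch of that convention with a hypothetical Scheduler stand-in (not the real CoreTiming interface):

// Sketch of using the object pointer as the user_data key, as in the hunks above.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Scheduler {
    std::vector<std::uintptr_t> queued; // user_data of each pending event

    void Schedule(std::uintptr_t user_data) { queued.push_back(user_data); }

    void Unschedule(std::uintptr_t user_data) {
        // Drop only the events registered with this exact user_data value.
        queued.erase(std::remove(queued.begin(), queued.end(), user_data), queued.end());
    }
};

struct Timer {
    Scheduler* scheduler;
    void Arm() { scheduler->Schedule(reinterpret_cast<std::uintptr_t>(this)); }
    void Disarm() { scheduler->Unschedule(reinterpret_cast<std::uintptr_t>(this)); }
};

int main() {
    Scheduler sched;
    Timer a{&sched}, b{&sched};
    a.Arm();
    b.Arm();
    a.Disarm(); // removes only a's event; b's stays queued
    std::cout << "pending events: " << sched.queued.size() << '\n'; // 1
}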
@@ -59,7 +60,8 @@ void KHardwareTimer::EnableInterrupt(s64 wakeup_time) {
 }
 
 void KHardwareTimer::DisableInterrupt() {
-    m_kernel.System().CoreTiming().UnscheduleEvent(m_event_type, reinterpret_cast<uintptr_t>(this));
+    m_kernel.System().CoreTiming().UnscheduleEventWithoutWait(m_event_type,
+                                                              reinterpret_cast<uintptr_t>(this));
     m_wakeup_time = std::numeric_limits<s64>::max();
 }
 
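Taken together, the two KHardwareTimer hunks split the work: DisableInterrupt() switches to the non-waiting variant, and Finalize() adds an explicit blocking UnscheduleEvent so the timer callback is guaranteed to have finished before m_wakeup_time and m_event_type are torn down. A likely reason DisableInterrupt() must not wait is that it can run in contexts where advance_lock may already be held (for example on the timer path itself), where waiting would deadlock; the commit does not state this, so treat it as an inference. A minimal sketch of the resulting quiesce-before-destroy ordering, with illustrative stand-ins:

// Sketch of the teardown ordering Finalize() now follows: stop scheduling new
// work, wait until any in-flight callback has drained, then release what the
// callback uses. All names here are stand-ins for the real kernel objects.
#include <chrono>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>

std::mutex advance_lock;                                      // held while a callback runs
std::shared_ptr<int> event_type = std::make_shared<int>(42);  // resource the callback touches

void Callback() {
    std::scoped_lock lk{advance_lock};
    std::this_thread::sleep_for(std::chrono::milliseconds(50)); // simulated work
    std::cout << "callback used event_type=" << *event_type << '\n';
}

void Finalize() {
    // DisableInterrupt(): stop future wakeups; with the new code this no longer waits.
    // UnscheduleEvent(..., wait = true): drain any in-flight callback before teardown.
    { std::scoped_lock lk{advance_lock}; }
    event_type.reset(); // now safe: no callback can still be dereferencing it
    std::cout << "finalized\n";
}

int main() {
    std::thread worker{Callback};
    std::this_thread::sleep_for(std::chrono::milliseconds(10)); // callback is mid-flight
    Finalize();                                                 // returns only after the callback ends
    worker.join();
}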