Mirror of https://git.h3cjp.net/H3cJP/yuzu.git — synced 2025-10-30 22:45:10 +00:00
			
		
		
		
	Merge pull request #8157 from lat9nq/kernel-races
kernel: Fix some data races
This commit is contained in:
		
						commit
						fd5e1e80da
					
				|  | @ -148,9 +148,9 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) { | |||
| } // Anonymous namespace
 | ||||
| 
 | ||||
| u64 KSystemControl::GenerateRandomU64() { | ||||
|     static std::random_device device; | ||||
|     static std::mt19937 gen(device()); | ||||
|     static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max()); | ||||
|     std::random_device device; | ||||
|     std::mt19937 gen(device()); | ||||
|     std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max()); | ||||
|     return distribution(gen); | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -163,7 +163,7 @@ public: | |||
|         do { | ||||
|             ASSERT(cur_ref_count > 0); | ||||
|         } while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, | ||||
|                                                     std::memory_order_relaxed)); | ||||
|                                                     std::memory_order_acq_rel)); | ||||
| 
 | ||||
|         // If ref count hits zero, destroy the object.
 | ||||
|         if (cur_ref_count - 1 == 0) { | ||||
|  |  | |||
|  | @ -422,7 +422,7 @@ private: | |||
|     bool is_64bit_process = true; | ||||
| 
 | ||||
|     /// Total running time for the process in ticks.
 | ||||
|     u64 total_process_running_time_ticks = 0; | ||||
|     std::atomic<u64> total_process_running_time_ticks = 0; | ||||
| 
 | ||||
|     /// Per-process handle table for storing created object handles in.
 | ||||
|     KHandleTable handle_table; | ||||
|  |  | |||
|  | @ -4,6 +4,7 @@ | |||
| 
 | ||||
| #pragma once | ||||
| 
 | ||||
| #include <atomic> | ||||
| #include "common/assert.h" | ||||
| #include "core/hle/kernel/k_spin_lock.h" | ||||
| #include "core/hle/kernel/k_thread.h" | ||||
|  | @ -75,7 +76,7 @@ private: | |||
|     KernelCore& kernel; | ||||
|     KAlignedSpinLock spin_lock{}; | ||||
|     s32 lock_count{}; | ||||
|     KThread* owner_thread{}; | ||||
|     std::atomic<KThread*> owner_thread{}; | ||||
| }; | ||||
| 
 | ||||
| } // namespace Kernel
 | ||||
|  |  | |||
|  | @ -723,7 +723,7 @@ void KThread::UpdateState() { | |||
|     ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | ||||
| 
 | ||||
|     // Set our suspend flags in state.
 | ||||
|     const auto old_state = thread_state; | ||||
|     const ThreadState old_state = thread_state; | ||||
|     const auto new_state = | ||||
|         static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask); | ||||
|     thread_state = new_state; | ||||
|  | @ -738,7 +738,7 @@ void KThread::Continue() { | |||
|     ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | ||||
| 
 | ||||
|     // Clear our suspend flags in state.
 | ||||
|     const auto old_state = thread_state; | ||||
|     const ThreadState old_state = thread_state; | ||||
|     thread_state = old_state & ThreadState::Mask; | ||||
| 
 | ||||
|     // Note the state change in scheduler.
 | ||||
|  |  | |||
|  | @ -5,6 +5,7 @@ | |||
| #pragma once | ||||
| 
 | ||||
| #include <array> | ||||
| #include <atomic> | ||||
| #include <span> | ||||
| #include <string> | ||||
| #include <utility> | ||||
|  | @ -751,7 +752,7 @@ private: | |||
|     KAffinityMask original_physical_affinity_mask{}; | ||||
|     s32 original_physical_ideal_core_id{}; | ||||
|     s32 num_core_migration_disables{}; | ||||
|     ThreadState thread_state{}; | ||||
|     std::atomic<ThreadState> thread_state{}; | ||||
|     std::atomic<bool> termination_requested{}; | ||||
|     bool wait_cancelled{}; | ||||
|     bool cancellable{}; | ||||
|  |  | |||
|  | @ -85,7 +85,7 @@ struct KernelCore::Impl { | |||
| 
 | ||||
|     void InitializeCores() { | ||||
|         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||||
|             cores[core_id].Initialize(current_process->Is64BitProcess()); | ||||
|             cores[core_id].Initialize((*current_process).Is64BitProcess()); | ||||
|             system.Memory().SetCurrentPageTable(*current_process, core_id); | ||||
|         } | ||||
|     } | ||||
|  | @ -168,11 +168,11 @@ struct KernelCore::Impl { | |||
| 
 | ||||
|         // Shutdown all processes.
 | ||||
|         if (current_process) { | ||||
|             current_process->Finalize(); | ||||
|             (*current_process).Finalize(); | ||||
|             // current_process->Close();
 | ||||
|             // TODO: The current process should be destroyed based on accurate ref counting after
 | ||||
|             // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
 | ||||
|             current_process->Destroy(); | ||||
|             (*current_process).Destroy(); | ||||
|             current_process = nullptr; | ||||
|         } | ||||
| 
 | ||||
|  | @ -704,7 +704,7 @@ struct KernelCore::Impl { | |||
| 
 | ||||
|     // Lists all processes that exist in the current session.
 | ||||
|     std::vector<KProcess*> process_list; | ||||
|     KProcess* current_process{}; | ||||
|     std::atomic<KProcess*> current_process{}; | ||||
|     std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context; | ||||
|     Kernel::TimeManager time_manager; | ||||
| 
 | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue