diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 6c44a9e998..4167ade2b5 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -45,7 +45,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
     m_is_mapped = false;
 
     // We succeeded.
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KCodeMemory::Finalize() {
@@ -80,7 +80,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
     // Mark ourselves as mapped.
     m_is_mapped = true;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KCodeMemory::Unmap(VAddr address, size_t size) {
@@ -97,7 +97,7 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
     // Mark ourselves as unmapped.
     m_is_mapped = false;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
@@ -131,7 +131,7 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
     // Mark ourselves as mapped.
     m_is_owner_mapped = true;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
@@ -147,7 +147,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
     // Mark ourselves as unmapped.
     m_is_owner_mapped = false;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 } // namespace Kernel
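
Note on the return-macro change above: the bare `return ResultSuccess;` statements are replaced with the kernel's `R_SUCCEED()` macro so that success, failure, and pass-through returns all go through the same `R_*` family. A minimal sketch of what these macros amount to, assuming they reduce to plain returns (the real definitions in yuzu's result headers may do more, e.g. cooperate with result-guard scopes, so treat this only as an illustration):

```cpp
// Simplified, illustrative stand-ins for the R_* result macros used in this
// patch; the real macros live in the result headers and may do more than a
// bare return.
#define R_SUCCEED()          return ResultSuccess
#define R_THROW(result)      return (result)
#define R_RETURN(expr)       return (expr)
#define R_UNLESS(cond, result)  \
    do {                        \
        if (!(cond)) {          \
            R_THROW(result);    \
        }                       \
    } while (0)
#define R_SUCCEED_IF(cond)      \
    do {                        \
        if (cond) {             \
            R_SUCCEED();        \
        }                       \
    } while (0)
```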
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 458f4c94ea..b759576883 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -98,17 +98,17 @@ public:
 
 } // namespace
 
-KConditionVariable::KConditionVariable(Core::System& system_)
-    : system{system_}, kernel{system.Kernel()} {}
+KConditionVariable::KConditionVariable(Core::System& system)
+    : m_system{system}, m_kernel{system.Kernel()} {}
 
 KConditionVariable::~KConditionVariable() = default;
 
 Result KConditionVariable::SignalToAddress(VAddr addr) {
-    KThread* owner_thread = GetCurrentThreadPointer(kernel);
+    KThread* owner_thread = GetCurrentThreadPointer(m_kernel);
 
     // Signal the address.
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
         // Remove waiter thread.
         bool has_waiters{};
@@ -129,7 +129,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
 
         // Write the value to userspace.
         Result result{ResultSuccess};
-        if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
+        if (WriteToUser(m_system, addr, std::addressof(next_value))) [[likely]] {
             result = ResultSuccess;
         } else {
             result = ResultInvalidCurrentMemory;
@@ -145,26 +145,27 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
 }
 
 Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
-    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
+    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
+    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);
 
     // Wait for the address.
     KThread* owner_thread{};
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
         // Check if the thread should terminate.
         R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
 
         // Read the tag from userspace.
         u32 test_tag{};
-        R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
+        R_UNLESS(ReadFromUser(m_system, std::addressof(test_tag), addr),
+                 ResultInvalidCurrentMemory);
 
         // If the tag isn't the handle (with wait mask), we're done.
         R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
 
         // Get the lock owner thread.
-        owner_thread = GetCurrentProcess(kernel)
+        owner_thread = GetCurrentProcess(m_kernel)
                            .GetHandleTable()
                            .GetObjectWithoutPseudoHandle<KThread>(handle)
                            .ReleasePointerUnsafe();
@@ -184,12 +185,12 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
     owner_thread->Close();
 
     // Get the wait result.
-    return cur_thread->GetWaitResult();
+    R_RETURN(cur_thread->GetWaitResult());
 }
 
 void KConditionVariable::SignalImpl(KThread* thread) {
     // Check pre-conditions.
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Update the tag.
     VAddr address = thread->GetAddressKey();
@@ -204,7 +205,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
         // TODO(bunnei): We should call CanAccessAtomic(..) here.
         can_access = true;
         if (can_access) [[likely]] {
-            UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
+            UpdateLockAtomic(m_system, std::addressof(prev_tag), address, own_tag,
                              Svc::HandleWaitMask);
         }
     }
@@ -215,7 +216,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
             thread->EndWait(ResultSuccess);
         } else {
             // Get the previous owner.
-            KThread* owner_thread = GetCurrentProcess(kernel)
+            KThread* owner_thread = GetCurrentProcess(m_kernel)
                                         .GetHandleTable()
                                         .GetObjectWithoutPseudoHandle<KThread>(
                                             static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
@@ -240,14 +241,14 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
-        auto it = thread_tree.nfind_key({cv_key, -1});
-        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+        auto it = m_tree.nfind_key({cv_key, -1});
+        while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetConditionVariableKey() == cv_key)) {
             KThread* target_thread = std::addressof(*it);
 
-            it = thread_tree.erase(it);
+            it = m_tree.erase(it);
             target_thread->ClearConditionVariable();
 
             this->SignalImpl(target_thread);
@@ -256,27 +257,27 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
         }
 
         // If we have no waiters, clear the has waiter flag.
-        if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
+        if (it == m_tree.end() || it->GetConditionVariableKey() != cv_key) {
             const u32 has_waiter_flag{};
-            WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
+            WriteToUser(m_system, cv_key, std::addressof(has_waiter_flag));
         }
     }
 }
 
 Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
     // Prepare to wait.
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
-    ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
-        kernel, std::addressof(thread_tree));
+    ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(m_kernel,
+                                                                         std::addressof(m_tree));
 
     {
-        KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), cur_thread, timeout);
+        KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), cur_thread, timeout);
 
         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
             slp.CancelSleep();
-            return ResultTerminationRequested;
+            R_THROW(ResultTerminationRequested);
         }
 
         // Update the value and process for the next owner.
@@ -302,14 +303,14 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
             // Write to the cv key.
             {
                 const u32 has_waiter_flag = 1;
-                WriteToUser(system, key, std::addressof(has_waiter_flag));
-                // TODO(bunnei): We should call DataMemoryBarrier(..) here.
+                WriteToUser(m_system, key, std::addressof(has_waiter_flag));
+                std::atomic_thread_fence(std::memory_order_seq_cst);
             }
 
             // Write the value to userspace.
-            if (!WriteToUser(system, addr, std::addressof(next_value))) {
+            if (!WriteToUser(m_system, addr, std::addressof(next_value))) {
                 slp.CancelSleep();
-                return ResultInvalidCurrentMemory;
+                R_THROW(ResultInvalidCurrentMemory);
             }
         }
 
@@ -317,8 +318,8 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
         R_UNLESS(timeout != 0, ResultTimedOut);
 
         // Update condition variable tracking.
-        cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
-        thread_tree.insert(*cur_thread);
+        cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value);
+        m_tree.insert(*cur_thread);
 
         // Begin waiting.
         wait_queue.SetHardwareTimer(timer);
@@ -328,7 +329,7 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
     }
 
     // Get the wait result.
-    return cur_thread->GetWaitResult();
+    R_RETURN(cur_thread->GetWaitResult());
 }
 
 } // namespace Kernel
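
The condition-variable paths above all revolve around a 32-bit lock word in guest memory: it holds the owning thread's handle, and `Svc::HandleWaitMask` is OR'd in once a waiter has been parked in the kernel's thread tree (see `handle | Svc::HandleWaitMask` and `prev_tag & ~Svc::HandleWaitMask` in the hunks). A userspace analogue of that tag update, as a sketch only: `kWaitMask` is a stand-in for the real `Svc::HandleWaitMask` value, and the kernel code goes through `ReadFromUser`/`WriteToUser`/`UpdateLockAtomic` on guest memory rather than a host `std::atomic`.

```cpp
#include <atomic>
#include <cstdint>

// Stand-in for Svc::HandleWaitMask; the exact bit used here is an assumption.
constexpr std::uint32_t kWaitMask = 1u << 30;

// Before sleeping, WaitForAddress-style code marks the lock word as contended
// so the owner knows to go through the kernel on unlock. Returns the value
// observed before the update.
std::uint32_t MarkContended(std::atomic<std::uint32_t>& lock_word,
                            std::uint32_t owner_handle) {
    std::uint32_t expected = owner_handle;
    // Only flips the bit if the word still names the expected owner;
    // otherwise the caller re-examines the observed value.
    lock_word.compare_exchange_strong(expected, owner_handle | kWaitMask,
                                      std::memory_order_acq_rel);
    return expected;
}

// On signal, the new owner's handle is written back; if nobody else is
// parked, the wait bit is simply left out of next_value.
void PublishNewOwner(std::atomic<std::uint32_t>& lock_word, std::uint32_t next_value) {
    lock_word.store(next_value, std::memory_order_release);
}
```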
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index fad4ed0110..41635a8948 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -21,24 +21,24 @@ class KConditionVariable {
 public:
     using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
 
-    explicit KConditionVariable(Core::System& system_);
+    explicit KConditionVariable(Core::System& system);
     ~KConditionVariable();
 
     // Arbitration
-    [[nodiscard]] Result SignalToAddress(VAddr addr);
-    [[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value);
+    Result SignalToAddress(VAddr addr);
+    Result WaitForAddress(Handle handle, VAddr addr, u32 value);
 
     // Condition variable
     void Signal(u64 cv_key, s32 count);
-    [[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
+    Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
 
 private:
     void SignalImpl(KThread* thread);
 
-    ThreadTree thread_tree;
-
-    Core::System& system;
-    KernelCore& kernel;
+private:
+    Core::System& m_system;
+    KernelCore& m_kernel;
+    ThreadTree m_tree{};
 };
 
 inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp
index 8fce2bc712..6d5a815aab 100644
--- a/src/core/hle/kernel/k_light_condition_variable.cpp
+++ b/src/core/hle/kernel/k_light_condition_variable.cpp
@@ -13,9 +13,9 @@ namespace {
 
 class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
 public:
-    ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl,
+    ThreadQueueImplForKLightConditionVariable(KernelCore& kernel, KThread::WaiterList* wl,
                                               bool term)
-        : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
+        : KThreadQueue(kernel), m_wait_list(wl), m_allow_terminating_thread(term) {}
 
     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Only process waits if we're allowed to.
@@ -39,15 +39,15 @@ private:
 
 void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
     // Create thread queue.
-    KThread* owner = GetCurrentThreadPointer(kernel);
+    KThread* owner = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
 
-    ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list),
+    ThreadQueueImplForKLightConditionVariable wait_queue(m_kernel, std::addressof(m_wait_list),
                                                          allow_terminating_thread);
 
     // Sleep the thread.
     {
-        KScopedSchedulerLockAndSleep lk(kernel, std::addressof(timer), owner, timeout);
+        KScopedSchedulerLockAndSleep lk(m_kernel, std::addressof(timer), owner, timeout);
 
         if (!allow_terminating_thread && owner->IsTerminationRequested()) {
             lk.CancelSleep();
@@ -57,7 +57,7 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
         lock->Unlock();
 
         // Add the thread to the queue.
-        wait_list.push_back(*owner);
+        m_wait_list.push_back(*owner);
 
         // Begin waiting.
         wait_queue.SetHardwareTimer(timer);
@@ -69,10 +69,10 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
 }
 
 void KLightConditionVariable::Broadcast() {
-    KScopedSchedulerLock lk(kernel);
+    KScopedSchedulerLock lk(m_kernel);
 
     // Signal all threads.
-    for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
+    for (auto it = m_wait_list.begin(); it != m_wait_list.end(); it = m_wait_list.erase(it)) {
         it->EndWait(ResultSuccess);
     }
 }
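
For context on how `Wait()`/`Broadcast()` above are meant to be driven: the caller holds a `KLightLock`, checks a predicate, and `Wait()` drops the lock while the thread sleeps on `m_wait_list`, re-taking it before returning. A usage sketch under those assumptions; the `Example` type, its members, and `m_ready` are hypothetical names, and the relevant kernel headers are assumed to be included.

```cpp
// Hypothetical consumer pairing KLightLock with KLightConditionVariable.
class Example {
public:
    explicit Example(KernelCore& kernel) : m_lock{kernel}, m_cv{kernel} {}

    void WaitUntilReady() {
        m_lock.Lock();
        while (!m_ready) {
            // Wait() releases m_lock while the thread sleeps and is expected
            // to re-acquire it before returning, so the predicate re-check
            // happens under the lock.
            m_cv.Wait(std::addressof(m_lock));
        }
        m_lock.Unlock();
    }

    void SetReady() {
        m_lock.Lock();
        m_ready = true;
        m_cv.Broadcast();
        m_lock.Unlock();
    }

private:
    KLightLock m_lock;
    KLightConditionVariable m_cv;
    bool m_ready{};
};
```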
diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h
index 3cabd6b4f0..ab612426d6 100644
--- a/src/core/hle/kernel/k_light_condition_variable.h
+++ b/src/core/hle/kernel/k_light_condition_variable.h
@@ -13,13 +13,13 @@ class KLightLock;
 
 class KLightConditionVariable {
 public:
-    explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}
+    explicit KLightConditionVariable(KernelCore& kernel) : m_kernel{kernel} {}
 
     void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
     void Broadcast();
 
 private:
-    KernelCore& kernel;
-    KThread::WaiterList wait_list{};
+    KernelCore& m_kernel;
+    KThread::WaiterList m_wait_list{};
 };
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 14cb615da7..e87ee8b652 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -13,7 +13,7 @@ namespace {
 
 class ThreadQueueImplForKLightLock final : public KThreadQueue {
 public:
-    explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
+    explicit ThreadQueueImplForKLightLock(KernelCore& kernel) : KThreadQueue(kernel) {}
 
     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Remove the thread as a waiter from its owner.
@@ -29,13 +29,13 @@ public:
 } // namespace
 
 void KLightLock::Lock() {
-    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));
 
     while (true) {
-        uintptr_t old_tag = tag.load(std::memory_order_relaxed);
+        uintptr_t old_tag = m_tag.load(std::memory_order_relaxed);
 
-        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
-                                          std::memory_order_acquire)) {
+        while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
+                                            std::memory_order_acquire)) {
         }
 
         if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
@@ -45,30 +45,30 @@ void KLightLock::Lock() {
 }
 
 void KLightLock::Unlock() {
-    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));
 
     uintptr_t expected = cur_thread;
-    if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
+    if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
         this->UnlockSlowPath(cur_thread);
     }
 }
 
 bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
     KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
-    ThreadQueueImplForKLightLock wait_queue(kernel);
+    ThreadQueueImplForKLightLock wait_queue(m_kernel);
 
     // Pend the current thread waiting on the owner thread.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
 
         // Ensure we actually have locking to do.
-        if (tag.load(std::memory_order_relaxed) != _owner) {
+        if (m_tag.load(std::memory_order_relaxed) != _owner) {
             return false;
         }
 
         // Add the current thread as a waiter on the owner.
         KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
-        cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
+        cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
         owner_thread->AddWaiter(cur_thread);
 
         // Begin waiting to hold the lock.
@@ -87,12 +87,12 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
 
     // Unlock.
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
         // Get the next owner.
         bool has_waiters;
         KThread* next_owner = owner_thread->RemoveKernelWaiterByKey(
-            std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
+            std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
 
         // Pass the lock to the next owner.
         uintptr_t next_tag = 0;
@@ -114,12 +114,13 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
         }
 
         // Write the new tag value.
-        tag.store(next_tag, std::memory_order_release);
+        m_tag.store(next_tag, std::memory_order_release);
     }
 }
 
 bool KLightLock::IsLockedByCurrentThread() const {
-    return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL);
+    return (m_tag.load() | 1ULL) ==
+           (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel)) | 1ULL);
 }
 
 } // namespace Kernel
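
The lock above encodes its whole state in one `std::atomic<uintptr_t>`: 0 when free, the owner's `KThread*` when held uncontended, and `owner | 1` once a waiter has entered the slow path (thread objects are sufficiently aligned that bit 0 is spare). A standalone sketch of just the two uncontended fast paths, using a fake thread type in place of `KThread`:

```cpp
#include <atomic>
#include <cstdint>

// Stand-in for KThread; any suitably aligned object works, since only the
// pointer's low bit matters for the encoding.
struct FakeThread {
    alignas(8) int dummy;
};

// Uncontended acquire: 0 -> owner pointer (mirrors the first CAS in Lock()).
bool TryLockFast(std::atomic<std::uintptr_t>& tag, FakeThread* self) {
    std::uintptr_t expected = 0;
    return tag.compare_exchange_strong(expected,
                                       reinterpret_cast<std::uintptr_t>(self),
                                       std::memory_order_acquire);
}

// Uncontended release: owner pointer -> 0 (mirrors the CAS in Unlock()).
// If the waiters bit has been set, this CAS fails and the real code takes
// UnlockSlowPath() to hand the lock to the next waiter.
bool UnlockFast(std::atomic<std::uintptr_t>& tag, FakeThread* self) {
    std::uintptr_t expected = reinterpret_cast<std::uintptr_t>(self);
    return tag.compare_exchange_strong(expected, 0, std::memory_order_release);
}
```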
diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h
index 7edd950c05..626f57596f 100644
--- a/src/core/hle/kernel/k_light_lock.h
+++ b/src/core/hle/kernel/k_light_lock.h
@@ -13,7 +13,7 @@ class KernelCore;
 
 class KLightLock {
 public:
-    explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {}
+    explicit KLightLock(KernelCore& kernel) : m_kernel{kernel} {}
 
     void Lock();
 
@@ -24,14 +24,14 @@ public:
     void UnlockSlowPath(uintptr_t cur_thread);
 
     bool IsLocked() const {
-        return tag != 0;
+        return m_tag.load() != 0;
     }
 
     bool IsLockedByCurrentThread() const;
 
 private:
-    std::atomic<uintptr_t> tag{};
-    KernelCore& kernel;
+    std::atomic<uintptr_t> m_tag{};
+    KernelCore& m_kernel;
 };
 
 using KScopedLightLock = KScopedLock<KLightLock>;
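
The `KScopedLightLock` alias at the end is the usual way these locks are taken in the kernel; `KScopedLock` is assumed to call `Lock()` on construction and `Unlock()` on destruction. A brief usage sketch (the function and its body are illustrative only):

```cpp
// Illustrative only: RAII acquisition via the alias declared above.
void DoWorkLocked(KLightLock& lock) {
    KScopedLightLock lk{lock};  // Lock() here, Unlock() when lk leaves scope
    // ... critical section protected by `lock` ...
}
```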