diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp index 5cd809d4f..36bbd1af8 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp @@ -37,16 +37,12 @@ namespace ams::kern { uintptr_t old_tag = m_tag.load(std::memory_order_relaxed); while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) { - if ((old_tag | 1) == cur_thread_tag) { - return; - } + /* ... */ } - if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) { + if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) { break; } - - this->LockSlowPath(old_tag | 1, cur_thread); } } @@ -54,15 +50,14 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()); + uintptr_t expected = cur_thread; - do { - if (expected != cur_thread) { - return this->UnlockSlowPath(cur_thread); - } - } while (!m_tag.compare_exchange_weak(expected, 0, std::memory_order_release)); + if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) { + this->UnlockSlowPath(cur_thread); + } } - void LockSlowPath(uintptr_t owner, uintptr_t cur_thread); + bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread); void UnlockSlowPath(uintptr_t cur_thread); ALWAYS_INLINE bool IsLocked() const { return m_tag.load() != 0; } diff --git a/libraries/libmesosphere/source/kern_k_light_lock.cpp b/libraries/libmesosphere/source/kern_k_light_lock.cpp index e463bfe9f..f1949ce8d 100644 --- a/libraries/libmesosphere/source/kern_k_light_lock.cpp +++ b/libraries/libmesosphere/source/kern_k_light_lock.cpp @@ -31,7 +31,7 @@ namespace ams::kern { } - void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { + bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { KThread *cur_thread = 
reinterpret_cast<KThread *>(_cur_thread); ThreadQueueImplForKLightLock wait_queue; @@ -40,8 +40,8 @@ namespace ams::kern { KScopedSchedulerLock sl; /* Ensure we actually have locking to do. */ - if (AMS_UNLIKELY(m_tag.load(std::memory_order_relaxed) != _owner)) { - return; + if (m_tag.load(std::memory_order_relaxed) != _owner) { + return false; } /* Add the current thread as a waiter on the owner. */ @@ -56,6 +56,8 @@ namespace ams::kern { owner_thread->ContinueIfHasKernelWaiters(); } } + + return true; } void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {