From 632a75eee7db35fb7b094f3eead1163a2b9f9d11 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Wed, 22 Jul 2020 03:29:27 -0700
Subject: [PATCH] kern: KConditionVariable::SignalImpl

---
 .../arm64/kern_userspace_memory_access.hpp    |  1 +
 .../include/mesosphere/kern_k_auto_object.hpp |  2 +
 .../arm64/kern_userspace_memory_access_asm.s  | 28 +++++++++++
 .../source/kern_k_condition_variable.cpp      | 50 ++++++++++++++++++-
 4 files changed, 80 insertions(+), 1 deletion(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp
index 6783d399d..38c6ab9f5 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp
@@ -39,6 +39,7 @@ namespace ams::kern::arch::arm64 {
             static bool ClearMemoryAligned64Bit(void *dst, size_t size);
             static bool ClearMemorySize32Bit(void *dst);
 
+            static bool UpdateLockAtomic(u32 *out, u32 *address, u32 if_zero, u32 new_orr_mask);
             static bool UpdateIfEqualAtomic(s32 *out, s32 *address, s32 compare_value, s32 new_value);
             static bool DecrementIfLessThanAtomic(s32 *out, s32 *address, s32 compare);
 
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp
index 37f3f245b..a9c4239fe 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp
@@ -239,6 +239,8 @@ namespace ams::kern {
 
             constexpr ALWAYS_INLINE T *GetPointerUnsafe() { return this->obj; }
 
+            constexpr ALWAYS_INLINE T *ReleasePointerUnsafe() { T *ret = this->obj; this->obj = nullptr; return ret; }
+
             constexpr ALWAYS_INLINE bool IsNull() const { return this->obj == nullptr; }
             constexpr ALWAYS_INLINE bool IsNotNull() const { return this->obj != nullptr; }
     };
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s
index 5752a93b2..d9f7de920 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s
+++ b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s
@@ -428,6 +428,34 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv:
     mov     x0, #1
     ret
 
+/* ams::kern::arch::arm64::UserspaceAccess::UpdateLockAtomic(u32 *out, u32 *address, u32 if_zero, u32 new_orr_mask) */
+.section    .text._ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj, "ax", %progbits
+.global     _ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj
+.type       _ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj:
+    /* Load the value from the address. */
+    ldaxr   w4, [x1]
+
+    /* Orr in the new mask. */
+    orr     w5, w4, w3
+
+    /* If the value is zero, use the if_zero value, otherwise use the newly orr'd value. */
+    cmp     w4, wzr
+    csel    w5, w2, w5, eq
+
+    /* Try to store. */
+    stlxr   w6, w5, [x1]
+
+    /* If we failed to store, try again. */
+    cbnz    w6, _ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj
+
+    /* We're done. */
+    str     w4, [x0]
+    mov     x0, #1
+    ret
+
+
 /* ams::kern::arch::arm64::UserspaceAccess::UpdateIfEqualAtomic(s32 *out, s32 *address, s32 compare_value, s32 new_value) */
 .section    .text._ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii, "ax", %progbits
 .global     _ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii
diff --git a/libraries/libmesosphere/source/kern_k_condition_variable.cpp b/libraries/libmesosphere/source/kern_k_condition_variable.cpp
index 3ddf001b4..270c3c8ac 100644
--- a/libraries/libmesosphere/source/kern_k_condition_variable.cpp
+++ b/libraries/libmesosphere/source/kern_k_condition_variable.cpp
@@ -29,6 +29,10 @@ namespace ams::kern {
             return UserspaceAccess::CopyMemoryToUserSize32Bit(GetVoidPointer(address), p);
         }
 
+        ALWAYS_INLINE bool UpdateLockAtomic(u32 *out, KProcessAddress address, u32 if_zero, u32 new_orr_mask) {
+            return UserspaceAccess::UpdateLockAtomic(out, GetPointer<u32>(address), if_zero, new_orr_mask);
+        }
+
     }
 
     Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
@@ -117,7 +121,51 @@ namespace ams::kern {
     }
 
     KThread *KConditionVariable::SignalImpl(KThread *thread) {
-        MESOSPHERE_UNIMPLEMENTED();
+        /* Check pre-conditions. */
+        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
+
+        /* Update the tag. */
+        KProcessAddress address = thread->GetAddressKey();
+        u32 own_tag = thread->GetAddressKeyValue();
+
+        u32 prev_tag;
+        bool can_access;
+        {
+            KScopedInterruptDisable di;
+
+            can_access = cpu::CanAccessAtomic(address);
+            if (AMS_LIKELY(can_access)) {
+                UpdateLockAtomic(std::addressof(prev_tag), address, own_tag, ams::svc::HandleWaitMask);
+            }
+        }
+
+        KThread *thread_to_close = nullptr;
+        if (AMS_LIKELY(can_access)) {
+            if (prev_tag == ams::svc::InvalidHandle) {
+                /* If nobody held the lock previously, we're all good. */
+                thread->SetSyncedObject(nullptr, ResultSuccess());
+                thread->Wakeup();
+            } else {
+                /* Get the previous owner. */
+                KThread *owner_thread = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(static_cast<ams::svc::Handle>(prev_tag & ~ams::svc::HandleWaitMask)).ReleasePointerUnsafe();
+
+                if (AMS_LIKELY(owner_thread != nullptr)) {
+                    /* Add the thread as a waiter on the owner. */
+                    owner_thread->AddWaiter(thread);
+                    thread_to_close = owner_thread;
+                } else {
+                    /* The lock was tagged with a thread that doesn't exist. */
+                    thread->SetSyncedObject(nullptr, svc::ResultInvalidState());
+                    thread->Wakeup();
+                }
+            }
+        } else {
+            /* If the address wasn't accessible, note so. */
+            thread->SetSyncedObject(nullptr, svc::ResultInvalidCurrentMemory());
+            thread->Wakeup();
+        }
+
+        return thread_to_close;
     }
 
     void KConditionVariable::Signal(uintptr_t cv_key, s32 count) {
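
A note on the new primitive: UserspaceAccess::UpdateLockAtomic is an LDAXR/STLXR read-modify-write loop over a userspace word. In portable terms: if the word is zero, store if_zero; otherwise OR in new_orr_mask; either way, report the value seen before the update. A minimal C++ sketch of those semantics follows. The function name is hypothetical, the std::atomic stands in for the raw user pointer, and the fault handling that lets the real routine return false on a bad user address is omitted:

    #include <atomic>
    #include <cstdint>

    using u32 = std::uint32_t;

    /* Hypothetical stand-in for UserspaceAccess::UpdateLockAtomic, for illustration only. */
    bool UpdateLockAtomicSketch(u32 *out, std::atomic<u32> *address, u32 if_zero, u32 new_orr_mask) {
        /* Mirrors ldaxr: observe the current value. */
        u32 expected = address->load(std::memory_order_relaxed);

        while (true) {
            /* Mirrors orr/cmp/csel: choose the replacement value. */
            const u32 desired = (expected == 0) ? if_zero : (expected | new_orr_mask);

            /* Mirrors stlxr/cbnz: publish, retrying if another core raced us. */
            if (address->compare_exchange_weak(expected, desired, std::memory_order_acq_rel, std::memory_order_relaxed)) {
                /* Mirrors str w4, [x0]: hand back the pre-update value. */
                *out = expected;
                return true;
            }
        }
    }

Returning the pre-update value is the point of the routine: it is what lets SignalImpl distinguish "the lock was free" from "the lock is held by the thread behind handle X".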
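
SignalImpl then branches entirely on that previous tag. A compact sketch of the decoding, assuming (as an illustration, not taken from this patch) that ams::svc::InvalidHandle is 0 and ams::svc::HandleWaitMask is a single high bit:

    #include <cstdint>

    using u32 = std::uint32_t;

    /* Assumed values mirroring the ams::svc constants; illustrative only. */
    constexpr u32 InvalidHandle  = 0;
    constexpr u32 HandleWaitMask = 1u << 30;

    /* Hypothetical decoder showing the two facts SignalImpl extracts from prev_tag. */
    struct DecodedTag {
        bool lock_was_free; /* prev_tag == InvalidHandle: wake the signaled thread at once. */
        u32  owner_handle;  /* Otherwise: resolve this handle and wait on that thread. */
    };

    constexpr DecodedTag DecodePreviousTag(u32 prev_tag) {
        if (prev_tag == InvalidHandle) {
            return { true, InvalidHandle };
        }
        /* Clear the waiters bit to recover the raw handle of the lock's owner. */
        return { false, prev_tag & ~HandleWaitMask };
    }

    static_assert(DecodePreviousTag(0).lock_was_free);
    static_assert(DecodePreviousTag((1u << 30) | 0xABCD).owner_handle == 0xABCD);

The third outcome in SignalImpl, svc::ResultInvalidState, covers the case where the recovered handle no longer resolves to a live thread.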
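
Finally, the new KScopedAutoObject::ReleasePointerUnsafe exists so SignalImpl can keep the owner thread alive past the handle-table lookup: the lookup yields a scoped object that would Close() its reference at end of scope, and releasing the raw pointer defers that Close() to SignalImpl's caller via the thread_to_close return value. A generic sketch of the pattern, with a hypothetical ScopedRef standing in for KScopedAutoObject:

    /* Hypothetical minimal analogue of KScopedAutoObject, for illustration only. */
    template<typename T>
    class ScopedRef {
        private:
            T *m_obj;
        public:
            /* Adopts an already-opened reference. */
            explicit ScopedRef(T *obj) : m_obj(obj) { /* ... */ }

            /* Drops the reference, unless it was released to the caller. */
            ~ScopedRef() { if (m_obj != nullptr) { m_obj->Close(); } }

            T *GetPointerUnsafe() { return m_obj; }

            /* Transfers ownership out: the destructor becomes a no-op, and
               whoever takes the returned pointer must Close() it later. */
            T *ReleasePointerUnsafe() { T *ret = m_obj; m_obj = nullptr; return ret; }
    };

Deferring the Close() matters here because SignalImpl asserts it runs with the scheduler locked; handing the open reference back lets the caller drop it once it is safe to do so.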