Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-11-09 22:56:35 +00:00)
kern: unify all waiting semantics to use single api
commit 90732ff311 (parent f6fb5f2c8d)
22 changed files with 904 additions and 683 deletions
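Before the per-file hunks, it may help to see the shape of the API this commit converges on. The sketch below is a self-contained toy model, not Atmosphère code: the queue/thread method names (BeginWait, EndWait, CancelWait) mirror the diff, while all types, results, and scheduling details are simplified assumptions.

/* Toy model of the unified waiting API: a thread parks itself on a queue object with
 * BeginWait(), and whoever wakes it calls a single EndWait()/CancelWait() hook on that
 * queue, which owns the wakeup bookkeeping. */
#include <cstdio>
#include <string>

struct Thread;

struct ThreadQueue {
    virtual ~ThreadQueue() = default;
    /* Subclasses override these to undo their own bookkeeping before waking the thread. */
    virtual void EndWait(Thread *t, const char *result);
    virtual void CancelWait(Thread *t, const char *result);
};

struct Thread {
    ThreadQueue *wait_queue = nullptr;   /* analogue of the new KThread::m_wait_queue */
    std::string wait_result = "pending"; /* analogue of KThread::m_wait_result */

    void BeginWait(ThreadQueue *q) { wait_queue = q; } /* waiter side: one entry point */
    void EndWait(const char *r) {                      /* signaller side: one exit point */
        if (wait_queue != nullptr) { wait_queue->EndWait(this, r); }
    }
};

void ThreadQueue::EndWait(Thread *t, const char *result) {
    t->wait_result = result;
    t->wait_queue = nullptr; /* the thread is no longer waiting */
}

void ThreadQueue::CancelWait(Thread *t, const char *result) { this->EndWait(t, result); }

int main() {
    ThreadQueue queue;
    Thread waiter;
    waiter.BeginWait(&queue);  /* replaces per-primitive SetState(ThreadState_Waiting) bookkeeping */
    waiter.EndWait("success"); /* replaces SetSyncedObject(...) + Wakeup()/SetState(Runnable) pairs */
    std::printf("wait result: %s\n", waiter.wait_result.c_str());
}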
@ -27,59 +27,9 @@ namespace ams::kern {

KThread::WaiterList m_wait_list;
public:
constexpr ALWAYS_INLINE KLightConditionVariable() : m_wait_list() { /* ... */ }
private:
void WaitImpl(KLightLock *lock, s64 timeout, bool allow_terminating_thread) {
KThread *owner = GetCurrentThreadPointer();
KHardwareTimer *timer;

/* Sleep the thread. */
{
KScopedSchedulerLockAndSleep lk(&timer, owner, timeout);

if (!allow_terminating_thread && owner->IsTerminationRequested()) {
lk.CancelSleep();
return;
}

lock->Unlock();

/* Set the thread as waiting. */
GetCurrentThread().SetState(KThread::ThreadState_Waiting);

/* Add the thread to the queue. */
m_wait_list.push_back(GetCurrentThread());
}

/* Remove the thread from the wait list. */
{
KScopedSchedulerLock sl;

m_wait_list.erase(m_wait_list.iterator_to(GetCurrentThread()));
}

/* Cancel the task that the sleep setup. */
if (timer != nullptr) {
timer->CancelTask(owner);
}

/* Re-acquire the lock. */
lock->Lock();
}
public:
void Wait(KLightLock *lock, s64 timeout = -1ll, bool allow_terminating_thread = true) {
this->WaitImpl(lock, timeout, allow_terminating_thread);
}

void Broadcast() {
KScopedSchedulerLock lk;

/* Signal all threads. */
for (auto &thread : m_wait_list) {
thread.SetState(KThread::ThreadState_Runnable);
}
}

void Wait(KLightLock *lock, s64 timeout = -1ll, bool allow_terminating_thread = true);
void Broadcast();
};

}

@ -27,12 +27,12 @@ namespace ams::kern {

MESOSPHERE_AUTOOBJECT_TRAITS(KLightServerSession, KAutoObject);
private:
KLightSession *m_parent;
KThreadQueue m_request_queue;
KThreadQueue m_server_queue;
KThread::WaiterList m_request_list;
KThread *m_current_request;
u64 m_server_thread_id;
KThread *m_server_thread;
public:
constexpr KLightServerSession() : m_parent(), m_request_queue(), m_server_queue(), m_current_request(), m_server_thread() { /* ... */ }
constexpr KLightServerSession() : m_parent(), m_request_list(), m_current_request(), m_server_thread_id(), m_server_thread() { /* ... */ }

void Initialize(KLightSession *parent) {
/* Set member variables. */

@ -45,7 +45,39 @@ namespace ams::kern {

public:
virtual void Finalize() override;
virtual bool IsSignaled() const { AMS_INFINITE_LOOP(); }
virtual void DumpWaiters();

void DumpWaiters();

ALWAYS_INLINE void LinkNode(ThreadListNode *node) {
/* Link the node to the list. */
if (m_thread_list_tail == nullptr) {
m_thread_list_head = node;
} else {
m_thread_list_tail->next = node;
}

m_thread_list_tail = node;
}

ALWAYS_INLINE void UnlinkNode(ThreadListNode *node) {
/* Unlink the node from the list. */
ThreadListNode *prev_ptr = reinterpret_cast<ThreadListNode *>(std::addressof(m_thread_list_head));
ThreadListNode *prev_val = nullptr;
ThreadListNode *prev, *tail_prev;

do {
prev = prev_ptr;
prev_ptr = prev_ptr->next;
tail_prev = prev_val;
prev_val = prev_ptr;
} while (prev_ptr != node);

if (m_thread_list_tail == node) {
m_thread_list_tail = tail_prev;
}

prev->next = node->next;
}
};

}

@ -37,6 +37,7 @@ namespace ams::kern {

friend class KProcess;
friend class KConditionVariable;
friend class KAddressArbiter;
friend class KThreadQueue;
public:
static constexpr s32 MainThreadPriority = 1;
static constexpr s32 IdleThreadPriority = 64;

@ -191,7 +192,6 @@ namespace ams::kern {

KAffinityMask m_physical_affinity_mask{};
u64 m_thread_id{};
std::atomic<s64> m_cpu_time{};
KSynchronizationObject *m_synced_object{};
KProcessAddress m_address_key{};
KProcess *m_parent{};
void *m_kernel_stack_top{};

@ -204,9 +204,7 @@ namespace ams::kern {

s64 m_last_scheduled_tick{};
QueueEntry m_per_core_priority_queue_entry[cpu::NumCores]{};
KLightLock *m_waiting_lock{};

KThreadQueue *m_sleeping_queue{};

KThreadQueue *m_wait_queue{};
WaiterList m_waiter_list{};
WaiterList m_pinned_waiter_list{};
KThread *m_lock_owner{};

@ -215,6 +213,7 @@ namespace ams::kern {

u32 m_address_key_value{};
u32 m_suspend_request_flags{};
u32 m_suspend_allowed_flags{};
s32 m_synced_index{};
Result m_wait_result;
Result m_debug_exception_result;
s32 m_base_priority{};

@ -374,6 +373,8 @@ namespace ams::kern {

void FinishTermination();

void IncreaseBasePriority(s32 priority);

NOINLINE void SetState(ThreadState state);
public:
constexpr u64 GetThreadId() const { return m_thread_id; }

@ -390,7 +391,6 @@ namespace ams::kern {

constexpr ThreadState GetState() const { return static_cast<ThreadState>(m_thread_state & ThreadState_Mask); }
constexpr ThreadState GetRawState() const { return m_thread_state; }
NOINLINE void SetState(ThreadState state);

NOINLINE KThreadContext *GetContextForSchedulerLoop();

@ -442,8 +442,6 @@ namespace ams::kern {

constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return m_per_core_priority_queue_entry[core]; }
constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return m_per_core_priority_queue_entry[core]; }

constexpr void SetSleepingQueue(KThreadQueue *q) { m_sleeping_queue = q; }

constexpr ConditionVariableThreadTree *GetConditionVariableTree() const { return m_condvar_tree; }

constexpr s32 GetNumKernelWaiters() const { return m_num_kernel_waiters; }

@ -460,29 +458,22 @@ namespace ams::kern {

constexpr void SetLockOwner(KThread *owner) { m_lock_owner = owner; }
constexpr KThread *GetLockOwner() const { return m_lock_owner; }

constexpr void SetSyncedObject(KSynchronizationObject *obj, Result wait_res) {
MESOSPHERE_ASSERT_THIS();
constexpr void ClearWaitQueue() { m_wait_queue = nullptr; }

m_synced_object = obj;
m_wait_result = wait_res;
}
void BeginWait(KThreadQueue *queue);
void NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result);
void EndWait(Result wait_result);
void CancelWait(Result wait_result, bool cancel_timer_task);

constexpr Result GetWaitResult(KSynchronizationObject **out) const {
MESOSPHERE_ASSERT_THIS();
constexpr void SetSyncedIndex(s32 index) { m_synced_index = index; }
constexpr s32 GetSyncedIndex() const { return m_synced_index; }

*out = m_synced_object;
return m_wait_result;
}
constexpr void SetWaitResult(Result wait_res) { m_wait_result = wait_res; }
constexpr Result GetWaitResult() const { return m_wait_result; }

constexpr void SetDebugExceptionResult(Result result) {
MESOSPHERE_ASSERT_THIS();
m_debug_exception_result = result;
}
constexpr void SetDebugExceptionResult(Result result) { m_debug_exception_result = result; }

constexpr Result GetDebugExceptionResult() const {
MESOSPHERE_ASSERT_THIS();
return m_debug_exception_result;
}
constexpr Result GetDebugExceptionResult() const { return m_debug_exception_result; }

void WaitCancel();

@ -585,8 +576,6 @@ namespace ams::kern {

}
}

void Wakeup();

void SetBasePriority(s32 priority);
Result SetPriorityToIdle();

@ -16,69 +16,28 @@

#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_select_hardware_timer.hpp>

namespace ams::kern {

class KThreadQueue {
private:
KThread::WaiterList m_wait_list;
KHardwareTimer *m_hardware_timer;
public:
constexpr ALWAYS_INLINE KThreadQueue() : m_wait_list() { /* ... */ }
constexpr ALWAYS_INLINE KThreadQueue() : m_hardware_timer(nullptr) { /* ... */ }

bool IsEmpty() const { return m_wait_list.empty(); }
constexpr void SetHardwareTimer(KHardwareTimer *timer) { m_hardware_timer = timer; }

KThread::WaiterList::iterator begin() { return m_wait_list.begin(); }
KThread::WaiterList::iterator end() { return m_wait_list.end(); }
virtual void NotifyAvailable(KThread *waiting_thread, KSynchronizationObject *signaled_object, Result wait_result);
virtual void EndWait(KThread *waiting_thread, Result wait_result);
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task);
};

bool SleepThread(KThread *t) {
KScopedSchedulerLock sl;
class KThreadQueueWithoutEndWait : public KThreadQueue {
public:
constexpr ALWAYS_INLINE KThreadQueueWithoutEndWait() : KThreadQueue() { /* ... */ }

/* If the thread needs terminating, don't enqueue it. */
if (t->IsTerminationRequested()) {
return false;
}

/* Set the thread's queue and mark it as waiting. */
t->SetSleepingQueue(this);
t->SetState(KThread::ThreadState_Waiting);

/* Add the thread to the queue. */
m_wait_list.push_back(*t);

return true;
}

void WakeupThread(KThread *t) {
KScopedSchedulerLock sl;

/* Remove the thread from the queue. */
m_wait_list.erase(m_wait_list.iterator_to(*t));

/* Mark the thread as no longer sleeping. */
t->SetState(KThread::ThreadState_Runnable);
t->SetSleepingQueue(nullptr);
}

KThread *WakeupFrontThread() {
KScopedSchedulerLock sl;

if (m_wait_list.empty()) {
return nullptr;
} else {
/* Remove the thread from the queue. */
auto it = m_wait_list.begin();
KThread *thread = std::addressof(*it);
m_wait_list.erase(it);

MESOSPHERE_ASSERT(thread->GetState() == KThread::ThreadState_Waiting);

/* Mark the thread as no longer sleeping. */
thread->SetState(KThread::ThreadState_Runnable);
thread->SetSleepingQueue(nullptr);

return thread;
}
}
virtual void EndWait(KThread *waiting_thread, Result wait_result) override final;
};

}

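The .cpp hunks that follow all apply the same pattern: each primitive derives a small queue type whose CancelWait() override undoes that primitive's own bookkeeping before delegating to the base class. The following is a toy model, not Atmosphère code; the container and thread types are stand-ins introduced only for illustration.

#include <cstdio>
#include <list>

struct Thread { const char *name; };

class ThreadQueue {
    public:
        virtual ~ThreadQueue() = default;
        virtual void CancelWait(Thread *t, const char *reason) {
            std::printf("%s: wait cancelled (%s)\n", t->name, reason);
        }
};

/* Loose analogue of ThreadQueueImplForKAddressArbiter and friends: also remove the
 * thread from the container the primitive used to track it. */
class ThreadQueueWithList final : public ThreadQueue {
    private:
        std::list<Thread *> *m_list;
    public:
        explicit ThreadQueueWithList(std::list<Thread *> *l) : m_list(l) { /* ... */ }

        virtual void CancelWait(Thread *t, const char *reason) override {
            m_list->remove(t);                  /* primitive-specific cleanup */
            ThreadQueue::CancelWait(t, reason); /* then the shared cancel path */
        }
};

int main() {
    std::list<Thread *> waiters;
    Thread t{"thread-a"};
    waiters.push_back(&t);

    ThreadQueueWithList queue(&waiters);
    queue.CancelWait(&t, "termination requested");
    std::printf("waiters left: %zu\n", waiters.size());
}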
@ -20,14 +20,13 @@

namespace ams::kern {

class KWaitObject : public KTimerTask {
class KWaitObject {
private:
KThread::WaiterList m_wait_list;
bool m_timer_used;
KThread *m_next_thread;
public:
constexpr KWaitObject() : m_wait_list(), m_timer_used() { /* ... */ }
constexpr KWaitObject() : m_wait_list(), m_next_thread() { /* ... */ }

virtual void OnTimer() override;
Result Synchronize(s64 timeout);
};

@ -32,8 +32,7 @@ namespace ams::kern {

private:
KWorkerTask *m_head_task;
KWorkerTask *m_tail_task;
KThread *m_thread;
bool m_active;
KThread *m_waiting_thread;
private:
static void ThreadFunction(uintptr_t arg);
void ThreadFunctionImpl();

@ -41,7 +40,7 @@ namespace ams::kern {

KWorkerTask *GetTask();
void AddTask(KWorkerTask *task);
public:
constexpr KWorkerTaskManager() : m_head_task(), m_tail_task(), m_thread(), m_active() { /* ... */ }
constexpr KWorkerTaskManager() : m_head_task(), m_tail_task(), m_waiting_thread() { /* ... */ }

NOINLINE void Initialize(s32 priority);
static void AddTask(WorkerType type, KWorkerTask *task);

@ -43,6 +43,24 @@ namespace ams::kern {

return UserspaceAccess::UpdateIfEqualAtomic(out, GetPointer<s32>(address), value, new_value);
}

class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
private:
KAddressArbiter::ThreadTree *m_tree;
public:
constexpr ThreadQueueImplForKAddressArbiter(KAddressArbiter::ThreadTree *t) : KThreadQueue(), m_tree(t) { /* ... */ }

virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* If the thread is waiting on an address arbiter, remove it from the tree. */
if (waiting_thread->IsWaitingForAddressArbiter()) {
m_tree->erase(m_tree->iterator_to(*waiting_thread));
waiting_thread->ClearAddressArbiter();
}

/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};

}

Result KAddressArbiter::Signal(uintptr_t addr, s32 count) {

@ -53,14 +71,14 @@ namespace ams::kern {

auto it = m_tree.nfind_key({ addr, -1 });
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
/* End the thread's wait. */
KThread *target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, ResultSuccess());
target_thread->EndWait(ResultSuccess());

AMS_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();
MESOSPHERE_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();

it = m_tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
}

@ -80,14 +98,14 @@ namespace ams::kern {

auto it = m_tree.nfind_key({ addr, -1 });
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
/* End the thread's wait. */
KThread *target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, ResultSuccess());
target_thread->EndWait(ResultSuccess());

AMS_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();
MESOSPHERE_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();

it = m_tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
}

@ -142,14 +160,14 @@ namespace ams::kern {

R_UNLESS(user_value == value, svc::ResultInvalidState());

while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
/* End the thread's wait. */
KThread *target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, ResultSuccess());
target_thread->EndWait(ResultSuccess());

AMS_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();
MESOSPHERE_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();

it = m_tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
}

@ -160,6 +178,7 @@ namespace ams::kern {

/* Prepare to wait. */
KThread *cur_thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKAddressArbiter wait_queue(std::addressof(m_tree));

{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);

@ -170,9 +189,6 @@ namespace ams::kern {

return svc::ResultTerminationRequested();
}

/* Set the synced object. */
cur_thread->SetSyncedObject(nullptr, ams::svc::ResultTimedOut());

/* Read the value from userspace. */
s32 user_value;
bool succeeded;

@ -202,33 +218,21 @@ namespace ams::kern {

/* Set the arbiter. */
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);
cur_thread->SetState(KThread::ThreadState_Waiting);

/* Wait for the thread to finish. */
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
}

/* Cancel the timer wait. */
if (timer != nullptr) {
timer->CancelTask(cur_thread);
}

/* Remove from the address arbiter. */
{
KScopedSchedulerLock sl;

if (cur_thread->IsWaitingForAddressArbiter()) {
m_tree.erase(m_tree.iterator_to(*cur_thread));
cur_thread->ClearAddressArbiter();
}
}

/* Get the result. */
KSynchronizationObject *dummy;
return cur_thread->GetWaitResult(std::addressof(dummy));
/* Get the wait result. */
return cur_thread->GetWaitResult();
}

Result KAddressArbiter::WaitIfEqual(uintptr_t addr, s32 value, s64 timeout) {
/* Prepare to wait. */
KThread *cur_thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKAddressArbiter wait_queue(std::addressof(m_tree));

{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);

@ -239,9 +243,6 @@ namespace ams::kern {

return svc::ResultTerminationRequested();
}

/* Set the synced object. */
cur_thread->SetSyncedObject(nullptr, ams::svc::ResultTimedOut());

/* Read the value from userspace. */
s32 user_value;
if (!ReadFromUser(std::addressof(user_value), addr)) {

@ -264,27 +265,14 @@ namespace ams::kern {

/* Set the arbiter. */
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);
cur_thread->SetState(KThread::ThreadState_Waiting);

/* Wait for the thread to finish. */
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
}

/* Cancel the timer wait. */
if (timer != nullptr) {
timer->CancelTask(cur_thread);
}

/* Remove from the address arbiter. */
{
KScopedSchedulerLock sl;

if (cur_thread->IsWaitingForAddressArbiter()) {
m_tree.erase(m_tree.iterator_to(*cur_thread));
cur_thread->ClearAddressArbiter();
}
}

/* Get the result. */
KSynchronizationObject *dummy;
return cur_thread->GetWaitResult(std::addressof(dummy));
/* Get the wait result. */
return cur_thread->GetWaitResult();
}

}

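For readers less familiar with the address arbiter, the semantics being converted above are futex-like: sleep only if the value at an address still matches the expected one, and let a signaller wake up to a given count of waiters. The sketch below is a loose user-space analogue written with standard C++ primitives, not kernel code; the generation counter stands in for the arbiter's tree bookkeeping, and everything else is simplified.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex g_lock;
std::condition_variable g_cv;
int g_value = 0;
unsigned g_generation = 0;

/* Analogue of WaitIfEqual(addr, value): don't sleep if the value already changed. */
void WaitIfEqual(int expected) {
    std::unique_lock lk(g_lock);
    if (g_value != expected) {
        return;
    }
    const unsigned gen = g_generation;
    g_cv.wait(lk, [&] { return g_generation != gen; });
}

/* Analogue of Signal(addr, count) combined with an update of the value. */
void SignalAndModify(int new_value) {
    std::scoped_lock lk(g_lock);
    g_value = new_value;
    ++g_generation;
    g_cv.notify_all();
}

int main() {
    std::thread waiter([] {
        WaitIfEqual(0);
        std::puts("waiter woken");
    });
    SignalAndModify(1);
    waiter.join();
}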
@ -40,17 +40,7 @@ namespace ams::kern {

request->Initialize(nullptr, address, size);

/* Send the request. */
{
KScopedSchedulerLock sl;

GetCurrentThread().SetSyncedObject(nullptr, ResultSuccess());

R_TRY(m_parent->OnRequest(request));
}

/* Get the result. */
KSynchronizationObject *dummy;
return GetCurrentThread().GetWaitResult(std::addressof(dummy));
return m_parent->OnRequest(request);
}

Result KClientSession::SendAsyncRequest(KEvent *event, uintptr_t address, size_t size) {

@ -65,13 +55,7 @@ namespace ams::kern {

request->Initialize(event, address, size);

/* Send the request. */
{
KScopedSchedulerLock sl;

R_TRY(m_parent->OnRequest(request));
}

return ResultSuccess();
return m_parent->OnRequest(request);
}

}

@ -31,10 +31,46 @@ namespace ams::kern {

return UserspaceAccess::UpdateLockAtomic(out, GetPointer<u32>(address), if_zero, new_orr_mask);
}

class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
public:
constexpr ThreadQueueImplForKConditionVariableWaitForAddress() : KThreadQueue() { /* ... */ }

virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread as a waiter from its owner. */
waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);

/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};

class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue {
private:
KConditionVariable::ThreadTree *m_tree;
public:
constexpr ThreadQueueImplForKConditionVariableWaitConditionVariable(KConditionVariable::ThreadTree *t) : KThreadQueue(), m_tree(t) { /* ... */ }

virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread as a waiter from its owner. */
if (KThread *owner = waiting_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(waiting_thread);
}

/* If the thread is waiting on a condvar, remove it from the tree. */
if (waiting_thread->IsWaitingForConditionVariable()) {
m_tree->erase(m_tree->iterator_to(*waiting_thread));
waiting_thread->ClearConditionVariable();
}

/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};

}

Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
KThread *owner_thread = std::addressof(GetCurrentThread());
KThread *owner_thread = GetCurrentThreadPointer();

/* Signal the address. */
{

@ -46,75 +82,64 @@ namespace ams::kern {

/* Determine the next tag. */
u32 next_value = 0;
if (next_owner_thread) {
if (next_owner_thread != nullptr) {
next_value = next_owner_thread->GetAddressKeyValue();
if (num_waiters > 1) {
next_value |= ams::svc::HandleWaitMask;
}

next_owner_thread->SetSyncedObject(nullptr, ResultSuccess());
next_owner_thread->Wakeup();
}

/* Write the value to userspace. */
if (!WriteToUser(addr, std::addressof(next_value))) {
if (next_owner_thread) {
next_owner_thread->SetSyncedObject(nullptr, svc::ResultInvalidCurrentMemory());
/* Write the value to userspace. */
Result result;
if (AMS_LIKELY(WriteToUser(addr, std::addressof(next_value)))) {
result = ResultSuccess();
} else {
result = svc::ResultInvalidCurrentMemory();
}

return svc::ResultInvalidCurrentMemory();
/* Signal the next owner thread. */
next_owner_thread->EndWait(result);
return result;
} else {
/* Just write the value to userspace. */
R_UNLESS(WriteToUser(addr, std::addressof(next_value)), svc::ResultInvalidCurrentMemory());

return ResultSuccess();
}
}

return ResultSuccess();
}

Result KConditionVariable::WaitForAddress(ams::svc::Handle handle, KProcessAddress addr, u32 value) {
KThread *cur_thread = std::addressof(GetCurrentThread());
KThread *cur_thread = GetCurrentThreadPointer();
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue;

/* Wait for the address. */
{
KScopedAutoObject<KThread> owner_thread;
MESOSPHERE_ASSERT(owner_thread.IsNull());
{
KScopedSchedulerLock sl;
cur_thread->SetSyncedObject(nullptr, ResultSuccess());
KScopedSchedulerLock sl;

/* Check if the thread should terminate. */
R_UNLESS(!cur_thread->IsTerminationRequested(), svc::ResultTerminationRequested());
/* Check if the thread should terminate. */
R_UNLESS(!cur_thread->IsTerminationRequested(), svc::ResultTerminationRequested());

{
/* Read the tag from userspace. */
u32 test_tag;
R_UNLESS(ReadFromUser(std::addressof(test_tag), addr), svc::ResultInvalidCurrentMemory());
/* Read the tag from userspace. */
u32 test_tag;
R_UNLESS(ReadFromUser(std::addressof(test_tag), addr), svc::ResultInvalidCurrentMemory());

/* If the tag isn't the handle (with wait mask), we're done. */
R_SUCCEED_IF(test_tag != (handle | ams::svc::HandleWaitMask));
/* If the tag isn't the handle (with wait mask), we're done. */
R_SUCCEED_IF(test_tag != (handle | ams::svc::HandleWaitMask));

/* Get the lock owner thread. */
owner_thread = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(handle);
R_UNLESS(owner_thread.IsNotNull(), svc::ResultInvalidHandle());
/* Get the lock owner thread. */
KScopedAutoObject owner_thread = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(handle);
R_UNLESS(owner_thread.IsNotNull(), svc::ResultInvalidHandle());

/* Update the lock. */
cur_thread->SetAddressKey(addr, value);
owner_thread->AddWaiter(cur_thread);
cur_thread->SetState(KThread::ThreadState_Waiting);
}
}
MESOSPHERE_ASSERT(owner_thread.IsNotNull());
/* Update the lock. */
cur_thread->SetAddressKey(addr, value);
owner_thread->AddWaiter(cur_thread);

/* Remove the thread as a waiter from the lock owner. */
{
KScopedSchedulerLock sl;
if (KThread *mutex_owner = cur_thread->GetLockOwner(); mutex_owner != nullptr) {
mutex_owner->RemoveWaiter(cur_thread);
}
}
/* Begin waiting. */
cur_thread->BeginWait(std::addressof(wait_queue));
}

/* Get the wait result. */
KSynchronizationObject *dummy;
return cur_thread->GetWaitResult(std::addressof(dummy));
return cur_thread->GetWaitResult();
}

void KConditionVariable::SignalImpl(KThread *thread) {

@ -139,8 +164,7 @@ namespace ams::kern {

if (AMS_LIKELY(can_access)) {
if (prev_tag == ams::svc::InvalidHandle) {
/* If nobody held the lock previously, we're all good. */
thread->SetSyncedObject(nullptr, ResultSuccess());
thread->Wakeup();
thread->EndWait(ResultSuccess());
} else {
/* Get the previous owner. */
KThread *owner_thread = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(static_cast<ams::svc::Handle>(prev_tag & ~ams::svc::HandleWaitMask))

@ -151,14 +175,12 @@ namespace ams::kern {

owner_thread->Close();
} else {
/* The lock was tagged with a thread that doesn't exist. */
thread->SetSyncedObject(nullptr, svc::ResultInvalidState());
thread->Wakeup();
thread->EndWait(svc::ResultInvalidState());
}
}
} else {
/* If the address wasn't accessible, note so. */
thread->SetSyncedObject(nullptr, svc::ResultInvalidCurrentMemory());
thread->Wakeup();
thread->EndWait(svc::ResultInvalidCurrentMemory());
}
}

@ -190,13 +212,11 @@ namespace ams::kern {

/* Prepare to wait. */
KThread *cur_thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(std::addressof(m_tree));

{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);

/* Set the synced object. */
cur_thread->SetSyncedObject(nullptr, ams::svc::ResultTimedOut());

/* Check that the thread isn't terminating. */
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();

@ -219,8 +239,7 @@ namespace ams::kern {

}

/* Wake up the next owner. */
next_owner_thread->SetSyncedObject(nullptr, ResultSuccess());
next_owner_thread->Wakeup();
next_owner_thread->EndWait(ResultSuccess());
}

/* Write to the cv key. */

@ -237,40 +256,20 @@ namespace ams::kern {

}
}

/* If timeout is zero, time out. */
R_UNLESS(timeout != 0, svc::ResultTimedOut());

/* Update condition variable tracking. */
{
cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value);
m_tree.insert(*cur_thread);
}
cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value);
m_tree.insert(*cur_thread);

/* If the timeout is non-zero, set the thread as waiting. */
if (timeout != 0) {
cur_thread->SetState(KThread::ThreadState_Waiting);
}
/* Begin waiting. */
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
}

/* Remove from the condition variable. */
{
KScopedSchedulerLock sl;

if (KThread *owner = cur_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(cur_thread);
}

if (cur_thread->IsWaitingForConditionVariable()) {
m_tree.erase(m_tree.iterator_to(*cur_thread));
cur_thread->ClearConditionVariable();
}
}

/* Cancel the timer wait. */
if (timer != nullptr) {
timer->CancelTask(cur_thread);
}

/* Get the result. */
KSynchronizationObject *dummy;
return cur_thread->GetWaitResult(std::addressof(dummy));
/* Get the wait result. */
return cur_thread->GetWaitResult();
}

}

@ -37,17 +37,7 @@ namespace ams::kern {

cur_thread->SetLightSessionData(data);

/* Send the request. */
{
KScopedSchedulerLock sl;

cur_thread->SetSyncedObject(nullptr, ResultSuccess());

R_TRY(m_parent->OnRequest(cur_thread));
}

/* Get the result. */
KSynchronizationObject *dummy;
return cur_thread->GetWaitResult(std::addressof(dummy));
return m_parent->OnRequest(cur_thread);
}

}

@ -0,0 +1,84 @@

/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern {

namespace {

class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
private:
KThread::WaiterList *m_wait_list;
bool m_allow_terminating_thread;
public:
constexpr ThreadQueueImplForKLightConditionVariable(KThread::WaiterList *wl, bool term) : KThreadQueue(), m_wait_list(wl), m_allow_terminating_thread(term) { /* ... */ }

virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Only process waits if we're allowed to. */
if (svc::ResultTerminationRequested::Includes(wait_result) && m_allow_terminating_thread) {
return;
}

/* Remove the thread from the waiting thread from the light condition variable. */
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));

/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};

}

void KLightConditionVariable::Wait(KLightLock *lock, s64 timeout, bool allow_terminating_thread) {
/* Create thread queue. */
KThread *owner = GetCurrentThreadPointer();
KHardwareTimer *timer;

ThreadQueueImplForKLightConditionVariable wait_queue(std::addressof(m_wait_list), allow_terminating_thread);

/* Sleep the thread. */
{
KScopedSchedulerLockAndSleep lk(&timer, owner, timeout);

if (!allow_terminating_thread && owner->IsTerminationRequested()) {
lk.CancelSleep();
return;
}

lock->Unlock();

/* Add the thread to the queue. */
m_wait_list.push_back(*owner);

/* Begin waiting. */
wait_queue.SetHardwareTimer(timer);
owner->BeginWait(std::addressof(wait_queue));
}

/* Re-acquire the lock. */
lock->Lock();
}

void KLightConditionVariable::Broadcast() {
KScopedSchedulerLock lk;

/* Signal all threads. */
for (auto it = m_wait_list.begin(); it != m_wait_list.end(); it = m_wait_list.erase(it)) {
it->EndWait(ResultSuccess());
}
}

}

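For context on how this new file is used by callers: KLightConditionVariable::Wait/Broadcast follow the familiar lock-plus-predicate-recheck discipline of any condition variable. The snippet below is a user-space analogue with standard C++ primitives standing in for KLightLock and KLightConditionVariable, not kernel code; the recheck loop matters because Wait() can also return on timeout or thread termination.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex g_lock;              /* stands in for the KLightLock passed to Wait() */
std::condition_variable g_cv;   /* stands in for the KLightConditionVariable */
bool g_ready = false;

int main() {
    std::thread consumer([] {
        std::unique_lock lk(g_lock);
        while (!g_ready) {      /* recheck the condition after every wakeup */
            g_cv.wait(lk);
        }
        std::puts("condition observed");
    });

    {
        std::scoped_lock lk(g_lock);
        g_ready = true;
    }
    g_cv.notify_all();          /* analogue of Broadcast(): wake every waiter */
    consumer.join();
}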
@ -17,8 +17,23 @@

namespace ams::kern {

namespace {

class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
constexpr ThreadQueueImplForKLightLock() : KThreadQueue() { /* ... */ }

virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Do nothing, waiting to acquire a light lock cannot be canceled. */
MESOSPHERE_UNUSED(waiting_thread, wait_result, cancel_timer_task);
}
};

}

void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
KThread *cur_thread = reinterpret_cast<KThread *>(_cur_thread);
ThreadQueueImplForKLightLock wait_queue;

/* Pend the current thread waiting on the owner thread. */
{

@ -34,22 +49,13 @@ namespace ams::kern {

cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
owner_thread->AddWaiter(cur_thread);

/* Set thread states. */
cur_thread->SetState(KThread::ThreadState_Waiting);
/* Begin waiting to hold the lock. */
cur_thread->BeginWait(std::addressof(wait_queue));

if (owner_thread->IsSuspended()) {
owner_thread->ContinueIfHasKernelWaiters();
}
}

/* We're no longer waiting on the lock owner. */
{
KScopedSchedulerLock sl;

if (KThread *owner_thread = cur_thread->GetLockOwner(); AMS_UNLIKELY(owner_thread != nullptr)) {
owner_thread->RemoveWaiter(cur_thread);
}
}
}

void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {

@ -71,7 +77,7 @@ namespace ams::kern {

next_tag |= 0x1;
}

next_owner->SetState(KThread::ThreadState_Runnable);
next_owner->EndWait(ResultSuccess());

if (next_owner->IsSuspended()) {
next_owner->ContinueIfHasKernelWaiters();

@ -17,6 +17,64 @@

namespace ams::kern {

namespace {

constexpr u64 InvalidThreadId = -1ull;

class ThreadQueueImplForKLightServerSessionRequest final : public KThreadQueue {
private:
KThread::WaiterList *m_wait_list;
public:
constexpr ThreadQueueImplForKLightServerSessionRequest(KThread::WaiterList *wl) : KThreadQueue(), m_wait_list(wl) { /* ... */ }

virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
/* Remove the thread from our wait list. */
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));

/* Invoke the base end wait handler. */
KThreadQueue::EndWait(waiting_thread, wait_result);
}

virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread from our wait list. */
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));

/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};

class ThreadQueueImplForKLightServerSessionReceive final : public KThreadQueue {
private:
KThread **m_server_thread;
public:
constexpr ThreadQueueImplForKLightServerSessionReceive(KThread **st) : KThreadQueue(), m_server_thread(st) { /* ... */ }

virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
/* Clear the server thread. */
*m_server_thread = nullptr;

/* Set the waiting thread as not cancelable. */
waiting_thread->ClearCancellable();

/* Invoke the base end wait handler. */
KThreadQueue::EndWait(waiting_thread, wait_result);
}

virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Clear the server thread. */
*m_server_thread = nullptr;

/* Set the waiting thread as not cancelable. */
waiting_thread->ClearCancellable();

/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};

}

void KLightServerSession::Destroy() {
MESOSPHERE_ASSERT_THIS();

@ -33,31 +91,45 @@ namespace ams::kern {

Result KLightServerSession::OnRequest(KThread *request_thread) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

/* Check that the server isn't closed. */
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());
ThreadQueueImplForKLightServerSessionRequest wait_queue(std::addressof(m_request_list));

/* Try to sleep the thread. */
R_UNLESS(m_request_queue.SleepThread(request_thread), svc::ResultTerminationRequested());
/* Send the request. */
{
/* Lock the scheduler. */
KScopedSchedulerLock sl;

/* If we don't have a current request, wake up a server thread to handle it. */
if (m_current_request == nullptr) {
m_server_queue.WakeupFrontThread();
/* Check that the server isn't closed. */
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());

/* Check that the request thread isn't terminating. */
R_UNLESS(!request_thread->IsTerminationRequested(), svc::ResultTerminationRequested());

/* Add the request thread to our list. */
m_request_list.push_back(*request_thread);

/* Begin waiting on the request. */
request_thread->BeginWait(std::addressof(wait_queue));

/* If we have a server thread, end its wait. */
if (m_server_thread != nullptr) {
m_server_thread->EndWait(ResultSuccess());
}
}

return ResultSuccess();
/* NOTE: Nintendo returns GetCurrentThread().GetWaitResult() here. */
/* This is technically incorrect, although it doesn't cause problems in practice */
/* because this is only ever called with request_thread = GetCurrentThreadPointer(). */
return request_thread->GetWaitResult();
}

Result KLightServerSession::ReplyAndReceive(u32 *data) {
MESOSPHERE_ASSERT_THIS();

/* Set the server context. */
KThread *server_thread = GetCurrentThreadPointer();
server_thread->SetLightSessionData(data);
GetCurrentThread().SetLightSessionData(data);

/* Reply, if we need to. */
KThread *cur_request = nullptr;
if (data[0] & KLightSession::ReplyFlag) {
KScopedSchedulerLock sl;

@ -68,78 +140,85 @@ namespace ams::kern {

/* Check that we have a request to reply to. */
R_UNLESS(m_current_request != nullptr, svc::ResultInvalidState());

/* Check that the server thread is correct. */
R_UNLESS(m_server_thread == server_thread, svc::ResultInvalidState());
/* Check that the server thread id is correct. */
R_UNLESS(m_server_thread_id == GetCurrentThread().GetId(), svc::ResultInvalidState());

/* If we can reply, do so. */
if (!m_current_request->IsTerminationRequested()) {
MESOSPHERE_ASSERT(m_current_request->GetState() == KThread::ThreadState_Waiting);
MESOSPHERE_ASSERT(m_request_queue.begin() != m_request_queue.end() && m_current_request == std::addressof(*m_request_queue.begin()));
std::memcpy(m_current_request->GetLightSessionData(), server_thread->GetLightSessionData(), KLightSession::DataSize);
m_request_queue.WakeupThread(m_current_request);
std::memcpy(m_current_request->GetLightSessionData(), GetCurrentThread().GetLightSessionData(), KLightSession::DataSize);
m_current_request->EndWait(ResultSuccess());
}

/* Close our current request. */
m_current_request->Close();

/* Clear our current request. */
cur_request = m_current_request;
m_current_request = nullptr;
m_server_thread = nullptr;
m_current_request = nullptr;
m_server_thread_id = InvalidThreadId;
}

/* Close the current request, if we had one. */
if (cur_request != nullptr) {
cur_request->Close();
}
/* Close any pending objects before we wait. */
GetCurrentThread().DestroyClosedObjects();

/* Create the wait queue for our receive. */
ThreadQueueImplForKLightServerSessionReceive wait_queue(std::addressof(m_server_thread));

/* Receive. */
bool set_cancellable = false;
while (true) {
KScopedSchedulerLock sl;
/* Try to receive a request. */
{
KScopedSchedulerLock sl;

/* Check that we aren't already receiving. */
R_UNLESS(m_server_queue.IsEmpty(), svc::ResultInvalidState());
R_UNLESS(m_server_thread == nullptr, svc::ResultInvalidState());
/* Check that we aren't already receiving. */
R_UNLESS(m_server_thread == nullptr, svc::ResultInvalidState());
R_UNLESS(m_server_thread_id == InvalidThreadId, svc::ResultInvalidState());

/* If we cancelled in a previous loop, clear cancel state. */
if (set_cancellable) {
server_thread->ClearCancellable();
set_cancellable = false;
}
/* Check that we're open. */
R_UNLESS(!m_parent->IsClientClosed(), svc::ResultSessionClosed());
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());

/* Check that we're open. */
R_UNLESS(!m_parent->IsClientClosed(), svc::ResultSessionClosed());
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());
/* Check that we're not terminating. */
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());

/* If we have a request available, use it. */
if (m_current_request == nullptr && !m_request_queue.IsEmpty()) {
m_current_request = std::addressof(*m_request_queue.begin());
m_current_request->Open();
m_server_thread = server_thread;
break;
} else {
/* Otherwise, wait for a request to come in. */
R_UNLESS(m_server_queue.SleepThread(server_thread), svc::ResultTerminationRequested());
/* If we have a request available, use it. */
if (auto head = m_request_list.begin(); head != m_request_list.end()) {
/* Set our current request. */
m_current_request = std::addressof(*head);
m_current_request->Open();

/* Set our server thread id. */
m_server_thread_id = GetCurrentThread().GetId();

/* Copy the client request data. */
std::memcpy(GetCurrentThread().GetLightSessionData(), m_current_request->GetLightSessionData(), KLightSession::DataSize);

/* We successfully received. */
return ResultSuccess();
}

/* We need to wait for a request to come in. */

/* Check if we were cancelled. */
if (server_thread->IsWaitCancelled()) {
m_server_queue.WakeupThread(server_thread);
server_thread->ClearWaitCancelled();
if (GetCurrentThread().IsWaitCancelled()) {
GetCurrentThread().ClearWaitCancelled();
return svc::ResultCancelled();
}

/* Otherwise, mark as cancellable. */
server_thread->SetCancellable();
set_cancellable = true;
}
}
/* Mark ourselves as cancellable. */
GetCurrentThread().SetCancellable();

/* Copy the client data. */
std::memcpy(server_thread->GetLightSessionData(), m_current_request->GetLightSessionData(), KLightSession::DataSize);
return ResultSuccess();
/* Wait for a request to come in. */
m_server_thread = GetCurrentThreadPointer();
GetCurrentThread().BeginWait(std::addressof(wait_queue));
}

/* We waited to receive a request; if our wait failed, return the failing result. */
R_TRY(GetCurrentThread().GetWaitResult());
}
}

void KLightServerSession::CleanupRequests() {
/* Cleanup all pending requests. */
KThread *cur_request = nullptr;
{
KScopedSchedulerLock sl;

@ -147,34 +226,25 @@ namespace ams::kern {

if (m_current_request != nullptr) {
/* Reply to the current request. */
if (!m_current_request->IsTerminationRequested()) {
MESOSPHERE_ASSERT(m_current_request->GetState() == KThread::ThreadState_Waiting);
MESOSPHERE_ASSERT(m_request_queue.begin() != m_request_queue.end() && m_current_request == std::addressof(*m_request_queue.begin()));
m_request_queue.WakeupThread(m_current_request);
m_current_request->SetSyncedObject(nullptr, svc::ResultSessionClosed());
m_current_request->EndWait(svc::ResultSessionClosed());
}

/* Clear our current request. */
cur_request = m_current_request;
m_current_request = nullptr;
m_server_thread = nullptr;
m_current_request->Close();
m_current_request = nullptr;
m_server_thread_id = InvalidThreadId;
}

/* Reply to all other requests. */
while (!m_request_queue.IsEmpty()) {
KThread *client_thread = m_request_queue.WakeupFrontThread();
client_thread->SetSyncedObject(nullptr, svc::ResultSessionClosed());
for (auto &thread : m_request_list) {
thread.EndWait(svc::ResultSessionClosed());
}

/* Wake up all server threads. */
while (!m_server_queue.IsEmpty()) {
m_server_queue.WakeupFrontThread();
/* Wait up our server thread, if we have one. */
if (m_server_thread != nullptr) {
m_server_thread->EndWait(svc::ResultSessionClosed());
}
}

/* Close the current request, if we had one. */
if (cur_request != nullptr) {
cur_request->Close();
}
}

}

@ -87,6 +87,29 @@ namespace ams::kern {

return ResultSuccess();
}

class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
private:
KThread **m_exception_thread;
public:
constexpr ThreadQueueImplForKProcessEnterUserException(KThread **t) : KThreadQueue(), m_exception_thread(t) { /* ... */ }

virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
/* Set the exception thread. */
*m_exception_thread = waiting_thread;

/* Invoke the base end wait handler. */
KThreadQueue::EndWait(waiting_thread, wait_result);
}

virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread as a waiter on its mutex owner. */
waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);

/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};

}

void KProcess::Finalize() {

@ -784,43 +807,43 @@ namespace ams::kern {

KThread *cur_thread = GetCurrentThreadPointer();
MESOSPHERE_ASSERT(this == cur_thread->GetOwnerProcess());

/* Try to claim the exception thread. */
if (m_exception_thread != cur_thread) {
const uintptr_t address_key = reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread));
while (true) {
{
KScopedSchedulerLock sl;

/* If the thread is terminating, it can't enter. */
if (cur_thread->IsTerminationRequested()) {
return false;
}

/* If we have no exception thread, we succeeded. */
if (m_exception_thread == nullptr) {
m_exception_thread = cur_thread;
KScheduler::SetSchedulerUpdateNeeded();
return true;
}

/* Otherwise, wait for us to not have an exception thread. */
cur_thread->SetAddressKey(address_key | 1);
m_exception_thread->AddWaiter(cur_thread);
cur_thread->SetState(KThread::ThreadState_Waiting);
}

/* Remove the thread as a waiter from the lock owner. */
{
KScopedSchedulerLock sl;

if (KThread *owner_thread = cur_thread->GetLockOwner(); owner_thread != nullptr) {
owner_thread->RemoveWaiter(cur_thread);
}
}
}
} else {
/* Check that we haven't already claimed the exception thread. */
if (m_exception_thread == cur_thread) {
return false;
}

/* Create the wait queue we'll be using. */
ThreadQueueImplForKProcessEnterUserException wait_queue(std::addressof(m_exception_thread));

/* Claim the exception thread. */
{
/* Lock the scheduler. */
KScopedSchedulerLock sl;

/* Check that we're not terminating. */
if (cur_thread->IsTerminationRequested()) {
return false;
}

/* If we don't have an exception thread, we can just claim it directly. */
if (m_exception_thread == nullptr) {
m_exception_thread = cur_thread;
KScheduler::SetSchedulerUpdateNeeded();
return true;
}

/* Otherwise, we need to wait until we don't have an exception thread. */

/* Add the current thread as a waiter on the current exception thread. */
cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
m_exception_thread->AddWaiter(cur_thread);

/* Wait to claim the exception thread. */
cur_thread->BeginWait(std::addressof(wait_queue));
}

/* If our wait didn't end due to thread termination, we succeeded. */
return !svc::ResultTerminationRequested::Includes(cur_thread->GetWaitResult());
}

bool KProcess::LeaveUserException() {

@ -836,7 +859,7 @@ namespace ams::kern {

/* Remove waiter thread. */
s32 num_waiters;
if (KThread *next = thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1); next != nullptr) {
next->SetState(KThread::ThreadState_Runnable);
next->EndWait(ResultSuccess());
}

KScheduler::SetSchedulerUpdateNeeded();

@ -30,6 +30,8 @@ namespace ams::kern {
|
|||
|
||||
constexpr inline size_t PointerTransferBufferAlignment = 0x10;
|
||||
|
||||
class ThreadQueueImplForKServerSessionRequest final : public KThreadQueue { /* ... */ };
|
||||
|
||||
class ReceiveList {
|
||||
private:
|
||||
u32 m_data[ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountMax * ipc::MessageBuffer::ReceiveListEntry::GetDataSize() / sizeof(u32)];
|
||||
|
@ -1042,11 +1044,11 @@ namespace ams::kern {
|
|||
/* Signal the event. */
|
||||
event->Signal();
|
||||
} else {
|
||||
/* Set the thread as runnable. */
|
||||
/* End the client thread's wait. */
|
||||
KScopedSchedulerLock sl;
|
||||
if (client_thread->GetState() == KThread::ThreadState_Waiting) {
|
||||
client_thread->SetSyncedObject(nullptr, result_for_client);
|
||||
client_thread->SetState(KThread::ThreadState_Runnable);
|
||||
|
||||
if (!client_thread->IsTerminationRequested()) {
|
||||
client_thread->EndWait(result_for_client);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1146,11 +1148,11 @@ namespace ams::kern {
|
|||
/* Signal the event. */
|
||||
event->Signal();
|
||||
} else {
|
||||
/* Set the thread as runnable. */
|
||||
/* End the client thread's wait. */
|
||||
KScopedSchedulerLock sl;
|
||||
if (client_thread->GetState() == KThread::ThreadState_Waiting) {
|
||||
client_thread->SetSyncedObject(nullptr, client_result);
|
||||
client_thread->SetState(KThread::ThreadState_Runnable);
|
||||
|
||||
if (!client_thread->IsTerminationRequested()) {
|
||||
client_thread->EndWait(client_result);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1160,31 +1162,41 @@ namespace ams::kern {
|
|||
|
||||
Result KServerSession::OnRequest(KSessionRequest *request) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
||||
|
||||
/* Ensure that we can handle new requests. */
|
||||
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());
|
||||
/* Create the wait queue. */
|
||||
ThreadQueueImplForKServerSessionRequest wait_queue;
|
||||
|
||||
/* If there's no event, this is synchronous, so we should check for thread termination. */
|
||||
if (request->GetEvent() == nullptr) {
|
||||
KThread *thread = request->GetThread();
|
||||
R_UNLESS(!thread->IsTerminationRequested(), svc::ResultTerminationRequested());
|
||||
thread->SetState(KThread::ThreadState_Waiting);
|
||||
/* Handle the request. */
|
||||
{
|
||||
/* Lock the scheduler. */
|
||||
KScopedSchedulerLock sl;
|
||||
|
||||
/* Ensure that we can handle new requests. */
|
||||
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());
|
||||
|
||||
/* Check that we're not terminating. */
|
||||
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
|
||||
|
||||
/* Get whether we're empty. */
|
||||
const bool was_empty = m_request_list.empty();
|
||||
|
||||
/* Add the request to the list. */
|
||||
request->Open();
|
||||
m_request_list.push_back(*request);
|
||||
|
||||
/* If we were empty, signal. */
|
||||
if (was_empty) {
|
||||
this->NotifyAvailable();
|
||||
}
|
||||
|
||||
/* If we have a request, this is asynchronous, and we don't need to wait. */
|
||||
R_SUCCEED_IF(request->GetEvent() != nullptr);
|
||||
|
||||
/* This is a synchronous request, so we should wait for our request to complete. */
|
||||
GetCurrentThread().BeginWait(std::addressof(wait_queue));
|
||||
}
|
||||
|
||||
/* Get whether we're empty. */
|
||||
const bool was_empty = m_request_list.empty();
|
||||
|
||||
/* Add the request to the list. */
|
||||
request->Open();
|
||||
m_request_list.push_back(*request);
|
||||
|
||||
/* If we were empty, signal. */
|
||||
if (was_empty) {
|
||||
this->NotifyAvailable();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
return GetCurrentThread().GetWaitResult();
|
||||
}
|
||||
|
||||
bool KServerSession::IsSignaledImpl() const {
|
||||
|
@ -1264,11 +1276,11 @@ namespace ams::kern {
|
|||
/* Signal the event. */
|
||||
event->Signal();
|
||||
} else {
|
||||
/* Set the thread as runnable. */
|
||||
/* End the client thread's wait. */
|
||||
KScopedSchedulerLock sl;
|
||||
if (client_thread->GetState() == KThread::ThreadState_Waiting) {
|
||||
client_thread->SetSyncedObject(nullptr, (R_SUCCEEDED(result) ? svc::ResultSessionClosed() : result));
|
||||
client_thread->SetState(KThread::ThreadState_Runnable);
|
||||
|
||||
if (!client_thread->IsTerminationRequested()) {
|
||||
client_thread->EndWait(R_SUCCEEDED(result) ? svc::ResultSessionClosed() : result);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1310,6 +1322,7 @@ namespace ams::kern {
|
|||
request->ClearEvent();
|
||||
terminate = true;
|
||||
}
|
||||
|
||||
prev_request = request;
|
||||
} else if (!m_request_list.empty()) {
|
||||
/* Pop the request from the front of the list. */
|
||||
|
|
|
@@ -17,6 +17,57 @@
|
|||
|
||||
namespace ams::kern {
|
||||
|
||||
namespace {
|
||||
|
||||
class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
|
||||
private:
|
||||
using ThreadListNode = KSynchronizationObject::ThreadListNode;
|
||||
private:
|
||||
KSynchronizationObject **m_objects;
|
||||
ThreadListNode *m_nodes;
|
||||
s32 m_count;
|
||||
public:
|
||||
constexpr ThreadQueueImplForKSynchronizationObjectWait(KSynchronizationObject **o, ThreadListNode *n, s32 c) : m_objects(o), m_nodes(n), m_count(c) { /* ... */ }
|
||||
|
||||
virtual void NotifyAvailable(KThread *waiting_thread, KSynchronizationObject *signaled_object, Result wait_result) override {
|
||||
/* Determine the sync index, and unlink all nodes. */
|
||||
s32 sync_index = -1;
|
||||
for (auto i = 0; i < m_count; ++i) {
|
||||
/* Check if this is the signaled object. */
|
||||
if (m_objects[i] == signaled_object && sync_index == -1) {
|
||||
sync_index = i;
|
||||
}
|
||||
|
||||
/* Unlink the current node from the current object. */
|
||||
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
|
||||
}
|
||||
|
||||
/* Set the waiting thread's sync index. */
|
||||
waiting_thread->SetSyncedIndex(sync_index);
|
||||
|
||||
/* Set the waiting thread as not cancellable. */
|
||||
waiting_thread->ClearCancellable();
|
||||
|
||||
/* Invoke the base end wait handler. */
|
||||
KThreadQueue::EndWait(waiting_thread, wait_result);
|
||||
}
|
||||
|
||||
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
|
||||
/* Remove all nodes from our list. */
|
||||
for (auto i = 0; i < m_count; ++i) {
|
||||
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
|
||||
}
|
||||
|
||||
/* Set the waiting thread as not cancellable. */
|
||||
waiting_thread->ClearCancellable();
|
||||
|
||||
/* Invoke the base cancel wait handler. */
|
||||
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
void KSynchronizationObject::Finalize() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
|
@@ -43,14 +94,21 @@ namespace ams::kern {
|
|||
/* Prepare for wait. */
|
||||
KThread *thread = GetCurrentThreadPointer();
|
||||
KHardwareTimer *timer;
|
||||
ThreadQueueImplForKSynchronizationObjectWait wait_queue(objects, thread_nodes, num_objects);
|
||||
|
||||
{
|
||||
/* Setup the scheduling lock and sleep. */
|
||||
KScopedSchedulerLockAndSleep slp(std::addressof(timer), thread, timeout);
|
||||
|
||||
/* Check if the thread should terminate. */
|
||||
if (thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultTerminationRequested();
|
||||
}
|
||||
|
||||
/* Check if any of the objects are already signaled. */
|
||||
for (auto i = 0; i < num_objects; ++i) {
|
||||
AMS_ASSERT(objects[i] != nullptr);
|
||||
MESOSPHERE_ASSERT(objects[i] != nullptr);
|
||||
|
||||
if (objects[i]->IsSignaled()) {
|
||||
*out_index = i;
|
||||
|
@@ -65,12 +123,6 @@ namespace ams::kern {
|
|||
return svc::ResultTimedOut();
|
||||
}
|
||||
|
||||
/* Check if the thread should terminate. */
|
||||
if (thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultTerminationRequested();
|
||||
}
|
||||
|
||||
/* Check if waiting was canceled. */
|
||||
if (thread->IsWaitCancelled()) {
|
||||
slp.CancelSleep();
|
||||
|
@@ -83,67 +135,25 @@ namespace ams::kern {
|
|||
thread_nodes[i].thread = thread;
|
||||
thread_nodes[i].next = nullptr;
|
||||
|
||||
if (objects[i]->m_thread_list_tail == nullptr) {
|
||||
objects[i]->m_thread_list_head = std::addressof(thread_nodes[i]);
|
||||
} else {
|
||||
objects[i]->m_thread_list_tail->next = std::addressof(thread_nodes[i]);
|
||||
}
|
||||
|
||||
objects[i]->m_thread_list_tail = std::addressof(thread_nodes[i]);
|
||||
objects[i]->LinkNode(std::addressof(thread_nodes[i]));
|
||||
}
|
||||
|
||||
/* Mark the thread as waiting. */
|
||||
/* Mark the thread as cancellable. */
|
||||
thread->SetCancellable();
|
||||
thread->SetSyncedObject(nullptr, svc::ResultTimedOut());
|
||||
thread->SetState(KThread::ThreadState_Waiting);
|
||||
|
||||
/* Clear the thread's synced index. */
|
||||
thread->SetSyncedIndex(-1);
|
||||
|
||||
/* Wait for an object to be signaled. */
|
||||
wait_queue.SetHardwareTimer(timer);
|
||||
thread->BeginWait(std::addressof(wait_queue));
|
||||
}
|
||||
|
||||
/* The lock/sleep is done, so we should be able to get our result. */
|
||||
|
||||
/* Thread is no longer cancellable. */
|
||||
thread->ClearCancellable();
|
||||
|
||||
/* Cancel the timer as needed. */
|
||||
if (timer != nullptr) {
|
||||
timer->CancelTask(thread);
|
||||
}
|
||||
/* Set the output index. */
|
||||
*out_index = thread->GetSyncedIndex();
|
||||
|
||||
/* Get the wait result. */
|
||||
Result wait_result;
|
||||
s32 sync_index = -1;
|
||||
{
|
||||
KScopedSchedulerLock lk;
|
||||
KSynchronizationObject *synced_obj;
|
||||
wait_result = thread->GetWaitResult(std::addressof(synced_obj));
|
||||
|
||||
for (auto i = 0; i < num_objects; ++i) {
|
||||
/* Unlink the object from the list. */
|
||||
ThreadListNode *prev_ptr = reinterpret_cast<ThreadListNode *>(std::addressof(objects[i]->m_thread_list_head));
|
||||
ThreadListNode *prev_val = nullptr;
|
||||
ThreadListNode *prev, *tail_prev;
|
||||
|
||||
do {
|
||||
prev = prev_ptr;
|
||||
prev_ptr = prev_ptr->next;
|
||||
tail_prev = prev_val;
|
||||
prev_val = prev_ptr;
|
||||
} while (prev_ptr != std::addressof(thread_nodes[i]));
|
||||
|
||||
if (objects[i]->m_thread_list_tail == std::addressof(thread_nodes[i])) {
|
||||
objects[i]->m_thread_list_tail = tail_prev;
|
||||
}
|
||||
|
||||
prev->next = thread_nodes[i].next;
|
||||
|
||||
if (objects[i] == synced_obj) {
|
||||
sync_index = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Set output. */
|
||||
*out_index = sync_index;
|
||||
return wait_result;
|
||||
return thread->GetWaitResult();
|
||||
}
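
From the caller's perspective the multi-object wait keeps its shape, but both outputs now come from thread-local wait state: the queue's NotifyAvailable() override records the synced index, and the stored wait result supplies the return value. A minimal usage sketch, assuming the Wait signature implied above (out index, object array, count, timeout) and two already-open objects:

    /* Illustrative caller of KSynchronizationObject::Wait (first_object/second_object and
       timeout_ns are assumed to exist). */
    KSynchronizationObject *objs[2] = { first_object, second_object };
    s32 index = -1;

    const Result result = KSynchronizationObject::Wait(std::addressof(index), objs, 2, timeout_ns);
    if (R_SUCCEEDED(result)) {
        /* objs[index] was the signaled object. */
    } else if (svc::ResultTimedOut::Includes(result)) {
        /* No object was signaled before the timeout; index remains -1. */
    }
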
|
||||
|
||||
void KSynchronizationObject::NotifyAvailable(Result result) {
|
||||
|
@@ -158,11 +168,7 @@ namespace ams::kern {
|
|||
|
||||
/* Iterate over each thread. */
|
||||
for (auto *cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
|
||||
KThread *thread = cur_node->thread;
|
||||
if (thread->GetState() == KThread::ThreadState_Waiting) {
|
||||
thread->SetSyncedObject(this, result);
|
||||
thread->SetState(KThread::ThreadState_Runnable);
|
||||
}
|
||||
cur_node->thread->NotifyAvailable(this, result);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -47,6 +47,23 @@ namespace ams::kern {
|
|||
KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(stack_paddr));
|
||||
}
|
||||
|
||||
class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { /* ... */ };
|
||||
|
||||
class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
|
||||
private:
|
||||
KThread::WaiterList *m_wait_list;
|
||||
public:
|
||||
constexpr ThreadQueueImplForKThreadSetProperty(KThread::WaiterList *wl) : m_wait_list(wl) { /* ... */ }
|
||||
|
||||
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
|
||||
/* Remove the thread from the wait list. */
|
||||
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
|
||||
|
||||
/* Invoke the base cancel wait handler. */
|
||||
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type) {
|
||||
|
@@ -131,12 +148,12 @@ namespace ams::kern {
|
|||
m_priority = prio;
|
||||
m_base_priority = prio;
|
||||
|
||||
/* Set sync object and waiting lock to null. */
|
||||
m_synced_object = nullptr;
|
||||
/* Set waiting lock to null. */
|
||||
m_waiting_lock = nullptr;
|
||||
|
||||
/* Initialize sleeping queue. */
|
||||
m_sleeping_queue = nullptr;
|
||||
/* Initialize wait queue/sync index. */
|
||||
m_synced_index = -1;
|
||||
m_wait_queue = nullptr;
|
||||
|
||||
/* Set suspend flags. */
|
||||
m_suspend_request_flags = 0;
|
||||
|
@@ -295,12 +312,20 @@ namespace ams::kern {
|
|||
|
||||
auto it = m_waiter_list.begin();
|
||||
while (it != m_waiter_list.end()) {
|
||||
/* Get the thread. */
|
||||
KThread * const waiter = std::addressof(*it);
|
||||
|
||||
/* The thread shouldn't be a kernel waiter. */
|
||||
MESOSPHERE_ASSERT(!IsKernelAddressKey(it->GetAddressKey()));
|
||||
it->SetLockOwner(nullptr);
|
||||
it->SetSyncedObject(nullptr, svc::ResultInvalidState());
|
||||
it->Wakeup();
|
||||
MESOSPHERE_ASSERT(!IsKernelAddressKey(waiter->GetAddressKey()));
|
||||
|
||||
/* Clear the lock owner. */
|
||||
waiter->SetLockOwner(nullptr);
|
||||
|
||||
/* Erase the waiter from our list. */
|
||||
it = m_waiter_list.erase(it);
|
||||
|
||||
/* Cancel the thread's wait. */
|
||||
waiter->CancelWait(svc::ResultInvalidState(), true);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -320,24 +345,14 @@ namespace ams::kern {
|
|||
return m_signaled;
|
||||
}
|
||||
|
||||
void KThread::Wakeup() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
KScopedSchedulerLock sl;
|
||||
|
||||
if (this->GetState() == ThreadState_Waiting) {
|
||||
if (m_sleeping_queue != nullptr) {
|
||||
m_sleeping_queue->WakeupThread(this);
|
||||
} else {
|
||||
this->SetState(ThreadState_Runnable);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void KThread::OnTimer() {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
||||
|
||||
this->Wakeup();
|
||||
/* If we're waiting, cancel the wait. */
|
||||
if (this->GetState() == ThreadState_Waiting) {
|
||||
m_wait_queue->CancelWait(this, svc::ResultTimedOut(), false);
|
||||
}
|
||||
}
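
OnTimer() shows how timeouts fold into the same mechanism: the hardware-timer callback simply cancels the wait with svc::ResultTimedOut() instead of mutating thread state itself. A sketch of a complete timed wait built from the pieces above, with a hypothetical function name:

    /* Hypothetical helper showing the timed-wait shape used throughout this commit. */
    Result WaitWithTimeout(KThread *thread, KThreadQueue *wait_queue, s64 timeout) {
        KHardwareTimer *timer;
        {
            /* Arms a timer task for the thread when the timeout is finite. */
            KScopedSchedulerLockAndSleep slp(std::addressof(timer), thread, timeout);

            /* Let EndWait()/CancelWait() cancel the timer task on early wake-up. */
            wait_queue->SetHardwareTimer(timer);

            /* Park the thread. */
            thread->BeginWait(wait_queue);
        }

        /* Either a signaler ended the wait, or the timer fired and OnTimer() invoked
           CancelWait(thread, svc::ResultTimedOut(), false) on the recorded queue. */
        return thread->GetWaitResult();
    }
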
|
||||
|
||||
void KThread::StartTermination() {
|
||||
|
@@ -362,7 +377,7 @@ namespace ams::kern {
|
|||
|
||||
/* Signal. */
|
||||
m_signaled = true;
|
||||
this->NotifyAvailable();
|
||||
KSynchronizationObject::NotifyAvailable();
|
||||
|
||||
/* Call the on thread termination handler. */
|
||||
KThreadContext::OnThreadTerminating(this);
|
||||
|
@@ -496,10 +511,8 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* Resume any threads that began waiting on us while we were pinned. */
|
||||
for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); ++it) {
|
||||
if (it->GetState() == ThreadState_Waiting) {
|
||||
it->SetState(ThreadState_Runnable);
|
||||
}
|
||||
for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); it = m_pinned_waiter_list.erase(it)) {
|
||||
it->EndWait(ResultSuccess());
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -646,9 +659,9 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* Update the pinned waiter list. */
|
||||
ThreadQueueImplForKThreadSetProperty wait_queue(std::addressof(m_pinned_waiter_list));
|
||||
{
|
||||
bool retry_update;
|
||||
bool thread_is_pinned = false;
|
||||
do {
|
||||
/* Lock the scheduler. */
|
||||
KScopedSchedulerLock sl;
|
||||
|
@@ -676,27 +689,15 @@ namespace ams::kern {
|
|||
/* Verify that the current thread isn't terminating. */
|
||||
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
|
||||
|
||||
/* Note that the thread was pinned. */
|
||||
thread_is_pinned = true;
|
||||
|
||||
/* Wait until the thread isn't pinned any more. */
|
||||
m_pinned_waiter_list.push_back(GetCurrentThread());
|
||||
GetCurrentThread().SetState(ThreadState_Waiting);
|
||||
GetCurrentThread().BeginWait(std::addressof(wait_queue));
|
||||
} else {
|
||||
/* If the thread isn't pinned, release the scheduler lock and retry until it's not current. */
|
||||
retry_update = true;
|
||||
}
|
||||
}
|
||||
} while (retry_update);
|
||||
|
||||
/* If the thread was pinned, it no longer is, and we should remove the current thread from our waiter list. */
|
||||
if (thread_is_pinned) {
|
||||
/* Lock the scheduler. */
|
||||
KScopedSchedulerLock sl;
|
||||
|
||||
/* Remove from the list. */
|
||||
m_pinned_waiter_list.erase(m_pinned_waiter_list.iterator_to(GetCurrentThread()));
|
||||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
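
This retry loop is another recurring shape in the commit: block on a purpose-built queue whose CancelWait() override undoes the list insertion, so the waiter itself no longer needs the old "if (thread_is_pinned) erase myself" fixup after the loop. A condensed sketch of that shape, with the condition checks elided:

    /* Condensed sketch of the wait-until-unpinned shape above (condition checks elided). */
    ThreadQueueImplForKThreadSetProperty wait_queue(std::addressof(m_pinned_waiter_list));

    bool retry;
    do {
        KScopedSchedulerLock sl;
        retry = false;

        if (this->GetStackParameters().is_pinned) {
            /* Park until the unpin path walks m_pinned_waiter_list and calls EndWait();
               if the wait is torn down instead, the queue's CancelWait() erases us. */
            m_pinned_waiter_list.push_back(GetCurrentThread());
            GetCurrentThread().BeginWait(std::addressof(wait_queue));
        } else {
            /* ... otherwise re-check whether the target thread is still running on a
               core, and set retry accordingly, as in the code above ... */
        }
    } while (retry);
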
|
||||
|
@@ -785,14 +786,8 @@ namespace ams::kern {
|
|||
|
||||
/* Check if we're waiting and cancellable. */
|
||||
if (this->GetState() == ThreadState_Waiting && m_cancellable) {
|
||||
if (m_sleeping_queue != nullptr) {
|
||||
m_sleeping_queue->WakeupThread(this);
|
||||
m_wait_cancelled = true;
|
||||
} else {
|
||||
this->SetSyncedObject(nullptr, svc::ResultCancelled());
|
||||
this->SetState(ThreadState_Runnable);
|
||||
m_wait_cancelled = false;
|
||||
}
|
||||
m_wait_cancelled = false;
|
||||
m_wait_queue->CancelWait(this, svc::ResultCancelled(), true);
|
||||
} else {
|
||||
/* Otherwise, note that we cancelled a wait. */
|
||||
m_wait_cancelled = true;
|
||||
|
@@ -894,7 +889,8 @@ namespace ams::kern {
|
|||
|
||||
/* If the thread is now paused, update the pinned waiter list. */
|
||||
if (activity == ams::svc::ThreadActivity_Paused) {
|
||||
bool thread_is_pinned = false;
|
||||
ThreadQueueImplForKThreadSetProperty wait_queue(std::addressof(m_pinned_waiter_list));
|
||||
|
||||
bool thread_is_current;
|
||||
do {
|
||||
/* Lock the scheduler. */
|
||||
|
@@ -903,23 +899,20 @@ namespace ams::kern {
|
|||
/* Don't do any further management if our termination has been requested. */
|
||||
R_SUCCEED_IF(this->IsTerminationRequested());
|
||||
|
||||
/* By default, treat the thread as not current. */
|
||||
thread_is_current = false;
|
||||
|
||||
/* Check whether the thread is pinned. */
|
||||
if (this->GetStackParameters().is_pinned) {
|
||||
/* Verify that the current thread isn't terminating. */
|
||||
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
|
||||
|
||||
/* Note that the thread was pinned and not current. */
|
||||
thread_is_pinned = true;
|
||||
thread_is_current = false;
|
||||
|
||||
/* Wait until the thread isn't pinned any more. */
|
||||
m_pinned_waiter_list.push_back(GetCurrentThread());
|
||||
GetCurrentThread().SetState(ThreadState_Waiting);
|
||||
GetCurrentThread().BeginWait(std::addressof(wait_queue));
|
||||
} else {
|
||||
/* Check if the thread is currently running. */
|
||||
/* If it is, we'll need to retry. */
|
||||
thread_is_current = false;
|
||||
|
||||
for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) {
|
||||
if (Kernel::GetScheduler(i).GetSchedulerCurrentThread() == this) {
|
||||
thread_is_current = true;
|
||||
|
@ -928,15 +921,6 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
} while (thread_is_current);
|
||||
|
||||
/* If the thread was pinned, it no longer is, and we should remove the current thread from our waiter list. */
|
||||
if (thread_is_pinned) {
|
||||
/* Lock the scheduler. */
|
||||
KScopedSchedulerLock sl;
|
||||
|
||||
/* Remove from the list. */
|
||||
m_pinned_waiter_list.erase(m_pinned_waiter_list.iterator_to(GetCurrentThread()));
|
||||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
|
@@ -1241,8 +1225,9 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* Wake up the thread. */
|
||||
this->SetSyncedObject(nullptr, svc::ResultTerminationRequested());
|
||||
this->Wakeup();
|
||||
if (this->GetState() == ThreadState_Waiting) {
|
||||
m_wait_queue->CancelWait(this, svc::ResultTerminationRequested(), true);
|
||||
}
|
||||
}
|
||||
|
||||
return this->GetState();
|
||||
|
@@ -1254,6 +1239,7 @@ namespace ams::kern {
|
|||
MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
|
||||
MESOSPHERE_ASSERT(timeout > 0);
|
||||
|
||||
ThreadQueueImplForKThreadSleep wait_queue;
|
||||
KHardwareTimer *timer;
|
||||
{
|
||||
/* Setup the scheduling lock and sleep. */
|
||||
|
@@ -1265,18 +1251,58 @@ namespace ams::kern {
|
|||
return svc::ResultTerminationRequested();
|
||||
}
|
||||
|
||||
/* Mark the thread as waiting. */
|
||||
this->SetState(KThread::ThreadState_Waiting);
|
||||
/* Wait for the sleep to end. */
|
||||
wait_queue.SetHardwareTimer(timer);
|
||||
this->BeginWait(std::addressof(wait_queue));
|
||||
}
|
||||
|
||||
/* The lock/sleep is done. */
|
||||
|
||||
/* Cancel the timer. */
|
||||
timer->CancelTask(this);
|
||||
|
||||
return ResultSuccess();
|
||||
}
|
||||
|
||||
void KThread::BeginWait(KThreadQueue *queue) {
|
||||
/* Set our state as waiting. */
|
||||
this->SetState(ThreadState_Waiting);
|
||||
|
||||
/* Set our wait queue. */
|
||||
m_wait_queue = queue;
|
||||
}
|
||||
|
||||
void KThread::NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Lock the scheduler. */
|
||||
KScopedSchedulerLock sl;
|
||||
|
||||
/* If we're waiting, notify our queue that we're available. */
|
||||
if (this->GetState() == ThreadState_Waiting) {
|
||||
m_wait_queue->NotifyAvailable(this, signaled_object, wait_result);
|
||||
}
|
||||
}
|
||||
|
||||
void KThread::EndWait(Result wait_result) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Lock the scheduler. */
|
||||
KScopedSchedulerLock sl;
|
||||
|
||||
/* If we're waiting, notify our queue that we're available. */
|
||||
if (this->GetState() == ThreadState_Waiting) {
|
||||
m_wait_queue->EndWait(this, wait_result);
|
||||
}
|
||||
}
|
||||
|
||||
void KThread::CancelWait(Result wait_result, bool cancel_timer_task) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
/* Lock the scheduler. */
|
||||
KScopedSchedulerLock sl;
|
||||
|
||||
/* If we're waiting, notify our queue that we're available. */
|
||||
if (this->GetState() == ThreadState_Waiting) {
|
||||
m_wait_queue->CancelWait(this, wait_result, cancel_timer_task);
|
||||
}
|
||||
}
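
Taken together, these members are the whole thread-side surface of the unified API: BeginWait() parks the caller, and the other three resolve a wait from outside, each a no-op if the thread has already stopped waiting. An illustrative sketch of the three resolution calls (not meaningful as a real sequence, since the first successful one ends the wait; the helper name is hypothetical):

    /* Illustrative only: each call takes the scheduler lock itself and does nothing
       unless the target is still in ThreadState_Waiting. */
    void ResolveExamples(KThread *waiter, KSynchronizationObject *signaled_object) {
        /* A synchronization object was signaled: dispatch through the queue's NotifyAvailable(). */
        waiter->NotifyAvailable(signaled_object, ResultSuccess());

        /* The thing being waited on completed: make the thread runnable with this result. */
        waiter->EndWait(ResultSuccess());

        /* The wait is being torn down (timeout, cancellation, termination); the flag
           controls whether a pending hardware-timer task is also cancelled. */
        waiter->CancelWait(svc::ResultCancelled(), true);
    }
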
|
||||
|
||||
void KThread::SetState(ThreadState state) {
|
||||
MESOSPHERE_ASSERT_THIS();
|
||||
|
||||
|
|
62
libraries/libmesosphere/source/kern_k_thread_queue.cpp
Normal file
|
@@ -0,0 +1,62 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2020 Atmosphère-NX
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms and conditions of the GNU General Public License,
|
||||
* version 2, as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include <mesosphere.hpp>
|
||||
|
||||
namespace ams::kern {
|
||||
|
||||
void KThreadQueue::NotifyAvailable(KThread *waiting_thread, KSynchronizationObject *signaled_object, Result wait_result) {
|
||||
MESOSPHERE_UNUSED(waiting_thread, signaled_object, wait_result);
|
||||
MESOSPHERE_PANIC("KThreadQueue::NotifyAvailable\n");
|
||||
}
|
||||
|
||||
void KThreadQueue::EndWait(KThread *waiting_thread, Result wait_result) {
|
||||
/* Set the thread's wait result. */
|
||||
waiting_thread->SetWaitResult(wait_result);
|
||||
|
||||
/* Set the thread as runnable. */
|
||||
waiting_thread->SetState(KThread::ThreadState_Runnable);
|
||||
|
||||
/* Clear the thread's wait queue. */
|
||||
waiting_thread->ClearWaitQueue();
|
||||
|
||||
/* Cancel the thread task. */
|
||||
if (m_hardware_timer != nullptr) {
|
||||
m_hardware_timer->CancelTask(waiting_thread);
|
||||
}
|
||||
}
|
||||
|
||||
void KThreadQueue::CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) {
|
||||
/* Set the thread's wait result. */
|
||||
waiting_thread->SetWaitResult(wait_result);
|
||||
|
||||
/* Set the thread as runnable. */
|
||||
waiting_thread->SetState(KThread::ThreadState_Runnable);
|
||||
|
||||
/* Clear the thread's wait queue. */
|
||||
waiting_thread->ClearWaitQueue();
|
||||
|
||||
/* Cancel the thread task. */
|
||||
if (cancel_timer_task && m_hardware_timer != nullptr) {
|
||||
m_hardware_timer->CancelTask(waiting_thread);
|
||||
}
|
||||
}
|
||||
|
||||
void KThreadQueueWithoutEndWait::EndWait(KThread *waiting_thread, Result wait_result) {
|
||||
MESOSPHERE_UNUSED(waiting_thread, wait_result);
|
||||
MESOSPHERE_PANIC("KThreadQueueWithoutEndWait::EndWait\n");
|
||||
}
|
||||
|
||||
}
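
The base behaviors above (store the result, make the thread runnable, clear its wait queue, cancel the timer task) are everything a waiter needs; specializations only add their own bookkeeping before delegating. A minimal sketch of the smallest possible use, a hypothetical one-shot signal with a single waiter and no timeout (it does not remember a signal delivered before Wait(), which a real primitive would have to handle):

    /* Hypothetical single-waiter signal, for illustration only. */
    namespace {

        class ThreadQueueImplForExampleSignal final : public KThreadQueue {
            public:
                constexpr ThreadQueueImplForExampleSignal() : KThreadQueue() { /* ... */ }
        };

    }

    class ExampleSignal {
        private:
            ThreadQueueImplForExampleSignal m_wait_queue;
            KThread *m_waiter;
        public:
            constexpr ExampleSignal() : m_wait_queue(), m_waiter(nullptr) { /* ... */ }

            void Wait() {
                KScopedSchedulerLock sl;

                /* Publish ourselves, then park; we resume when Signal() calls EndWait(). */
                m_waiter = GetCurrentThreadPointer();
                GetCurrentThread().BeginWait(std::addressof(m_wait_queue));
            }

            void Signal() {
                KScopedSchedulerLock sl;

                if (m_waiter != nullptr) {
                    /* Base-class EndWait(): store the result, make the thread runnable,
                       clear its wait queue (no timer task to cancel here). */
                    m_waiter->EndWait(ResultSuccess());
                    m_waiter = nullptr;
                }
            }
    };
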
|
|
@@ -17,73 +17,83 @@
|
|||
|
||||
namespace ams::kern {
|
||||
|
||||
void KWaitObject::OnTimer() {
|
||||
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
||||
namespace {
|
||||
|
||||
class ThreadQueueImplForKWaitObjectSynchronize final : public KThreadQueueWithoutEndWait {
|
||||
private:
|
||||
KThread::WaiterList *m_wait_list;
|
||||
KThread **m_thread;
|
||||
public:
|
||||
constexpr ThreadQueueImplForKWaitObjectSynchronize(KThread::WaiterList *wl, KThread **t) : KThreadQueueWithoutEndWait(), m_wait_list(wl), m_thread(t) { /* ... */ }
|
||||
|
||||
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
|
||||
/* Remove the thread from the wait list. */
|
||||
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
|
||||
|
||||
/* If the result was a timeout and the thread is our wait object thread, cancel recursively. */
|
||||
if (svc::ResultTimedOut::Includes(wait_result) && waiting_thread == *m_thread) {
|
||||
for (auto &thread : *m_wait_list) {
|
||||
thread.CancelWait(svc::ResultTimedOut(), false);
|
||||
}
|
||||
}
|
||||
|
||||
/* If the thread is our wait object thread, clear it. */
|
||||
if (*m_thread == waiting_thread) {
|
||||
*m_thread = nullptr;
|
||||
}
|
||||
|
||||
/* Invoke the base cancel wait handler. */
|
||||
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
|
||||
}
|
||||
};
|
||||
|
||||
/* Wake up all the waiting threads. */
|
||||
for (KThread &thread : m_wait_list) {
|
||||
thread.Wakeup();
|
||||
}
|
||||
}
|
||||
|
||||
Result KWaitObject::Synchronize(s64 timeout) {
|
||||
/* Perform the wait. */
|
||||
KHardwareTimer *timer = nullptr;
|
||||
KThread *cur_thread = GetCurrentThreadPointer();
|
||||
KHardwareTimer *timer;
|
||||
KThread *cur_thread = GetCurrentThreadPointer();
|
||||
ThreadQueueImplForKWaitObjectSynchronize wait_queue(std::addressof(m_wait_list), std::addressof(m_next_thread));
|
||||
|
||||
{
|
||||
KScopedSchedulerLock sl;
|
||||
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);
|
||||
|
||||
/* Check that the thread isn't terminating. */
|
||||
R_UNLESS(!cur_thread->IsTerminationRequested(), svc::ResultTerminationRequested());
|
||||
|
||||
/* Verify that nothing else is already waiting on the object. */
|
||||
if (timeout > 0) {
|
||||
R_UNLESS(!m_timer_used, svc::ResultBusy());
|
||||
if (cur_thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultTerminationRequested();
|
||||
}
|
||||
|
||||
/* Check that we're not already in use. */
|
||||
/* Handle the case where timeout is non-negative/infinite. */
|
||||
if (timeout >= 0) {
|
||||
/* Verify the timer isn't already in use. */
|
||||
R_UNLESS(!m_timer_used, svc::ResultBusy());
|
||||
/* Check if we're already waiting. */
|
||||
if (m_next_thread != nullptr) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultBusy();
|
||||
}
|
||||
|
||||
/* If timeout is zero, handle the special case by canceling all waiting threads. */
|
||||
if (timeout == 0) {
|
||||
for (auto &thread : m_wait_list) {
|
||||
thread.CancelWait(svc::ResultTimedOut(), false);
|
||||
}
|
||||
|
||||
slp.CancelSleep();
|
||||
return ResultSuccess();
|
||||
}
|
||||
}
|
||||
|
||||
/* If we need to, register our timeout. */
|
||||
/* If the timeout isn't infinite, register it as our next timeout. */
|
||||
if (timeout > 0) {
|
||||
/* Mark that we're using the timer. */
|
||||
m_timer_used = true;
|
||||
|
||||
/* Use the timer. */
|
||||
timer = std::addressof(Kernel::GetHardwareTimer());
|
||||
timer->RegisterAbsoluteTask(this, timeout);
|
||||
wait_queue.SetHardwareTimer(timer);
|
||||
m_next_thread = cur_thread;
|
||||
}
|
||||
|
||||
if (timeout == 0) {
|
||||
/* If we're timed out immediately, just wake up the thread. */
|
||||
this->OnTimer();
|
||||
} else {
|
||||
/* Otherwise, sleep until the timeout occurs. */
|
||||
m_wait_list.push_back(GetCurrentThread());
|
||||
cur_thread->SetState(KThread::ThreadState_Waiting);
|
||||
cur_thread->SetSyncedObject(nullptr, svc::ResultTimedOut());
|
||||
}
|
||||
}
|
||||
/* Add the current thread to our wait list. */
|
||||
m_wait_list.push_back(*cur_thread);
|
||||
|
||||
/* Cleanup as necessary. */
|
||||
{
|
||||
KScopedSchedulerLock sl;
|
||||
|
||||
/* Remove from the timer. */
|
||||
if (timeout > 0) {
|
||||
MESOSPHERE_ASSERT(m_timer_used);
|
||||
MESOSPHERE_ASSERT(timer != nullptr);
|
||||
timer->CancelTask(this);
|
||||
m_timer_used = false;
|
||||
}
|
||||
|
||||
/* Remove the thread from our queue. */
|
||||
if (timeout != 0) {
|
||||
m_wait_list.erase(m_wait_list.iterator_to(GetCurrentThread()));
|
||||
}
|
||||
/* Wait until the timeout occurs. */
|
||||
cur_thread->BeginWait(std::addressof(wait_queue));
|
||||
}
|
||||
|
||||
return ResultSuccess();
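
With the rewrite, the timeout argument to Synchronize() selects one of three behaviors, and the m_next_thread slot replaces the old m_timer_used flag as the "a timed synchronize is pending" marker. An illustrative caller-side summary (wait_object and deadline are assumed names, not taken from the diff):

    /* Illustrative, not an excerpt. Non-negative timeouts are rejected with
       svc::ResultBusy() while another timed Synchronize() is pending (m_next_thread set). */
    wait_object.Synchronize(-1);        /* wait indefinitely, no timer task armed          */
    wait_object.Synchronize(0);         /* don't wait: cancel every current waiter's       */
                                        /* wait with svc::ResultTimedOut()                 */
    wait_object.Synchronize(deadline);  /* arm a timer task for the deadline, then wait    */
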
|
||||
|
|
|
@@ -17,22 +17,46 @@
|
|||
|
||||
namespace ams::kern {
|
||||
|
||||
namespace {
|
||||
|
||||
class ThreadQueueImplForKWorkerTaskManager final : public KThreadQueue {
|
||||
private:
|
||||
KThread **m_waiting_thread;
|
||||
public:
|
||||
constexpr ThreadQueueImplForKWorkerTaskManager(KThread **t) : KThreadQueue(), m_waiting_thread(t) { /* ... */ }
|
||||
|
||||
virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
|
||||
/* Clear our waiting thread. */
|
||||
*m_waiting_thread = nullptr;
|
||||
|
||||
/* Invoke the base end wait handler. */
|
||||
KThreadQueue::EndWait(waiting_thread, wait_result);
|
||||
}
|
||||
|
||||
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
|
||||
MESOSPHERE_UNUSED(waiting_thread, wait_result, cancel_timer_task);
|
||||
MESOSPHERE_PANIC("ThreadQueueImplForKWorkerTaskManager::CancelWait\n");
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
void KWorkerTaskManager::Initialize(s32 priority) {
|
||||
/* Reserve a thread from the system limit. */
|
||||
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
|
||||
|
||||
/* Create a new thread. */
|
||||
m_thread = KThread::Create();
|
||||
MESOSPHERE_ABORT_UNLESS(m_thread != nullptr);
|
||||
KThread *thread = KThread::Create();
|
||||
MESOSPHERE_ABORT_UNLESS(thread != nullptr);
|
||||
|
||||
/* Launch the new thread. */
|
||||
MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(m_thread, ThreadFunction, reinterpret_cast<uintptr_t>(this), priority, cpu::NumCores - 1));
|
||||
MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(thread, ThreadFunction, reinterpret_cast<uintptr_t>(this), priority, cpu::NumCores - 1));
|
||||
|
||||
/* Register the new thread. */
|
||||
KThread::Register(m_thread);
|
||||
KThread::Register(thread);
|
||||
|
||||
/* Run the thread. */
|
||||
m_thread->Run();
|
||||
thread->Run();
|
||||
}
|
||||
|
||||
void KWorkerTaskManager::AddTask(WorkerType type, KWorkerTask *task) {
|
||||
|
@@ -45,36 +69,40 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
void KWorkerTaskManager::ThreadFunctionImpl() {
|
||||
/* Create wait queue. */
|
||||
ThreadQueueImplForKWorkerTaskManager wait_queue(std::addressof(m_waiting_thread));
|
||||
|
||||
while (true) {
|
||||
KWorkerTask *task = nullptr;
|
||||
KWorkerTask *task;
|
||||
|
||||
/* Get a worker task. */
|
||||
{
|
||||
KScopedSchedulerLock sl;
|
||||
|
||||
task = this->GetTask();
|
||||
|
||||
if (task == nullptr) {
|
||||
/* If there's nothing to do, set ourselves as waiting. */
|
||||
m_active = false;
|
||||
m_thread->SetState(KThread::ThreadState_Waiting);
|
||||
/* Wait to have a task. */
|
||||
m_waiting_thread = GetCurrentThreadPointer();
|
||||
GetCurrentThread().BeginWait(std::addressof(wait_queue));
|
||||
continue;
|
||||
}
|
||||
|
||||
m_active = true;
|
||||
}
|
||||
|
||||
/* Do the task. */
|
||||
task->DoWorkerTask();
|
||||
|
||||
/* Destroy any objects we may need to close. */
|
||||
m_thread->DestroyClosedObjects();
|
||||
GetCurrentThread().DestroyClosedObjects();
|
||||
}
|
||||
}
|
||||
|
||||
KWorkerTask *KWorkerTaskManager::GetTask() {
|
||||
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
||||
|
||||
KWorkerTask *next = m_head_task;
|
||||
if (next) {
|
||||
|
||||
if (next != nullptr) {
|
||||
/* Advance the list. */
|
||||
if (m_head_task == m_tail_task) {
|
||||
m_head_task = nullptr;
|
||||
|
@@ -86,6 +114,7 @@ namespace ams::kern {
|
|||
/* Clear the next task's next. */
|
||||
next->SetNextTask(nullptr);
|
||||
}
|
||||
|
||||
return next;
|
||||
}
|
||||
|
||||
|
@@ -102,8 +131,8 @@ namespace ams::kern {
|
|||
m_tail_task = task;
|
||||
|
||||
/* Make ourselves active if we need to. */
|
||||
if (!m_active) {
|
||||
m_thread->SetState(KThread::ThreadState_Runnable);
|
||||
if (m_waiting_thread != nullptr) {
|
||||
m_waiting_thread->EndWait(ResultSuccess());
|
||||
}
|
||||
}
|
||||
}
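
AddTask() and ThreadFunctionImpl() together show the producer/consumer form of the API: the worker publishes itself through m_waiting_thread and parks, and the producer ends that wait instead of toggling an m_active flag and thread state by hand. A condensed sketch of the pairing, with hypothetical free-function names and assuming both sides already hold the scheduler lock as the code above does:

    /* Condensed sketch of the pairing above; both callers hold the scheduler lock. */
    void WorkerIdle(KThread **waiting_thread, KThreadQueue *queue) {
        /* Consumer: publish ourselves, then park until the producer wakes us. */
        *waiting_thread = GetCurrentThreadPointer();
        GetCurrentThread().BeginWait(queue);
    }

    void WorkerNotify(KThread **waiting_thread) {
        /* Producer: the queue's EndWait() override clears *waiting_thread before the
           base handler makes the worker runnable. */
        if (*waiting_thread != nullptr) {
            (*waiting_thread)->EndWait(ResultSuccess());
        }
    }
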
|
||||
|
|
|
@@ -229,11 +229,14 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Send the request. */
|
||||
MESOSPHERE_ASSERT(message != 0);
|
||||
R_TRY(SendAsyncRequestWithUserBufferImpl(out_event_handle, message, buffer_size, session_handle));
|
||||
const Result result = SendAsyncRequestWithUserBufferImpl(out_event_handle, message, buffer_size, session_handle);
|
||||
|
||||
/* We sent the request successfully. */
|
||||
unlock_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
/* If the request succeeds (or the thread is terminating), don't unlock the user buffer. */
|
||||
if (R_SUCCEEDED(result) || svc::ResultTerminationRequested::Includes(result)) {
|
||||
unlock_guard.Cancel();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result ReplyAndReceive(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
|
||||
|
|