kern: implement dpc + skeleton rest of main

Michael Scire 2020-02-07 19:16:09 -08:00
parent e9e949ec36
commit 1224ed8abe
14 changed files with 559 additions and 40 deletions

View file

@@ -47,6 +47,7 @@
#include <mesosphere/kern_k_core_local_region.hpp>
#include <mesosphere/kern_k_slab_heap.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
#include <mesosphere/kern_k_dpc_manager.hpp>
#include <mesosphere/kern_kernel.hpp>
#include <mesosphere/kern_k_page_table_manager.hpp>

View file

@@ -0,0 +1,44 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
namespace ams::kern {
class KDpcManager {
private:
static constexpr s32 DpcManagerNormalThreadPriority = 59;
static constexpr s32 DpcManagerPreemptionThreadPriority = 63;
static_assert(ams::svc::HighestThreadPriority <= DpcManagerNormalThreadPriority && DpcManagerNormalThreadPriority <= ams::svc::LowestThreadPriority);
static_assert(ams::svc::HighestThreadPriority <= DpcManagerPreemptionThreadPriority && DpcManagerPreemptionThreadPriority <= ams::svc::LowestThreadPriority);
private:
static NOINLINE void Initialize(s32 core_id, s32 priority);
public:
static void Initialize() {
const s32 core_id = GetCurrentCoreId();
if (core_id == static_cast<s32>(cpu::NumCores) - 1) {
Initialize(core_id, DpcManagerPreemptionThreadPriority);
} else {
Initialize(core_id, DpcManagerNormalThreadPriority);
}
}
static NOINLINE void HandleDpc();
};
}
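Aside on the constants: Horizon thread priorities are numeric, with 0 the most urgent and 63 the least, which is why the asserts read "Highest <= value <= Lowest" rather than the other way around. A minimal restatement with the svc values spelled out (the 0/63 constants are quoted from the ams::svc definitions, not from this diff):

    /* Horizon priorities: 0 is most urgent, 63 is least urgent. */
    constexpr s32 HighestThreadPriority = 0;  /* ams::svc::HighestThreadPriority */
    constexpr s32 LowestThreadPriority = 63;  /* ams::svc::LowestThreadPriority */
    static_assert(HighestThreadPriority <= 59 && 59 <= LowestThreadPriority); /* normal DPC threads */
    static_assert(HighestThreadPriority <= 63 && 63 <= LowestThreadPriority); /* preemption thread */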

View file

@@ -49,7 +49,7 @@ namespace ams::kern {
}
}
public:
void Wait(KLightLock *lock, s64 timeout) {
void Wait(KLightLock *lock, s64 timeout = -1ll) {
this->WaitImpl(lock, timeout);
lock->Lock();
}
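Because Wait() re-locks after WaitImpl() returns, it carries the usual condition-variable contract: the lock is held on entry, dropped for the duration of the wait, and held again on return, so callers loop on their predicate. A minimal usage sketch (hypothetical globals; the shape matches how KDpcTask uses this API later in this commit):

    KLightLock g_lock;
    KLightConditionVariable g_cv;
    bool g_signaled = false; /* hypothetical predicate */

    void WaitForSignal() {
        KScopedLightLock lk(g_lock);
        while (!g_signaled) {   /* re-check: a broadcast wakes every waiter */
            g_cv.Wait(&g_lock); /* with the new default, timeout = -1 waits forever */
        }
    }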

View file

@@ -471,6 +471,10 @@ namespace ams::kern {
return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_VirtualKernelPtHeap);
}
static NOINLINE KMemoryRegion &GetKernelStackRegion() {
return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelStack);
}
static NOINLINE KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) {
return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address));
}

View file

@@ -142,7 +142,7 @@ namespace ams::kern {
/* ... */
}
void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size);
NOINLINE void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size);
KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option);

View file

@@ -342,8 +342,8 @@ namespace ams::kern {
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
}
constexpr ALWAYS_INLINE void MoveToScheduledBack(Member *member) {
this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
constexpr ALWAYS_INLINE KThread *MoveToScheduledBack(Member *member) {
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
}
/* First class fancy operations. */

View file

@@ -117,6 +117,7 @@ namespace ams::kern {
static NOINLINE void OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core);
/* TODO: Yield operations */
static NOINLINE void RotateScheduledQueue(s32 core_id, s32 priority);
private:
/* Instanced private API. */
void ScheduleImpl();

View file

@@ -46,7 +46,7 @@ namespace ams::kern {
SuspendType_Thread = 1,
SuspendType_Debug = 2,
SuspendType_Unk3 = 3,
SuspendType_Unk4 = 4,
SuspendType_Init = 4,
SuspendType_Count,
};
@@ -64,7 +64,7 @@ namespace ams::kern {
ThreadState_ThreadSuspended = (1 << (SuspendType_Thread + ThreadState_SuspendShift)),
ThreadState_DebugSuspended = (1 << (SuspendType_Debug + ThreadState_SuspendShift)),
ThreadState_Unk3Suspended = (1 << (SuspendType_Unk3 + ThreadState_SuspendShift)),
ThreadState_Unk4Suspended = (1 << (SuspendType_Unk4 + ThreadState_SuspendShift)),
ThreadState_InitSuspended = (1 << (SuspendType_Init + ThreadState_SuspendShift)),
ThreadState_SuspendFlagMask = ((1 << SuspendType_Count) - 1) << ThreadState_SuspendShift,
};
@@ -91,17 +91,17 @@ namespace ams::kern {
KThread *prev;
KThread *next;
public:
constexpr ALWAYS_INLINE QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ }
constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ }
constexpr ALWAYS_INLINE void Initialize() {
constexpr void Initialize() {
this->prev = nullptr;
this->next = nullptr;
}
constexpr ALWAYS_INLINE KThread *GetPrev() const { return this->prev; }
constexpr ALWAYS_INLINE KThread *GetNext() const { return this->next; }
constexpr ALWAYS_INLINE void SetPrev(KThread *t) { this->prev = t; }
constexpr ALWAYS_INLINE void SetNext(KThread *t) { this->next = t; }
constexpr KThread *GetPrev() const { return this->prev; }
constexpr KThread *GetNext() const { return this->next; }
constexpr void SetPrev(KThread *t) { this->prev = t; }
constexpr void SetNext(KThread *t) { this->next = t; }
};
private:
static constexpr size_t PriorityInheritanceCountMax = 10;
@@ -179,6 +179,18 @@ namespace ams::kern {
Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type);
private:
static Result InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type);
public:
static Result InitializeKernelThread(KThread *thread, KThreadFunction func, uintptr_t arg, s32 prio, s32 core) {
return InitializeThread(thread, func, arg, Null<KProcessAddress>, prio, core, nullptr, ThreadType_Kernel);
}
static Result InitializeHighPriorityThread(KThread *thread, KThreadFunction func, uintptr_t arg) {
return InitializeThread(thread, func, arg, Null<KProcessAddress>, 0, GetCurrentCoreId(), nullptr, ThreadType_HighPriority);
}
/* TODO: static Result InitializeUserThread */
private:
StackParameters &GetStackParameters() {
return *(reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1);
@@ -204,39 +216,52 @@ namespace ams::kern {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() > 0);
GetStackParameters().disable_count--;
}
private:
void Suspend();
public:
constexpr ALWAYS_INLINE const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; }
constexpr ALWAYS_INLINE ThreadState GetState() const { return static_cast<ThreadState>(this->thread_state & ThreadState_Mask); }
constexpr ALWAYS_INLINE ThreadState GetRawState() const { return this->thread_state; }
constexpr const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; }
constexpr ThreadState GetState() const { return static_cast<ThreadState>(this->thread_state & ThreadState_Mask); }
constexpr ThreadState GetRawState() const { return this->thread_state; }
NOINLINE void SetState(ThreadState state);
NOINLINE KThreadContext *GetContextForSchedulerLoop();
constexpr ALWAYS_INLINE s32 GetActiveCore() const { return this->core_id; }
constexpr ALWAYS_INLINE void SetActiveCore(s32 core) { this->core_id = core; }
constexpr ALWAYS_INLINE s32 GetPriority() const { return this->priority; }
constexpr s32 GetActiveCore() const { return this->core_id; }
constexpr void SetActiveCore(s32 core) { this->core_id = core; }
constexpr s32 GetPriority() const { return this->priority; }
constexpr ALWAYS_INLINE QueueEntry &GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; }
constexpr ALWAYS_INLINE const QueueEntry &GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; }
constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; }
constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; }
constexpr ALWAYS_INLINE QueueEntry &GetSleepingQueueEntry() { return this->sleeping_queue_entry; }
constexpr ALWAYS_INLINE const QueueEntry &GetSleepingQueueEntry() const { return this->sleeping_queue_entry; }
constexpr ALWAYS_INLINE void SetSleepingQueue(KThreadQueue *q) { this->sleeping_queue = q; }
constexpr QueueEntry &GetSleepingQueueEntry() { return this->sleeping_queue_entry; }
constexpr const QueueEntry &GetSleepingQueueEntry() const { return this->sleeping_queue_entry; }
constexpr void SetSleepingQueue(KThreadQueue *q) { this->sleeping_queue = q; }
constexpr ALWAYS_INLINE s32 GetNumKernelWaiters() const { return this->num_kernel_waiters; }
constexpr s32 GetNumKernelWaiters() const { return this->num_kernel_waiters; }
constexpr ALWAYS_INLINE s64 GetLastScheduledTick() const { return this->last_scheduled_tick; }
constexpr ALWAYS_INLINE void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; }
constexpr s64 GetLastScheduledTick() const { return this->last_scheduled_tick; }
constexpr void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; }
constexpr ALWAYS_INLINE KProcess *GetOwnerProcess() const { return this->parent; }
constexpr KProcess *GetOwnerProcess() const { return this->parent; }
constexpr bool IsUserThread() const { return this->parent != nullptr; }
constexpr ALWAYS_INLINE KProcessAddress GetThreadLocalRegionAddress() const { return this->tls_address; }
constexpr ALWAYS_INLINE void *GetThreadLocalRegionHeapAddress() const { return this->tls_heap_address; }
constexpr KProcessAddress GetThreadLocalRegionAddress() const { return this->tls_address; }
constexpr void *GetThreadLocalRegionHeapAddress() const { return this->tls_heap_address; }
ALWAYS_INLINE void AddCpuTime(s64 amount) {
void AddCpuTime(s64 amount) {
this->cpu_time += amount;
}
constexpr u32 GetSuspendFlags() const { return this->suspend_allowed_flags & this->suspend_request_flags; }
constexpr bool IsSuspended() const { return this->GetSuspendFlags() != 0; }
void RequestSuspend(SuspendType type);
void TrySuspend();
Result SetPriorityToIdle();
Result Run();
void Exit();
ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1; }
ALWAYS_INLINE void *GetKernelStackTop() const { return this->kernel_stack_top; }

View file

@@ -26,7 +26,7 @@ namespace ams::kern::svc {
/* 57 */ using ::ams::svc::ResultNoSynchronizationObject;
/* 59 */ using ::ams::svc::ResultThreadTerminating;
/* 59 */ using ::ams::svc::ResultTerminationRequested;
/* 70 */ using ::ams::svc::ResultNoEvent;

View file

@@ -0,0 +1,151 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
class KDpcTask {
private:
static inline KLightLock s_lock;
static inline KLightConditionVariable s_cond_var;
static inline u64 s_core_mask;
static inline KDpcTask *s_task;
private:
static bool HasRequest(s32 core_id) {
return (s_core_mask & (1ull << core_id)) != 0;
}
static void SetRequest(s32 core_id) {
s_core_mask |= (1ull << core_id);
}
static void ClearRequest(s32 core_id) {
s_core_mask &= ~(1ull << core_id);
}
public:
virtual void DoTask() { /* ... */ }
static void WaitForRequest() {
/* Wait for a request to come in. */
const auto core_id = GetCurrentCoreId();
KScopedLightLock lk(s_lock);
while (!HasRequest(core_id)) {
s_cond_var.Wait(&s_lock, -1ll);
}
}
static bool TimedWaitForRequest(s64 timeout) {
/* Wait for a request to come in. */
const auto core_id = GetCurrentCoreId();
KScopedLightLock lk(s_lock);
while (!HasRequest(core_id)) {
s_cond_var.Wait(&s_lock, timeout);
if (KHardwareTimer::GetTick() >= timeout) {
return false;
}
}
return true;
}
static void HandleRequest() {
/* Perform the request. */
s_task->DoTask();
/* Clear the request. */
const auto core_id = GetCurrentCoreId();
KScopedLightLock lk(s_lock);
ClearRequest(core_id);
if (s_core_mask == 0) {
s_cond_var.Broadcast();
}
}
};
/* Convenience definitions. */
constexpr s32 DpcManagerThreadPriority = 3;
constexpr s64 DpcManagerTimeout = 192'000ll; /* TODO: Constexpr conversion from 10ms */
/* Globals. */
s64 g_preemption_priorities[cpu::NumCores];
/* Manager thread functions. */
void DpcManagerNormalThreadFunction(uintptr_t arg) {
while (true) {
KDpcTask::WaitForRequest();
KDpcTask::HandleRequest();
}
}
void DpcManagerPreemptionThreadFunction(uintptr_t arg) {
s64 timeout = KHardwareTimer::GetTick() + DpcManagerTimeout;
while (true) {
if (KDpcTask::TimedWaitForRequest(timeout)) {
KDpcTask::HandleRequest();
} else {
/* Rotate the scheduler queue for each core. */
KScopedSchedulerLock lk;
for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
if (const s32 priority = g_preemption_priorities[core_id]; priority > DpcManagerThreadPriority) {
KScheduler::RotateScheduledQueue(static_cast<s32>(core_id), priority);
}
}
/* Update our next timeout. */
timeout = KHardwareTimer::GetTick() + DpcManagerTimeout;
}
}
}
}
void KDpcManager::Initialize(s32 core_id, s32 priority) {
/* Reserve a thread from the system limit. */
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
/* Create a new thread. */
KThread *new_thread = KThread::Create();
MESOSPHERE_ABORT_UNLESS(new_thread != nullptr);
/* Launch the new thread. */
g_preemption_priorities[core_id] = priority;
if (core_id == cpu::NumCores - 1) {
MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerPreemptionThreadFunction, 0, DpcManagerThreadPriority, core_id));
} else {
MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerNormalThreadFunction, 0, DpcManagerThreadPriority, core_id));
}
/* Register the new thread. */
KThread::Register(new_thread);
/* Run the thread. */
new_thread->Run();
}
void KDpcManager::HandleDpc() {
/* The only deferred procedure supported by Horizon is thread termination. */
/* Check if we need to terminate the current thread. */
KThread *cur_thread = GetCurrentThreadPointer();
if (cur_thread->IsTerminationRequested()) {
KScopedInterruptEnable ei;
cur_thread->Exit();
}
}
}
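The protocol above is a single bitmask under one lock: a requester (not part of this commit) sets one bit per target core, each core's DPC thread clears its own bit after running the task, and whoever clears the last bit broadcasts. The same shape in portable standard C++, as an illustration only — the requester half is an assumption, since this diff contains only the waiter/handler half:

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    std::mutex g_lock;
    std::condition_variable g_cv;
    uint64_t g_core_mask = 0;

    /* Hypothetical requester: mark cores 0..num_cores-1, wait for all acks. */
    void RequestOnAllCores(unsigned num_cores) {
        std::unique_lock lk(g_lock);
        g_core_mask = (num_cores >= 64) ? ~0ull : ((1ull << num_cores) - 1);
        g_cv.notify_all();
        g_cv.wait(lk, [] { return g_core_mask == 0; });
    }

    /* Per-core handler: wait for our bit, clear it, broadcast when the mask drains. */
    void HandleOnCore(unsigned core_id) {
        std::unique_lock lk(g_lock);
        g_cv.wait(lk, [core_id] { return (g_core_mask & (1ull << core_id)) != 0; });
        /* ... the deferred task would run here ... */
        g_core_mask &= ~(1ull << core_id);
        if (g_core_mask == 0) {
            g_cv.notify_all();
        }
    }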

View file

@@ -300,5 +300,89 @@ namespace ams::kern {
}
}
void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* Get a reference to the priority queue. */
auto &priority_queue = GetPriorityQueue();
/* Rotate the front of the queue to the end. */
KThread *top_thread = priority_queue.GetScheduledFront(core_id, priority);
KThread *next_thread = nullptr;
if (top_thread != nullptr) {
next_thread = priority_queue.MoveToScheduledBack(top_thread);
if (next_thread != top_thread) {
IncrementScheduledCount(top_thread);
IncrementScheduledCount(next_thread);
}
}
/* While we have a suggested thread, try to migrate it! */
{
KThread *suggested = priority_queue.GetSuggestedFront(core_id, priority);
while (suggested != nullptr) {
/* Check if the suggested thread is the top thread on its core. */
const s32 suggested_core = suggested->GetActiveCore();
if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) {
/* If the next thread is a new thread that has been waiting longer than our suggestion, we prefer it to our suggestion. */
if (top_thread != next_thread && next_thread != nullptr && next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
suggested = nullptr;
break;
}
/* If we're allowed to do a migration, do one. */
/* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */
if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
}
}
/* Get the next suggestion. */
suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
}
}
/* Now that we might have migrated a thread with the same priority, check if we can do better. */
{
KThread *best_thread = priority_queue.GetScheduledFront(core_id);
if (best_thread == GetCurrentThreadPointer()) {
best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
}
/* If the best thread we can choose has a priority the same or worse than ours, try to migrate a higher priority thread. */
if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
KThread *suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
/* If the suggestion's priority is the same as ours, don't bother. */
if (suggested->GetPriority() >= best_thread->GetPriority()) {
break;
}
/* Check if the suggested thread is the top thread on its core. */
const s32 suggested_core = suggested->GetActiveCore();
if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) {
/* If we're allowed to do a migration, do one. */
/* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */
if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
}
}
/* Get the next suggestion. */
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
}
}
/* After a rotation, we need a scheduler update. */
SetSchedulerUpdateNeeded();
}
}
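The first block of RotateScheduledQueue() is plain round-robin within one priority band: the front thread moves to the back, and both it and its replacement count as freshly scheduled. A toy model of just that step, with std::deque standing in for the intrusive per-core queue (illustration only):

    #include <deque>

    /* Toy model of MoveToScheduledBack: rotate the band's front to the back */
    /* and return the member that now heads the band.                        */
    template<typename T>
    T *RotateFrontToBack(std::deque<T *> &band) {
        if (band.empty()) {
            return nullptr;
        }
        T *moved = band.front();
        band.pop_front();
        band.push_back(moved);
        return band.front(); /* {A, B, C} -> {B, C, A}, returns B */
    }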

View file

@@ -17,6 +17,24 @@
namespace ams::kern {
namespace {
void CleanupKernelStack(uintptr_t stack_top) {
const uintptr_t stack_bottom = stack_top - PageSize;
KPhysicalAddress stack_paddr = Null<KPhysicalAddress>;
/* TODO: MESOSPHERE_ABORT_UNLESS(Kernel::GetSupervisorPageTable().GetPhysicalAddress(&stack_paddr, stack_bottom)); */
(void)stack_bottom;
/* TODO: MESOSPHERE_R_ABORT_UNLESS(Kernel::GetSupervisorPageTable().Unmap(...) */
(void)stack_paddr;
/* Free the stack page. */
KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(stack_paddr));
}
}
Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
/* Assert parameters are valid. */
MESOSPHERE_ASSERT_THIS();
@@ -174,6 +192,32 @@ namespace ams::kern {
return ResultSuccess();
}
Result KThread::InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
/* Get stack region for the thread. */
const auto &stack_region = KMemoryLayout::GetKernelStackRegion();
/* Allocate a page to use as the thread's kernel stack. */
KPageBuffer *page = KPageBuffer::Allocate();
R_UNLESS(page != nullptr, svc::ResultOutOfResource());
/* Map the stack page. */
KProcessAddress stack_top = Null<KProcessAddress>;
{
auto page_guard = SCOPE_GUARD { KPageBuffer::Free(page); };
/* TODO: R_TRY(Kernel::GetSupervisorPageTable().Map); ... */
(void)(stack_region);
page_guard.Cancel();
}
/* Initialize the thread. */
auto map_guard = SCOPE_GUARD { CleanupKernelStack(GetInteger(stack_top)); };
R_TRY(thread->Initialize(func, arg, GetVoidPointer(stack_top), user_stack_top, prio, core, owner, type));
map_guard.Cancel();
return ResultSuccess();
}
void KThread::PostDestroy(uintptr_t arg) {
KProcess *owner = reinterpret_cast<KProcess *>(arg & ~1ul);
const bool resource_limit_release_hint = (arg & 1);
@@ -202,6 +246,100 @@ namespace ams::kern {
/* TODO */
}
Result KThread::SetPriorityToIdle() {
MESOSPHERE_ASSERT_THIS();
/* Change both our priorities to the idle thread priority. */
const s32 old_priority = this->priority;
this->priority = IdleThreadPriority;
this->base_priority = IdleThreadPriority;
KScheduler::OnThreadPriorityChanged(this, old_priority);
return ResultSuccess();
}
void KThread::RequestSuspend(SuspendType type) {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock lk;
/* Note the request in our flags. */
this->suspend_request_flags |= (1 << (ThreadState_SuspendShift + type));
/* Try to perform the suspend. */
this->TrySuspend();
}
void KThread::TrySuspend() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(this->IsSuspended());
/* Ensure that we have no waiters. */
if (this->GetNumKernelWaiters() > 0) {
return;
}
MESOSPHERE_ABORT_UNLESS(this->GetNumKernelWaiters() == 0);
/* Perform the suspend. */
this->Suspend();
}
void KThread::Suspend() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(this->IsSuspended());
/* Set our suspend flags in state. */
const auto old_state = this->thread_state;
this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
/* Note the state change in scheduler. */
KScheduler::OnThreadStateChanged(this, old_state);
}
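/* Worked example of the composition above (illustration): a thread whose low */
/* bits are ThreadState_Runnable, with a granted SuspendType_Thread request,  */
/* has GetSuspendFlags() == ThreadState_ThreadSuspended, so Suspend() yields  */
/*     thread_state = ThreadState_ThreadSuspended | ThreadState_Runnable      */
/* i.e. the low bits still record "runnable" while the high suspend bit keeps */
/* the scheduler from selecting the thread.                                   */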
Result KThread::Run() {
MESOSPHERE_ASSERT_THIS();
/* If the kernel hasn't finished initializing, then we should suspend. */
if (Kernel::GetState() != Kernel::State::Initialized) {
this->RequestSuspend(SuspendType_Init);
}
while (true) {
KScopedSchedulerLock lk;
/* If either this thread or the current thread is requesting termination, note it. */
R_UNLESS(!this->IsTerminationRequested(), svc::ResultTerminationRequested());
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
/* Ensure our thread state is correct. */
R_UNLESS(this->GetState() == ThreadState_Initialized, svc::ResultInvalidState());
/* If the current thread has been asked to suspend, suspend it and retry. */
if (GetCurrentThread().IsSuspended()) {
GetCurrentThread().Suspend();
continue;
}
/* If we're not a kernel thread and we've been asked to suspend, suspend ourselves. */
if (this->IsUserThread() && this->IsSuspended()) {
this->Suspend();
}
/* Set our state and finish. */
this->SetState(KThread::ThreadState_Runnable);
return ResultSuccess();
}
}
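/* Note on the retry loop above: a pending suspend on the *calling* thread is */
/* applied while the scheduler lock is held and the loop retries, so the      */
/* caller's suspension takes effect before the target is marked Runnable.     */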
void KThread::Exit() {
MESOSPHERE_ASSERT_THIS();
/* TODO */
MESOSPHERE_PANIC("KThread::Exit() would return");
}
void KThread::SetState(ThreadState state) {
MESOSPHERE_ASSERT_THIS();

View file

@@ -17,6 +17,21 @@
namespace ams::kern {
namespace {
template<typename F>
ALWAYS_INLINE void DoOnEachCoreInOrder(s32 core_id, F f) {
cpu::SynchronizeAllCores();
for (size_t i = 0; i < cpu::NumCores; i++) {
if (static_cast<s32>(i) == core_id) {
f();
}
cpu::SynchronizeAllCores();
}
}
}
NORETURN void HorizonKernelMain(s32 core_id) {
/* Setup the Core Local Region, and note that we're initializing. */
Kernel::InitializeCoreLocalRegion(core_id);
@@ -26,13 +41,9 @@ namespace ams::kern {
cpu::SynchronizeAllCores();
/* Initialize the main and idle thread for each core. */
/* Synchronize after each init to ensure the cores go in order. */
for (size_t i = 0; i < cpu::NumCores; i++) {
if (static_cast<s32>(i) == core_id) {
Kernel::InitializeMainAndIdleThreads(core_id);
}
cpu::SynchronizeAllCores();
}
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
Kernel::InitializeMainAndIdleThreads(core_id);
});
if (core_id == 0) {
/* Initialize KSystemControl. */
@@ -58,8 +69,68 @@ namespace ams::kern {
}
}
/* TODO: Implement more of Main() */
/* Initialize the supervisor page table for each core. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
/* TODO: KPageTable::Initialize(); */
/* TODO: Kernel::GetSupervisorPageTable().Initialize(); */
});
/* Set ttbr0 for each core. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
/* TODO: SetTtbr0(); */
});
/* NOTE: On the retail kernel, a nullsub is called on each core here. */
/* Register the main/idle threads and initialize the interrupt task manager. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
KThread::Register(std::addressof(Kernel::GetMainThread(core_id)));
KThread::Register(std::addressof(Kernel::GetIdleThread(core_id)));
/* TODO: Kernel::GetInterruptTaskManager().Initialize(); */
});
/* Activate the scheduler and enable interrupts. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
Kernel::GetScheduler().Activate();
KInterruptManager::EnableInterrupts();
});
/* Initialize cpu interrupt threads. */
/* TODO cpu::InitializeInterruptThreads(core_id); */
/* Initialize the DPC manager. */
KDpcManager::Initialize();
cpu::SynchronizeAllCores();
/* Perform more core-0 specific initialization. */
if (core_id == 0) {
/* TODO: Initialize KWorkerThreadManager */
/* TODO: KSystemControl::InitializeSleepManagerAndAppletSecureMemory(); */
/* TODO: KDeviceAddressSpace::Initialize(); */
/* TODO: CreateAndRunInitialProcesses(); */
/* We're done initializing! */
Kernel::SetState(Kernel::State::Initialized);
/* TODO: KThread::ResumeThreadsSuspendedForInit(); */
}
cpu::SynchronizeAllCores();
/* Set the current thread priority to idle. */
GetCurrentThread().SetPriorityToIdle();
/* Exit the main thread. */
{
auto &main_thread = Kernel::GetMainThread(core_id);
main_thread.Open();
main_thread.Exit();
}
/* Main() is done, and we should never get to this point. */
MESOSPHERE_PANIC("Main Thread continued after exit.");
while (true) { /* ... */ }
}
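DoOnEachCoreInOrder() (added at the top of this file) is a barrier-sequenced loop: all cores line up, then core i alone runs the callback on iteration i, so the callback executes exactly once per core in strict core-id order. The same shape in standard C++, with std::barrier standing in for cpu::SynchronizeAllCores (illustration only):

    #include <barrier>

    template<typename F>
    void DoOnEachCoreInOrder(std::barrier<> &all_cores, int core_id, int num_cores, F f) {
        all_cores.arrive_and_wait(); /* matches the initial SynchronizeAllCores() */
        for (int i = 0; i < num_cores; i++) {
            if (i == core_id) {
                f(); /* only core i runs this round */
            }
            all_cores.arrive_and_wait(); /* nobody advances until core i is done */
        }
    }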

View file

@@ -29,7 +29,7 @@ namespace ams::svc {
R_DEFINE_ERROR_RESULT(NoSynchronizationObject, 57);
R_DEFINE_ERROR_RESULT(ThreadTerminating, 59);
R_DEFINE_ERROR_RESULT(TerminationRequested, 59);
R_DEFINE_ERROR_RESULT(NoEvent, 70);