kern: implement much of KScheduler, KHardwareTimer

Michael Scire 2020-02-05 13:02:35 -08:00
parent 5e4307046a
commit 62de3322ff
19 changed files with 972 additions and 72 deletions

View file

@ -83,4 +83,8 @@ namespace ams::kern::arm64::cpu {
SetTpidrEl1(value);
}
ALWAYS_INLINE void SwitchThreadLocalRegion(uintptr_t tlr) {
cpu::SetTpidrRoEl0(tlr);
}
}

View file

@ -52,6 +52,8 @@ namespace ams::kern::arm64::cpu {
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(OslarEl1, oslar_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrRoEl0, tpidrro_el0)
#define FOR_I_IN_0_TO_15(HANDLER, ...) \
HANDLER(0, ## __VA_ARGS__) HANDLER(1, ## __VA_ARGS__) HANDLER(2, ## __VA_ARGS__) HANDLER(3, ## __VA_ARGS__) \
HANDLER(4, ## __VA_ARGS__) HANDLER(5, ## __VA_ARGS__) HANDLER(6, ## __VA_ARGS__) HANDLER(7, ## __VA_ARGS__) \
@ -250,6 +252,55 @@ namespace ams::kern::arm64::cpu {
}
};
/* Accessors for timer registers. */
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerKernelControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerKernelControl, cntkctl_el1)
constexpr ALWAYS_INLINE decltype(auto) SetEl0PctEn(bool en) {
this->SetBit(0, en);
return *this;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalTimerControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalTimerControl, cntp_ctl_el0)
constexpr ALWAYS_INLINE decltype(auto) SetEnable(bool en) {
this->SetBit(0, en);
return *this;
}
constexpr ALWAYS_INLINE decltype(auto) SetIMask(bool en) {
this->SetBit(1, en);
return *this;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalTimerCompareValue) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalTimerCompareValue, cntp_cval_el0)
constexpr ALWAYS_INLINE u64 GetCompareValue() {
return this->GetValue();
}
constexpr ALWAYS_INLINE decltype(auto) SetCompareValue(u64 value) {
this->SetBits(0, BITSIZEOF(value), value);
return *this;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalCountValue) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalCountValue, cntpct_el0)
constexpr ALWAYS_INLINE u64 GetCount() {
return this->GetValue();
}
};
/* Accessors for cache registers. */
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheLineId) {
public:

View file

@ -19,15 +19,68 @@
namespace ams::kern::arm64 {
namespace impl {
class KHardwareTimerInterruptTask;
}
class KHardwareTimer : public KHardwareTimerBase {
public:
static constexpr s32 InterruptId = 30; /* Nintendo uses the non-secure timer interrupt. */
public:
constexpr KHardwareTimer() : KHardwareTimerBase() { /* ... */ }
public:
/* Public API. */
NOINLINE void Initialize(s32 core_id);
NOINLINE void Finalize();
virtual void DoTask() override;
static s64 GetTick() {
return GetCount();
}
private:
friend class impl::KHardwareTimerInterruptTask;
NOINLINE void DoInterruptTask();
private:
/* Hardware register accessors. */
static ALWAYS_INLINE void InitializeGlobalTimer() {
/* Set kernel control. */
cpu::CounterTimerKernelControlRegisterAccessor(0).SetEl0PctEn(true).Store();
/* Disable the physical timer. */
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(false).SetIMask(false).Store();
/* Set the compare value to the maximum. */
cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(std::numeric_limits<u64>::max()).Store();
/* Enable the physical timer, with interrupt masked. */
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(true).Store();
}
static ALWAYS_INLINE void EnableInterrupt() {
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(false).Store();
}
static ALWAYS_INLINE void DisableInterrupt() {
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(true).Store();
}
static ALWAYS_INLINE void StopTimer() {
/* Set the compare value to the maximum. */
cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(std::numeric_limits<u64>::max()).Store();
/* Disable the physical timer. */
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(false).SetIMask(false).Store();
}
static ALWAYS_INLINE s64 GetCount() {
return cpu::CounterTimerPhysicalCountValueRegisterAccessor().GetCount();
}
static ALWAYS_INLINE void SetCompareValue(s64 value) {
cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(static_cast<u64>(value)).Store();
}
/* TODO: Actually implement more of KHardwareTimer, */
};
}
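The private helpers above cover arming and disarming the physical timer, but the public path for scheduling a wakeup is still the TODO at the end of the class. A minimal sketch of how such a method could combine the base class's RegisterAbsoluteTaskImpl with these accessors; the name, signature, and exact locking are assumptions, not part of this commit:

    /* Hypothetical registration helper -- illustrative only. */
    void RegisterTask(KTimerTask *task, s64 time_from_now) {
        KScopedDisableDispatch dd;
        KScopedSpinLock lk(this->GetLock());

        /* Insert the task at an absolute tick; if it became the earliest pending task, re-arm the compare value. */
        if (this->RegisterAbsoluteTaskImpl(task, GetTick() + time_from_now)) {
            SetCompareValue(task->GetTime());
            EnableInterrupt();
        }
    }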

View file

@ -51,6 +51,8 @@ namespace ams::kern::arm64 {
public:
KInterruptManager() : local_state_saved(false) { /* Leave things mostly uninitalized. We'll call ::Initialize() later. */ }
/* TODO: Actually implement KInterruptManager functionality. */
NOINLINE void Initialize(s32 core_id);
NOINLINE void Finalize(s32 core_id);
public:
static ALWAYS_INLINE u32 DisableInterrupts() {
u64 intr_state;
@ -62,14 +64,14 @@ namespace ams::kern::arm64 {
static ALWAYS_INLINE u32 EnableInterrupts() {
u64 intr_state;
__asm__ __volatile__("mrs %[intr_state], daif" : [intr_state]"=r"(intr_state));
__asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"(intr_state & 0x7F));
__asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"(intr_state & ~0x80ul));
return intr_state;
}
static ALWAYS_INLINE void RestoreInterrupts(u32 intr_state) {
u64 cur_state;
__asm__ __volatile__("mrs %[cur_state], daif" : [cur_state]"=r"(cur_state));
__asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"((cur_state & 0x7F) | (intr_state & 0x80)));
__asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"((cur_state & ~0x80ul) | (intr_state & 0x80)));
}
static ALWAYS_INLINE bool AreInterruptsEnabled() {
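The masking change above is the substantive fix in this hunk. When DAIF is read with mrs, the four mask bits live at PSTATE positions 9..6, so the old "& 0x7F" cleared D (bit 9) and A (bit 8) along with I (bit 7), silently unmasking debug exceptions and SErrors; "& ~0x80ul" touches only the IRQ mask. For reference:

    /* DAIF as read via "mrs xN, daif" (PSTATE bit positions). */
    constexpr u64 DaifD = (1ul << 9); /* Debug exception mask. */
    constexpr u64 DaifA = (1ul << 8); /* SError (asynchronous abort) mask. */
    constexpr u64 DaifI = (1ul << 7); /* IRQ mask -- the only bit these helpers should toggle. */
    constexpr u64 DaifF = (1ul << 6); /* FIQ mask. */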

View file

@ -24,8 +24,8 @@ namespace ams::kern {
class KInterruptTaskManager;
struct KCurrentContext {
KThread *current_thread;
std::atomic<KThread *> current_thread;
KProcess *current_process;
std::atomic<KProcess *> current_process;
KScheduler *scheduler;
KInterruptTaskManager *interrupt_task_manager;
s32 core_id;
@ -43,7 +43,7 @@ namespace ams::kern {
}
ALWAYS_INLINE KThread *GetCurrentThreadPointer() {
return impl::GetCurrentContext().current_thread;
return impl::GetCurrentContext().current_thread.load(std::memory_order_relaxed);
}
ALWAYS_INLINE KThread &GetCurrentThread() {
@ -51,7 +51,7 @@ namespace ams::kern {
}
ALWAYS_INLINE KProcess *GetCurrentProcessPointer() {
return impl::GetCurrentContext().current_process;
return impl::GetCurrentContext().current_process.load(std::memory_order_relaxed);
}
ALWAYS_INLINE KProcess &GetCurrentProcess() {

View file

@ -15,13 +15,12 @@
*/
#pragma once
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_interrupt_task.hpp>
#include <mesosphere/kern_k_timer_task.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
namespace ams::kern {
class KHardwareTimerBase : public KInterruptTask {
class KHardwareTimerBase {
private:
using TimerTaskTree = util::IntrusiveRedBlackTreeBaseTraits<KTimerTask>::TreeType<KTimerTask>;
private:
@ -29,13 +28,68 @@ namespace ams::kern {
TimerTaskTree task_tree;
KTimerTask *next_task;
public:
constexpr KHardwareTimerBase() : lock(), task_tree(), next_task(nullptr) { /* ... */ }
constexpr ALWAYS_INLINE KHardwareTimerBase() : lock(), task_tree(), next_task(nullptr) { /* ... */ }
private:
ALWAYS_INLINE void RemoveTaskFromTree(KTimerTask *task) {
/* Erase from the tree. */
auto it = this->task_tree.erase(this->task_tree.iterator_to(*task));
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override { return this; }
/* Clear the task's scheduled time. */
task->SetTime(0);
/* Update our next task if relevant. */
if (this->next_task == task) {
this->next_task = (it != this->task_tree.end()) ? std::addressof(*it) : nullptr;
}
}
public:
NOINLINE void CancelTask(KTimerTask *task) {
KScopedDisableDispatch dd;
KScopedSpinLock lk(this->lock);
if (const s64 task_time = task->GetTime(); task_time > 0) {
this->RemoveTaskFromTree(task);
}
}
protected:
KSpinLock &GetLock() { return this->lock; }
ALWAYS_INLINE KSpinLock &GetLock() { return this->lock; }
/* TODO: Actually implement more of KHardwareTimerBase */
ALWAYS_INLINE s64 DoInterruptTaskImpl(s64 cur_time) {
/* We want to handle all tasks, returning the next time that a task is scheduled. */
while (true) {
/* Get the next task. If there isn't one, return 0. */
KTimerTask *task = this->next_task;
if (task == nullptr) {
return 0;
}
/* If the task needs to be done in the future, do it in the future and not now. */
if (const s64 task_time = task->GetTime(); task_time > cur_time) {
return task_time;
}
/* Remove the task from the tree of tasks, and update our next task. */
this->RemoveTaskFromTree(task);
/* Handle the task. */
task->OnTimer();
}
}
ALWAYS_INLINE bool RegisterAbsoluteTaskImpl(KTimerTask *task, s64 task_time) {
MESOSPHERE_ASSERT(task_time > 0);
/* Set the task's time, and insert it into our tree. */
task->SetTime(task_time);
this->task_tree.insert(*task);
/* Update our next task if relevant. */
if (this->next_task != nullptr && this->next_task->GetTime() <= task_time) {
return false;
}
this->next_task = task;
return true;
}
};
}
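DoInterruptTaskImpl drains every task whose deadline has passed by calling its OnTimer() hook while the timer's spin lock is held. A minimal sketch of a task that could live in this tree; it assumes KTimerTask exposes OnTimer() as a virtual hook (consistent with the call above, though KTimerTask itself is not shown in this diff):

    /* Illustrative only: a task whose OnTimer() fires once its registered tick elapses. */
    class KExampleWakeupTask : public KTimerTask {
        public:
            constexpr KExampleWakeupTask() : KTimerTask() { /* ... */ }

            virtual void OnTimer() override {
                /* Runs under the hardware timer's spin lock, so keep it short -- */
                /* e.g. mark a waiting thread runnable and let the scheduler do the rest. */
            }
    };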

View file

@ -30,11 +30,11 @@ namespace ams::kern {
public:
constexpr ALWAYS_INLINE KInterruptTask() : next_task(nullptr) { /* ... */ }
ALWAYS_INLINE KInterruptTask *GetNextTask() const {
constexpr ALWAYS_INLINE KInterruptTask *GetNextTask() const {
return this->next_task;
}
ALWAYS_INLINE void SetNextTask(KInterruptTask *t) {
constexpr ALWAYS_INLINE void SetNextTask(KInterruptTask *t) {
this->next_task = t;
}

View file

@ -87,7 +87,7 @@ namespace ams::kern {
/* Get the entry associated with the end of the queue. */
Member *tail = this->root[core].GetPrev();
Entry &tail_entry = (tail != nullptr) ? tail.GetPriorityQueueEntry(core) : this->root[core];
Entry &tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
/* Link the entries. */
member_entry.SetPrev(tail);
@ -104,12 +104,12 @@ namespace ams::kern {
/* Get the entry associated with the front of the queue. */
Member *head = this->root[core].GetNext();
Entry &head_entry = (head != nullptr) ? head.GetPriorityQueueEntry(core) : this->root[core];
Entry &head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
/* Link the entries. */
member_entry.SetPrev(nullptr);
member_entry.SetNext(head);
head.SetPrev(member);
head_entry.SetPrev(member);
this->root[core].SetNext(member);
return (head == nullptr);
@ -122,14 +122,14 @@ namespace ams::kern {
/* Get the entries associated with next and prev. */
Member *prev = member_entry.GetPrev();
Member *next = member_entry.GetNext();
Entry &prev_entry = (prev != nullptr) ? prev.GetPriorityQueueEntry(core) : this->root[core];
Entry &prev_entry = (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
Entry &next_entry = (next != nullptr) ? next.GetPriorityQueueEntry(core) : this->root[core];
Entry &next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
/* Unlink. */
prev_entry.SetNext(next);
next_entry.SetPrev(prev);
return (this->root[core].next == nullptr);
return (this->GetFront(core) == nullptr);
}
constexpr ALWAYS_INLINE Member *GetFront(s32 core) const {
@ -172,7 +172,7 @@ namespace ams::kern {
if (AMS_LIKELY(priority <= LowestPriority)) {
if (this->queues[priority].Remove(core, member)) {
this->available_priorities.ClearBit(priority);
this->available_priorities[core].ClearBit(priority);
}
}
}
@ -245,7 +245,7 @@ namespace ams::kern {
constexpr ALWAYS_INLINE s32 GetNextCore(u64 &affinity) {
const s32 core = __builtin_ctzll(static_cast<unsigned long long>(affinity));
ClearAffinityBit(core);
ClearAffinityBit(affinity, core);
return core;
}
@ -331,11 +331,11 @@ namespace ams::kern {
/* Mutators. */
constexpr ALWAYS_INLINE void PushBack(Member *member) {
this->PushBack(member, member->GetPriority());
this->PushBack(member->GetPriority(), member);
}
constexpr ALWAYS_INLINE void Remove(Member *member) {
this->Remove(member, member->GetPriority());
this->Remove(member->GetPriority(), member);
}
constexpr ALWAYS_INLINE void MoveToScheduledFront(Member *member) {
@ -381,7 +381,7 @@ namespace ams::kern {
/* And add the member to all queues it should be in now. */
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
if (prev_affinity.GetAffinity(core)) {
if (new_affinity.GetAffinity(core)) {
if (core == new_core) {
this->scheduled_queue.PushBack(priority, core, member);
} else {

View file

@ -31,6 +31,8 @@ namespace ams::kern {
constexpr ALWAYS_INLINE u64 GetPriorityMask() const { /* TODO */ return 0; }
constexpr ALWAYS_INLINE bool Is64Bit() const { /* TODO */ return true; }
ALWAYS_INLINE KThread *GetSuggestedTopThread(s32 core_id) { /* TODO */ return nullptr; }
};
}

View file

@ -34,6 +34,10 @@ namespace ams::kern {
public:
using LockType = KAbstractSchedulerLock<KScheduler>;
static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
static_assert(ams::svc::LowestThreadPriority >= HighestCoreMigrationAllowedPriority);
static_assert(ams::svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);
struct SchedulingState {
std::atomic<bool> needs_scheduling;
bool interrupt_task_thread_runnable;
@ -44,27 +48,112 @@ namespace ams::kern {
};
private:
friend class KScopedSchedulerLock;
static inline bool s_scheduler_update_needed;
static inline LockType s_scheduler_lock;
static inline KSchedulerPriorityQueue s_priority_queue;
private:
SchedulingState state;
bool is_active;
s32 core_id;
KThread *prev_thread;
u64 last_context_switch_time;
s64 last_context_switch_time;
KThread *idle_thread;
public:
KScheduler();
/* TODO: Actually implement KScheduler. This is a placeholder. */
constexpr KScheduler()
: state(), is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr)
{
this->state.needs_scheduling = true;
this->state.interrupt_task_thread_runnable = false;
this->state.should_count_idle = false;
this->state.idle_count = 0;
this->state.idle_thread_stack = nullptr;
this->state.highest_priority_thread = nullptr;
}
NOINLINE void Initialize(KThread *idle_thread);
NOINLINE void Activate();
private:
/* Static private API. */
static ALWAYS_INLINE bool IsSchedulerUpdateNeeded() { return s_scheduler_update_needed; }
static ALWAYS_INLINE void SetSchedulerUpdateNeeded() { s_scheduler_update_needed = true; }
static ALWAYS_INLINE void ClearSchedulerUpdateNeeded() { s_scheduler_update_needed = false; }
static ALWAYS_INLINE KSchedulerPriorityQueue &GetPriorityQueue() { return s_priority_queue; }
static NOINLINE void SetInterruptTaskThreadRunnable();
static NOINLINE u64 UpdateHighestPriorityThreadsImpl();
public:
/* API used by KSchedulerLock */
static void DisableScheduling();
static void EnableScheduling();
static u64 UpdateHighestPriorityThreads();
static void EnableSchedulingAndSchedule(u64 cores_needing_scheduling);
/* Static public API. */
static ALWAYS_INLINE bool CanSchedule() { return GetCurrentThread().GetDisableDispatchCount() == 0; }
static ALWAYS_INLINE bool IsSchedulerLockedByCurrentThread() { return s_scheduler_lock.IsLockedByCurrentThread(); }
static ALWAYS_INLINE void DisableScheduling() {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 0);
GetCurrentThread().DisableDispatch();
}
static NOINLINE void EnableScheduling(u64 cores_needing_scheduling) {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 1);
if (GetCurrentThread().GetDisableDispatchCount() > 1) {
GetCurrentThread().EnableDispatch();
} else {
GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling);
GetCurrentScheduler().RescheduleCurrentCore();
}
}
static ALWAYS_INLINE u64 UpdateHighestPriorityThreads() {
if (IsSchedulerUpdateNeeded()) {
return UpdateHighestPriorityThreadsImpl();
} else {
return 0;
}
}
static NOINLINE void OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state);
static NOINLINE void OnThreadPriorityChanged(KThread *thread, s32 old_priority);
static NOINLINE void OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core);
/* TODO: Yield operations */
private:
/* Instanced private API. */
void ScheduleImpl();
void SwitchThread(KThread *next_thread);
ALWAYS_INLINE void Schedule() {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
MESOSPHERE_ASSERT(this->core_id == GetCurrentCoreId());
this->ScheduleImpl();
}
ALWAYS_INLINE void RescheduleOtherCores(u64 cores_needing_scheduling) {
if (const u64 core_mask = cores_needing_scheduling & ~(1ul << this->core_id); core_mask != 0) {
cpu::DataSynchronizationBarrier();
/* TODO: Send scheduler interrupt. */
}
}
ALWAYS_INLINE void RescheduleCurrentCore() {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
{
/* Disable interrupts, and then context switch. */
KScopedInterruptDisable intr_disable;
ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); };
if (this->state.needs_scheduling) {
Schedule();
}
}
}
NOINLINE u64 UpdateHighestPriorityThread(KThread *thread);
}; };
class KScopedSchedulerLock : KScopedLock<KScheduler::LockType> {
explicit ALWAYS_INLINE KScopedSchedulerLock() : KScopedLock(KScheduler::s_scheduler_lock) { /* ... */ }
public:
explicit ALWAYS_INLINE KScopedSchedulerLock() : KScopedLock(KScheduler::s_scheduler_lock) { /* ... */ }
ALWAYS_INLINE ~KScopedSchedulerLock() { /* ... */ }
};
}

View file

@ -28,10 +28,9 @@ namespace ams::kern {
template<typename T>
concept KSchedulerLockable = !std::is_reference<T>::value && requires {
{ T::DisableScheduling() } -> std::same_as<void>;
{ T::EnableScheduling() } -> std::same_as<void>;
{ T::EnableScheduling(std::declval<u64>()) } -> std::same_as<void>;
{ T::UpdateHighestPriorityThreads() } -> std::convertible_to<u64>;
{ T::EnableSchedulingAndSchedule(std::declval<u64>()) } -> std::same_as<void>;
};
*/
@ -88,23 +87,7 @@ namespace ams::kern {
this->spin_lock.Unlock();
/* Enable scheduling, and perform a rescheduling operation. */
SchedulerType::EnableSchedulingAndSchedule(cores_needing_scheduling);
SchedulerType::EnableScheduling(cores_needing_scheduling);
}
}
ALWAYS_INLINE void UnlockWithoutRescheduling() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(this->lock_count > 0);
/* Release an instance of the lock. */
if ((--this->lock_count) == 0) {
/* Note that we no longer hold the lock, and unlock the spinlock. */
this->owner_thread = nullptr;
this->spin_lock.Unlock();
/* Enable scheduling, and perform a rescheduling operation. */
SchedulerType::EnableScheduling();
}
}
};
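Only the Unlock() side is touched by this hunk; the Lock() side it pairs with is unchanged and not shown. For orientation, a rough sketch of what that counterpart has to do, based on the members visible above (spin_lock, owner_thread, lock_count) -- details here are assumptions rather than the verbatim implementation:

    void Lock() {
        if (this->IsLockedByCurrentThread()) {
            /* The lock is recursive for its owner; just take another reference. */
            this->lock_count++;
        } else {
            /* Take the spin lock, disable scheduling while it is held, and note ourselves as owner. */
            this->spin_lock.Lock();
            SchedulerType::DisableScheduling();
            this->owner_thread = GetCurrentThreadPointer();
            this->lock_count = 1;
        }
    }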

View file

@ -68,8 +68,8 @@ namespace ams::kern {
};
enum DpcFlag : u32 {
DpcFlag_Terminating = 0,
DpcFlag_Terminating = (1 << 0),
DpcFlag_Terminated = 1,
DpcFlag_Terminated = (1 << 1),
};
struct StackParameters {
@ -116,7 +116,7 @@ namespace ams::kern {
alignas(16) KThreadContext thread_context;
KAffinityMask affinity_mask;
u64 thread_id;
std::atomic<u64> cpu_time;
std::atomic<s64> cpu_time;
KSynchronizationObject *synced_object;
KLightLock *waiting_lock;
uintptr_t condvar_key;
@ -204,6 +204,32 @@ namespace ams::kern {
}
public:
constexpr ALWAYS_INLINE const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; }
constexpr ALWAYS_INLINE ThreadState GetThreadState() const { return static_cast<ThreadState>(this->thread_state & ThreadState_Mask); }
constexpr ALWAYS_INLINE ThreadState GetRawThreadState() const { return this->thread_state; }
NOINLINE void SetState(ThreadState state);
NOINLINE KThreadContext *GetContextForSchedulerLoop();
constexpr ALWAYS_INLINE s32 GetActiveCore() const { return this->core_id; }
constexpr ALWAYS_INLINE void SetActiveCore(s32 core) { this->core_id = core; }
constexpr ALWAYS_INLINE s32 GetPriority() const { return this->priority; }
constexpr ALWAYS_INLINE QueueEntry &GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; }
constexpr ALWAYS_INLINE const QueueEntry &GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; }
constexpr ALWAYS_INLINE s32 GetNumKernelWaiters() const { return this->num_kernel_waiters; }
constexpr ALWAYS_INLINE s64 GetLastScheduledTick() const { return this->last_scheduled_tick; }
constexpr ALWAYS_INLINE void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; }
constexpr ALWAYS_INLINE KProcess *GetOwnerProcess() const { return this->parent; }
constexpr ALWAYS_INLINE KProcessAddress GetThreadLocalRegionAddress() const { return this->tls_address; }
constexpr ALWAYS_INLINE void *GetThreadLocalRegionHeapAddress() const { return this->tls_heap_address; }
ALWAYS_INLINE void AddCpuTime(s64 amount) {
this->cpu_time += amount;
}
ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1; }
ALWAYS_INLINE void *GetKernelStackTop() const { return this->kernel_stack_top; }
@ -218,6 +244,10 @@ namespace ams::kern {
GetStackParameters().is_in_exception_handler = true;
}
ALWAYS_INLINE bool IsTerminationRequested() const {
return this->termination_requested || this->GetRawThreadState() == ThreadState_Terminated;
}
public:
/* Overridden parent functions. */
virtual bool IsInitialized() const override { return this->initialized; }
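One detail worth noting in the DpcFlag change above: the old values 0 and 1 were not usable as flags (0 cannot be set in a flags word at all), while the new values are distinct single bits, and DpcFlag_Terminated landing on bit 1 is exactly what the scheduler assembly later in this commit tests with tbnz w3, #1. A trivial illustration:

    u32 dpc_flags = 0;
    dpc_flags |= DpcFlag_Terminating;                              /* Termination has been requested... */
    dpc_flags |= DpcFlag_Terminated;                               /* ...and later completed; both remain observable. */
    const bool terminated = (dpc_flags & DpcFlag_Terminated) != 0; /* Equivalent to the assembly's bit-1 test. */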

View file

@ -21,6 +21,7 @@
#include <mesosphere/kern_k_memory_manager.hpp>
#include <mesosphere/kern_k_core_local_region.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_select_hardware_timer.hpp>
namespace ams::kern {
@ -35,6 +36,13 @@ namespace ams::kern {
static inline State s_state = State::Invalid;
static inline KThread s_main_threads[cpu::NumCores];
static inline KThread s_idle_threads[cpu::NumCores];
private:
static ALWAYS_INLINE KCoreLocalContext &GetCoreLocalContext() {
return reinterpret_cast<KCoreLocalRegion *>(cpu::GetCoreLocalRegionAddress())->current.context;
}
static ALWAYS_INLINE KCoreLocalContext &GetCoreLocalContext(s32 core_id) {
return reinterpret_cast<KCoreLocalRegion *>(cpu::GetCoreLocalRegionAddress())->absolute[core_id].context;
}
public:
static NOINLINE void InitializeCoreLocalRegion(s32 core_id);
static NOINLINE void InitializeMainAndIdleThreads(s32 core_id);
@ -49,6 +57,26 @@ namespace ams::kern {
static ALWAYS_INLINE KThread &GetIdleThread(s32 core_id) {
return s_idle_threads[core_id];
}
static ALWAYS_INLINE KScheduler &GetScheduler() {
return GetCoreLocalContext().scheduler;
}
static ALWAYS_INLINE KScheduler &GetScheduler(s32 core_id) {
return GetCoreLocalContext(core_id).scheduler;
}
static ALWAYS_INLINE KInterruptTaskManager &GetInterruptTaskManager() {
return GetCoreLocalContext().interrupt_task_manager;
}
static ALWAYS_INLINE KInterruptManager &GetInterruptManager() {
return GetCoreLocalContext().interrupt_manager;
}
static ALWAYS_INLINE KHardwareTimer &GetHardwareTimer() {
return GetCoreLocalContext().hardware_timer;
}
};
}

View file

@ -17,8 +17,53 @@
namespace ams::kern::arm64 {
void KHardwareTimer::DoTask() {
namespace impl {
/* TODO: Actually implement this. */
class KHardwareTimerInterruptTask : public KInterruptTask {
public:
constexpr KHardwareTimerInterruptTask() : KInterruptTask() { /* ... */ }
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
return this;
}
virtual void DoTask() override {
Kernel::GetHardwareTimer().DoInterruptTask();
}
};
/* One global hardware timer interrupt task per core. */
KHardwareTimerInterruptTask g_hardware_timer_interrupt_tasks[cpu::NumCores];
}
void KHardwareTimer::Initialize(s32 core_id) {
/* Setup the global timer for the core. */
InitializeGlobalTimer();
/* TODO: Bind the interrupt task for this core to the interrupt manager. */
}
void KHardwareTimer::Finalize() {
/* Stop the hardware timer. */
StopTimer();
}
void KHardwareTimer::DoInterruptTask() {
/* Handle the interrupt. */
{
KScopedSchedulerLock slk;
KScopedSpinLock lk(this->GetLock());
/* Disable the timer interrupt while we handle this. */
DisableInterrupt();
if (const s64 next_time = this->DoInterruptTaskImpl(GetTick()); next_time > 0) {
/* We have a next time, so we should set the time to interrupt and turn the interrupt on. */
SetCompareValue(next_time);
EnableInterrupt();
}
}
/* TODO: Clear the timer interrupt. */
}
}

View file

@ -0,0 +1,28 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::arm64 {
void KInterruptManager::Initialize(s32 core_id) {
/* TODO */
}
void KInterruptManager::Finalize(s32 core_id) {
/* TODO */
}
}

View file

@ -0,0 +1,278 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define SAVE_THREAD_CONTEXT(ctx, tmp0, tmp1, done_label) \
/* Save the callee save registers + SP and cpacr. */ \
mov tmp0, sp; \
mrs tmp1, cpacr_el1; \
stp x19, x20, [ctx, #(8 * 0)]; \
stp x21, x22, [ctx, #(8 * 2)]; \
stp x23, x24, [ctx, #(8 * 4)]; \
stp x25, x26, [ctx, #(8 * 6)]; \
stp x27, x28, [ctx, #(8 * 8)]; \
stp x29, x30, [ctx, #(8 * 10)]; \
\
stp tmp0, tmp1, [ctx, #0x60]; \
\
/* Check whether the FPU is enabled. */ \
/* If it isn't, skip saving FPU state. */ \
and tmp1, tmp1, #0x300000; \
cbz tmp1, done_label; \
\
/* Save fpcr and fpsr. */ \
mrs tmp0, fpcr; \
mrs tmp1, fpsr; \
stp tmp0, tmp1, [ctx, #0x70]; \
\
/* Save the FPU registers. */ \
stp q0, q1, [ctx, #(16 * 0 + 0x80)]; \
stp q2, q3, [ctx, #(16 * 2 + 0x80)]; \
stp q4, q5, [ctx, #(16 * 4 + 0x80)]; \
stp q6, q7, [ctx, #(16 * 6 + 0x80)]; \
stp q8, q9, [ctx, #(16 * 8 + 0x80)]; \
stp q10, q11, [ctx, #(16 * 10 + 0x80)]; \
stp q12, q13, [ctx, #(16 * 12 + 0x80)]; \
stp q14, q15, [ctx, #(16 * 14 + 0x80)]; \
stp q16, q17, [ctx, #(16 * 16 + 0x80)]; \
stp q18, q19, [ctx, #(16 * 18 + 0x80)]; \
stp q20, q21, [ctx, #(16 * 20 + 0x80)]; \
stp q22, q23, [ctx, #(16 * 22 + 0x80)]; \
stp q24, q25, [ctx, #(16 * 24 + 0x80)]; \
stp q26, q27, [ctx, #(16 * 26 + 0x80)]; \
stp q28, q29, [ctx, #(16 * 28 + 0x80)]; \
stp q30, q31, [ctx, #(16 * 30 + 0x80)];
#define RESTORE_THREAD_CONTEXT(ctx, tmp0, tmp1, done_label) \
/* Restore the callee save registers + SP and cpacr. */ \
ldp tmp0, tmp1, [ctx, #0x60]; \
mov sp, tmp0; \
ldp x19, x20, [ctx, #(8 * 0)]; \
ldp x21, x22, [ctx, #(8 * 2)]; \
ldp x23, x24, [ctx, #(8 * 4)]; \
ldp x25, x26, [ctx, #(8 * 6)]; \
ldp x27, x28, [ctx, #(8 * 8)]; \
ldp x29, x30, [ctx, #(8 * 10)]; \
\
msr cpacr_el1, tmp1; \
isb; \
\
/* Check whether the FPU is enabled. */ \
/* If it isn't, skip restoring FPU state. */ \
and tmp1, tmp1, #0x300000; \
cbz tmp1, done_label; \
\
/* Restore fpcr and fpsr. */ \
ldp tmp0, tmp1, [ctx, #0x70]; \
msr fpcr, tmp0; \
msr fpsr, tmp1; \
\
/* Restore the FPU registers. */ \
ldp q0, q1, [ctx, #(16 * 0 + 0x80)]; \
ldp q2, q3, [ctx, #(16 * 2 + 0x80)]; \
ldp q4, q5, [ctx, #(16 * 4 + 0x80)]; \
ldp q6, q7, [ctx, #(16 * 6 + 0x80)]; \
ldp q8, q9, [ctx, #(16 * 8 + 0x80)]; \
ldp q10, q11, [ctx, #(16 * 10 + 0x80)]; \
ldp q12, q13, [ctx, #(16 * 12 + 0x80)]; \
ldp q14, q15, [ctx, #(16 * 14 + 0x80)]; \
ldp q16, q17, [ctx, #(16 * 16 + 0x80)]; \
ldp q18, q19, [ctx, #(16 * 18 + 0x80)]; \
ldp q20, q21, [ctx, #(16 * 20 + 0x80)]; \
ldp q22, q23, [ctx, #(16 * 22 + 0x80)]; \
ldp q24, q25, [ctx, #(16 * 24 + 0x80)]; \
ldp q26, q27, [ctx, #(16 * 26 + 0x80)]; \
ldp q28, q29, [ctx, #(16 * 28 + 0x80)]; \
ldp q30, q31, [ctx, #(16 * 30 + 0x80)];
/* ams::kern::KScheduler::ScheduleImpl() */
.section .text._ZN3ams4kern10KScheduler12ScheduleImplEv, "ax", %progbits
.global _ZN3ams4kern10KScheduler12ScheduleImplEv
.type _ZN3ams4kern10KScheduler12ScheduleImplEv, %function
/* Ensure ScheduleImpl is aligned to 0x40 bytes. */
.balign 0x40
_ZN3ams4kern10KScheduler12ScheduleImplEv:
/* Right now, x0 contains (this). We want x1 to point to the scheduling state, */
/* Current KScheduler layout has state at +0x0. */
mov x1, x0
/* First thing we want to do is check whether the interrupt task thread is runnable. */
ldrb w3, [x1, #1]
cbz w3, 0f
/* If it is, we want to call KScheduler::SetInterruptTaskThreadRunnable() to note it runnable. */
stp x0, x1, [sp, #-16]!
stp x30, xzr, [sp, #-16]!
bl _ZN3ams4kern10KScheduler30SetInterruptTaskThreadRunnableEv
ldp x30, xzr, [sp], 16
ldp x0, x1, [sp], 16
/* Clear the interrupt task thread as runnable. */
strb wzr, [x1, #1]
0: /* Interrupt task thread runnable checked. */
/* Now we want to check if there's any scheduling to do. */
/* First, clear the needs_scheduling bool (and dmb ish after, as it's an atomic). */
/* TODO: Should this be a stlrb? Nintendo does not do one. */
strb wzr, [x1]
dmb ish
/* Check if the highest priority thread is the same as the current thread. */
ldr x7, [x1, 16]
ldr x2, [x18]
cmp x7, x2
b.ne 1f
/* If they're the same, then we can just return as there's nothing to do. */
ret
1: /* The highest priority thread is not the same as the current thread. */
/* Get a reference to the current thread's stack parameters. */
add x2, sp, #0x1000
and x2, x2, #~(0x1000-1)
/* Check if the thread has terminated. We can do this by checking the DPC flags for DpcFlag_Terminated. */
ldurb w3, [x2, #-0x20]
tbnz w3, #1, 3f
/* The current thread hasn't terminated, so we want to save its context. */
ldur x2, [x2, #-0x10]
SAVE_THREAD_CONTEXT(x2, x4, x5, 2f)
2: /* We're done saving this thread's context, so we need to unlock it. */
/* We can just do an atomic write to the relevant KThreadContext member. */
add x2, x2, #0x280
stlrb wzr, [x2]
3: /* The current thread's context has been entirely taken care of. */
/* Now we want to loop until we successfully switch the thread context. */
/* Start by saving all the values we care about in callee-save registers. */
mov x19, x0 /* this */
mov x20, x1 /* this->state */
mov x21, x7 /* highest priority thread */
/* Set our stack to the idle thread stack. */
ldr x3, [x20, #0x18]
mov sp, x3
b 5f
4: /* We failed to successfully do the context switch, and need to retry. */
/* Clear the exclusive monitor. */
clrex
/* Clear the needs_scheduling bool (and dmb ish after, as it's an atomic). */
/* TODO: Should this be a stlrb? Nintendo does not do one. */
strb wzr, [x20]
dmb ish
/* Refresh the highest priority thread. */
ldr x21, [x20, 16]
5: /* We're starting to try to do the context switch. */
/* Check if the highest priority thread is null. */
/* If it is, we want to branch to a special idle thread loop. */
cbz x21, 11f
/* Get the highest priority thread's context, and save it. */
/* ams::kern::KThread::GetContextForSchedulerLoop() */
mov x0, x21
bl _ZN3ams4kern7KThread26GetContextForSchedulerLoopEv
mov x22, x0
/* Prepare to try to acquire the context lock. */
add x1, x22, #0x280
mov w2, #1
6: /* We want to try to lock the highest priority thread's context. */
/* Check if the lock is already held. */
ldaxrb w3, [x1]
cbnz w3, 7f
/* If it's not, try to take it. */
stxrb w3, w2, [x1]
cbnz w3, 6b
/* We hold the lock, so we can now switch the thread. */
b 8f
7: /* The highest priority thread's context is already locked. */
/* Check if we need scheduling. If we don't, we can retry directly. */
ldarb w3, [x20]
cbz w3, 6b
/* If we do, another core is interfering, and we must start from the top. */
b 4b
8: /* It's time to switch the thread. */
/* Switch to the highest priority thread. */
mov x0, x19
mov x1, x21
/* Call ams::kern::KScheduler::SwitchThread(ams::kern::KThread *) */
bl _ZN3ams4kern10KScheduler12SwitchThreadEPNS0_7KThreadE
/* Check if we need scheduling. If we don't, then we can't complete the switch and should retry. */
ldarb w1, [x20]
cbnz w1, 10f
/* Restore the thread context. */
mov x0, x22
RESTORE_THREAD_CONTEXT(x0, x1, x2, 9f)
9: /* We're done restoring the thread context, and can return safely. */
ret
10: /* Our switch failed. */
/* We should unlock the thread context, and then retry. */
add x1, x22, #0x280
stlrb wzr, [x1]
b 4b
11: /* The next thread is nullptr! */
/* Switch to nullptr. This will actually switch to the idle thread. */
mov x0, x19
mov x1, #0
/* Call ams::kern::KScheduler::SwitchThread(ams::kern::KThread *) */
bl _ZN3ams4kern10KScheduler12SwitchThreadEPNS0_7KThreadE
12: /* We've switched to the idle thread, so we want to loop until we schedule a non-idle thread. */
/* Check if we need scheduling. */
ldarb w3, [x20]
cbnz w3, 13f
/* If we don't, wait for an interrupt and check again. */
wfi
msr daifclr, #2
msr daifset, #2
b 12b
13: /* We need scheduling again! */
/* Check whether the interrupt task thread needs to be set runnable. */
ldrb w3, [x20, #1]
cbz w3, 4b
/* It does, so do so. We're on the idle thread stack, so there's no register state to preserve. */
bl _ZN3ams4kern10KScheduler30SetInterruptTaskThreadRunnableEv
/* Clear the interrupt task thread as runnable. */
strb wzr, [x20, #1]
/* Retry the scheduling loop. */
b 4b
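For readability, a simplified C++-flavored paraphrase of the control flow the assembly above implements. It is not a drop-in implementation: the real routine runs on the idle thread's stack, uses ldaxrb/stxrb exclusives for the per-thread context lock, and relies on fixed offsets into the scheduling state (needs_scheduling at +0x0, interrupt_task_thread_runnable at +0x1, highest_priority_thread at +0x10, idle_thread_stack at +0x18) that only the assembly encodes:

    /* Paraphrase only -- see the assembly above for the authoritative sequence. */
    void KScheduler::ScheduleImpl() {
        /* Make the interrupt task thread runnable if it was flagged. */
        if (this->state.interrupt_task_thread_runnable) {
            SetInterruptTaskThreadRunnable();
            this->state.interrupt_task_thread_runnable = false;
        }

        /* Acknowledge the scheduling request; nothing to do if the target already runs here. */
        this->state.needs_scheduling = false;
        if (this->state.highest_priority_thread == GetCurrentThreadPointer()) {
            return;
        }

        /* Save the outgoing thread's context (skipped if it has terminated), release its context */
        /* lock, move onto the idle thread stack, and retry until a switch sticks. */
        while (true) {
            KThread *next = this->state.highest_priority_thread;
            if (next != nullptr) {
                /* Acquire next's context lock (spinning, or restarting if scheduling is requested */
                /* again by another core), then switch to it. */
                SwitchThread(next);
                if (!this->state.needs_scheduling) {
                    /* No new request arrived mid-switch: restore next's context and return as it. */
                    return;
                }
                /* A new request arrived: drop the context lock and retry. */
            } else {
                /* No candidate thread: run the idle thread and wfi until scheduling is needed, */
                /* making the interrupt task thread runnable if it gets flagged meanwhile. */
                SwitchThread(nullptr);
            }
            this->state.needs_scheduling = false;
        }
    }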

View file

@ -17,15 +17,261 @@
namespace ams::kern {
KScheduler::KScheduler()
: is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr)
{
this->state.needs_scheduling = true;
this->state.interrupt_task_thread_runnable = false;
this->state.should_count_idle = false;
this->state.idle_count = 0;
this->state.idle_thread_stack = nullptr;
this->state.highest_priority_thread = nullptr;
}
namespace {
ALWAYS_INLINE void IncrementScheduledCount(KThread *thread) {
if (KProcess *parent = thread->GetOwnerProcess(); parent != nullptr) {
/* TODO: parent->IncrementScheduledCount(); */
}
}
}
void KScheduler::Initialize(KThread *idle_thread) {
/* Set core ID and idle thread. */
this->core_id = GetCurrentCoreId();
this->idle_thread = idle_thread;
this->state.idle_thread_stack = this->idle_thread->GetStackTop();
/* Insert the main thread into the priority queue. */
{
KScopedSchedulerLock lk;
GetPriorityQueue().PushBack(GetCurrentThreadPointer());
SetSchedulerUpdateNeeded();
}
/* TODO: Bind interrupt handler. */
}
void KScheduler::Activate() {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
this->state.should_count_idle = false /* TODO: Retrieve from KSystemControl. */;
this->is_active = true;
RescheduleCurrentCore();
}
u64 KScheduler::UpdateHighestPriorityThread(KThread *highest_thread) {
if (KThread *prev_highest_thread = this->state.highest_priority_thread; AMS_LIKELY(prev_highest_thread != highest_thread)) {
if (AMS_LIKELY(prev_highest_thread != nullptr)) {
IncrementScheduledCount(prev_highest_thread);
prev_highest_thread->SetLastScheduledTick(KHardwareTimer::GetTick());
}
if (this->state.should_count_idle) {
if (AMS_LIKELY(highest_thread != nullptr)) {
/* TODO: Set parent process's idle count if it exists. */
} else {
this->state.idle_count++;
}
}
this->state.highest_priority_thread = highest_thread;
this->state.needs_scheduling = true;
return (1ul << this->core_id);
} else {
return 0;
}
}
u64 KScheduler::UpdateHighestPriorityThreadsImpl() {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* Clear that we need to update. */
ClearSchedulerUpdateNeeded();
u64 cores_needing_scheduling = 0, idle_cores = 0;
KThread *top_threads[cpu::NumCores];
auto &priority_queue = GetPriorityQueue();
/* We want to go over all cores, finding the highest priority thread and determining if scheduling is needed for that core. */
for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
KThread *top_thread = priority_queue.GetScheduledFront(core_id);
if (top_thread != nullptr) {
/* If the thread has no waiters, we might prefer a suggestion from the owner process to it. */
if (top_thread->GetNumKernelWaiters() == 0) {
if (KProcess *parent = top_thread->GetOwnerProcess(); parent != nullptr) {
if (KThread *suggested = parent->GetSuggestedTopThread(core_id); suggested != nullptr && suggested != top_thread) {
/* We prefer our parent's suggestion whenever possible. However, we also don't want to schedule un-runnable threads. */
if (suggested->GetRawThreadState() == KThread::ThreadState_Runnable) {
top_thread = suggested;
} else {
top_thread = nullptr;
}
}
}
}
} else {
idle_cores |= (1ul << core_id);
}
top_threads[core_id] = top_thread;
cores_needing_scheduling |= Kernel::GetScheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
}
/* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */
while (idle_cores != 0) {
s32 core_id = __builtin_ctzll(idle_cores);
if (KThread *suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
s32 migration_candidates[cpu::NumCores];
size_t num_candidates = 0;
/* While we have a suggested thread, try to migrate it! */
while (suggested != nullptr) {
/* Check if the suggested thread is the top thread on its core. */
const s32 suggested_core = suggested->GetActiveCore();
if (KThread *top_thread = (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; top_thread != suggested) {
/* Make sure we're not dealing with threads too high priority for migration. */
if (top_thread != nullptr && top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
break;
}
/* The suggested thread isn't bound to its core, so we can migrate it! */
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested);
top_threads[core_id] = suggested;
cores_needing_scheduling |= Kernel::GetScheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
break;
}
/* Note this core as a candidate for migration. */
MESOSPHERE_ASSERT(num_candidates < cpu::NumCores);
migration_candidates[num_candidates++] = suggested_core;
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
/* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our candidate cores' top threads. */
if (suggested == nullptr) {
for (size_t i = 0; i < num_candidates; i++) {
/* Check if there's some other thread that can run on the candidate core. */
const s32 candidate_core = migration_candidates[i];
suggested = top_threads[candidate_core];
if (KThread *next_on_candidate_core = priority_queue.GetScheduledNext(candidate_core, suggested); next_on_candidate_core != nullptr) {
/* The candidate core can run some other thread! We'll migrate its current top thread to us. */
top_threads[candidate_core] = next_on_candidate_core;
cores_needing_scheduling |= Kernel::GetScheduler(candidate_core).UpdateHighestPriorityThread(top_threads[candidate_core]);
/* Perform the migration. */
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(candidate_core, suggested);
top_threads[core_id] = suggested;
cores_needing_scheduling |= Kernel::GetScheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
break;
}
}
}
}
idle_cores &= ~(1ul << core_id);
}
return cores_needing_scheduling;
}
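As a concrete example of the migration pass: suppose core 3 ends up idle (no scheduled thread) while core 0 has thread A scheduled at the front with thread B behind it, and A is also suggested for core 3. The first loop sees that A is core 0's top thread, so it cannot take it directly and records core 0 as a migration candidate. If no other suggestion pans out, the second loop notices that core 0 has B queued behind A, promotes B to core 0's top thread, and migrates A to core 3; both schedulers are then flagged through UpdateHighestPriorityThread so the affected cores reschedule.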
void KScheduler::SetInterruptTaskThreadRunnable() {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
KThread *task_thread = nullptr /* TODO: GetInterruptTaskManager().GetThread() */;
{
KScopedSchedulerLock sl;
if (AMS_LIKELY(task_thread->GetThreadState() == KThread::ThreadState_Waiting)) {
task_thread->SetState(KThread::ThreadState_Runnable);
}
}
}
void KScheduler::SwitchThread(KThread *next_thread) {
KProcess *cur_process = GetCurrentProcessPointer();
KThread *cur_thread = GetCurrentThreadPointer();
/* We never want to schedule a null thread, so use the idle thread if we don't have a next. */
if (next_thread == nullptr) {
next_thread = this->idle_thread;
}
/* If we're not actually switching thread, there's nothing to do. */
if (next_thread == cur_thread) {
return;
}
/* Next thread is now known not to be nullptr, and must not be dispatchable. */
MESOSPHERE_ASSERT(next_thread->GetDisableDispatchCount() == 1);
/* Update the CPU time tracking variables. */
const s64 prev_tick = this->last_context_switch_time;
const s64 cur_tick = KHardwareTimer::GetTick();
const s64 tick_diff = cur_tick - prev_tick;
cur_thread->AddCpuTime(tick_diff);
if (cur_process != nullptr) {
/* TODO: cur_process->AddCpuTime(tick_diff); */
}
this->last_context_switch_time = cur_tick;
/* Update our previous thread. */
if (cur_process != nullptr) {
/* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */
if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == this->core_id)) {
this->prev_thread = cur_thread;
} else {
this->prev_thread = nullptr;
}
} else if (cur_thread == this->idle_thread) {
this->prev_thread = nullptr;
}
/* Switch the current process, if we're switching processes. */
if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) {
/* TODO: KProcess::Switch */
}
/* Set the new Thread Local region. */
cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
}
void KScheduler::OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* Check if the state has changed, because if it hasn't there's nothing to do. */
const KThread::ThreadState cur_state = thread->GetRawThreadState();
if (cur_state == old_state) {
return;
}
/* Update the priority queues. */
if (old_state == KThread::ThreadState_Runnable) {
/* If we were previously runnable, then we're not runnable now, and we should remove. */
GetPriorityQueue().Remove(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded();
} else if (cur_state == KThread::ThreadState_Runnable) {
/* If we're now runnable, then we weren't previously, and we should add. */
GetPriorityQueue().PushBack(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded();
}
}
void KScheduler::OnThreadPriorityChanged(KThread *thread, s32 old_priority) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* If the thread is runnable, we want to change its priority in the queue. */
if (thread->GetRawThreadState() == KThread::ThreadState_Runnable) {
GetPriorityQueue().ChangePriority(old_priority, thread == GetCurrentThreadPointer(), thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded();
}
}
void KScheduler::OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* If the thread is runnable, we want to change its affinity in the queue. */
if (thread->GetRawThreadState() == KThread::ThreadState_Runnable) {
GetPriorityQueue().ChangeAffinityMask(old_core, old_affinity, thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded();
}
}
}

View file

@ -202,4 +202,8 @@ namespace ams::kern {
/* TODO */
}
KThreadContext *KThread::GetContextForSchedulerLoop() {
return std::addressof(this->thread_context);
}
}
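KThread::SetState is declared in the header changes above, but its body is not part of this excerpt; the scheduler-notification path it has to take is worth sketching. A plausible shape, under the assumption that it swaps the masked state under the scheduler lock and defers to KScheduler::OnThreadStateChanged (the real implementation may differ):

    void KThread::SetState(ThreadState state) {
        MESOSPHERE_ASSERT_THIS();

        KScopedSchedulerLock sl;

        /* Swap in the new masked state, preserving any flag bits outside ThreadState_Mask. */
        const ThreadState old_state = this->thread_state;
        this->thread_state = static_cast<ThreadState>((old_state & ~ThreadState_Mask) | (state & ThreadState_Mask));

        /* Let the scheduler update its queues if the masked state actually changed. */
        if (this->thread_state != old_state) {
            KScheduler::OnThreadStateChanged(this, old_state);
        }
    }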

View file

@ -61,7 +61,10 @@ namespace ams::kern {
SetCurrentThread(main_thread);
SetCurrentProcess(nullptr);
/* TODO: We also want to initialize the scheduler/interrupt manager/hardware timer. */
/* TODO: Initialize the interrupt manager. */
GetInterruptManager().Initialize(core_id);
GetHardwareTimer().Initialize(core_id);
GetScheduler().Initialize(idle_thread);
}
}