Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-12-22 12:21:18 +00:00)

kern: kill the interrupt task manager thread

This commit is contained in: parent 29cc3d1c09, commit cb28150912
9 changed files with 112 additions and 140 deletions
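In short: the dedicated interrupt task manager thread is gone. Runnable interrupt tasks are now drained directly by the scheduler's idle loop, on the idle thread's stack, via the new KInterruptTaskManager::DoTasks(); the ticks spent doing so accumulate in m_cpu_time and are subtracted back out of the idle thread's CPU time when the idle tick count is queried, so reported idle time still excludes interrupt work.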
@@ -156,7 +156,9 @@
 /* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */
 /* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */
 #define KSCHEDULER_NEEDS_SCHEDULING 0x00
-#define KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE 0x01
+#define KSCHEDULER_INTERRUPT_TASK_RUNNABLE 0x01
 #define KSCHEDULER_HIGHEST_PRIORITY_THREAD 0x10
 #define KSCHEDULER_IDLE_THREAD_STACK 0x18
+#define KSCHEDULER_PREVIOUS_THREAD 0x20
+#define KSCHEDULER_INTERRUPT_TASK_MANAGER 0x28
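For orientation, these values fall out of the natural LP64 layout of the SchedulingState struct shown later in this diff. A standalone sketch on a typical LP64 toolchain (the mirror struct and asserts below are illustrative, not from the commit):

#include <atomic>
#include <cstddef>
#include <cstdint>

/* Stand-in mirroring ams::kern::KScheduler::SchedulingState, which sits at +0x0 of KScheduler. */
struct SchedulingStateMirror {
    std::atomic<std::uint8_t> needs_scheduling; /* 0x00: polled with ldarb, which accepts no immediate offset */
    bool interrupt_task_runnable;               /* 0x01 */
    bool should_count_idle;                     /* 0x02 */
    std::uint64_t idle_count;                   /* 0x08, after padding to 8-byte alignment */
    void *highest_priority_thread;              /* 0x10 */
    void *idle_thread_stack;                    /* 0x18 */
    void *prev_thread;                          /* 0x20: the new KSCHEDULER_PREVIOUS_THREAD */
    void *interrupt_task_manager;               /* 0x28: the new KSCHEDULER_INTERRUPT_TASK_MANAGER */
};

static_assert(offsetof(SchedulingStateMirror, needs_scheduling)        == 0x00);
static_assert(offsetof(SchedulingStateMirror, interrupt_task_runnable) == 0x01);
static_assert(offsetof(SchedulingStateMirror, highest_priority_thread) == 0x10);
static_assert(offsetof(SchedulingStateMirror, idle_thread_stack)       == 0x18);
static_assert(offsetof(SchedulingStateMirror, prev_thread)             == 0x20);
static_assert(offsetof(SchedulingStateMirror, interrupt_task_manager)  == 0x28);

The NOTE about ldarb is why needs_scheduling is pinned at 0x00: AArch64's ldarb (load-acquire byte) only supports a plain base register with no immediate offset, so the flag must live exactly where the scheduler pointer points.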
@@ -27,28 +27,25 @@ namespace ams::kern {
     KInterruptTask *m_head;
     KInterruptTask *m_tail;
 public:
-    constexpr TaskQueue() : m_head(nullptr), m_tail(nullptr) { /* ... */ }
+    constexpr ALWAYS_INLINE TaskQueue() : m_head(nullptr), m_tail(nullptr) { /* ... */ }

-    constexpr KInterruptTask *GetHead() { return m_head; }
-    constexpr bool IsEmpty() const { return m_head == nullptr; }
-    constexpr void Clear() { m_head = nullptr; m_tail = nullptr; }
+    constexpr ALWAYS_INLINE KInterruptTask *GetHead() { return m_head; }
+    constexpr ALWAYS_INLINE bool IsEmpty() const { return m_head == nullptr; }
+    constexpr ALWAYS_INLINE void Clear() { m_head = nullptr; m_tail = nullptr; }

     void Enqueue(KInterruptTask *task);
     void Dequeue();
 };
 private:
     TaskQueue m_task_queue;
-    KThread *m_thread;
+    s64 m_cpu_time;
-private:
-    static void ThreadFunction(uintptr_t arg);
-    void ThreadFunctionImpl();
 public:
-    constexpr KInterruptTaskManager() : m_task_queue(), m_thread(nullptr) { /* ... */ }
+    constexpr KInterruptTaskManager() : m_task_queue(), m_cpu_time(0) { /* ... */ }

-    constexpr KThread *GetThread() const { return m_thread; }
+    constexpr ALWAYS_INLINE s64 GetCpuTime() const { return m_cpu_time; }

-    NOINLINE void Initialize();
     void EnqueueTask(KInterruptTask *task);
+    void DoTasks();
 };

 }
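Enqueue and Dequeue stay out of line, and their bodies are not part of this diff. A minimal self-contained sketch of the intrusive singly linked queue the declarations describe (the stand-in Task type with an embedded next pointer is an assumption; the real KInterruptTask's link API may differ):

/* Sketch of an intrusive FIFO: tasks carry their own link, so enqueueing
   in interrupt context needs no allocation. */
struct Task {
    Task *next = nullptr;
};

class TaskQueue {
    private:
        Task *m_head = nullptr;
        Task *m_tail = nullptr;
    public:
        constexpr Task *GetHead() const { return m_head; }
        constexpr bool IsEmpty() const { return m_head == nullptr; }

        constexpr void Enqueue(Task *task) {
            /* Link the task at the tail. */
            if (m_tail != nullptr) {
                m_tail->next = task;
            } else {
                m_head = task;
            }
            m_tail = task;
        }

        constexpr void Dequeue() {
            /* Pop the head; callers read GetHead() first, mirroring the
               kernel's GetHead()/Dequeue() split. */
            Task * const head = m_head;
            m_head = head->next;
            if (m_head == nullptr) {
                m_tail = nullptr;
            }
            head->next = nullptr;
        }
};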
@@ -17,6 +17,7 @@
 #include <mesosphere/kern_select_cpu.hpp>
 #include <mesosphere/kern_k_thread.hpp>
 #include <mesosphere/kern_k_priority_queue.hpp>
+#include <mesosphere/kern_k_interrupt_task_manager.hpp>
 #include <mesosphere/kern_k_scheduler_lock.hpp>

 namespace ams::kern {
@@ -39,11 +40,13 @@ namespace ams::kern {

 struct SchedulingState {
     std::atomic<u8> needs_scheduling;
-    bool interrupt_task_thread_runnable;
+    bool interrupt_task_runnable;
     bool should_count_idle;
     u64 idle_count;
     KThread *highest_priority_thread;
     void *idle_thread_stack;
+    KThread *prev_thread;
+    KInterruptTaskManager *interrupt_task_manager;
 };
 private:
 friend class KScopedSchedulerLock;
@@ -53,28 +56,29 @@
 SchedulingState m_state;
 bool m_is_active;
 s32 m_core_id;
-KThread *m_prev_thread;
 s64 m_last_context_switch_time;
 KThread *m_idle_thread;
 std::atomic<KThread *> m_current_thread;
 public:
 constexpr KScheduler()
-    : m_state(), m_is_active(false), m_core_id(0), m_prev_thread(nullptr), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr)
+    : m_state(), m_is_active(false), m_core_id(0), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr)
 {
     m_state.needs_scheduling = true;
-    m_state.interrupt_task_thread_runnable = false;
+    m_state.interrupt_task_runnable = false;
     m_state.should_count_idle = false;
     m_state.idle_count = 0;
     m_state.idle_thread_stack = nullptr;
     m_state.highest_priority_thread = nullptr;
+    m_state.prev_thread = nullptr;
+    m_state.interrupt_task_manager = nullptr;
 }

 NOINLINE void Initialize(KThread *idle_thread);
 NOINLINE void Activate();

 ALWAYS_INLINE void SetInterruptTaskRunnable() {
-    m_state.interrupt_task_thread_runnable = true;
+    m_state.interrupt_task_runnable = true;
     m_state.needs_scheduling = true;
 }

 ALWAYS_INLINE void RequestScheduleOnInterrupt() {
@@ -94,7 +98,7 @@
 }

 ALWAYS_INLINE KThread *GetPreviousThread() const {
-    return m_prev_thread;
+    return m_state.prev_thread;
 }

 ALWAYS_INLINE KThread *GetSchedulerCurrentThread() const {
@@ -108,8 +112,6 @@
 /* Static private API. */
 static ALWAYS_INLINE KSchedulerPriorityQueue &GetPriorityQueue() { return s_priority_queue; }
 static NOINLINE u64 UpdateHighestPriorityThreadsImpl();

-static NOINLINE void InterruptTaskThreadToRunnable();
 public:
 /* Static public API. */
 static ALWAYS_INLINE bool CanSchedule() { return GetCurrentThread().GetDisableDispatchCount() == 0; }
@@ -124,13 +126,14 @@
     GetCurrentThread().DisableDispatch();
 }

-static NOINLINE void EnableScheduling(u64 cores_needing_scheduling) {
+static ALWAYS_INLINE void EnableScheduling(u64 cores_needing_scheduling) {
     MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 1);

+    GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling);
+
     if (GetCurrentThread().GetDisableDispatchCount() > 1) {
         GetCurrentThread().EnableDispatch();
     } else {
-        GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling);
         GetCurrentScheduler().RescheduleCurrentCore();
     }
 }
@@ -176,14 +179,23 @@

 ALWAYS_INLINE void RescheduleCurrentCore() {
     MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
-    {
-        /* Disable interrupts, and then context switch. */
-        KScopedInterruptDisable intr_disable;
-        ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); };
-
-        if (m_state.needs_scheduling.load()) {
-            Schedule();
-        }
+    GetCurrentThread().EnableDispatch();
+    if (m_state.needs_scheduling.load()) {
+        /* Disable interrupts, and then check again if rescheduling is needed. */
+        KScopedInterruptDisable intr_disable;
+
+        GetCurrentScheduler().RescheduleCurrentCoreImpl();
     }
 }
+
+ALWAYS_INLINE void RescheduleCurrentCoreImpl() {
+    /* Check that scheduling is needed. */
+    if (AMS_LIKELY(m_state.needs_scheduling.load())) {
+        GetCurrentThread().DisableDispatch();
+        this->Schedule();
+        GetCurrentThread().EnableDispatch();
+    }
+}
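A note on this rework: RescheduleCurrentCore() now re-enables dispatch up front and only enters the interrupt-disabled region when needs_scheduling is already set, rather than masking interrupts unconditionally before checking. RescheduleCurrentCoreImpl() then re-checks the flag under KScopedInterruptDisable, presumably because an interrupt arriving between the unguarded check and the mask may already have serviced the reschedule.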
@@ -199,10 +211,12 @@
 };

 consteval bool KScheduler::ValidateAssemblyOffsets() {
     static_assert(__builtin_offsetof(KScheduler, m_state.needs_scheduling) == KSCHEDULER_NEEDS_SCHEDULING);
-    static_assert(__builtin_offsetof(KScheduler, m_state.interrupt_task_thread_runnable) == KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE);
+    static_assert(__builtin_offsetof(KScheduler, m_state.interrupt_task_runnable) == KSCHEDULER_INTERRUPT_TASK_RUNNABLE);
     static_assert(__builtin_offsetof(KScheduler, m_state.highest_priority_thread) == KSCHEDULER_HIGHEST_PRIORITY_THREAD);
     static_assert(__builtin_offsetof(KScheduler, m_state.idle_thread_stack) == KSCHEDULER_IDLE_THREAD_STACK);
+    static_assert(__builtin_offsetof(KScheduler, m_state.prev_thread) == KSCHEDULER_PREVIOUS_THREAD);
+    static_assert(__builtin_offsetof(KScheduler, m_state.interrupt_task_manager) == KSCHEDULER_INTERRUPT_TASK_MANAGER);

     return true;
 }
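ValidateAssemblyOffsets() is the C++-side guard that keeps the assembly offset macros honest. A minimal standalone demonstration of the consteval-validator idiom (stand-in struct and macro; where exactly the real kernel invokes its validator is not shown in this hunk):

#include <cstddef>

#define LAYOUT_B_OFFSET 0x08

struct Layout {
    char a; /* 0x00 */
    long b; /* 0x08 on LP64 */
};

consteval bool ValidateLayout() {
    /* The static_asserts fire at compile time; consteval guarantees
       the function can never generate runtime code. */
    static_assert(offsetof(Layout, a) == 0x00);
    static_assert(offsetof(Layout, b) == LAYOUT_B_OFFSET);
    return true;
}

/* Force evaluation at namespace scope. */
static_assert(ValidateLayout());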
@@ -45,7 +45,7 @@ namespace ams::kern {
     return m_owner_thread == GetCurrentThreadPointer();
 }

-void Lock() {
+NOINLINE void Lock() {
     MESOSPHERE_ASSERT_THIS();

     if (this->IsLockedByCurrentThread()) {
@@ -67,7 +67,7 @@
     }
 }

-void Unlock() {
+NOINLINE void Unlock() {
     MESOSPHERE_ASSERT_THIS();
     MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
     MESOSPHERE_ASSERT(m_lock_count > 0);
@@ -59,50 +59,6 @@ namespace ams::kern {
 #endif
 }

-void KInterruptTaskManager::ThreadFunction(uintptr_t arg) {
-    reinterpret_cast<KInterruptTaskManager *>(arg)->ThreadFunctionImpl();
-}
-
-void KInterruptTaskManager::ThreadFunctionImpl() {
-    MESOSPHERE_ASSERT_THIS();
-
-    while (true) {
-        /* Get a task. */
-        KInterruptTask *task = nullptr;
-        {
-            KScopedInterruptDisable di;
-
-            task = m_task_queue.GetHead();
-            if (task == nullptr) {
-                m_thread->SetState(KThread::ThreadState_Waiting);
-                continue;
-            }
-
-            m_task_queue.Dequeue();
-        }
-
-        /* Do the task. */
-        task->DoTask();
-
-        /* Destroy any objects we may need to close. */
-        m_thread->DestroyClosedObjects();
-    }
-}
-
-void KInterruptTaskManager::Initialize() {
-    /* Reserve a thread from the system limit. */
-    MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
-
-    /* Create and initialize the thread. */
-    m_thread = KThread::Create();
-    MESOSPHERE_ABORT_UNLESS(m_thread != nullptr);
-    MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeHighPriorityThread(m_thread, ThreadFunction, reinterpret_cast<uintptr_t>(this)));
-    KThread::Register(m_thread);
-
-    /* Run the thread. */
-    m_thread->Run();
-}
-
 void KInterruptTaskManager::EnqueueTask(KInterruptTask *task) {
     MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
@@ -111,4 +67,24 @@ namespace ams::kern {
     Kernel::GetScheduler().SetInterruptTaskRunnable();
 }

+void KInterruptTaskManager::DoTasks() {
+    /* Execute pending tasks. */
+    const s64 start_time = KHardwareTimer::GetTick();
+    for (KInterruptTask *task = m_task_queue.GetHead(); task != nullptr; task = m_task_queue.GetHead()) {
+        /* Dequeue the task. */
+        m_task_queue.Dequeue();
+
+        /* Do the task with interrupts temporarily enabled. */
+        {
+            KScopedInterruptEnable ei;
+
+            task->DoTask();
+        }
+    }
+    const s64 end_time = KHardwareTimer::GetTick();
+
+    /* Increment the time we've spent executing. */
+    m_cpu_time += end_time - start_time;
+}

 }
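Two details worth noting in DoTasks(): each task body runs under KScopedInterruptEnable, so interrupts are only masked while the queue itself is manipulated; and the tick delta across the whole drain accumulates into m_cpu_time, which the svc hunk below subtracts from the idle thread's CPU time so the reported idle tick count excludes time spent in interrupt tasks.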
@@ -55,10 +55,11 @@
 }

 void KScheduler::Initialize(KThread *idle_thread) {
-    /* Set core ID and idle thread. */
+    /* Set core ID/idle thread/interrupt task manager. */
     m_core_id = GetCurrentCoreId();
     m_idle_thread = idle_thread;
     m_state.idle_thread_stack = m_idle_thread->GetStackTop();
+    m_state.interrupt_task_manager = std::addressof(Kernel::GetInterruptTaskManager());

     /* Insert the main thread into the priority queue. */
     {
@@ -212,19 +213,9 @@
     return cores_needing_scheduling;
 }

-void KScheduler::InterruptTaskThreadToRunnable() {
-    MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
-
-    KThread *task_thread = Kernel::GetInterruptTaskManager().GetThread();
-    {
-        KScopedSchedulerLock sl;
-        task_thread->SetState(KThread::ThreadState_Runnable);
-    }
-}
-
 void KScheduler::SwitchThread(KThread *next_thread) {
-    KProcess *cur_process = GetCurrentProcessPointer();
-    KThread *cur_thread = GetCurrentThreadPointer();
+    KProcess * const cur_process = GetCurrentProcessPointer();
+    KThread * const cur_thread = GetCurrentThreadPointer();

     /* We never want to schedule a null thread, so use the idle thread if we don't have a next. */
     if (next_thread == nullptr) {
@@ -257,12 +248,10 @@
     if (cur_process != nullptr) {
         /* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */
         if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == m_core_id)) {
-            m_prev_thread = cur_thread;
+            m_state.prev_thread = cur_thread;
         } else {
-            m_prev_thread = nullptr;
+            m_state.prev_thread = nullptr;
         }
-    } else if (cur_thread == m_idle_thread) {
-        m_prev_thread = nullptr;
     }

     MESOSPHERE_KTRACE_THREAD_SWITCH(next_thread);
@@ -284,7 +273,7 @@
     MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
     for (size_t i = 0; i < cpu::NumCores; ++i) {
         /* Get an atomic reference to the core scheduler's previous thread. */
-        std::atomic_ref<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_prev_thread);
+        std::atomic_ref<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_state.prev_thread);
         static_assert(std::atomic_ref<KThread *>::is_always_lock_free);

         /* Atomically clear the previous thread if it's our target. */
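The hunk cuts off before the clear itself; presumably it compare-exchanges the slot against the target thread. A self-contained sketch of that std::atomic_ref pattern (stand-in types; an assumption, not code from this diff):

#include <atomic>

struct KThreadStub {};

void ClearPreviousThreadSketch(KThreadStub *&slot, KThreadStub *target) {
    /* atomic_ref lets us touch a plain member atomically without
       making the member itself std::atomic. */
    std::atomic_ref<KThreadStub *> prev_thread(slot);

    /* Clear the slot only if it still points at the target thread. */
    KThreadStub *expected = target;
    prev_thread.compare_exchange_strong(expected, nullptr);
}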
@@ -99,7 +99,6 @@ namespace ams::kern {
 DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
     KThread::Register(std::addressof(Kernel::GetMainThread(core_id)));
     KThread::Register(std::addressof(Kernel::GetIdleThread(core_id)));
-    Kernel::GetInterruptTaskManager().Initialize();
 });

 /* Activate the scheduler and enable interrupts. */
@@ -189,7 +189,7 @@ namespace ams::kern::svc {
 R_UNLESS(core_valid, svc::ResultInvalidCombination());

 /* Get the idle tick count. */
-*out = Kernel::GetScheduler().GetIdleThread()->GetCpuTime();
+*out = Kernel::GetScheduler().GetIdleThread()->GetCpuTime() - Kernel::GetInterruptTaskManager().GetCpuTime();
 }
 break;
 case ams::svc::InfoType_RandomEntropy:
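The subtraction keeps the reported idle ticks meaningful now that interrupt tasks execute on the idle thread: the idle thread's CPU time includes every tick spent in DoTasks(), and m_cpu_time holds exactly those ticks. A worked example with made-up numbers:

#include <cstdint>

int main() {
    /* Illustrative values, not measurements. */
    const std::int64_t idle_thread_ticks    = 1'000'000; /* includes time spent in DoTasks() */
    const std::int64_t interrupt_task_ticks =    25'000; /* accumulated in m_cpu_time */

    /* What the svc query now reports as the idle tick count. */
    const std::int64_t reported_idle_ticks = idle_thread_ticks - interrupt_task_ticks;
    return reported_idle_ticks == 975'000 ? 0 : 1;
}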
@@ -112,29 +112,16 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv:
 /* KScheduler layout has state at +0x0, this is guaranteed statically by assembly offsets. */
 mov x1, x0

-/* First thing we want to do is check whether the interrupt task thread is runnable. */
-ldrb w3, [x1, #(KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE)]
-cbz w3, 0f
-
-/* If it is, we want to call KScheduler::InterruptTaskThreadToRunnable() to change its state to runnable. */
-stp x0, x1, [sp, #-16]!
-stp x30, xzr, [sp, #-16]!
-bl _ZN3ams4kern10KScheduler29InterruptTaskThreadToRunnableEv
-ldp x30, xzr, [sp], 16
-ldp x0, x1, [sp], 16
-
-/* Clear the interrupt task thread as runnable. */
-strb wzr, [x1, #(KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE)]
-
-0: /* Interrupt task thread runnable checked. */
-/* Now we want to check if there's any scheduling to do. */
-
 /* First, clear the need's scheduling bool (and dmb ish after, as it's an atomic). */
 /* TODO: Should this be a stlrb? Nintendo does not do one. */
 strb wzr, [x1]
 dmb ish

-/* Check if the highest priority thread is the same as the current thread. */
+/* Check whether there are runnable interrupt tasks. */
+ldrb w8, [x1, #(KSCHEDULER_INTERRUPT_TASK_RUNNABLE)]
+cbnz w8, 0f
+
+/* If it isn't, we want to check if the highest priority thread is the same as the current thread. */
 ldr x7, [x1, #(KSCHEDULER_HIGHEST_PRIORITY_THREAD)]
 cmp x7, x18
 b.ne 1f
@@ -142,6 +129,10 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv:
 /* If they're the same, then we can just return as there's nothing to do. */
 ret

+0: /* The interrupt task thread is runnable. */
+/* We want to switch to the interrupt task/idle thread. */
+mov x7, #0
+
 1: /* The highest priority thread is not the same as the current thread. */
 /* Get a reference to the current thread's stack parameters. */
 add x2, sp, #0x1000
@@ -271,12 +262,19 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv:
 /* Call ams::kern::KScheduler::SwitchThread(ams::kern::KThread *) */
 bl _ZN3ams4kern10KScheduler12SwitchThreadEPNS0_7KThreadE

-12: /* We've switched to the idle thread, so we want to loop until we schedule a non-idle thread. */
-/* Check if we need scheduling. */
-ldarb w3, [x20] // ldarb w3, [x20, #(KSCHEDULER_NEEDS_SCHEDULING)]
+12: /* We've switched to the idle thread, so we want to process interrupt tasks until we schedule a non-idle thread. */
+/* Check whether there are runnable interrupt tasks. */
+ldrb w3, [x20, #(KSCHEDULER_INTERRUPT_TASK_RUNNABLE)]
 cbnz w3, 13f

-/* If we don't, wait for an interrupt and check again. */
+/* Check if we need scheduling. */
+ldarb w3, [x20] // ldarb w3, [x20, #(KSCHEDULER_NEEDS_SCHEDULING)]
+cbnz w3, 4b
+
+/* Clear the previous thread. */
+str xzr, [x20, #(KSCHEDULER_PREVIOUS_THREAD)]
+
+/* Wait for an interrupt and check again. */
 wfi

 msr daifclr, #2
@@ -284,16 +282,13 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv:

 b 12b

-13: /* We need scheduling again! */
-/* Check whether the interrupt task thread needs to be set runnable. */
-ldrb w3, [x20, #(KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE)]
-cbz w3, 4b
-
-/* It does, so do so. We're using the idle thread stack so no register state preserve needed. */
-bl _ZN3ams4kern10KScheduler29InterruptTaskThreadToRunnableEv
+13: /* We have interrupt tasks to execute! */
+/* Execute any pending interrupt tasks. */
+ldr x0, [x20, #(KSCHEDULER_INTERRUPT_TASK_MANAGER)]
+bl _ZN3ams4kern21KInterruptTaskManager7DoTasksEv

 /* Clear the interrupt task thread as runnable. */
-strb wzr, [x20, #(KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE)]
+strb wzr, [x20, #(KSCHEDULER_INTERRUPT_TASK_RUNNABLE)]

 /* Retry the scheduling loop. */
 b 4b
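Putting the assembly changes together, the reworked idle loop at labels 12/13 behaves roughly like this C++-flavored sketch (stand-in types and helpers; a reading of the control flow above, not code from the commit):

#include <atomic>
#include <cstdint>

/* Stand-ins for the real kernel types. */
struct InterruptTaskManagerStub { void DoTasks() { /* drain the task queue */ } };

struct SchedulerStub {
    std::atomic<std::uint8_t> needs_scheduling{0};
    bool interrupt_task_runnable = false;
    void *prev_thread = nullptr;
    InterruptTaskManagerStub *interrupt_task_manager = nullptr;

    void WaitForInterrupt() { /* wfi; msr daifclr, #2; handle; mask again */ }

    /* Runs on the idle thread's stack until scheduling is needed again. */
    void IdleLoop() {
        while (true) {
            /* 12: check whether there are runnable interrupt tasks. */
            if (interrupt_task_runnable) {
                /* 13: execute pending tasks directly; no helper thread involved. */
                interrupt_task_manager->DoTasks();
                interrupt_task_runnable = false;
                return; /* b 4b: retry the scheduling loop */
            }

            /* Check if we need scheduling (ldarb: load-acquire at offset 0). */
            if (needs_scheduling.load(std::memory_order_acquire)) {
                return; /* cbnz w3, 4b */
            }

            /* Nothing to do: clear the previous thread and wait for an interrupt. */
            prev_thread = nullptr;
            WaitForInterrupt();
        }
    }
};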