kern: implement KSchedulerLock

This commit is contained in:
Michael Scire 2020-01-31 16:25:17 -08:00
parent 797c04d19f
commit 57222e8301
4 changed files with 160 additions and 0 deletions

View file

@@ -17,6 +17,7 @@
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_k_priority_queue.hpp>
#include <mesosphere/kern_k_scheduler_lock.hpp>
namespace ams::kern {
@@ -25,10 +26,14 @@ namespace ams::kern {
static_assert(KSchedulerPriorityQueue::NumCores == cpu::NumCores);
static_assert(KSchedulerPriorityQueue::NumPriority == BITSIZEOF(u64));
class KScopedSchedulerLock;
class KScheduler {
NON_COPYABLE(KScheduler);
NON_MOVEABLE(KScheduler);
public:
using LockType = KAbstractSchedulerLock<KScheduler>;
struct SchedulingState {
std::atomic<bool> needs_scheduling;
bool interrupt_task_thread_runnable;
@@ -37,6 +42,9 @@ namespace ams::kern {
KThread *highest_priority_thread;
void *idle_thread_stack;
};
private:
friend class KScopedSchedulerLock;
static inline LockType s_scheduler_lock;
private:
SchedulingState state;
bool is_active;
@@ -47,6 +55,18 @@
public:
KScheduler();
/* TODO: Actually implement KScheduler. This is a placeholder. */
public:
/* API used by KSchedulerLock */
static void DisableScheduling();
static void EnableScheduling();
static u64 UpdateHighestPriorityThreads();
static void EnableSchedulingAndSchedule(u64 cores_needing_scheduling);
};
/* RAII guard which holds the global scheduler lock for the duration of its scope. */
class KScopedSchedulerLock {
public:
/* Acquire the scheduler lock on construction. */
ALWAYS_INLINE KScopedSchedulerLock() {
KScheduler::s_scheduler_lock.Lock();
}
/* Release the scheduler lock on destruction. */
ALWAYS_INLINE ~KScopedSchedulerLock() {
KScheduler::s_scheduler_lock.Unlock();
}
};
}

View file

@@ -0,0 +1,111 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_current_context.hpp>
namespace ams::kern {
class KThread;
/*
TODO: C++20
template<typename T>
concept KSchedulerLockable = !std::is_reference<T>::value && requires {
{ T::DisableScheduling() } -> std::same_as<void>;
{ T::EnableScheduling() } -> std::same_as<void>;
{ T::UpdateHighestPriorityThreads() } -> std::convertible_to<u64>;
{ T::EnableSchedulingAndSchedule(std::declval<u64>()) } -> std::same_as<void>;
};
*/
/* Reentrant lock which couples mutual exclusion with scheduling control: */
/* acquiring it disables scheduling, and the final release re-enables     */
/* scheduling (and, via Unlock(), reschedules the cores that need it).    */
/* SchedulerType supplies the scheduling hooks; see the KSchedulerLockable */
/* concept sketch above for the required interface.                        */
template<typename SchedulerType> /* TODO C++20: requires KSchedulerLockable<SchedulerType> */
class KAbstractSchedulerLock {
private:
KAlignedSpinLock spin_lock; /* Underlying spinlock providing mutual exclusion. */
s32 lock_count; /* Recursion depth; > 0 exactly while owner_thread holds the lock. */
KThread *owner_thread; /* Thread currently holding the lock; nullptr when free. */
public:
constexpr ALWAYS_INLINE KAbstractSchedulerLock() : spin_lock(), lock_count(0), owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); }
/* Returns whether the calling thread currently owns this lock. */
ALWAYS_INLINE bool IsLockedByCurrentThread() const {
MESOSPHERE_ASSERT_THIS();
return this->owner_thread == GetCurrentThreadPointer();
}
/* Acquires the lock, recursively if the calling thread already owns it. */
/* On first acquisition, scheduling is disabled before the spinlock is    */
/* taken, so the holder cannot be preempted while inside the lock.        */
ALWAYS_INLINE void Lock() {
MESOSPHERE_ASSERT_THIS();
if (this->IsLockedByCurrentThread()) {
/* If we already own the lock, we can just increment the count. */
MESOSPHERE_ASSERT(this->lock_count > 0);
this->lock_count++;
} else {
/* Otherwise, we want to disable scheduling and acquire the spinlock. */
SchedulerType::DisableScheduling();
this->spin_lock.Lock();
/* For debug, ensure that our state is valid. */
MESOSPHERE_ASSERT(this->lock_count == 0);
MESOSPHERE_ASSERT(this->owner_thread == nullptr);
/* Increment count, take ownership. */
this->lock_count = 1;
this->owner_thread = GetCurrentThreadPointer();
}
}
/* Releases one level of the lock. When the last level is released, the   */
/* cores needing scheduling are computed (while still under the spinlock, */
/* so the result is consistent), the spinlock is dropped, and a           */
/* reschedule is performed.                                               */
ALWAYS_INLINE void Unlock() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(this->lock_count > 0);
/* Release an instance of the lock. */
if ((--this->lock_count) == 0) {
/* We're no longer going to hold the lock. Take note of what cores need scheduling. */
const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads();
/* Note that we no longer hold the lock, and unlock the spinlock. */
this->owner_thread = nullptr;
this->spin_lock.Unlock();
/* Enable scheduling, and perform a rescheduling operation. */
SchedulerType::EnableSchedulingAndSchedule(cores_needing_scheduling);
}
}
/* Releases one level of the lock like Unlock(), but on final release     */
/* only re-enables scheduling without updating the highest-priority       */
/* threads or triggering a reschedule.                                    */
ALWAYS_INLINE void UnlockWithoutRescheduling() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(this->lock_count > 0);
/* Release an instance of the lock. */
if ((--this->lock_count) == 0) {
/* Note that we no longer hold the lock, and unlock the spinlock. */
this->owner_thread = nullptr;
this->spin_lock.Unlock();
/* Enable scheduling, but do not perform a rescheduling operation. */
SchedulerType::EnableScheduling();
}
}
};
}

View file

@@ -23,6 +23,7 @@
#include "util/util_bitpack.hpp"
#include "util/util_bitset.hpp"
#include "util/util_scope_guard.hpp"
#include "util/util_specialization_of.hpp"
#include "util/util_typed_storage.hpp"
#include "util/util_intrusive_list.hpp"
#include "util/util_intrusive_red_black_tree.hpp"

View file

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "../defines.hpp"
namespace ams::util {
/* Trait which detects whether T is a specialization of the given class     */
/* template, e.g. is_specialization_of<std::vector<int>, std::vector>.      */
/* Note: only templates taking exclusively type parameters are matched (the */
/* partial specialization deduces `class... Args`), so templates with       */
/* non-type parameters such as std::array are not detected.                 */
template<class T, template <class...> class Template>
struct is_specialization_of : std::false_type{};
template<template <class...> class Template, class... Args>
struct is_specialization_of<Template<Args...>, Template> : std::true_type{};
/* Convenience variable template, mirroring the standard library's _v traits. */
template<class T, template <class...> class Template>
constexpr inline bool is_specialization_of_v = is_specialization_of<T, Template>::value;
}