kern: update scheduler for 13.0.0 change, fix some ctz/clz bugs

Michael Scire 2021-10-19 01:20:28 -07:00
parent 42b6c2dd95
commit f3b532070b
9 changed files with 22 additions and 56 deletions

View file

@@ -248,8 +248,9 @@ namespace ams::kern::arch::arm {
                 return id;
             }
         private:
-            static constexpr size_t PriorityShift = BITSIZEOF(u8) - __builtin_ctz(NumPriorityLevels);
+            static constexpr size_t PriorityShift = BITSIZEOF(u8) - util::CountTrailingZeros(NumPriorityLevels);
             static_assert(PriorityShift < BITSIZEOF(u8));
+            static_assert(util::IsPowerOfTwo(NumPriorityLevels));

             static constexpr ALWAYS_INLINE u8 ToGicPriorityValue(s32 level) {
                 return (level << PriorityShift) | ((1 << PriorityShift) - 1);
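
The switch to util::CountTrailingZeros pairs with the new power-of-two assertion: the trailing-zero count of NumPriorityLevels is only its log2 when the level count is a power of two. A hand-worked sketch of the shift, assuming purely for illustration that NumPriorityLevels is 4 (the real constant is defined elsewhere in the header):

    #include <bit>
    #include <cstddef>
    #include <cstdint>

    /* Illustrative stand-ins only; the kernel's own constants and macros are not reproduced here. */
    constexpr std::size_t NumPriorityLevels = 4;                                       /* assumed value */
    constexpr std::size_t PriorityShift     = 8 - std::countr_zero(NumPriorityLevels); /* 8 - 2 = 6 */

    constexpr std::uint8_t ToGicPriorityValue(std::int32_t level) {
        /* The level lands in the top bits; the unused low bits are all ones. */
        return static_cast<std::uint8_t>((level << PriorityShift) | ((1 << PriorityShift) - 1));
    }

    static_assert(ToGicPriorityValue(0) == 0x3F);   /* 0b00111111 */
    static_assert(ToGicPriorityValue(3) == 0xFF);   /* 0b11111111 */

If NumPriorityLevels were not a power of two, the trailing-zero count would undershoot log2 of it, which is exactly what the added static_assert guards against.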

View file

@@ -42,10 +42,11 @@ namespace ams::kern::arch::arm64 {
                 const L3PageTableEntry *l3_entry;
             };
         private:
-            static constexpr size_t PageBits  = __builtin_ctzll(PageSize);
+            static constexpr size_t PageBits  = util::CountTrailingZeros(PageSize);
             static constexpr size_t NumLevels = 3;
             static constexpr size_t LevelBits = 9;
             static_assert(NumLevels > 0);
+            static_assert(PageBits == 12);

             template<size_t Offset, size_t Count>
             static constexpr ALWAYS_INLINE u64 GetBits(u64 value) {
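
The new static_assert(PageBits == 12) pins the derived constant to the expected 4 KB page, and together with three 9-bit levels that fixes the table geometry. A small sketch of the arithmetic, with PageSize assumed to be 0x1000 for illustration:

    #include <bit>
    #include <cstddef>

    constexpr std::size_t PageSize  = 0x1000;                      /* assumed 4 KB page, for illustration */
    constexpr std::size_t PageBits  = std::countr_zero(PageSize);  /* 12 */
    constexpr std::size_t NumLevels = 3;
    constexpr std::size_t LevelBits = 9;

    static_assert(PageBits == 12);
    static_assert(PageBits + NumLevels * LevelBits == 39);         /* 12 offset bits + 3 * 9 index bits */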

View file

@@ -55,48 +55,16 @@ namespace ams::kern {
                 return static_cast<u32>(type) + 1;
             }

-            static constexpr u32 CountTrailingZero(u32 flag) {
-                for (u32 i = 0; i < BITSIZEOF(u32); i++) {
-                    if (flag & (1u << i)) {
-                        return i;
-                    }
-                }
-                return BITSIZEOF(u32);
-            }
-
-            static constexpr u32 GetCapabilityId(CapabilityType type) {
-                const u32 flag = GetCapabilityFlag(type);
-                if (std::is_constant_evaluated()) {
-                    return CountTrailingZero(flag);
-                } else {
-                    return static_cast<u32>(__builtin_ctz(flag));
-                }
-            }
-
             template<size_t Index, size_t Count, typename T = u32>
             using Field = util::BitPack32::Field<Index, Count, T>;

             #define DEFINE_FIELD(name, prev, ...) using name = Field<prev::Next, __VA_ARGS__>

             template<CapabilityType Type>
-            static constexpr inline u32 CapabilityFlag = []() -> u32 {
-                return static_cast<u32>(Type) + 1;
-            }();
+            static constexpr inline u32 CapabilityFlag = static_cast<u32>(Type) + 1;

             template<CapabilityType Type>
-            static constexpr inline u32 CapabilityId = []() -> u32 {
-                const u32 flag = static_cast<u32>(Type) + 1;
-                if (std::is_constant_evaluated()) {
-                    for (u32 i = 0; i < BITSIZEOF(u32); i++) {
-                        if (flag & (1u << i)) {
-                            return i;
-                        }
-                    }
-                    return BITSIZEOF(u32);
-                } else {
-                    return __builtin_ctz(flag);
-                }
-            }();
+            static constexpr inline u32 CapabilityId = util::CountTrailingZeros<u32>(CapabilityFlag<Type>);

             struct CorePriority {
                 using IdBits = Field<0, CapabilityId<CapabilityType::CorePriority> + 1>;
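
With the lambdas gone, the relationship is easier to see: a capability's flag is its type value plus one, which by construction is a single set bit, and the id is that bit's position. A compile-time sketch using a hypothetical enumerator value (illustration only; the real CapabilityType values live in this header):

    #include <bit>
    #include <cstdint>

    enum class CapabilityType : std::uint32_t {
        CorePriority = (1u << 3) - 1,   /* hypothetical encoding: three low bits set */
    };

    template<CapabilityType Type>
    constexpr inline std::uint32_t CapabilityFlag = static_cast<std::uint32_t>(Type) + 1;

    /* The flag has exactly one bit set, so its trailing-zero count recovers the id. */
    template<CapabilityType Type>
    constexpr inline std::uint32_t CapabilityId = std::countr_zero(CapabilityFlag<Type>);

    static_assert(CapabilityFlag<CapabilityType::CorePriority> == 0b1000);
    static_assert(CapabilityId<CapabilityType::CorePriority>   == 3);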

View file

@@ -235,7 +235,7 @@ namespace ams::kern {
             KPriorityQueueImpl m_suggested_queue;
         private:
             constexpr ALWAYS_INLINE void ClearAffinityBit(u64 &affinity, s32 core) {
-                affinity &= ~(u64(1ul) << core);
+                affinity &= ~(UINT64_C(1) << core);
             }

             constexpr ALWAYS_INLINE s32 GetNextCore(u64 &affinity) {
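
Both u64(1ul) and UINT64_C(1) produce a 64-bit one here; the UINT64_C spelling just states the intent directly (the shifted constant must be as wide as the affinity mask, not a plain int). A stand-alone sketch of the helper for reference:

    #include <cstdint>

    constexpr std::uint64_t ClearAffinityBit(std::uint64_t affinity, std::int32_t core) {
        /* UINT64_C(1) is a 64-bit constant, so the shift and the complement both happen at mask width. */
        return affinity & ~(UINT64_C(1) << core);
    }

    static_assert(ClearAffinityBit(0b1011, 1) == 0b1001);
    static_assert(ClearAffinityBit(UINT64_C(1) << 40, 40) == 0);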

View file

@@ -286,6 +286,8 @@ namespace ams::kern {
             constexpr void *GetProcessLocalRegionHeapAddress() const { return m_plr_heap_address; }

+            KThread *GetExceptionThread() const { return m_exception_thread; }
+
             void AddCpuTime(s64 diff) { m_cpu_time += diff; }
             s64 GetCpuTime() { return m_cpu_time; }

View file

@@ -126,11 +126,13 @@ namespace ams::kern {
         for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
             KThread *top_thread = priority_queue.GetScheduledFront(core_id);
             if (top_thread != nullptr) {
-                /* If the thread has no waiters, we need to check if the process has a thread pinned. */
-                if (top_thread->GetNumKernelWaiters() == 0) {
+                /* We need to check if the thread's process has a pinned thread. */
                 if (KProcess *parent = top_thread->GetOwnerProcess(); parent != nullptr) {
+                    /* Check that there's a pinned thread other than the current top thread. */
                     if (KThread *pinned = parent->GetPinnedThread(core_id); pinned != nullptr && pinned != top_thread) {
-                        /* We prefer our parent's pinned thread if possible. However, we also don't want to schedule un-runnable threads. */
+                        /* We need to prefer threads with kernel waiters to the pinned thread. */
+                        if (top_thread->GetNumKernelWaiters() == 0 && top_thread != parent->GetExceptionThread()) {
+                            /* If the pinned thread is runnable, use it. */
                             if (pinned->GetRawState() == KThread::ThreadState_Runnable) {
                                 top_thread = pinned;
                             } else {
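
The restructuring changes when the pinned thread is preferred: previously the whole pinned-thread path was skipped unless the front thread had no kernel waiters; now the pinned thread is looked up first and then passed over whenever the front thread has kernel waiters or is the process's exception thread (hence the new KProcess::GetExceptionThread accessor above). A hypothetical predicate, not part of the kernel, that restates the new decision (the not-runnable fallback in the elided else branch is outside this sketch):

    constexpr bool ShouldPreferPinnedThread(bool pinned_exists, bool pinned_is_top,
                                            bool top_has_kernel_waiters, bool top_is_exception_thread,
                                            bool pinned_is_runnable) {
        /* There must be a pinned thread that is not already the scheduled front. */
        if (!pinned_exists || pinned_is_top) {
            return false;
        }
        /* The front thread keeps the core if it has kernel waiters or is the exception thread
           (the exception-thread check is the part added in this commit). */
        if (top_has_kernel_waiters || top_is_exception_thread) {
            return false;
        }
        /* Otherwise defer to the pinned thread, provided it is runnable. */
        return pinned_is_runnable;
    }

    static_assert(!ShouldPreferPinnedThread(true, false, true,  false, true));
    static_assert(!ShouldPreferPinnedThread(true, false, false, true,  true));
    static_assert( ShouldPreferPinnedThread(true, false, false, false, true));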

View file

@@ -441,7 +441,7 @@ namespace ams::kern {
             m_base_priority_on_unpin = old_base_priority;

             /* Set base priority to higher than any possible process priority. */
-            m_base_priority = std::min<s32>(old_base_priority, __builtin_ctzll(this->GetOwnerProcess()->GetPriorityMask()));
+            m_base_priority = std::min<s32>(old_base_priority, __builtin_ctzll(this->GetOwnerProcess()->GetPriorityMask()) - 1);

             RestorePriority(this);
         }
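
The comment states the intent: a base priority strictly better than anything the process may normally use. Reading bit i of the priority mask as "priority i is allowed" (with numerically lower priorities being more favorable), ctzll of the mask is the best allowed priority itself, so the old code only matched it; subtracting one makes it strictly better. A hand-worked illustration, assuming a mask that permits priorities 24 through 59:

    #include <cstdint>

    /* Illustration only: a mask permitting priorities 24 through 59 (bit i set <=> priority i allowed). */
    constexpr std::uint64_t priority_mask = ((UINT64_C(1) << 60) - 1) & ~((UINT64_C(1) << 24) - 1);

    constexpr std::int32_t best_allowed = __builtin_ctzll(priority_mask);       /* 24 */
    constexpr std::int32_t pinned_base  = __builtin_ctzll(priority_mask) - 1;   /* 23, strictly better */

    static_assert(best_allowed == 24);
    static_assert((priority_mask & (UINT64_C(1) << pinned_base)) == 0);         /* 23 is outside the mask */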

View file

@@ -78,9 +78,4 @@ namespace ams::util {
         return IsAligned(reinterpret_cast<uintptr_t>(value), alignment);
     }

-    template<typename T, typename U> requires std::integral<T> && std::integral<U>
-    constexpr ALWAYS_INLINE T DivideUp(T x, U y) {
-        return (x + (y - 1)) / y;
-    }
-
 }
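
DivideUp leaves util_alignment.hpp here, while the BitFlagSet change below starts including util_bitutil.hpp and calling util::DivideUp, so the helper presumably now lives with the bit utilities. Its behaviour matches the AlignUp form it replaces in NumWords; for reference, the removed definition (minus the ALWAYS_INLINE macro) with a couple of worked cases:

    #include <concepts>

    template<typename T, typename U> requires std::integral<T> && std::integral<U>
    constexpr T DivideUp(T x, U y) {
        return (x + (y - 1)) / y;
    }

    static_assert(DivideUp(64u, 64u) == 1);   /* one 64-bit word holds 64 flags */
    static_assert(DivideUp(65u, 64u) == 2);   /* 65 flags spill into a second word */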

View file

@@ -17,6 +17,7 @@
 #pragma once
 #include <vapours/common.hpp>
 #include <vapours/assert.hpp>
+#include <vapours/util/util_bitutil.hpp>

 namespace ams::util {
@@ -30,14 +31,10 @@ namespace ams::util {
             static_assert(sizeof(Storage) <= sizeof(u64));

             static constexpr size_t FlagsPerWord = BITSIZEOF(Storage);
-            static constexpr size_t NumWords = util::AlignUp(N, FlagsPerWord) / FlagsPerWord;
-
-            static constexpr ALWAYS_INLINE auto CountLeadingZeroImpl(Storage word) {
-                return __builtin_clzll(static_cast<unsigned long long>(word)) - (BITSIZEOF(unsigned long long) - FlagsPerWord);
-            }
+            static constexpr size_t NumWords = util::DivideUp(N, FlagsPerWord);

             static constexpr ALWAYS_INLINE Storage GetBitMask(size_t bit) {
-                return Storage(1) << (FlagsPerWord - 1 - bit);
+                return static_cast<Storage>(1) << (FlagsPerWord - 1 - bit);
             }
         private:
             Storage m_words[NumWords];
@@ -55,7 +52,7 @@ namespace ams::util {
             constexpr ALWAYS_INLINE size_t CountLeadingZero() const {
                 for (size_t i = 0; i < NumWords; i++) {
                     if (m_words[i]) {
-                        return FlagsPerWord * i + CountLeadingZeroImpl(m_words[i]);
+                        return FlagsPerWord * i + util::CountLeadingZeros<Storage>(m_words[i]);
                     }
                 }
                 return FlagsPerWord * NumWords;
@@ -68,7 +65,7 @@ namespace ams::util {
                     word &= GetBitMask(n % FlagsPerWord) - 1;
                 }
                 if (word) {
-                    return FlagsPerWord * i + CountLeadingZeroImpl(word);
+                    return FlagsPerWord * i + util::CountLeadingZeros<Storage>(word);
                 }
             }
             return FlagsPerWord * NumWords;
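
Both call sites now rely on util::CountLeadingZeros<Storage> being width-aware, which is what the removed CountLeadingZeroImpl achieved by correcting __builtin_clzll for the narrower Storage type. A minimal sketch of an equivalent width-aware count using the standard library (not Atmosphère's actual implementation):

    #include <bit>
    #include <cstdint>

    /* Sketch: count leading zeros at the width of T, like the removed hand-written adjustment. */
    template<typename T>
    constexpr int CountLeadingZerosSketch(T word) {
        return std::countl_zero(word);   /* std::countl_zero already operates at T's width */
    }

    static_assert(CountLeadingZerosSketch<std::uint8_t>(0x01)  == 7);
    static_assert(CountLeadingZerosSketch<std::uint64_t>(0x01) == 63);

    /* The old adjustment gave the same answer for a nonzero 8-bit word: clzll(1) - (64 - 8) == 7. */
    static_assert(__builtin_clzll(0x01ull) - (64 - 8) == 7);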