kern: Kill KCoreLocalRegion

This commit is contained in:
Michael Scire 2020-12-01 13:41:37 -08:00 committed by SciresM
parent 24d545701c
commit b0debd72a7
24 changed files with 165 additions and 334 deletions

View file

@ -48,7 +48,6 @@
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_memory_manager.hpp>
#include <mesosphere/kern_k_interrupt_task_manager.hpp>
#include <mesosphere/kern_k_core_local_region.hpp>
#include <mesosphere/kern_k_slab_heap.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
#include <mesosphere/kern_k_dpc_manager.hpp>

View file

@ -29,6 +29,7 @@ namespace ams::kern::init {
u64 entrypoint;
u64 argument;
u64 setup_function;
u64 exception_stack;
};
}

View file

@ -220,16 +220,19 @@ namespace ams::kern::arch::arm64::cpu {
DataSynchronizationBarrier();
}
ALWAYS_INLINE uintptr_t GetCoreLocalRegionAddress() {
ALWAYS_INLINE uintptr_t GetCurrentThreadPointerValue() {
register uintptr_t x18 asm("x18");
__asm__ __volatile__("" : [x18]"=r"(x18));
return x18;
}
ALWAYS_INLINE void SetCoreLocalRegionAddress(uintptr_t value) {
ALWAYS_INLINE void SetCurrentThreadPointerValue(uintptr_t value) {
register uintptr_t x18 asm("x18") = value;
__asm__ __volatile__("":: [x18]"r"(x18));
SetTpidrEl1(value);
}
ALWAYS_INLINE void SetExceptionThreadStackTop(uintptr_t top) {
SetTpidrEl1(top);
}
ALWAYS_INLINE void SwitchThreadLocalRegion(uintptr_t tlr) {

View file

@ -25,6 +25,5 @@
namespace ams::kern::init {
KPhysicalAddress GetInitArgumentsAddress(s32 core_id);
void SetInitArguments(s32 core_id, KPhysicalAddress address, uintptr_t arg);
}

View file

@ -1,45 +0,0 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_current_context.hpp>
#include <mesosphere/kern_k_scheduler.hpp>
#include <mesosphere/kern_k_interrupt_task_manager.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
#include <mesosphere/kern_select_hardware_timer.hpp>
#include <mesosphere/kern_k_memory_manager.hpp>
namespace ams::kern {
/* Per-core data kept in the core-local region; currently just the current-context block. */
struct KCoreLocalContext {
KCurrentContext current;
};
static_assert(sizeof(KCoreLocalContext) < PageSize);
/* A core-local context padded out to occupy exactly one page. */
struct KCoreLocalPage {
KCoreLocalContext context;
u8 padding[PageSize - sizeof(KCoreLocalContext)];
};
static_assert(sizeof(KCoreLocalPage) == PageSize);
/* Layout of the whole region: the executing core's own page ("current"), */
/* followed by a window onto every core's page ("absolute"). */
struct KCoreLocalRegion {
KCoreLocalPage current;
KCoreLocalPage absolute[cpu::NumCores];
};
static_assert(sizeof(KCoreLocalRegion) == PageSize * (1 + cpu::NumCores));
}

View file

@ -21,80 +21,24 @@ namespace ams::kern {
class KThread;
class KProcess;
class KScheduler;
class KInterruptTaskManager;
struct KCurrentContext {
std::atomic<KThread *> current_thread;
std::atomic<KProcess *> current_process;
KScheduler *scheduler;
KInterruptTaskManager *interrupt_task_manager;
s32 core_id;
void *exception_stack_top;
ams::svc::ThreadLocalRegion *tlr;
};
static_assert(std::is_standard_layout<KCurrentContext>::value && std::is_trivially_destructible<KCurrentContext>::value);
static_assert(sizeof(KCurrentContext) <= cpu::DataCacheLineSize);
static_assert(sizeof(std::atomic<KThread *>) == sizeof(KThread *));
static_assert(sizeof(std::atomic<KProcess *>) == sizeof(KProcess *));
namespace impl {
ALWAYS_INLINE KCurrentContext &GetCurrentContext() {
return *reinterpret_cast<KCurrentContext *>(cpu::GetCoreLocalRegionAddress());
}
}
ALWAYS_INLINE KThread *GetCurrentThreadPointer() {
return impl::GetCurrentContext().current_thread.load(std::memory_order_relaxed);
return reinterpret_cast<KThread *>(cpu::GetCurrentThreadPointerValue());
}
ALWAYS_INLINE KThread &GetCurrentThread() {
return *GetCurrentThreadPointer();
}
ALWAYS_INLINE KProcess *GetCurrentProcessPointer() {
return impl::GetCurrentContext().current_process.load(std::memory_order_relaxed);
}
ALWAYS_INLINE KProcess &GetCurrentProcess() {
return *GetCurrentProcessPointer();
}
ALWAYS_INLINE KScheduler *GetCurrentSchedulerPointer() {
return impl::GetCurrentContext().scheduler;
}
ALWAYS_INLINE KScheduler &GetCurrentScheduler() {
return *GetCurrentSchedulerPointer();
}
ALWAYS_INLINE KInterruptTaskManager *GetCurrentInterruptTaskManagerPointer() {
return impl::GetCurrentContext().interrupt_task_manager;
}
ALWAYS_INLINE KInterruptTaskManager &GetCurrentInterruptTaskManager() {
return *GetCurrentInterruptTaskManagerPointer();
}
ALWAYS_INLINE s32 GetCurrentCoreId() {
return impl::GetCurrentContext().core_id;
}
ALWAYS_INLINE ams::svc::ThreadLocalRegion *GetCurrentThreadLocalRegion() {
return impl::GetCurrentContext().tlr;
}
ALWAYS_INLINE void SetCurrentThread(KThread *new_thread) {
impl::GetCurrentContext().current_thread = new_thread;
cpu::SetCurrentThreadPointerValue(reinterpret_cast<uintptr_t>(new_thread));
}
ALWAYS_INLINE void SetCurrentProcess(KProcess *new_process) {
impl::GetCurrentContext().current_process = new_process;
}
ALWAYS_INLINE KProcess *GetCurrentProcessPointer();
ALWAYS_INLINE KProcess &GetCurrentProcess();
ALWAYS_INLINE void SetCurrentThreadLocalRegion(void *address) {
impl::GetCurrentContext().tlr = static_cast<ams::svc::ThreadLocalRegion *>(address);
}
ALWAYS_INLINE s32 GetCurrentCoreId();
ALWAYS_INLINE KScheduler &GetCurrentScheduler();
}

View file

@ -134,7 +134,6 @@ namespace ams::kern {
static NOINLINE KVirtualAddress GetExceptionStackTopAddress(s32 core_id) { return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack); }
static NOINLINE KVirtualAddress GetSlabRegionAddress() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab)).GetAddress(); }
static NOINLINE KVirtualAddress GetCoreLocalRegionAddress() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_CoreLocalRegion)).GetAddress(); }
static NOINLINE const KMemoryRegion &GetDeviceRegion(KMemoryRegionType type) { return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type)); }
static KPhysicalAddress GetDevicePhysicalAddress(KMemoryRegionType type) { return GetDeviceRegion(type).GetAddress(); }
@ -144,7 +143,6 @@ namespace ams::kern {
static NOINLINE const KMemoryRegion &GetPageTableHeapRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelPtHeap)); }
static NOINLINE const KMemoryRegion &GetKernelStackRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelStack)); }
static NOINLINE const KMemoryRegion &GetTempRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelTemp)); }
static NOINLINE const KMemoryRegion &GetCoreLocalRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_CoreLocalRegion)); }
static NOINLINE const KMemoryRegion &GetKernelTraceBufferRegion() { return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelTraceBuffer)); }
@ -216,7 +214,6 @@ namespace ams::kern {
namespace init {
/* These should be generic, regardless of board. */
void SetupCoreLocalRegionMemoryRegions(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator);
void SetupPoolPartitionMemoryRegions();
/* These may be implemented in a board-specific manner. */

View file

@ -156,12 +156,13 @@ namespace ams::kern {
constexpr inline const auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue();
constexpr inline const auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
constexpr inline const auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
constexpr inline const auto KMemoryRegionType_CoreLocalRegion = KMemoryRegionType_None.DeriveInitial(2).Finalize();
static_assert(KMemoryRegionType_Kernel .GetValue() == 0x1);
static_assert(KMemoryRegionType_Dram .GetValue() == 0x2);
static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4);
constexpr inline const auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
constexpr inline const auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2);
static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1);
static_assert(KMemoryRegionType_Dram .GetValue() == 0x2);
/* constexpr inline const auto KMemoryRegionType_CoreLocalRegion = KMemoryRegionType_None.DeriveInitial(2).Finalize(); */
/* static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4); */
constexpr inline const auto KMemoryRegionType_DramKernelBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 0).SetAttribute(KMemoryRegionAttr_NoUserMap).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
constexpr inline const auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
@ -274,15 +275,15 @@ namespace ams::kern {
/* UNUSED: .Derive(7, 0); */
constexpr inline const auto KMemoryRegionType_KernelMiscMainStack = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1);
constexpr inline const auto KMemoryRegionType_KernelMiscMappedDevice = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2);
constexpr inline const auto KMemoryRegionType_KernelMiscIdleStack = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3);
constexpr inline const auto KMemoryRegionType_KernelMiscExceptionStack = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3);
constexpr inline const auto KMemoryRegionType_KernelMiscUnknownDebug = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4);
/* UNUSED: .Derive(7, 5); */
constexpr inline const auto KMemoryRegionType_KernelMiscExceptionStack = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6);
constexpr inline const auto KMemoryRegionType_KernelMiscIdleStack = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6);
static_assert(KMemoryRegionType_KernelMiscMainStack .GetValue() == 0xB49);
static_assert(KMemoryRegionType_KernelMiscMappedDevice .GetValue() == 0xD49);
static_assert(KMemoryRegionType_KernelMiscIdleStack .GetValue() == 0x1349);
static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349);
static_assert(KMemoryRegionType_KernelMiscUnknownDebug .GetValue() == 0x1549);
static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x2349);
static_assert(KMemoryRegionType_KernelMiscIdleStack .GetValue() == 0x2349);
constexpr inline const auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);

View file

@ -361,9 +361,6 @@ namespace ams::kern {
static void Switch(KProcess *cur_process, KProcess *next_process) {
MESOSPHERE_UNUSED(cur_process);
/* Set the current process pointer. */
SetCurrentProcess(next_process);
/* Update the current page table. */
if (next_process) {
next_process->GetPageTable().Activate(next_process->GetProcessId());

View file

@ -56,9 +56,10 @@ namespace ams::kern {
KThread *prev_thread;
s64 last_context_switch_time;
KThread *idle_thread;
std::atomic<KThread *> current_thread;
public:
constexpr KScheduler()
: state(), is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr)
: state(), is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr), current_thread(nullptr)
{
this->state.needs_scheduling = true;
this->state.interrupt_task_thread_runnable = false;
@ -96,6 +97,10 @@ namespace ams::kern {
return this->prev_thread;
}
ALWAYS_INLINE KThread *GetSchedulerCurrentThread() const {
return this->current_thread;
}
ALWAYS_INLINE s64 GetLastContextSwitchTime() const {
return this->last_context_switch_time;
}

View file

@ -90,6 +90,7 @@ namespace ams::kern {
bool is_pinned;
s32 disable_count;
KThreadContext *context;
KThread *cur_thread;
};
static_assert(alignof(StackParameters) == 0x10);
@ -181,6 +182,7 @@ namespace ams::kern {
Result wait_result;
Result debug_exception_result;
s32 priority{};
s32 current_core_id{};
s32 core_id{};
s32 base_priority{};
s32 ideal_core_id{};
@ -380,6 +382,9 @@ namespace ams::kern {
constexpr s32 GetActiveCore() const { return this->core_id; }
constexpr void SetActiveCore(s32 core) { this->core_id = core; }
constexpr ALWAYS_INLINE s32 GetCurrentCore() const { return this->current_core_id; }
constexpr void SetCurrentCore(s32 core) { this->current_core_id = core; }
constexpr s32 GetPriority() const { return this->priority; }
constexpr void SetPriority(s32 prio) { this->priority = prio; }
@ -570,4 +575,16 @@ namespace ams::kern {
return reinterpret_cast<const KExceptionContext *>(reinterpret_cast<uintptr_t>(thread->GetKernelStackTop()) - sizeof(KThread::StackParameters) - sizeof(KExceptionContext));
}
ALWAYS_INLINE KProcess *GetCurrentProcessPointer() {
return GetCurrentThread().GetOwnerProcess();
}
ALWAYS_INLINE KProcess &GetCurrentProcess() {
return *GetCurrentProcessPointer();
}
ALWAYS_INLINE s32 GetCurrentCoreId() {
return GetCurrentThread().GetCurrentCore();
}
}

View file

@ -19,7 +19,10 @@
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_memory_layout.hpp>
#include <mesosphere/kern_k_memory_manager.hpp>
#include <mesosphere/kern_k_core_local_region.hpp>
#include <mesosphere/kern_k_scheduler.hpp>
#include <mesosphere/kern_k_interrupt_task_manager.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
#include <mesosphere/kern_select_hardware_timer.hpp>
#include <mesosphere/kern_k_worker_task_manager.hpp>
namespace ams::kern {
@ -77,13 +80,6 @@ namespace ams::kern {
static KScheduler s_schedulers[cpu::NumCores];
static KInterruptTaskManager s_interrupt_task_managers[cpu::NumCores];
static KHardwareTimer s_hardware_timers[cpu::NumCores];
private:
static ALWAYS_INLINE KCoreLocalContext &GetCoreLocalContext() {
return reinterpret_cast<KCoreLocalRegion *>(cpu::GetCoreLocalRegionAddress())->current.context;
}
static ALWAYS_INLINE KCoreLocalContext &GetCoreLocalContext(s32 core_id) {
return reinterpret_cast<KCoreLocalRegion *>(cpu::GetCoreLocalRegionAddress())->absolute[core_id].context;
}
public:
static NOINLINE void InitializeCoreLocalRegion(s32 core_id);
static NOINLINE void InitializeMainAndIdleThreads(s32 core_id);
@ -96,10 +92,6 @@ namespace ams::kern {
static KThread &GetMainThread(s32 core_id);
static KThread &GetIdleThread(s32 core_id);
static ALWAYS_INLINE KCurrentContext &GetCurrentContext(s32 core_id) {
return GetCoreLocalContext(core_id).current;
}
static ALWAYS_INLINE KScheduler &GetScheduler() {
return s_schedulers[GetCurrentCoreId()];
}
@ -166,4 +158,8 @@ namespace ams::kern {
}
};
ALWAYS_INLINE KScheduler &GetCurrentScheduler() {
return Kernel::GetScheduler();
}
}

View file

@ -32,7 +32,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
mrs x9, elr_el1
mrs x10, spsr_el1
mrs x11, tpidr_el0
mrs x18, tpidr_el1
ldr x18, [sp, #(0x120 + 0x28)]
/* Save callee-saved registers. */
stp x19, x20, [sp, #(8 * 19)]
@ -66,7 +66,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
b.eq 3f
/* Check if our disable count allows us to call SVCs. */
ldr x10, [x18, #0x30]
mrs x10, tpidrro_el0
ldrh w10, [x10, #0x100]
cbz w10, 1f
@ -138,7 +138,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
stp xzr, xzr, [sp, #(8 * 12)]
stp xzr, xzr, [sp, #(8 * 14)]
stp xzr, xzr, [sp, #(8 * 16)]
stp xzr, x19, [sp, #(8 * 18)]
str x19, [sp, #(8 * 19)]
stp x20, x21, [sp, #(8 * 20)]
stp x22, x23, [sp, #(8 * 22)]
stp x24, x25, [sp, #(8 * 24)]
@ -146,7 +146,6 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
stp x28, x29, [sp, #(8 * 28)]
/* Call ams::kern::arch::arm64::HandleException(ams::kern::arch::arm64::KExceptionContext *) */
mrs x18, tpidr_el1
mov x0, sp
bl _ZN3ams4kern4arch5arm6415HandleExceptionEPNS2_17KExceptionContextE
@ -246,7 +245,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
mrs x17, elr_el1
mrs x20, spsr_el1
mrs x19, tpidr_el0
mrs x18, tpidr_el1
ldr x18, [sp, #(0x120 + 0x28)]
stp x17, x20, [sp, #(8 * 32)]
str x19, [sp, #(8 * 34)]
@ -276,7 +275,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
b.eq 3f
/* Check if our disable count allows us to call SVCs. */
ldr x15, [x18, #0x30]
mrs x15, tpidrro_el0
ldrh w15, [x15, #0x100]
cbz w15, 1f
@ -353,7 +352,6 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
stp xzr, xzr, [sp, #(8 * 30)]
/* Call ams::kern::arch::arm64::HandleException(ams::kern::arch::arm64::KExceptionContext *) */
mrs x18, tpidr_el1
mov x0, sp
bl _ZN3ams4kern4arch5arm6415HandleExceptionEPNS2_17KExceptionContextE

View file

@ -249,7 +249,6 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm:
/* Set the global context back into x18/tpidr. */
msr tpidr_el1, x2
mov x18, x2
dsb sy
isb

View file

@ -57,14 +57,18 @@ namespace ams::kern::board::nintendo::nx::smc {
{
/* Disable interrupts while making the call. */
KScopedInterruptDisable intr_disable;
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc #1"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
:
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the CoreLocalRegion into X18. */
cpu::SetCoreLocalRegionAddress(cpu::GetTpidrEl1());
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
}
/* Store arguments to output. */
@ -93,14 +97,18 @@ namespace ams::kern::board::nintendo::nx::smc {
{
/* Disable interrupts while making the call. */
KScopedInterruptDisable intr_disable;
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc #0"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
:
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the CoreLocalRegion into X18. */
cpu::SetCoreLocalRegionAddress(cpu::GetTpidrEl1());
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
}
/* Store arguments to output. */

View file

@ -496,7 +496,7 @@ namespace ams::kern {
if (thread->GetRawState() != KThread::ThreadState_Runnable) {
bool current = false;
for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) {
if (thread == Kernel::GetCurrentContext(i).current_thread) {
if (thread == Kernel::GetScheduler(i).GetSchedulerCurrentThread()) {
current = true;
}
break;
@ -543,7 +543,7 @@ namespace ams::kern {
if (thread->GetRawState() != KThread::ThreadState_Runnable) {
bool current = false;
for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) {
if (thread == Kernel::GetCurrentContext(i).current_thread) {
if (thread == Kernel::GetScheduler(i).GetSchedulerCurrentThread()) {
current = true;
}
break;

View file

@ -181,89 +181,4 @@ namespace ams::kern {
return resource_region_size;
}
namespace init {
namespace {
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
/* The core-local region is page-aligned: one "current" page plus one page per core. */
constexpr size_t CoreLocalRegionAlign = PageSize;
constexpr size_t CoreLocalRegionSize = PageSize * (1 + cpu::NumCores);
/* One unmapped guard page on either side of the region. */
constexpr size_t CoreLocalRegionSizeWithGuards = CoreLocalRegionSize + 2 * PageSize;
constexpr size_t CoreLocalRegionBoundsAlign = 1_GB;
static_assert(CoreLocalRegionSize == sizeof(KCoreLocalRegion));
/* Randomly select a virtual address for the core-local region. Candidates are retried */
/* until one fits (guards included) entirely inside a single unused region without */
/* crossing a CoreLocalRegionBoundsAlign (1 GB) boundary. NOTE(review): the 1 GB */
/* constraint presumably keeps the region under a single L1 table entry -- confirm. */
KVirtualAddress GetCoreLocalRegionVirtualAddress() {
while (true) {
const uintptr_t candidate_start = GetInteger(KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(CoreLocalRegionSizeWithGuards, CoreLocalRegionAlign, KMemoryRegionType_None));
const uintptr_t candidate_end = candidate_start + CoreLocalRegionSizeWithGuards;
const uintptr_t candidate_last = candidate_end - 1;
/* The candidate (including guards) must lie entirely within one memory region... */
const auto &containing_region = *KMemoryLayout::GetVirtualMemoryRegionTree().Find(candidate_start);
if (candidate_last > containing_region.GetLastAddress()) {
continue;
}
/* ...and that region must be unused. */
if (containing_region.GetType() != KMemoryRegionType_None) {
continue;
}
/* The candidate must not straddle an alignment boundary. */
if (util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign) != util::AlignDown(candidate_last, CoreLocalRegionBoundsAlign)) {
continue;
}
/* The containing region must cover the candidate's full aligned bounds. */
if (containing_region.GetAddress() > util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign)) {
continue;
}
if (util::AlignUp(candidate_last, CoreLocalRegionBoundsAlign) - 1 > containing_region.GetLastAddress()) {
continue;
}
/* Skip the leading guard page; the region proper starts one page in. */
return candidate_start + PageSize;
}
}
}
/* Reserve the core-local region in the virtual memory tree and build the per-core */
/* L1 page tables that give each core its own view of the region. */
void SetupCoreLocalRegionMemoryRegions(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator) {
/* NOTE: Nintendo passes page table here to use num_l1_entries; we don't use this at present. */
MESOSPHERE_UNUSED(page_table);
/* Get the virtual address of the core local region. */
const KVirtualAddress core_local_virt_start = GetCoreLocalRegionVirtualAddress();
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(core_local_virt_start), CoreLocalRegionSize, KMemoryRegionType_CoreLocalRegion));
/* Allocate a page for each core. */
KPhysicalAddress core_local_region_start_phys[cpu::NumCores] = {};
for (size_t i = 0; i < cpu::NumCores; i++) {
core_local_region_start_phys[i] = page_allocator.Allocate();
}
/* Allocate an l1 page table for each core. Core 0 reuses the table currently */
/* in ttbr1; the remaining cores get fresh copies of it. */
KPhysicalAddress core_l1_ttbr1_phys[cpu::NumCores] = {};
core_l1_ttbr1_phys[0] = util::AlignDown(cpu::GetTtbr1El1(), PageSize);
for (size_t i = 1; i < cpu::NumCores; i++) {
core_l1_ttbr1_phys[i] = page_allocator.Allocate();
std::memcpy(reinterpret_cast<void *>(GetInteger(core_l1_ttbr1_phys[i])), reinterpret_cast<void *>(GetInteger(core_l1_ttbr1_phys[0])), PageSize);
}
/* Use the l1 page table for each core to map the core local region for each core. */
/* Slot 0 ("current") maps to the owning core's page; slots 1..NumCores ("absolute") */
/* map to every core's page, so each core sees its own page plus all others. */
for (size_t i = 0; i < cpu::NumCores; i++) {
KInitialPageTable temp_pt(core_l1_ttbr1_phys[i], KInitialPageTable::NoClear{});
temp_pt.Map(core_local_virt_start, PageSize, core_local_region_start_phys[i], KernelRwDataAttribute, page_allocator);
for (size_t j = 0; j < cpu::NumCores; j++) {
temp_pt.Map(core_local_virt_start + (j + 1) * PageSize, PageSize, core_local_region_start_phys[j], KernelRwDataAttribute, page_allocator);
}
/* Setup the InitArguments. */
SetInitArguments(static_cast<s32>(i), core_local_region_start_phys[i], GetInteger(core_l1_ttbr1_phys[i]));
}
}
}
}

View file

@ -78,11 +78,6 @@ namespace ams::kern {
}
void KProcess::Finalize() {
/* Ensure we're not executing on any core. */
for (size_t i = 0; i < cpu::NumCores; ++i) {
MESOSPHERE_ASSERT(Kernel::GetCurrentContext(static_cast<s32>(i)).current_process.load(std::memory_order_relaxed) != this);
}
/* Delete the process local region. */
this->DeleteThreadLocalRegion(this->plr_address);

View file

@ -66,6 +66,9 @@ namespace ams::kern {
/* Bind interrupt handler. */
Kernel::GetInterruptManager().BindHandler(GetSchedulerInterruptTask(), KInterruptName_Scheduler, this->core_id, KInterruptController::PriorityLevel_Scheduler, false, false);
/* Set the current thread. */
this->current_thread = GetCurrentThreadPointer();
}
void KScheduler::Activate() {
@ -259,18 +262,21 @@ namespace ams::kern {
MESOSPHERE_KTRACE_THREAD_SWITCH(next_thread);
if (next_thread->GetCurrentCore() != this->core_id) {
next_thread->SetCurrentCore(this->core_id);
}
/* Switch the current process, if we're switching processes. */
if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) {
/* MESOSPHERE_LOG("!!! PROCESS SWITCH !!! %s -> %s\n", cur_process != nullptr ? cur_process->GetName() : nullptr, next_process != nullptr ? next_process->GetName() : nullptr); */
KProcess::Switch(cur_process, next_process);
}
/* Set the new thread. */
SetCurrentThread(next_thread);
this->current_thread = next_thread;
/* Set the new Thread Local region. */
cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
SetCurrentThreadLocalRegion(next_thread->GetThreadLocalRegionHeapAddress());
}
void KScheduler::ClearPreviousThread(KThread *thread) {

View file

@ -143,6 +143,9 @@ namespace ams::kern {
this->num_kernel_waiters = 0;
this->entrypoint = reinterpret_cast<uintptr_t>(func);
/* Set our current core id. */
this->current_core_id = core;
/* We haven't released our resource limit hint, and we've spent no time on the cpu. */
this->resource_limit_release_hint = 0;
this->cpu_time = 0;
@ -177,6 +180,7 @@ namespace ams::kern {
this->parent->CopySvcPermissionsTo(sp);
}
sp.context = std::addressof(this->thread_context);
sp.cur_thread = this;
sp.disable_count = 1;
this->SetInExceptionHandler();
@ -362,7 +366,7 @@ namespace ams::kern {
for (size_t i = 0; i < cpu::NumCores; ++i) {
KThread *core_thread;
do {
core_thread = Kernel::GetCurrentContext(i).current_thread.load(std::memory_order_acquire);
core_thread = Kernel::GetScheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
}
@ -619,7 +623,7 @@ namespace ams::kern {
bool thread_is_current = false;
s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(cpu::NumCores); ++thread_core) {
if (Kernel::GetCurrentContext(thread_core).current_thread == this) {
if (Kernel::GetScheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
@ -834,7 +838,7 @@ namespace ams::kern {
thread_is_current = false;
for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) {
if (Kernel::GetCurrentContext(i).current_thread == this) {
if (Kernel::GetScheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}

View file

@ -39,20 +39,9 @@ namespace ams::kern {
}
void Kernel::InitializeCoreLocalRegion(s32 core_id) {
/* Construct the core local region object in place. */
KCoreLocalContext *clc = GetPointer<KCoreLocalContext>(KMemoryLayout::GetCoreLocalRegionAddress());
new (clc) KCoreLocalContext;
/* Set the core local region address into the global register. */
cpu::SetCoreLocalRegionAddress(reinterpret_cast<uintptr_t>(clc));
/* Initialize current context. */
clc->current.current_thread = nullptr;
clc->current.current_process = nullptr;
clc->current.core_id = core_id;
clc->current.scheduler = std::addressof(Kernel::GetScheduler());
clc->current.interrupt_task_manager = std::addressof(Kernel::GetInterruptTaskManager());
clc->current.exception_stack_top = GetVoidPointer(KMemoryLayout::GetExceptionStackTopAddress(core_id) - sizeof(KThread::StackParameters));
/* The core local region no longer exists, so just clear the current thread. */
AMS_UNUSED(core_id);
SetCurrentThread(nullptr);
}
void Kernel::InitializeMainAndIdleThreads(s32 core_id) {
@ -68,7 +57,6 @@ namespace ams::kern {
/* Set the current thread to be the main thread, and we have no processes running yet. */
SetCurrentThread(main_thread);
SetCurrentProcess(nullptr);
/* Initialize the interrupt manager, hardware timer, and scheduler */
GetInterruptManager().Initialize(core_id);
@ -126,7 +114,6 @@ namespace ams::kern {
PrintMemoryRegion(" Stack", KMemoryLayout::GetKernelStackRegionExtents());
PrintMemoryRegion(" Misc", KMemoryLayout::GetKernelMiscRegionExtents());
PrintMemoryRegion(" Slab", KMemoryLayout::GetKernelSlabRegionExtents());
PrintMemoryRegion(" CoreLocalRegion", KMemoryLayout::GetCoreLocalRegion());
PrintMemoryRegion(" LinearRegion", KMemoryLayout::GetLinearRegionVirtualExtents());
MESOSPHERE_LOG("\n");

View file

@ -37,6 +37,8 @@ namespace ams::kern::init {
/* Global initial arguments array. */
KPhysicalAddress g_init_arguments_phys_addr[cpu::NumCores];
KInitArguments g_init_arguments[cpu::NumCores];
/* Page table attributes. */
constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
@ -73,6 +75,48 @@ namespace ams::kern::init {
}
}
void SetupInitialArguments(KInitialPageTable &ttbr1_table, KInitialPageAllocator &allocator) {
AMS_UNUSED(ttbr1_table, allocator);
/* Get parameters for initial arguments. */
const u64 ttbr0 = cpu::GetTtbr0El1();
const u64 ttbr1 = cpu::GetTtbr1El1();
const u64 tcr = cpu::GetTcrEl1();
const u64 mair = cpu::GetMairEl1();
const u64 cpuactlr = cpu::GetCpuActlrEl1();
const u64 cpuectlr = cpu::GetCpuEctlrEl1();
const u64 sctlr = cpu::GetSctlrEl1();
for (s32 i = 0; i < static_cast<s32>(cpu::NumCores); ++i) {
/* Get the arguments. */
KInitArguments *init_args = g_init_arguments + i;
/* Translate to a physical address. */
/* KPhysicalAddress phys_addr = Null<KPhysicalAddress>; */
/* if (cpu::GetPhysicalAddressWritable(std::addressof(phys_addr), KVirtualAddress(init_args), true)) { */
/* g_init_arguments_phys_addr[i] = phys_addr; */
/* } */
g_init_arguments_phys_addr[i] = ttbr1_table.GetPhysicalAddress(KVirtualAddress(init_args));
/* Set the arguments. */
init_args->ttbr0 = ttbr0;
init_args->ttbr1 = ttbr1;
init_args->tcr = tcr;
init_args->mair = mair;
init_args->cpuactlr = cpuactlr;
init_args->cpuectlr = cpuectlr;
init_args->sctlr = sctlr;
init_args->sp = GetInteger(KMemoryLayout::GetMainStackTopAddress(i)) - sizeof(KThread::StackParameters);
init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::HorizonKernelMain);
init_args->argument = static_cast<u64>(i);
init_args->setup_function = reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore);
init_args->exception_stack = GetInteger(KMemoryLayout::GetExceptionStackTopAddress(i)) - sizeof(KThread::StackParameters);
}
/* Ensure the arguments are written to memory. */
StoreDataCache(g_init_arguments, sizeof(g_init_arguments));
}
}
void InitializeCore(uintptr_t misc_unk_debug_phys_addr, uintptr_t initial_page_allocator_state) {
@ -295,8 +339,8 @@ namespace ams::kern::init {
MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscExceptionStack, i);
}
/* Setup the KCoreLocalRegion regions. */
SetupCoreLocalRegionMemoryRegions(ttbr1_table, g_initial_page_allocator);
/* Setup the initial arguments. */
SetupInitialArguments(ttbr1_table, g_initial_page_allocator);
/* Finalize the page allocator, we're done allocating at this point. */
KInitialPageAllocator::State final_init_page_table_state;
@ -329,28 +373,6 @@ namespace ams::kern::init {
return g_init_arguments_phys_addr[core_id];
}
/* Populate the boot/wake arguments for the given core. */
/* `address` is the physical page at which the KInitArguments are written; */
/* `arg` is stored as the core's ttbr1 (callers pass the core's l1 table physical address). */
void SetInitArguments(s32 core_id, KPhysicalAddress address, uintptr_t arg) {
/* Set the arguments. */
KInitArguments *init_args = reinterpret_cast<KInitArguments *>(GetInteger(address));
init_args->ttbr0 = cpu::GetTtbr0El1();
init_args->ttbr1 = arg;
init_args->tcr = cpu::GetTcrEl1();
init_args->mair = cpu::GetMairEl1();
init_args->cpuactlr = cpu::GetCpuActlrEl1();
init_args->cpuectlr = cpu::GetCpuEctlrEl1();
init_args->sctlr = cpu::GetSctlrEl1();
/* Initial stack pointer sits below the core's main-stack top, leaving room for StackParameters. */
init_args->sp = GetInteger(KMemoryLayout::GetMainStackTopAddress(core_id)) - sizeof(KThread::StackParameters);
init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::HorizonKernelMain);
init_args->argument = static_cast<u64>(core_id);
init_args->setup_function = reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore);
/* Ensure the arguments are written to memory. */
StoreDataCache(init_args, sizeof(*init_args));
/* Save the pointer to the arguments to use as argument upon core wakeup. */
g_init_arguments_phys_addr[core_id] = address;
}
void InitializeDebugRegisters() {
/* Determine how many watchpoints and breakpoints we have */
cpu::DebugFeatureRegisterAccessor aa64dfr0;
@ -417,6 +439,7 @@ namespace ams::kern::init {
void InitializeExceptionVectors() {
    /* Install the kernel's EL1 exception vector table. */
    cpu::SetVbarEl1(reinterpret_cast<uintptr_t>(::ams::kern::ExceptionVectors));
    /* Clear tpidr_el1 (SetExceptionThreadStackTop writes tpidr_el1); handlers check it for zero before using it as a stack top. */
    cpu::SetExceptionThreadStackTop(0);
    /* Barrier: make the system-register writes visible before any exception can be taken. */
    cpu::EnsureInstructionConsistency();
}

View file

@ -227,6 +227,10 @@ _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE:
/* Ensure that the exception vectors are setup. */
bl _ZN3ams4kern4init26InitializeExceptionVectorsEv
/* Setup the exception stack in tpidr_el1. */
ldr x1, [x20, #0x58]
msr tpidr_el1, x1
/* Jump to the entrypoint. */
ldr x1, [x20, #0x40]
ldr x0, [x20, #0x48]

View file

@ -31,9 +31,8 @@ _ZN3ams4kern4arch5arm6422EL1IrqExceptionHandlerEv:
stp x12, x13, [sp, #(8 * 12)]
stp x14, x15, [sp, #(8 * 14)]
stp x16, x17, [sp, #(8 * 16)]
stp x18, x19, [sp, #(8 * 18)]
stp x20, x21, [sp, #(8 * 20)]
stp x22, x30, [sp, #(8 * 22)]
stp x19, x20, [sp, #(8 * 18)]
stp x21, x30, [sp, #(8 * 20)]
mrs x19, sp_el0
mrs x20, elr_el1
@ -41,7 +40,6 @@ _ZN3ams4kern4arch5arm6422EL1IrqExceptionHandlerEv:
mov w21, w21
/* Invoke KInterruptManager::HandleInterrupt(bool user_mode). */
mrs x18, tpidr_el1
mov x0, #0
bl _ZN3ams4kern4arch5arm6417KInterruptManager15HandleInterruptEb
@ -59,9 +57,8 @@ _ZN3ams4kern4arch5arm6422EL1IrqExceptionHandlerEv:
ldp x12, x13, [sp, #(8 * 12)]
ldp x14, x15, [sp, #(8 * 14)]
ldp x16, x17, [sp, #(8 * 16)]
ldp x18, x19, [sp, #(8 * 18)]
ldp x20, x21, [sp, #(8 * 20)]
ldp x22, x30, [sp, #(8 * 22)]
ldp x19, x20, [sp, #(8 * 18)]
ldp x21, x30, [sp, #(8 * 20)]
add sp, sp, #(8 * 24)
@ -74,7 +71,7 @@ _ZN3ams4kern4arch5arm6422EL1IrqExceptionHandlerEv:
.type _ZN3ams4kern4arch5arm6422EL0IrqExceptionHandlerEv, %function
_ZN3ams4kern4arch5arm6422EL0IrqExceptionHandlerEv:
/* Save registers that need saving. */
sub sp, sp, #(8 * 36)
sub sp, sp, #0x120
stp x0, x1, [sp, #(8 * 0)]
stp x2, x3, [sp, #(8 * 2)]
@ -102,7 +99,7 @@ _ZN3ams4kern4arch5arm6422EL0IrqExceptionHandlerEv:
str x23, [sp, #(8 * 34)]
/* Invoke KInterruptManager::HandleInterrupt(bool user_mode). */
mrs x18, tpidr_el1
ldr x18, [sp, #(0x120 + 0x28)]
mov x0, #1
bl _ZN3ams4kern4arch5arm6417KInterruptManager15HandleInterruptEb
@ -199,7 +196,7 @@ _ZN3ams4kern4arch5arm6430EL0SynchronousExceptionHandlerEv:
str x23, [sp, #(8 * 34)]
/* Call ams::kern::arch::arm64::HandleException(ams::kern::arch::arm64::KExceptionContext *) */
mrs x18, tpidr_el1
ldr x18, [sp, #(0x120 + 0x28)]
mov x0, sp
bl _ZN3ams4kern4arch5arm6415HandleExceptionEPNS2_17KExceptionContextE
@ -299,12 +296,8 @@ _ZN3ams4kern4arch5arm6430EL1SynchronousExceptionHandlerEv:
b.eq 5f
1: /* The exception is not a data abort or instruction abort caused by a TLB conflict. */
/* Load the CoreLocalContext into x0. */
/* Load the exception stack top from tpidr_el1. */
mrs x0, tpidr_el1
cbz x0, 2f
/* Load the exception stack top from the context. */
ldr x0, [x0, #0x28]
/* Setup the stack for a generic exception handler. */
sub x0, x0, #0x20
@ -342,21 +335,6 @@ _ZN3ams4kern4arch5arm6430EL1SynchronousExceptionHandlerEv:
msr elr_el1, x30
eret
2: /* The CoreLocalContext is nullptr. */
/* Setup the stack for a generic exception handle. */
/* NOTE: Nintendo does not restore X0 here, and thus saves nullptr. */
/* This is probably not their intention, so we'll fix it. */
/* NOTE: Nintendo also does not really save SP correctly, and so we */
/* will also fix that. */
mov x0, sp
sub x0, x0, #0x20
str x1, [x0, #16]
mov x1, sp
str x1, [x0]
mov sp, x0
mrs x0, cntv_cval_el0
str x0, [sp, #8]
3: /* The exception wasn't triggered by copying memory from userspace. */
ldr x0, [sp, #8]
ldr x1, [sp, #16]
@ -388,7 +366,6 @@ _ZN3ams4kern4arch5arm6430EL1SynchronousExceptionHandlerEv:
str x23, [sp, #(8 * 34)]
/* Call ams::kern::arch::arm64::HandleException(ams::kern::arch::arm64::KExceptionContext *) */
mrs x18, tpidr_el1
mov x0, sp
bl _ZN3ams4kern4arch5arm6415HandleExceptionEPNS2_17KExceptionContextE
@ -440,7 +417,7 @@ _ZN3ams4kern4arch5arm6430EL1SynchronousExceptionHandlerEv:
.type _ZN3ams4kern4arch5arm6425FpuAccessExceptionHandlerEv, %function
_ZN3ams4kern4arch5arm6425FpuAccessExceptionHandlerEv:
/* Save registers that need saving. */
sub sp, sp, #(8 * 24)
sub sp, sp, #0x120
stp x0, x1, [sp, #(8 * 0)]
stp x2, x3, [sp, #(8 * 2)]
@ -453,17 +430,23 @@ _ZN3ams4kern4arch5arm6425FpuAccessExceptionHandlerEv:
stp x16, x17, [sp, #(8 * 16)]
stp x18, x19, [sp, #(8 * 18)]
stp x20, x21, [sp, #(8 * 20)]
stp x22, x30, [sp, #(8 * 22)]
mrs x18, tpidr_el1
mrs x19, sp_el0
mrs x20, elr_el1
mrs x21, spsr_el1
mov w21, w21
stp x30, x19, [sp, #(8 * 30)]
stp x20, x21, [sp, #(8 * 32)]
/* Invoke the FPU context switch handler. */
ldr x18, [sp, #(0x120 + 0x28)]
bl _ZN3ams4kern4arch5arm6423FpuContextSwitchHandlerEv
/* Restore registers that we saved. */
ldp x30, x19, [sp, #(8 * 30)]
ldp x20, x21, [sp, #(8 * 32)]
msr sp_el0, x19
msr elr_el1, x20
msr spsr_el1, x21
@ -479,9 +462,8 @@ _ZN3ams4kern4arch5arm6425FpuAccessExceptionHandlerEv:
ldp x16, x17, [sp, #(8 * 16)]
ldp x18, x19, [sp, #(8 * 18)]
ldp x20, x21, [sp, #(8 * 20)]
ldp x22, x30, [sp, #(8 * 22)]
add sp, sp, #(8 * 24)
add sp, sp, #0x120
/* Return from the exception. */
eret
@ -494,8 +476,8 @@ _ZN3ams4kern4arch5arm6421EL1SystemErrorHandlerEv:
/* Nintendo uses the "unused" virtual timer compare value as a scratch register. */
msr cntv_cval_el0, x0
/* Load the exception stack top from the context. */
ldr x0, [x0, #0x28]
/* Load the exception stack top from tpidr_el1. */
mrs x0, tpidr_el1
/* Setup the stack for a generic exception handler. */
sub x0, x0, #0x20
@ -534,16 +516,12 @@ _ZN3ams4kern4arch5arm6421EL1SystemErrorHandlerEv:
str x23, [sp, #(8 * 34)]
/* Invoke ams::kern::arch::arm64::HandleException(ams::kern::arch::arm64::KExceptionContext *). */
mrs x18, tpidr_el1
mov x0, sp
bl _ZN3ams4kern4arch5arm6415HandleExceptionEPNS2_17KExceptionContextE
1: /* HandleException should never return. The best we can do is infinite loop. */
b 1b
/* Return from the exception. */
eret
/* ams::kern::arch::arm64::EL0SystemErrorHandler() */
.section .text._ZN3ams4kern4arch5arm6421EL0SystemErrorHandlerEv, "ax", %progbits
.global _ZN3ams4kern4arch5arm6421EL0SystemErrorHandlerEv
@ -576,7 +554,7 @@ _ZN3ams4kern4arch5arm6421EL0SystemErrorHandlerEv:
str x23, [sp, #(8 * 34)]
/* Invoke ams::kern::arch::arm64::HandleException(ams::kern::arch::arm64::KExceptionContext *). */
mrs x18, tpidr_el1
ldr x18, [sp, #(0x120 + 0x28)]
mov x0, sp
bl _ZN3ams4kern4arch5arm6415HandleExceptionEPNS2_17KExceptionContextE