From 035cebef9d362a6da06945621b1887645b6bdad4 Mon Sep 17 00:00:00 2001
From: Michael Scire <SciresM@gmail.com>
Date: Tue, 21 Feb 2023 10:38:48 -0700
Subject: [PATCH] kern: refactor init (kill identity map, merge cpu on logic)

---
 .../arch/arm64/init/kern_k_init_arguments.hpp      |   4 -
 .../arm64/init/kern_k_init_page_table.hpp          | 157 ++++++++++---
 .../arch/arm64/kern_assembly_offsets.h             |   4 +-
 .../mesosphere/arch/arm64/kern_cpu.hpp             |   8 +
 .../arch/arm64/kern_k_page_table_entry.hpp         |   9 +-
 .../nintendo/nx/kern_k_system_control.hpp          |   5 +-
 .../init/kern_init_arguments_select.hpp            |   2 +-
 .../mesosphere/kern_k_memory_layout.hpp            |   6 +-
 .../mesosphere/kern_k_system_control_base.hpp      |  10 +-
 .../arch/arm64/kern_k_page_table_impl.cpp          |  18 +-
 .../nintendo/nx/kern_k_sleep_manager.cpp           |  53 +++--
 .../nintendo/nx/kern_k_sleep_manager.hpp           |   4 +-
 .../nintendo/nx/kern_k_sleep_manager_asm.s         | 218 ++----------------
 .../nintendo/nx/kern_k_system_control.cpp          |   2 +-
 .../board/nintendo/nx/kern_lps_driver.cpp          |   4 +-
 .../board/nintendo/nx/kern_lps_driver.hpp          |   2 +-
 .../source/kern_k_system_control_base.cpp          |  25 +-
 .../source/arch/arm64/init/kern_init_core.cpp      | 177 ++++++++++----
 mesosphere/kernel/source/arch/arm64/init/start.s   |  96 ++++----
 .../kernel_ldr/source/kern_init_loader.cpp         |  14 +-
 20 files changed, 431 insertions(+), 387 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp
index 494750515..94206bfae 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp
@@ -29,8 +29,6 @@ namespace ams::kern::init {
         u64 sp;
         u64 entrypoint;
         u64 argument;
-        u64 setup_function;
-        u64 exception_stack;
     };
     static_assert(alignof(KInitArguments) == util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE));
     static_assert(sizeof(KInitArguments) == std::max(INIT_ARGUMENTS_SIZE, util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)));
@@ -45,7 +43,5 @@ namespace ams::kern::init {
     static_assert(AMS_OFFSETOF(KInitArguments, sp) == INIT_ARGUMENTS_SP);
     static_assert(AMS_OFFSETOF(KInitArguments, entrypoint) == INIT_ARGUMENTS_ENTRYPOINT);
     static_assert(AMS_OFFSETOF(KInitArguments, argument) == INIT_ARGUMENTS_ARGUMENT);
-    static_assert(AMS_OFFSETOF(KInitArguments, setup_function) == INIT_ARGUMENTS_SETUP_FUNCTION);
-    static_assert(AMS_OFFSETOF(KInitArguments, exception_stack) == INIT_ARGUMENTS_EXCEPTION_STACK);
 }
\ No newline at end of file
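A quick sanity check on the size change (a standalone sketch, not code from this patch): dropping setup_function and exception_stack leaves ten u64 fields, which is exactly the new INIT_ARGUMENTS_SIZE of 0x50 with the remaining offsets unchanged. The mirror below omits the power-of-two alignment padding the real struct carries.

    #include <cstddef>
    #include <cstdint>

    /* Hypothetical mirror of the post-patch field layout. */
    struct InitArgumentsMirror {
        uint64_t ttbr0, ttbr1, tcr, mair, cpuactlr, cpuectlr, sctlr, sp, entrypoint, argument;
    };
    static_assert(sizeof(InitArgumentsMirror) == 0x50);             /* ten u64s */
    static_assert(offsetof(InitArgumentsMirror, sp)         == 0x38);
    static_assert(offsetof(InitArgumentsMirror, entrypoint) == 0x40);
    static_assert(offsetof(InitArgumentsMirror, argument)   == 0x48);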
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
index e0613fbd5..f47407c89 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
@@ -23,17 +23,6 @@
 namespace ams::kern::arch::arm64::init {
 
-    inline void ClearPhysicalMemory(KPhysicalAddress address, size_t size) {
-        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, sizeof(u64)));
-
-        /* This Physical Address -> void * conversion is valid, because this is init page table code. */
-        /* The MMU is necessarily not yet turned on, if we are creating an initial page table. */
-        volatile u64 *ptr = reinterpret_cast<volatile u64 *>(GetInteger(address));
-        for (size_t i = 0; i < size / sizeof(u64); ++i) {
-            ptr[i] = 0;
-        }
-    }
-
     /* NOTE: Nintendo uses virtual functions, rather than a concept + template. */
     template<typename T>
     concept IsInitialPageAllocator = requires (T &t, KPhysicalAddress phys_addr, size_t size) {
         { t.Allocate(size) }        -> std::same_as<KPhysicalAddress>;
         { t.Free(phys_addr, size) } -> std::same_as<void>;
     };
 
-    template<IsInitialPageAllocator _PageAllocator>
-    class KInitialPageTableTemplate {
-        public:
-            using PageAllocator = _PageAllocator;
+    class KInitialPageTable {
         private:
             KPhysicalAddress m_l1_tables[2];
             u32 m_num_entries[2];
         public:
-            KInitialPageTableTemplate(KVirtualAddress start_address, KVirtualAddress end_address, PageAllocator &allocator) {
+            template<IsInitialPageAllocator PageAllocator>
+            KInitialPageTable(KVirtualAddress start_address, KVirtualAddress end_address, PageAllocator &allocator) {
                 /* Set tables. */
-                m_l1_tables[0] = AllocateNewPageTable(allocator);
-                m_l1_tables[1] = AllocateNewPageTable(allocator);
+                m_l1_tables[0] = AllocateNewPageTable(allocator, 0);
+                m_l1_tables[1] = AllocateNewPageTable(allocator, 0);
 
                 /* Set counts. */
                 m_num_entries[0] = MaxPageTableEntries;
                 m_num_entries[1] = ((end_address / L1BlockSize) & (MaxPageTableEntries - 1)) - ((start_address / L1BlockSize) & (MaxPageTableEntries - 1)) + 1;
             }
 
-            KInitialPageTableTemplate() {
+            KInitialPageTable() {
                 /* Set tables. */
                 m_l1_tables[0] = util::AlignDown(cpu::GetTtbr0El1(), PageSize);
                 m_l1_tables[1] = util::AlignDown(cpu::GetTtbr1El1(), PageSize);
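The concept above replaces Nintendo's virtual-function allocator interface with a compile-time check. A minimal standalone sketch of a type that satisfies the same shape of requirement, with simplified stand-ins for the kernel's own types:

    #include <concepts>
    #include <cstddef>

    using PhysAddr = unsigned long; /* stand-in for KPhysicalAddress */

    template<typename T>
    concept IsInitialPageAllocatorSketch = requires (T &t, PhysAddr phys_addr, size_t size) {
        { t.Allocate(size) }        -> std::same_as<PhysAddr>;
        { t.Free(phys_addr, size) } -> std::same_as<void>;
    };

    /* A trivial bump allocator satisfies the concept. */
    struct BumpAllocator {
        PhysAddr next;
        PhysAddr Allocate(size_t size) { const PhysAddr r = next; next += size; return r; }
        void Free(PhysAddr, size_t) { /* leaking is fine for one-shot init allocations */ }
    };
    static_assert(IsInitialPageAllocatorSketch<BumpAllocator>);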
@@ -82,30 +69,35 @@ namespace ams::kern::arch::arm64::init {
                 return GetInteger(m_l1_tables[1]);
             }
         private:
-            constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KVirtualAddress address) const {
+            constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KVirtualAddress address, u64 phys_to_virt_offset = 0) const {
                 const size_t index = (GetInteger(address) >> (BITSIZEOF(address) - 1)) & 1;
-                L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[index]));
+                L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[index]) + phys_to_virt_offset);
                 return l1_table + ((GetInteger(address) / L1BlockSize) & (m_num_entries[index] - 1));
             }
 
-            static constexpr ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KVirtualAddress address) {
-                L2PageTableEntry *l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(entry->GetTable()));
+            static constexpr ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KVirtualAddress address, u64 phys_to_virt_offset = 0) {
+                L2PageTableEntry *l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(entry->GetTable()) + phys_to_virt_offset);
                 return l2_table + ((GetInteger(address) / L2BlockSize) & (MaxPageTableEntries - 1));
             }
 
-            static constexpr ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KVirtualAddress address) {
-                L3PageTableEntry *l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(entry->GetTable()));
+            static constexpr ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KVirtualAddress address, u64 phys_to_virt_offset = 0) {
+                L3PageTableEntry *l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(entry->GetTable()) + phys_to_virt_offset);
                 return l3_table + ((GetInteger(address) / L3BlockSize) & (MaxPageTableEntries - 1));
             }
 
-            static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(PageAllocator &allocator) {
+            template<IsInitialPageAllocator PageAllocator>
+            static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(PageAllocator &allocator, u64 phys_to_virt_offset) {
                 auto address = allocator.Allocate(PageSize);
-                ClearNewPageTable(address);
+                ClearNewPageTable(address, phys_to_virt_offset);
                 return address;
             }
 
-            static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address) {
-                ClearPhysicalMemory(address, PageSize);
+            static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address, u64 phys_to_virt_offset) {
+                /* Convert to a dereferenceable address, and clear. */
+                volatile u64 *ptr = reinterpret_cast<volatile u64 *>(GetInteger(address) + phys_to_virt_offset);
+                for (size_t i = 0; i < PageSize / sizeof(u64); ++i) {
+                    ptr[i] = 0;
+                }
             }
         public:
             static consteval size_t GetMaximumOverheadSize(size_t size) {
@@ -327,7 +319,8 @@ namespace ams::kern::arch::arm64::init {
             }
         public:
-            void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, PageAllocator &allocator) {
+            template<IsInitialPageAllocator PageAllocator>
+            void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, PageAllocator &allocator, u64 phys_to_virt_offset) {
                 /* Ensure that addresses and sizes are page aligned. */
                 MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
                 MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
 
                 /* Iteratively map pages until the requested region is mapped. */
                 while (size > 0) {
-                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
+                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr, phys_to_virt_offset);
 
                     /* Can we make an L1 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
@@ -349,12 +342,12 @@ namespace ams::kern::arch::arm64::init {
 
                     /* If we don't already have an L2 table, we need to make a new one. */
                     if (!l1_entry->IsTable()) {
-                        KPhysicalAddress new_table = AllocateNewPageTable(allocator);
+                        KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
                         cpu::DataSynchronizationBarrierInnerShareable();
                         *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
                     }
 
-                    L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+                    L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr, phys_to_virt_offset);
 
                     /* Can we make a contiguous L2 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) {
@@ -380,12 +373,12 @@ namespace ams::kern::arch::arm64::init {
 
                     /* If we don't already have an L3 table, we need to make a new one. */
                     if (!l2_entry->IsTable()) {
-                        KPhysicalAddress new_table = AllocateNewPageTable(allocator);
+                        KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
                         cpu::DataSynchronizationBarrierInnerShareable();
                         *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
                     }
 
-                    L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+                    L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr, phys_to_virt_offset);
 
                     /* Can we make a contiguous L3 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) {
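Map's new trailing parameter is what lets the same walker run both before and after the switch off the identity map. For reference, these are the two call shapes that appear later in this patch: offset 0 while the tables are still reachable at their physical addresses, and the linear-map difference once they are not.

    /* Identity map still live: tables written through their physical addresses. */
    init_pt.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator, 0);

    /* Identity map being torn down: every table access goes through the linear mapping. */
    init_pt.Map(KVirtualAddress(GetInteger(start_phys_addr)), size, start_phys_addr, KernelTextAttribute, finalize_allocator, phys_to_virt_offset);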
@@ -410,6 +403,98 @@ namespace ams::kern::arch::arm64::init {
             cpu::DataSynchronizationBarrierInnerShareable();
         }
 
+        void UnmapTtbr0Entries(u64 phys_to_virt_offset) {
+            /* Ensure data consistency before we unmap. */
+            cpu::DataSynchronizationBarrierInnerShareable();
+
+            /* Define helper, as we only want to clear non-nGnRE pages. */
+            constexpr auto ShouldUnmap = [](const PageTableEntry *entry) ALWAYS_INLINE_LAMBDA -> bool {
+                return entry->GetPageAttribute() != PageTableEntry::PageAttribute_Device_nGnRE;
+            };
+
+            /* Iterate all L1 entries. */
+            L1PageTableEntry * const l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[0]) + phys_to_virt_offset);
+            for (size_t l1_index = 0; l1_index < m_num_entries[0]; l1_index++) {
+                /* Get L1 entry. */
+                L1PageTableEntry * const l1_entry = l1_table + l1_index;
+                if (l1_entry->IsBlock()) {
+                    /* Unmap the L1 entry, if we should. */
+                    if (ShouldUnmap(l1_entry)) {
+                        *static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
+                    }
+                } else if (l1_entry->IsTable()) {
+                    /* Get the L2 table. */
+                    L2PageTableEntry * const l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(l1_entry->GetTable()) + phys_to_virt_offset);
+
+                    /* Unmap all L2 entries, as relevant. */
+                    size_t remaining_l2_entries = 0;
+                    for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) {
+                        /* Get L2 entry. */
+                        L2PageTableEntry * const l2_entry = l2_table + l2_index;
+                        if (l2_entry->IsBlock()) {
+                            const size_t num_to_clear = (l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize) / L2BlockSize;
+
+                            if (ShouldUnmap(l2_entry)) {
+                                for (size_t i = 0; i < num_to_clear; ++i) {
+                                    static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
+                                }
+                            } else {
+                                remaining_l2_entries += num_to_clear;
+                            }
+
+                            l2_index = l2_index + num_to_clear - 1;
+                        } else if (l2_entry->IsTable()) {
+                            /* Get the L3 table. */
+                            L3PageTableEntry * const l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(l2_entry->GetTable()) + phys_to_virt_offset);
+
+                            /* Unmap all L3 entries, as relevant. */
+                            size_t remaining_l3_entries = 0;
+                            for (size_t l3_index = 0; l3_index < MaxPageTableEntries; ++l3_index) {
+                                /* Get L3 entry. */
+                                if (L3PageTableEntry * const l3_entry = l3_table + l3_index; l3_entry->IsBlock()) {
+                                    const size_t num_to_clear = (l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize) / L3BlockSize;
+
+                                    if (ShouldUnmap(l3_entry)) {
+                                        for (size_t i = 0; i < num_to_clear; ++i) {
+                                            static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
+                                        }
+                                    } else {
+                                        remaining_l3_entries += num_to_clear;
+                                    }
+
+                                    l3_index = l3_index + num_to_clear - 1;
+                                }
+                            }
+
+                            /* If we unmapped all L3 entries, clear the L2 entry. */
+                            if (remaining_l3_entries == 0) {
+                                *static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
+
+                                /* Invalidate the entire tlb. */
+                                cpu::DataSynchronizationBarrierInnerShareable();
+                                cpu::InvalidateEntireTlb();
+                            } else {
+                                remaining_l2_entries++;
+                            }
+                        }
+                    }
+
+                    /* If we unmapped all L2 entries, clear the L1 entry. */
+                    if (remaining_l2_entries == 0) {
+                        *static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
+
+                        /* Invalidate the entire tlb. */
+                        cpu::DataSynchronizationBarrierInnerShareable();
+                        cpu::InvalidateEntireTlb();
+                    }
+                }
+            }
+
+            /* Invalidate the entire tlb. */
+            cpu::DataSynchronizationBarrierInnerShareable();
+            cpu::InvalidateEntireTlb();
+        }
+
         KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
             /* Get the L1 entry. */
             const L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
 
@@ -861,6 +946,4 @@ namespace ams::kern::arch::arm64::init {
     };
     static_assert(IsInitialPageAllocator<KInitialPageAllocator>);
 
-    using KInitialPageTable = KInitialPageTableTemplate<KInitialPageAllocator>;
-
 }
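The walk above indexes tables with plain shift-and-mask arithmetic. A standalone sketch of that index math, assuming the standard AArch64 4KB-granule geometry (1GB L1, 2MB L2, 4KB L3 blocks, 512-entry tables) that the BlockSize constants used here correspond to:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t L1IndexOf(uint64_t va) { return (va >> 30) & 511; } /* va / 1GB, mod 512 */
    constexpr size_t L2IndexOf(uint64_t va) { return (va >> 21) & 511; } /* va / 2MB, mod 512 */
    constexpr size_t L3IndexOf(uint64_t va) { return (va >> 12) & 511; } /* va / 4KB, mod 512 */

    /* Second 2MB block within its 1GB region. */
    static_assert(L2IndexOf(0x0000000040200000ull) == 1);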
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h
index 3ca96a713..b585f44eb 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h
@@ -246,7 +246,7 @@
 #define THREAD_LOCAL_REGION_SIZE 0x200
 
 /* ams::kern::init::KInitArguments, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp */
-#define INIT_ARGUMENTS_SIZE 0x60
+#define INIT_ARGUMENTS_SIZE 0x50
 #define INIT_ARGUMENTS_TTBR0 0x00
 #define INIT_ARGUMENTS_TTBR1 0x08
 #define INIT_ARGUMENTS_TCR 0x10
@@ -257,8 +257,6 @@
 #define INIT_ARGUMENTS_SP 0x38
 #define INIT_ARGUMENTS_ENTRYPOINT 0x40
 #define INIT_ARGUMENTS_ARGUMENT 0x48
-#define INIT_ARGUMENTS_SETUP_FUNCTION 0x50
-#define INIT_ARGUMENTS_EXCEPTION_STACK 0x58
 
 /* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */
 /* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp
index 164042d5c..9f0494ec2 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp
@@ -189,6 +189,14 @@ namespace ams::kern::arch::arm64::cpu {
         return (par >> (BITSIZEOF(par) - BITSIZEOF(u8))) == 0xFF;
     }
 
+    ALWAYS_INLINE void StoreDataCacheForInitArguments(const void *addr, size_t size) {
+        const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
+        for (size_t stored = 0; stored < size; stored += cpu::DataCacheLineSize) {
+            __asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(start + stored) : "memory");
+        }
+        DataSynchronizationBarrier();
+    }
+
     /* Synchronization helpers. */
     NOINLINE void SynchronizeAllCores();
     void SynchronizeCores(u64 core_mask);
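This helper exists for the cross-core argument handoff: the booting core writes KInitArguments through cacheable mappings, but the target core starts with its MMU and caches off and reads them straight from DRAM, so the lines must be cleaned (dc cvac, to the point of coherency) first. A sketch of the sequence TurnOnCpu performs later in this patch; the two physical-address names are hypothetical stand-ins for the translated addresses:

    init_args->entrypoint = entrypoint;   /* plain cacheable stores... */
    init_args->argument   = argument;
    cpu::StoreDataCacheForInitArguments(init_args, sizeof(*init_args)); /* ...cleaned to DRAM */
    KSystemControl::Init::CpuOnImpl(core_id, start_other_core_phys, init_args_phys);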
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
index ce0cc921c..93925e3fa 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
@@ -161,9 +161,12 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsGlobal() const { return this->GetBits(11, 1) == 0; }
-            constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->GetBits(10, 1)); }
-            constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->GetBits(8, 2)); }
-            constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->GetBits(2, 3)); }
+            constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->SelectBits(10, 1)); }
+            constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->SelectBits(8, 2)); }
+            constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->SelectBits(2, 3)); }
+            constexpr ALWAYS_INLINE int GetAccessFlagInteger() const { return static_cast<int>(this->GetBits(10, 1)); }
+            constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
+            constexpr ALWAYS_INLINE int GetPageAttributeInteger() const { return static_cast<int>(this->GetBits(2, 3)); }
             constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
index 02021759a..a0cfce5a1 100644
--- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
+++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
@@ -25,12 +25,15 @@ namespace ams::kern::board::nintendo::nx {
             static constexpr size_t SecureAppletMemorySize = 4_MB;
         public:
             class Init : public KSystemControlBase::Init {
+                private:
+                    friend class KSystemControlBase::Init;
+                private:
+                    static void CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
                 public:
                     /* Initialization.
*/ static size_t GetRealMemorySize(); static size_t GetIntendedMemorySize(); static bool ShouldIncreaseThreadResourceLimit(); - static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg); static size_t GetApplicationPoolSize(); static size_t GetAppletPoolSize(); static size_t GetMinimumNonSecureSystemPoolSize(); diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp index 1e34dbcc2..6892d3307 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp @@ -26,6 +26,6 @@ namespace ams::kern::init { static_assert(util::IsPowerOfTwo(alignof(KInitArguments)) && util::IsPowerOfTwo(sizeof(KInitArguments))); - KPhysicalAddress GetInitArgumentsAddress(s32 core_id); + KInitArguments *GetInitArguments(s32 core_id); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index e3099198a..f8ed6ad0f 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -183,10 +183,10 @@ namespace ams::kern { return std::make_tuple(total_size, kernel_size); } - static void InitializeLinearMemoryAddresses(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start) { + static void InitializeLinearMemoryAddresses(u64 phys_to_virt_diff) { /* Set static differences. */ - s_linear_phys_to_virt_diff = GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start); - s_linear_virt_to_phys_diff = GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start); + s_linear_phys_to_virt_diff = phys_to_virt_diff; + s_linear_virt_to_phys_diff = -phys_to_virt_diff; } static void InitializeLinearMemoryRegionTrees(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp index d169079ac..095bff49a 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp @@ -21,6 +21,12 @@ namespace ams::kern { struct InitialProcessBinaryLayout; + namespace init { + + struct KInitArguments; + + } + } namespace ams::kern { @@ -40,6 +46,8 @@ namespace ams::kern { static constinit inline KSpinLock s_random_lock; public: class Init { + private: + static void CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg); public: /* Initialization. 
*/
                static size_t GetRealMemorySize();
                static size_t GetIntendedMemorySize();
                static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
                static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
                static bool ShouldIncreaseThreadResourceLimit();
-               static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
+               static void TurnOnCpu(u64 core_id, const ams::kern::init::KInitArguments *args);
                static size_t GetApplicationPoolSize();
                static size_t GetAppletPoolSize();
                static size_t GetMinimumNonSecureSystemPoolSize();
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
index 901a708ab..95c7d7a64 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
@@ -353,12 +353,12 @@ namespace ams::kern::arch::arm64 {
                              l1_entry->IsPrivilegedExecuteNever(),
                              l1_entry->IsContiguous(),
                              !l1_entry->IsGlobal(),
-                             static_cast<int>(l1_entry->GetAccessFlag()),
-                             static_cast<int>(l1_entry->GetShareable()),
+                             static_cast<int>(l1_entry->GetAccessFlagInteger()),
+                             static_cast<int>(l1_entry->GetShareableInteger()),
                              l1_entry->IsReadOnly(),
                              l1_entry->IsUserAccessible(),
                              l1_entry->IsNonSecure(),
-                             static_cast<int>(l1_entry->GetPageAttribute()),
+                             static_cast<int>(l1_entry->GetPageAttributeInteger()),
                              l1_entry->IsHeadMergeDisabled(),
                              l1_entry->IsHeadAndBodyMergeDisabled(),
                              l1_entry->IsTailMergeDisabled());
@@ -398,12 +398,12 @@ namespace ams::kern::arch::arm64 {
                              l2_entry->IsPrivilegedExecuteNever(),
                              l2_entry->IsContiguous(),
                              !l2_entry->IsGlobal(),
-                             static_cast<int>(l2_entry->GetAccessFlag()),
-                             static_cast<int>(l2_entry->GetShareable()),
+                             static_cast<int>(l2_entry->GetAccessFlagInteger()),
+                             static_cast<int>(l2_entry->GetShareableInteger()),
                              l2_entry->IsReadOnly(),
                              l2_entry->IsUserAccessible(),
                              l2_entry->IsNonSecure(),
-                             static_cast<int>(l2_entry->GetPageAttribute()),
+                             static_cast<int>(l2_entry->GetPageAttributeInteger()),
                              l2_entry->IsHeadMergeDisabled(),
                              l2_entry->IsHeadAndBodyMergeDisabled(),
                              l2_entry->IsTailMergeDisabled());
@@ -443,12 +443,12 @@ namespace ams::kern::arch::arm64 {
                              l3_entry->IsPrivilegedExecuteNever(),
                              l3_entry->IsContiguous(),
                              !l3_entry->IsGlobal(),
-                             static_cast<int>(l3_entry->GetAccessFlag()),
-                             static_cast<int>(l3_entry->GetShareable()),
+                             static_cast<int>(l3_entry->GetAccessFlagInteger()),
+                             static_cast<int>(l3_entry->GetShareableInteger()),
                              l3_entry->IsReadOnly(),
                              l3_entry->IsUserAccessible(),
                              l3_entry->IsNonSecure(),
-                             static_cast<int>(l3_entry->GetPageAttribute()),
+                             static_cast<int>(l3_entry->GetPageAttributeInteger()),
                              l3_entry->IsHeadMergeDisabled(),
                              l3_entry->IsHeadAndBodyMergeDisabled(),
                              l3_entry->IsTailMergeDisabled());
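The GetBits/SelectBits split behind the new *Integer getters used in the dump hunks above: the typed getters now return the field in place (matching enum constants defined at their shifted positions, as in the PageAttribute_Device_nGnRE comparison in UnmapTtbr0Entries), while the *Integer getters keep returning the shifted-down field for %d-style printing. A standalone sketch of the distinction, with the two accessor semantics inferred from the change rather than quoted from it:

    #include <cstdint>

    constexpr uint64_t GetBits(uint64_t v, int offset, int count) {    /* shift field down */
        return (v >> offset) & ((uint64_t(1) << count) - 1);
    }
    constexpr uint64_t SelectBits(uint64_t v, int offset, int count) { /* mask field in place */
        return v & ((((uint64_t(1) << count) - 1)) << offset);
    }

    /* For a descriptor with SH = Inner Shareable (0b11 at bits 8..9): */
    static_assert(GetBits(0x300, 8, 2) == 0x3);      /* shifted-down value, for printing */
    static_assert(SelectBits(0x300, 8, 2) == 0x300); /* in-place value, matches shifted enum constants */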
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp
index 85c01bc10..00b606ff5 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp
@@ -18,6 +18,12 @@
 #include "kern_secure_monitor.hpp"
 #include "kern_lps_driver.hpp"
 
+namespace ams::kern::init {
+
+    void StartOtherCore(const ams::kern::init::KInitArguments *init_args);
+
+}
+
 namespace ams::kern::board::nintendo::nx {
 
     namespace {
@@ -67,15 +73,10 @@ namespace ams::kern::board::nintendo::nx {
         constinit KLightLock g_request_lock;
         constinit KLightLock g_cv_lock;
         constinit KLightConditionVariable g_cv{util::ConstantInitialize};
-        constinit KPhysicalAddress g_sleep_buffer_phys_addrs[cpu::NumCores];
         alignas(1_KB) constinit u64 g_sleep_buffers[cpu::NumCores][1_KB / sizeof(u64)];
+        constinit ams::kern::init::KInitArguments g_sleep_init_arguments[cpu::NumCores];
         constinit SavedSystemRegisters g_sleep_system_registers[cpu::NumCores] = {};
 
-        void PowerOnCpu(int core_id, KPhysicalAddress entry_phys_addr, u64 context_id) {
-            /* Request the secure monitor power on the core. */
-            ::ams::kern::arch::arm64::smc::CpuOn(cpu::MultiprocessorAffinityRegisterAccessor().GetCpuOnArgument() | core_id, GetInteger(entry_phys_addr), context_id);
-        }
-
         void WaitOtherCpuPowerOff() {
             constexpr u64 PmcPhysicalAddress = 0x7000E400;
             constexpr u32 PWRGATE_STATUS_CE123_MASK = ((1u << 3) - 1) << 9;
@@ -473,18 +474,20 @@ namespace ams::kern::board::nintendo::nx {
         }
     }
 
-    void KSleepManager::ProcessRequests(uintptr_t buffer) {
+    void KSleepManager::ProcessRequests(uintptr_t sleep_buffer) {
         const auto target_fw = GetTargetFirmware();
         const s32 core_id = GetCurrentCoreId();
-        KPhysicalAddress resume_entry_phys_addr = Null<KPhysicalAddress>;
+
+        ams::kern::init::KInitArguments * const init_args = g_sleep_init_arguments + core_id;
+        KPhysicalAddress start_core_phys_addr = Null<KPhysicalAddress>;
+        KPhysicalAddress init_args_phys_addr = Null<KPhysicalAddress>;
 
         /* Get the physical addresses we'll need. */
         {
-            MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(g_sleep_buffer_phys_addrs[core_id]), KProcessAddress(buffer)));
-            MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(resume_entry_phys_addr), KProcessAddress(&::ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry)));
-
+            MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(start_core_phys_addr), KProcessAddress(&::ams::kern::init::StartOtherCore)));
+            MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(init_args_phys_addr), KProcessAddress(init_args)));
         }
-        const KPhysicalAddress sleep_buffer_phys_addr = g_sleep_buffer_phys_addrs[core_id];
+
         const u64 target_core_mask = (1ul << core_id);
 
         const bool use_legacy_lps_driver = target_fw < TargetFirmware_2_0_0;
@@ -547,15 +550,29 @@ namespace ams::kern::board::nintendo::nx {
             /* Save the interrupt manager's state. */
             Kernel::GetInterruptManager().Save(core_id);
 
+            /* Setup the initial arguments. */
+            {
+                init_args->ttbr0 = cpu::GetTtbr0El1();
+                init_args->ttbr1 = cpu::GetTtbr1El1();
+                init_args->tcr = cpu::GetTcrEl1();
+                init_args->mair = cpu::GetMairEl1();
+                init_args->cpuactlr = cpu::GetCpuActlrEl1();
+                init_args->cpuectlr = cpu::GetCpuEctlrEl1();
+                init_args->sctlr = cpu::GetSctlrEl1();
+                init_args->sp = 0;
+                init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry);
+                init_args->argument = sleep_buffer;
+            }
+
             /* Ensure that all cores get to this point before continuing. */
             cpu::SynchronizeAllCores();
 
             /* Log that the core is going to sleep. */
-            MESOSPHERE_LOG("Core[%d]: Going to sleep, buffer = %010lx\n", core_id, GetInteger(sleep_buffer_phys_addr));
+            MESOSPHERE_LOG("Core[%d]: Going to sleep, buffer = %010lx\n", core_id, sleep_buffer);
 
             /* If we're on a core other than zero, we can just invoke the sleep handler. */
             if (core_id != 0) {
-                CpuSleepHandler(GetInteger(sleep_buffer_phys_addr), GetInteger(resume_entry_phys_addr));
+                CpuSleepHandler(sleep_buffer, GetInteger(start_core_phys_addr), GetInteger(init_args_phys_addr));
             } else {
                /* Wait for all other cores to be powered off. */
                WaitOtherCpuPowerOff();
@@ -574,9 +591,9 @@ namespace ams::kern::board::nintendo::nx {
                /* Invoke the sleep handler. */
                if (!use_legacy_lps_driver) {
                    /* When not using the legacy driver, invoke directly. */
-                   CpuSleepHandler(GetInteger(sleep_buffer_phys_addr), GetInteger(resume_entry_phys_addr));
+                   CpuSleepHandler(sleep_buffer, GetInteger(start_core_phys_addr), GetInteger(init_args_phys_addr));
                } else {
-                   lps::InvokeCpuSleepHandler(GetInteger(sleep_buffer_phys_addr), GetInteger(resume_entry_phys_addr));
+                   lps::InvokeCpuSleepHandler(sleep_buffer, GetInteger(start_core_phys_addr), GetInteger(init_args_phys_addr));
                }
 
                /* Restore the debug log state. */
@@ -586,8 +603,10 @@ namespace ams::kern::board::nintendo::nx {
                MESOSPHERE_LOG("Exiting SC7\n");
 
                /* Wake up the other cores. */
+               cpu::MultiprocessorAffinityRegisterAccessor mpidr;
+               const auto arg = mpidr.GetCpuOnArgument();
                for (s32 i = 1; i < static_cast<s32>(cpu::NumCores); ++i) {
-                   PowerOnCpu(i, resume_entry_phys_addr, GetInteger(g_sleep_buffer_phys_addrs[i]));
+                   KSystemControl::Init::TurnOnCpu(arg | i, g_sleep_init_arguments + i);
                }
            }
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp
index 195db36df..e5d3add15 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp
@@ -22,14 +22,12 @@ namespace ams::kern::board::nintendo::nx {
        private:
            static void ResumeEntry(uintptr_t arg);
 
-           static void InvalidateDataCacheForResumeEntry(uintptr_t level);
-
            static void ProcessRequests(uintptr_t buffer);
        public:
            static void Initialize();
            static void SleepSystem();
        public:
-           static void CpuSleepHandler(uintptr_t arg, uintptr_t entry);
+           static void CpuSleepHandler(uintptr_t arg, uintptr_t entry, uintptr_t entry_args);
    };
 
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s
index fca5455c7..1da1ac148 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s
@@ -22,14 +22,14 @@
    mov reg, #(((val) >> 0x00) & 0xFFFF); \
    movk reg, #(((val) >> 0x10) & 0xFFFF), lsl#16
 
-/* ams::kern::board::nintendo::nx::KSleepManager::CpuSleepHandler(uintptr_t arg, uintptr_t entry) */
-.section .sleep._ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm, "ax", %progbits
-.global _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm
-.type _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm, %function
-_ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm:
+/* ams::kern::board::nintendo::nx::KSleepManager::CpuSleepHandler(uintptr_t arg, uintptr_t entry, uintptr_t entry_arg) */
+.section .sleep._ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmmm, "ax", %progbits
+.global _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmmm
+.type _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmmm, %function
+_ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmmm:
    /* Save arguments. */
-   mov x16, x0
-   mov x17, x1
+   mov x16, x1
+   mov x17, x2
 
    /* Enable access to FPU registers.
*/ mrs x1, cpacr_el1 @@ -74,28 +74,8 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm: stp q28, q29, [x0], #0x20 stp q30, q31, [x0], #0x20 - /* Save cpuactlr/cpuectlr. */ - mrs x1, cpuectlr_el1 - mrs x2, cpuactlr_el1 - stp x1, x2, [x0], #0x10 - - /* Save ttbr0/ttbr1. */ - mrs x1, ttbr0_el1 - mrs x2, ttbr1_el1 - stp x1, x2, [x0], #0x10 - - /* Save tcr/mair. */ - mrs x1, tcr_el1 - mrs x2, mair_el1 - stp x1, x2, [x0], #0x10 - - /* Save sctlr/tpidr. */ - mrs x1, sctlr_el1 - mrs x2, tpidr_el1 - stp x1, x2, [x0], #0x10 - - /* Save the virtual resumption entrypoint and cntv_cval_el0. */ - adr x1, 77f + /* Save tpidr/cntv_cval_el0. */ + mrs x1, tpidr_el1 mrs x2, cntv_cval_el0 stp x1, x2, [x0], #0x10 @@ -114,8 +94,8 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm: 1: /* Suspend. */ LOAD_IMMEDIATE_32(x0, 0xC4000001) LOAD_IMMEDIATE_32(x1, 0x0201001B) - mov x2, x17 - mov x3, x16 + mov x2, x16 + mov x3, x17 smc #1 0: b 0b @@ -124,65 +104,6 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm: .global _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm .type _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm, %function _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm: - /* Mask interrupts. */ - msr daifset, #0xF - - /* Save the argument. */ - mov x21, x0 - - /* Check that we're at the correct exception level. */ - mrs x0, currentel - - /* Check if we're EL1. */ - cmp x0, #0x4 - b.eq 3f - - /* Check if we're EL2. */ - cmp x0, #0x8 - b.eq 2f - -1: /* We're running at EL3. */ - b 1b - -2: /* We're running at EL2. */ - b 2b - -3: /* We're running at EL1. */ - - /* Invalidate the L1 cache. */ - mov x0, #0 - bl _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm - - /* Get the current core id. */ - mrs x0, mpidr_el1 - and x0, x0, #0xFF - - /* If we're on core0, we want to invalidate the L2 cache. */ - cbnz x0, 4f - - mov x0, #1 - bl _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm - -4: /* Invalidate the L1 cache. */ - mov x0, #0 - bl _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm - - /* Invalidate the instruction cache. */ - ic ialluis - dsb sy - isb - - /* Invalidate the entire tlb. */ - tlbi vmalle1is - dsb sy - isb - - /* Switch to sp 1. */ - msr spsel, #1 - - /* Prepare to restore the saved context. */ - mov x0, x21 - /* Enable access to FPU registers. */ mrs x1, cpacr_el1 orr x1, x1, #0x100000 @@ -226,121 +147,12 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm: ldp q28, q29, [x0], #0x20 ldp q30, q31, [x0], #0x20 - /* Restore cpuactlr/cpuectlr. */ + /* Restore tpidr/cntv_cval_el0. */ ldp x1, x2, [x0], #0x10 - mrs x3, cpuectlr_el1 - cmp x1, x3 -5: b.ne 5b - mrs x3, cpuactlr_el1 - cmp x2, x3 -6: b.ne 6b - - /* Restore ttbr0/ttbr1. */ - ldp x1, x2, [x0], #0x10 - msr ttbr0_el1, x1 - msr ttbr1_el1, x2 - - /* Restore tcr/mair. */ - ldp x1, x2, [x0], #0x10 - msr tcr_el1, x1 - msr mair_el1, x2 - - /* Get sctlr, tpidr, the entrypoint, and cntv_cval_el0. */ - ldp x1, x2, [x0], #0x10 - ldp x3, x4, [x0], #0x10 - - /* Set the global context back into x18/tpidr. */ - msr tpidr_el1, x2 - msr cntv_cval_el0, x4 - dsb sy - isb - - /* Restore sctlr with the wxn bit cleared. */ - bic x2, x1, #0x80000 - msr sctlr_el1, x2 - dsb sy - isb - - /* Jump to the entrypoint. */ - br x3 - -77: /* Virtual resumption entrypoint. */ - - /* Restore sctlr. 
*/ - msr sctlr_el1, x1 + msr tpidr_el1, x1 + msr cntv_cval_el0, x2 dsb sy isb - ret - -/* ams::kern::board::nintendo::nx::KSleepManager::InvalidateDataCacheForResumeEntry(uintptr_t level) */ -.section .sleep._ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm, "ax", %progbits -.global _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm -.type _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm, %function -_ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm: - /* cpu::DataSynchronizationBarrier(); */ - dsb sy - - /* const u64 level_sel_value = level << 1; */ - lsl x8, x0, #1 - - /* cpu::SetCsselrEl1(level_sel_value); */ - msr csselr_el1, x8 - - /* cpu::InstructionMemoryBarrier(); */ - isb - - /* CacheSizeIdAccessor ccsidr_el1; */ - mrs x13, ccsidr_el1 - - /* const int num_ways = ccsidr_el1.GetAssociativity(); */ - ubfx w10, w13, #3, #0xA - - /* const int line_size = ccsidr_el1.GetLineSize(); */ - and w11, w13, #7 - - /* const int num_sets = ccsidr_el1.GetNumberOfSets(); */ - ubfx w13, w13, #0xD, #0xF - - /* int way = 0; */ - mov w9, wzr - - /* const u64 set_shift = static_cast(line_size + 4); */ - add w11, w11, #4 - - /* const u64 way_shift = static_cast(__builtin_clz(num_ways)); */ - clz w12, w10 - - -0: /* do { */ - /* int set = 0; */ - mov w14, wzr - - /* const u64 way_value = (static_cast(way) << way_shift); */ - lsl w15, w9, w12 - -1: /* do { */ - - /* const u64 isw_value = (static_cast(set) << set_shift) | way_value | level_sel_value; */ - lsl w16, w14, w11 - orr w16, w16, w15 - sxtw x16, w16 - orr x16, x16, x8 - - /* __asm__ __volatile__("dc isw, %0" :: "r"(isw_value) : "memory"); */ - dc isw, x16 - - /* while (set <= num_sets); */ - cmp w13, w14 - add w14, w14, #1 - b.ne 1b - - /* while (way <= num_ways); */ - cmp w9, w10 - add w9, w9, #1 - b.ne 0b - - /* cpu::EnsureInstructionConsistency(); */ - dsb sy - isb + /* Return. */ ret diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp index ea67d3b9a..efe391c16 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp @@ -382,7 +382,7 @@ namespace ams::kern::board::nintendo::nx { return static_cast((value >> 32) & 0xFF); } - void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { + void KSystemControl::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn(core_id, entrypoint, arg)) == 0); } diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_lps_driver.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_lps_driver.cpp index bce9da12a..a1f1cf2f6 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_lps_driver.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_lps_driver.cpp @@ -395,7 +395,7 @@ namespace ams::kern::board::nintendo::nx::lps { R_SUCCEED(); } - void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry) { + void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry, uintptr_t entry_arg) { /* Verify that we're allowed to perform suspension. 
*/
        MESOSPHERE_ABORT_UNLESS(g_lps_init_done);
        MESOSPHERE_ABORT_UNLESS(GetCurrentCoreId() == 0);
@@ -416,7 +416,7 @@ namespace ams::kern::board::nintendo::nx::lps {
        Read(g_pmc_address + APBDEV_PMC_SCRATCH0);
 
        /* Invoke the sleep handler. */
-       KSleepManager::CpuSleepHandler(arg, entry);
+       KSleepManager::CpuSleepHandler(arg, entry, entry_arg);
 
        /* Disable deep power down. */
        Write(g_pmc_address + APBDEV_PMC_DPD_ENABLE, 0);
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_lps_driver.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_lps_driver.hpp
index 3d702a7a4..0c69d5da9 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_lps_driver.hpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_lps_driver.hpp
@@ -22,7 +22,7 @@ namespace ams::kern::board::nintendo::nx {
 
    void Initialize();
    Result EnableSuspend(bool enable);
-   void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry);
+   void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry, uintptr_t entry_arg);
    void ResumeBpmpFirmware();
 
}
diff --git a/libraries/libmesosphere/source/kern_k_system_control_base.cpp b/libraries/libmesosphere/source/kern_k_system_control_base.cpp
index f04f896bc..77582203c 100644
--- a/libraries/libmesosphere/source/kern_k_system_control_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_system_control_base.cpp
@@ -20,6 +20,13 @@
 
 namespace ams::kern {
 
+    namespace init {
+
+        /* TODO: Is this function name architecture specific? */
+        void StartOtherCore(const ams::kern::init::KInitArguments *init_args);
+
+    }
+
     /* Initialization. */
     size_t KSystemControlBase::Init::GetRealMemorySize() {
         return ams::kern::MainMemorySize;
@@ -68,7 +75,7 @@ namespace ams::kern {
         return 0;
     }
 
-    void KSystemControlBase::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
+    void KSystemControlBase::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
         #if defined(ATMOSPHERE_ARCH_ARM64)
         MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0, false>(core_id, entrypoint, arg)) == 0);
         #else
         AMS_INFINITE_LOOP();
         #endif
     }
 
+    void KSystemControlBase::Init::TurnOnCpu(u64 core_id, const ams::kern::init::KInitArguments *args) {
+        /* Get entrypoint. */
+        KPhysicalAddress entrypoint = Null<KPhysicalAddress>;
+        while (!cpu::GetPhysicalAddressReadable(std::addressof(entrypoint), reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore), true)) { /* ... */ }
+
+        /* Get arguments. */
+        KPhysicalAddress args_addr = Null<KPhysicalAddress>;
+        while (!cpu::GetPhysicalAddressReadable(std::addressof(args_addr), reinterpret_cast<uintptr_t>(args), true)) { /* ... */ }
+
+        /* Ensure cache is correct for the initial arguments. */
+        cpu::StoreDataCacheForInitArguments(args, sizeof(*args));
+
+        /* Turn on the cpu. */
+        KSystemControl::Init::CpuOnImpl(core_id, GetInteger(entrypoint), GetInteger(args_addr));
+    }
+
     /* Randomness for Initialization. */
     void KSystemControlBase::Init::GenerateRandom(u64 *dst, size_t count) {
         if (AMS_UNLIKELY(!s_initialized_random_generator)) {
diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
index e69a9ddab..240faf484 100644
--- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
+++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
@@ -29,48 +29,63 @@ namespace ams::kern::init {
 
    /* Prototypes for functions declared in ASM that we need to reference.
*/
    void StartOtherCore(const ams::kern::init::KInitArguments *init_args);
 
+   void IdentityMappedFunctionAreaBegin();
+   void IdentityMappedFunctionAreaEnd();
 
    size_t GetMiscUnknownDebugRegionSize();
 
+   void InitializeDebugRegisters();
+   void InitializeExceptionVectors();
+
    namespace {
 
        /* Global Allocator. */
        constinit KInitialPageAllocator g_initial_page_allocator;
 
-       /* Global initial arguments array. */
-       constinit KPhysicalAddress g_init_arguments_phys_addr[cpu::NumCores];
        constinit KInitArguments g_init_arguments[cpu::NumCores];
 
+       /* Globals for passing data between InitializeCorePhase1 and InitializeCorePhase2. */
+       constinit InitialProcessBinaryLayout g_phase2_initial_process_binary_layout{};
+       constinit KPhysicalAddress g_phase2_resource_end_phys_addr = Null<KPhysicalAddress>;
+       constinit u64 g_phase2_linear_region_phys_to_virt_diff = 0;
+
        /* Page table attributes. */
+       constexpr PageTableEntry KernelTextAttribute(PageTableEntry::Permission_KernelRX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
        constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
        constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
        constexpr PageTableEntry KernelMmioAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_Device_nGnRE, PageTableEntry::Shareable_OuterShareable, PageTableEntry::MappingFlag_Mapped);
 
        constexpr PageTableEntry KernelRwDataUncachedAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemoryNotCacheable, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
 
-       void StoreDataCache(const void *addr, size_t size) {
-           const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), cpu::DataCacheLineSize);
-           for (size_t stored = 0; stored < size; stored += cpu::DataCacheLineSize) {
-               __asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(start + stored) : "memory");
-           }
-           cpu::DataSynchronizationBarrier();
-       }
-
-       void TurnOnAllCores(uintptr_t start_other_core_phys) {
+       void TurnOnAllCores() {
           cpu::MultiprocessorAffinityRegisterAccessor mpidr;
           const auto arg = mpidr.GetCpuOnArgument();
           const auto current_core = mpidr.GetAff0();
 
           for (s32 i = 0; i < static_cast<s32>(cpu::NumCores); i++) {
               if (static_cast<s32>(current_core) != i) {
-                  KSystemControl::Init::CpuOn(arg | i, start_other_core_phys, GetInteger(g_init_arguments_phys_addr[i]));
+                  KSystemControl::Init::TurnOnCpu(arg | i, g_init_arguments + i);
               }
           }
       }
 
+       void InvokeMain(u64 core_id) {
+           /* Clear cpacr_el1. */
+           cpu::SetCpacrEl1(0);
+           cpu::InstructionMemoryBarrier();
+
+           /* Initialize registers. */
+           InitializeDebugRegisters();
+           InitializeExceptionVectors();
+
+           /* Set exception stack. */
+           cpu::SetCntvCvalEl0(GetInteger(KMemoryLayout::GetExceptionStackTopAddress(static_cast<s32>(core_id))) - sizeof(KThread::StackParameters));
+
+           /* Call main. */
+           HorizonKernelMain(static_cast<s32>(core_id));
+       }
+
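Taken with TurnOnAllCores above, secondary bring-up now follows a single path for cold boot and (via the sleep manager) resume. A sketch of the chain; the wrapper function is hypothetical, the other names are the patch's own:

    void BootSecondaryCoreSketch(s32 core, u64 mpidr_cpu_on_arg) {
        /* Core 0 fills g_init_arguments[core], then asks the secure monitor to start it. */
        KSystemControl::Init::TurnOnCpu(mpidr_cpu_on_arg | core, g_init_arguments + core);

        /* The target core then runs, in order:                                       */
        /*   StartOtherCore(args): restore ttbr0/ttbr1/tcr/mair/sctlr/... from args,  */
        /*                         set sp, and branch to args->entrypoint;            */
        /*   InvokeMain(core):     set up debug registers, exception vectors, and the */
        /*                         exception stack, then call HorizonKernelMain(core).*/
    }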
-       void SetupInitialArguments(KInitialPageTable &init_pt, KInitialPageAllocator &allocator) {
-           AMS_UNUSED(init_pt, allocator);
+       void SetupInitialArguments() {
 
           /* Get parameters for initial arguments. */
           const u64 ttbr0 = cpu::GetTtbr0El1();
           const u64 ttbr1 = cpu::GetTtbr1El1();
@@ -84,13 +99,6 @@ namespace ams::kern::init {
               /* Get the arguments. */
               KInitArguments *init_args = g_init_arguments + i;
 
-              /* Translate to a physical address. */
-              /* KPhysicalAddress phys_addr = Null<KPhysicalAddress>; */
-              /* if (cpu::GetPhysicalAddressWritable(std::addressof(phys_addr), KVirtualAddress(init_args), true)) { */
-              /*     g_init_arguments_phys_addr[i] = phys_addr; */
-              /* } */
-              g_init_arguments_phys_addr[i] = init_pt.GetPhysicalAddress(KVirtualAddress(init_args));
-
               /* Set the arguments. */
               init_args->ttbr0 = ttbr0;
               init_args->ttbr1 = ttbr1;
@@ -100,14 +108,9 @@ namespace ams::kern::init {
               init_args->cpuectlr = cpuectlr;
               init_args->sctlr = sctlr;
               init_args->sp = GetInteger(KMemoryLayout::GetMainStackTopAddress(i)) - sizeof(KThread::StackParameters);
-              init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::HorizonKernelMain);
+              init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::init::InvokeMain);
               init_args->argument = static_cast<u64>(i);
-              init_args->setup_function = reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore);
-              init_args->exception_stack = GetInteger(KMemoryLayout::GetExceptionStackTopAddress(i)) - sizeof(KThread::StackParameters);
           }
-
-          /* Ensure the arguments are written to memory. */
-          StoreDataCache(g_init_arguments, sizeof(g_init_arguments));
       }
 
       KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, KInitialPageTable &pt, KMemoryRegionTree &tree, u32 type_id, size_t guard_size) {
@@ -167,18 +170,83 @@ namespace ams::kern::init {
           const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate(PageSize);
           MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id));
 
-          page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator);
+          page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator, 0);
       }
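The allocator class added just below keeps its bookkeeping inside the freed pages themselves. A minimal standalone sketch of that intrusive free-list idea; the real class additionally converts between physical and virtual addresses via phys_to_virt_offset and aborts on misuse:

    struct FreeListEntry { FreeListEntry *next; };

    struct PageFreeListSketch {
        FreeListEntry *head = nullptr;
        void Free(void *page) {                       /* freed page stores the link itself */
            auto *e = static_cast<FreeListEntry *>(page);
            e->next = head;
            head    = e;
        }
        void *Allocate() {                            /* pop the most recently freed page */
            FreeListEntry *e = head;
            head = e ? e->next : nullptr;
            return e;
        }
    };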
 
+       class KInitialPageAllocatorForFinalizeIdentityMapping final {
+           private:
+               struct FreeListEntry {
+                   FreeListEntry *next;
+               };
+           private:
+               FreeListEntry *m_free_list_head;
+               u64 m_phys_to_virt_offset;
+           public:
+               template<kern::arch::arm64::init::IsInitialPageAllocator PageAllocator>
+               KInitialPageAllocatorForFinalizeIdentityMapping(PageAllocator &allocator, u64 phys_to_virt) : m_free_list_head(nullptr), m_phys_to_virt_offset(phys_to_virt) {
+                   /* Allocate and free two pages. */
+                   for (size_t i = 0; i < 2; ++i) {
+                       KPhysicalAddress page = allocator.Allocate(PageSize);
+                       MESOSPHERE_INIT_ABORT_UNLESS(page != Null<KPhysicalAddress>);
+
+                       /* Free the pages. */
+                       this->Free(page, PageSize);
+                   }
+               }
+           public:
+               KPhysicalAddress Allocate(size_t size) {
+                   /* Check that the size is correct. */
+                   MESOSPHERE_INIT_ABORT_UNLESS(size == PageSize);
+
+                   /* Check that we have a free page. */
+                   FreeListEntry *head = m_free_list_head;
+                   MESOSPHERE_INIT_ABORT_UNLESS(head != nullptr);
+
+                   /* Update the free list. */
+                   m_free_list_head = head->next;
+
+                   /* Return the page. */
+                   return KPhysicalAddress(reinterpret_cast<uintptr_t>(head) - m_phys_to_virt_offset);
+               }
+
+               void Free(KPhysicalAddress phys_addr, size_t size) {
+                   /* Check that the size is correct. */
+                   MESOSPHERE_INIT_ABORT_UNLESS(size == PageSize);
+
+                   /* Convert to a free list entry. */
+                   FreeListEntry *fl = reinterpret_cast<FreeListEntry *>(GetInteger(phys_addr) + m_phys_to_virt_offset);
+
+                   /* Insert into free list. */
+                   fl->next = m_free_list_head;
+                   m_free_list_head = fl;
+               }
+       };
+       static_assert(kern::arch::arm64::init::IsInitialPageAllocator<KInitialPageAllocatorForFinalizeIdentityMapping>);
+
+       void FinalizeIdentityMapping(KInitialPageTable &init_pt, KInitialPageAllocator &allocator, u64 phys_to_virt_offset) {
+           /* Create an allocator for identity mapping finalization. */
+           KInitialPageAllocatorForFinalizeIdentityMapping finalize_allocator(allocator, phys_to_virt_offset);
+
+           /* Get the physical address of crt0. */
+           const KPhysicalAddress start_phys_addr = init_pt.GetPhysicalAddress(reinterpret_cast<uintptr_t>(::ams::kern::init::IdentityMappedFunctionAreaBegin));
+
+           /* Unmap the entire identity mapping. */
+           init_pt.UnmapTtbr0Entries(phys_to_virt_offset);
+
+           /* Re-map only the first page of code. */
+           const size_t size = util::AlignUp(reinterpret_cast<uintptr_t>(::ams::kern::init::IdentityMappedFunctionAreaEnd) - reinterpret_cast<uintptr_t>(::ams::kern::init::IdentityMappedFunctionAreaBegin), PageSize);
+           init_pt.Map(KVirtualAddress(GetInteger(start_phys_addr)), size, start_phys_addr, KernelTextAttribute, finalize_allocator, phys_to_virt_offset);
+       }
+
    }
 
-   void InitializeCore(uintptr_t misc_unk_debug_phys_addr, void **initial_state) {
+   void InitializeCorePhase1(uintptr_t misc_unk_debug_phys_addr, void **initial_state) {
       /* Ensure our first argument is page aligned. */
       MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(misc_unk_debug_phys_addr, PageSize));
 
       /* Decode the initial state. */
-      const auto initial_page_allocator_state = *static_cast<KInitialPageAllocator::State *>(initial_state[0]);
-      const auto initial_process_binary_layout = *static_cast<InitialProcessBinaryLayout *>(initial_state[1]);
+      const auto initial_page_allocator_state = *static_cast<KInitialPageAllocator::State *>(initial_state[0]);
+      g_phase2_initial_process_binary_layout = *static_cast<InitialProcessBinaryLayout *>(initial_state[1]);
 
       /* Restore the page allocator state setup by kernel loader. */
       g_initial_page_allocator.InitializeFromState(std::addressof(initial_page_allocator_state));
@@ -343,7 +411,7 @@ namespace ams::kern::init {
           largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr));
 
           /* Map the page in to our page table. */
-          init_pt.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator);
+          init_pt.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator, 0);
       }
   } while (largest != nullptr);
}
@@ -400,7 +468,7 @@ namespace ams::kern::init {
 
           /* Map the page in to our page table. */
           const auto attribute = largest->HasTypeAttribute(KMemoryRegionAttr_Uncached) ? KernelRwDataUncachedAttribute : KernelRwDataAttribute;
-          init_pt.Map(map_virt_addr, map_size, map_phys_addr, attribute, g_initial_page_allocator);
+          init_pt.Map(map_virt_addr, map_size, map_phys_addr, attribute, g_initial_page_allocator, 0);
       }
   } while (largest != nullptr);
}
@@ -412,7 +480,7 @@ namespace ams::kern::init {
   MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
 
   /* Map the slab region. */
-  init_pt.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
+  init_pt.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator, 0);
 
   /* Physically randomize the slab region. */
   /* NOTE: Nintendo does this only on 10.0.0+ */
@@ -426,6 +494,8 @@ namespace ams::kern::init {
 
   /* Determine size available for kernel page table heaps.
*/ const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size; + g_phase2_resource_end_phys_addr = resource_end_phys_addr; + const size_t page_table_heap_size = GetInteger(resource_end_phys_addr) - GetInteger(secure_applet_end_phys_addr); /* Insert a physical region for the kernel page table heap region */ @@ -472,7 +542,7 @@ namespace ams::kern::init { cur_size += region.GetSize(); } else { const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff; - init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator); + init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator, 0); cur_phys_addr = region.GetAddress(); cur_size = region.GetSize(); } @@ -491,7 +561,7 @@ namespace ams::kern::init { /* Map the last block, which we may have skipped. */ if (cur_size != 0) { const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff; - init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator); + init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator, 0); } } @@ -510,23 +580,34 @@ namespace ams::kern::init { } /* Setup the initial arguments. */ - SetupInitialArguments(init_pt, g_initial_page_allocator); + SetupInitialArguments(); + + /* Set linear difference for Phase2. */ + g_phase2_linear_region_phys_to_virt_diff = linear_region_phys_to_virt_diff; + } + + void InitializeCorePhase2() { + /* Create page table object for use during remaining initialization. */ + KInitialPageTable init_pt; + + /* Unmap the identity mapping. */ + FinalizeIdentityMapping(init_pt, g_initial_page_allocator, g_phase2_linear_region_phys_to_virt_diff); /* Finalize the page allocator, we're done allocating at this point. */ KInitialPageAllocator::State final_init_page_table_state; g_initial_page_allocator.GetFinalState(std::addressof(final_init_page_table_state)); const KPhysicalAddress final_init_page_table_end_address = final_init_page_table_state.end_address; - const size_t init_page_table_region_size = GetInteger(final_init_page_table_end_address) - GetInteger(resource_end_phys_addr); + const size_t init_page_table_region_size = GetInteger(final_init_page_table_end_address) - GetInteger(g_phase2_resource_end_phys_addr); /* Insert regions for the initial page table region. 
*/ - MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(resource_end_phys_addr), init_page_table_region_size, KMemoryRegionType_DramKernelInitPt)); - MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(resource_end_phys_addr) + linear_region_phys_to_virt_diff, init_page_table_region_size, KMemoryRegionType_VirtualDramKernelInitPt)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(g_phase2_resource_end_phys_addr), init_page_table_region_size, KMemoryRegionType_DramKernelInitPt)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(g_phase2_resource_end_phys_addr) + g_phase2_linear_region_phys_to_virt_diff, init_page_table_region_size, KMemoryRegionType_VirtualDramKernelInitPt)); /* Insert a physical region for the kernel trace buffer */ if constexpr (IsKTraceEnabled) { - const KPhysicalAddress ktrace_buffer_phys_addr = GetInteger(resource_end_phys_addr) + init_page_table_region_size; + const KPhysicalAddress ktrace_buffer_phys_addr = GetInteger(g_phase2_resource_end_phys_addr) + init_page_table_region_size; MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(ktrace_buffer_phys_addr), KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer)); - MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(ktrace_buffer_phys_addr) + linear_region_phys_to_virt_diff, KTraceBufferSize, GetTypeForVirtualLinearMapping(KMemoryRegionType_KernelTraceBuffer))); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(ktrace_buffer_phys_addr) + g_phase2_linear_region_phys_to_virt_diff, KTraceBufferSize, GetTypeForVirtualLinearMapping(KMemoryRegionType_KernelTraceBuffer))); } /* All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to some pool partition. Tag them. */ @@ -538,12 +619,12 @@ namespace ams::kern::init { } /* Set the linear memory offsets, to enable conversion between physical and virtual addresses. */ - KMemoryLayout::InitializeLinearMemoryAddresses(aligned_linear_phys_start, linear_region_start); + KMemoryLayout::InitializeLinearMemoryAddresses(g_phase2_linear_region_phys_to_virt_diff); /* Set the initial process binary physical address. */ /* NOTE: Nintendo does this after pool partition setup, but it's a requirement that we do it before */ /* to retain compatibility with < 5.0.0. */ - const KPhysicalAddress ini_address = initial_process_binary_layout.address; + const KPhysicalAddress ini_address = g_phase2_initial_process_binary_layout.address; MESOSPHERE_INIT_ABORT_UNLESS(ini_address != Null); SetInitialProcessBinaryPhysicalAddress(ini_address); @@ -567,11 +648,11 @@ namespace ams::kern::init { KMemoryLayout::InitializeLinearMemoryRegionTrees(); /* Turn on all other cores. 
diff --git a/mesosphere/kernel/source/arch/arm64/init/start.s b/mesosphere/kernel/source/arch/arm64/init/start.s
index 3b68f5e25..a8d80a2e7 100644
--- a/mesosphere/kernel/source/arch/arm64/init/start.s
+++ b/mesosphere/kernel/source/arch/arm64/init/start.s
@@ -33,7 +33,18 @@
     adr reg, label; \
     ldr reg, [reg]
 
+
 .section .crt0.text.start, "ax", %progbits
+
+/* ams::kern::init::IdentityMappedFunctionAreaBegin() */
+.global _ZN3ams4kern4init31IdentityMappedFunctionAreaBeginEv
+.type _ZN3ams4kern4init31IdentityMappedFunctionAreaBeginEv, %function
+_ZN3ams4kern4init31IdentityMappedFunctionAreaBeginEv:
+/* NOTE: This is not a real function, and only exists as a label for safety. */
+
+/* ================ Functions after this line remain identity-mapped after initialization finishes. ================ */
+
 .global _start
 _start:
     b _ZN3ams4kern4init10StartCore0Emm
@@ -145,13 +156,31 @@ _ZN3ams4kern4init10StartCore0Emm:
     /* Call ams::kern::init::InitializeCore(uintptr_t, void **) */
     mov x1, x0  /* Kernelldr returns a state object for the kernel to re-use. */
     mov x0, x21 /* Use the address we determined earlier. */
-    bl _ZN3ams4kern4init14InitializeCoreEmPPv
+    bl _ZN3ams4kern4init20InitializeCorePhase1EmPPv
 
     /* Get the init arguments for core 0. */
     mov x0, xzr
-    bl _ZN3ams4kern4init23GetInitArgumentsAddressEi
+    bl _ZN3ams4kern4init16GetInitArgumentsEi
 
-    bl _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE
+    /* Setup the stack pointer. */
+    ldr x2, [x0, #(INIT_ARGUMENTS_SP)]
+    mov sp, x2
+
+    /* Perform further initialization with the stack pointer set up, as required. */
+    /* This will include e.g. unmapping the identity mapping. */
+    bl _ZN3ams4kern4init20InitializeCorePhase2Ev
+
+    /* Get the init arguments for core 0. */
+    mov x0, xzr
+    bl _ZN3ams4kern4init16GetInitArgumentsEi
+
+    /* Invoke the entrypoint. */
+    ldr x1, [x0, #(INIT_ARGUMENTS_ENTRYPOINT)]
+    ldr x0, [x0, #(INIT_ARGUMENTS_ARGUMENT)]
+    blr x1
+
+0:  /* If we return here, something has gone wrong, so wait forever. */
+    b 0b
 
 /* ams::kern::init::StartOtherCore(const ams::kern::init::KInitArguments *) */
 .section .crt0.text._ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE, "ax", %progbits
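In C terms, the new core-0 tail in the hunk above does the following. This is an editorial sketch: the field names mirror the INIT_ARGUMENTS_* offsets the assembly loads, and the stack switch itself has no C equivalent:

    #include <cstdint>

    /* Illustrative stand-in for KInitArguments, matching the offsets used by the assembly. */
    struct InitArgs {
        uint64_t sp;         /* INIT_ARGUMENTS_SP         */
        uint64_t entrypoint; /* INIT_ARGUMENTS_ENTRYPOINT */
        uint64_t argument;   /* INIT_ARGUMENTS_ARGUMENT   */
    };

    [[noreturn]] void Core0Tail(InitArgs *(*get_init_arguments)(int32_t),
                                void (*initialize_core_phase2)()) {
        /* (mov sp, <args->sp> happens in assembly; phase 2 then runs on the final stack.) */
        initialize_core_phase2();
        InitArgs *args = get_init_arguments(0);     /* re-fetch the args for core 0 */
        auto entry = reinterpret_cast<void (*)(uint64_t)>(args->entrypoint);
        entry(args->argument);                      /* blr x1 */
        for (;;) { /* if the entrypoint ever returns, something is wrong: spin forever */ }
    }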
@@ -221,52 +250,26 @@ _ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE:
     dsb sy
     isb
 
+    /* Load remaining needed fields from the init args. */
+    ldr x3, [x20, #(INIT_ARGUMENTS_SCTLR)]
+    ldr x2, [x20, #(INIT_ARGUMENTS_SP)]
+    ldr x1, [x20, #(INIT_ARGUMENTS_ENTRYPOINT)]
+    ldr x0, [x20, #(INIT_ARGUMENTS_ARGUMENT)]
+
     /* Set sctlr_el1 and ensure instruction consistency. */
-    ldr x1, [x20, #(INIT_ARGUMENTS_SCTLR)]
-    msr sctlr_el1, x1
+    msr sctlr_el1, x3
     dsb sy
     isb
 
-    /* Jump to the virtual address equivalent to ams::kern::init::InvokeEntrypoint */
-    ldr x1, [x20, #(INIT_ARGUMENTS_SETUP_FUNCTION)]
-    adr x2, _ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE
-    sub x1, x1, x2
-    adr x2, _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE
-    add x1, x1, x2
-    mov x0, x20
-    br x1
+    /* Set the stack pointer. */
+    mov sp, x2
 
-/* ams::kern::init::InvokeEntrypoint(const ams::kern::init::KInitArguments *) */
-.section .crt0.text._ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE, "ax", %progbits
-.global _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE
-.type _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE, %function
-_ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE:
-    /* Preserve the KInitArguments pointer in a register. */
-    mov x20, x0
+    /* Invoke the entrypoint. */
+    blr x1
 
-    /* Clear CPACR_EL1. This will prevent classes of traps (SVE, etc). */
-    msr cpacr_el1, xzr
-    isb
-
-    /* Setup the stack pointer. */
-    ldr x1, [x20, #(INIT_ARGUMENTS_SP)]
-    mov sp, x1
-
-    /* Ensure that system debug registers are setup. */
-    bl _ZN3ams4kern4init24InitializeDebugRegistersEv
-
-    /* Ensure that the exception vectors are setup. */
-    bl _ZN3ams4kern4init26InitializeExceptionVectorsEv
-
-    /* Setup the exception stack in cntv_cval_el0. */
-    ldr x1, [x20, #(INIT_ARGUMENTS_EXCEPTION_STACK)]
-    msr cntv_cval_el0, x1
-
-    /* Jump to the entrypoint. */
-    ldr x1, [x20, #(INIT_ARGUMENTS_ENTRYPOINT)]
-    ldr x0, [x20, #(INIT_ARGUMENTS_ARGUMENT)]
-    br x1
+0:  /* If we return here, something has gone wrong, so wait forever. */
+    b 0b
 
 /* TODO: Can we remove this while retaining QEMU support? */
 #ifndef ATMOSPHERE_BOARD_NINTENDO_NX
@@ -559,3 +562,12 @@ _ZN3ams4kern4arch5arm643cpu36FlushEntireDataCacheImplWithoutStackEv:
         b 0b
 3:
     ret
+
+
+/* ================ Functions before this line remain identity-mapped after initialization finishes. ================ */
+
+/* ams::kern::init::IdentityMappedFunctionAreaEnd() */
+.global _ZN3ams4kern4init29IdentityMappedFunctionAreaEndEv
+.type _ZN3ams4kern4init29IdentityMappedFunctionAreaEndEv, %function
+_ZN3ams4kern4init29IdentityMappedFunctionAreaEndEv:
+/* NOTE: This is not a real function, and only exists as a label for safety. */
\ No newline at end of file
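The Begin/End labels added above bracket the code that must stay identity-mapped after initialization with named symbols. One plausible use, an assumption on this editor's part and not shown in the patch, is computing the page-aligned extent of that area from the two labels:

    #include <cstddef>
    #include <cstdint>

    /* Illustrative un-mangled stand-ins for the bracketing labels defined in start.s. */
    extern "C" const char IdentityMappedFunctionAreaBegin[];
    extern "C" const char IdentityMappedFunctionAreaEnd[];

    constexpr uintptr_t PageSize = 0x1000;

    inline size_t GetIdentityMappedAreaSize() {
        const uintptr_t begin = reinterpret_cast<uintptr_t>(IdentityMappedFunctionAreaBegin) & ~(PageSize - 1);
        const uintptr_t end   = (reinterpret_cast<uintptr_t>(IdentityMappedFunctionAreaEnd) + PageSize - 1) & ~(PageSize - 1);
        return end - begin;
    }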
diff --git a/mesosphere/kernel_ldr/source/kern_init_loader.cpp b/mesosphere/kernel_ldr/source/kern_init_loader.cpp
index 7e4a5947d..70e26866c 100644
--- a/mesosphere/kernel_ldr/source/kern_init_loader.cpp
+++ b/mesosphere/kernel_ldr/source/kern_init_loader.cpp
@@ -62,20 +62,20 @@ namespace ams::kern::init::loader {
         }
     }
 
-    void SetupInitialIdentityMapping(KInitialPageTable &init_pt, uintptr_t base_address, uintptr_t kernel_size, uintptr_t page_table_region, size_t page_table_region_size, KInitialPageTable::PageAllocator &allocator) {
+    void SetupInitialIdentityMapping(KInitialPageTable &init_pt, uintptr_t base_address, uintptr_t kernel_size, uintptr_t page_table_region, size_t page_table_region_size, KInitialPageAllocator &allocator) {
         /* Map in an RWX identity mapping for the kernel. */
         constexpr PageTableEntry KernelRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-        init_pt.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator);
+        init_pt.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator, 0);
 
         /* Map in an RWX identity mapping for ourselves. */
         constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
         const uintptr_t kernel_ldr_base = util::AlignDown(reinterpret_cast<uintptr_t>(__start__), PageSize);
         const uintptr_t kernel_ldr_size = util::AlignUp(reinterpret_cast<uintptr_t>(__end__), PageSize) - kernel_ldr_base;
-        init_pt.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelLdrRWXIdentityAttribute, allocator);
+        init_pt.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelLdrRWXIdentityAttribute, allocator, 0);
 
         /* Map in the page table region as RW- for ourselves. */
         constexpr PageTableEntry PageTableRegionRWAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-        init_pt.Map(page_table_region, page_table_region_size, page_table_region, PageTableRegionRWAttribute, allocator);
+        init_pt.Map(page_table_region, page_table_region_size, page_table_region, PageTableRegionRWAttribute, allocator, 0);
 
         /* Place the L1 table addresses in the relevant system registers. */
         cpu::SetTtbr0El1(init_pt.GetTtbr0L1TableAddress());
@@ -194,14 +194,14 @@ namespace ams::kern::init::loader {
 
         /* Map kernel .text as R-X. */
         constexpr PageTableEntry KernelTextAttribute(PageTableEntry::Permission_KernelRX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-        init_pt.Map(virtual_base_address + rx_offset, rx_end_offset - rx_offset, base_address + rx_offset, KernelTextAttribute, g_initial_page_allocator);
+        init_pt.Map(virtual_base_address + rx_offset, rx_end_offset - rx_offset, base_address + rx_offset, KernelTextAttribute, g_initial_page_allocator, 0);
 
         /* Map kernel .rodata and .rwdata as RW-. */
         /* Note that we will later reprotect .rodata as R-- */
         constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
         constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-        init_pt.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator);
-        init_pt.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator);
+        init_pt.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator, 0);
+        init_pt.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator, 0);
 
         /* Physically randomize the kernel region. */
         /* NOTE: Nintendo does this only on 10.0.0+ */
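The loader's self-map above rounds the __start__/__end__ image bounds out to page granularity before mapping. As a standalone illustration of that arithmetic, with plain bit operations in place of the util helpers:

    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t PageSize = 0x1000;

    constexpr uintptr_t AlignDown(uintptr_t value, uintptr_t align) { return value & ~(align - 1); }
    constexpr uintptr_t AlignUp(uintptr_t value, uintptr_t align)   { return (value + align - 1) & ~(align - 1); }

    struct Extent { uintptr_t base; size_t size; };

    /* Round an image's [start, end) out to whole pages, as the loader does for its self-map. */
    constexpr Extent GetPageAlignedExtent(uintptr_t start, uintptr_t end) {
        const uintptr_t base = AlignDown(start, PageSize);
        return { base, AlignUp(end, PageSize) - base };
    }

    /* Example: 0x40001234..0x40005678 rounds out to base 0x40001000, size 0x5000. */
    static_assert(GetPageAlignedExtent(0x40001234, 0x40005678).base == 0x40001000);
    static_assert(GetPageAlignedExtent(0x40001234, 0x40005678).size == 0x5000);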