diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index ea53fca94..593c251a0 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -293,7 +293,7 @@ namespace ams::kern::arch::arm64::init { } /* Swap the mappings. */ - const u64 attr_preserve_mask = (negative_block_size_for_mask | 0xFFFF000000000000ul) ^ ((1ul << 48) - 1); + const u64 attr_preserve_mask = (block_size - 1) | 0xFFFF000000000000ul; const size_t shift_for_contig = contig ? 4 : 0; size_t advanced_size = 0; const u64 src_attr_val = src_saved.GetRawAttributesUnsafeForSwap() & attr_preserve_mask; @@ -726,8 +726,8 @@ m_state.end_address = address; } - ALWAYS_INLINE void InitializeFromState(uintptr_t state_val) { - m_state = *reinterpret_cast<State *>(state_val); + ALWAYS_INLINE void InitializeFromState(const State *state) { + m_state = *state; } ALWAYS_INLINE void GetFinalState(State *out) { diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp index 056d5f32b..ae14d4974 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp @@ -16,6 +16,12 @@ #pragma once #include <mesosphere/kern_common.hpp> +namespace ams::kern { + + struct InitialProcessBinaryLayout; + +} + namespace ams::kern::board::nintendo::nx { class KSystemControl { @@ -25,7 +31,7 @@ namespace ams::kern::board::nintendo::nx { /* Initialization.
*/ static size_t GetIntendedMemorySize(); static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address); - static KPhysicalAddress GetInitialProcessBinaryPhysicalAddress(); + static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out); static bool ShouldIncreaseThreadResourceLimit(); static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg); static size_t GetApplicationPoolSize(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp index 8be5e14b1..7839a53e9 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp @@ -29,14 +29,19 @@ namespace ams::kern { u32 reserved; }; - NOINLINE size_t CopyInitialProcessBinaryToKernelMemory(); - NOINLINE void CreateAndRunInitialProcesses(); + struct InitialProcessBinaryLayout { + uintptr_t address; + uintptr_t _08; + }; + + KPhysicalAddress GetInitialProcessBinaryPhysicalAddress(); + void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr); u64 GetInitialProcessIdMin(); u64 GetInitialProcessIdMax(); - KVirtualAddress GetInitialProcessBinaryAddress(); size_t GetInitialProcessesSecureMemorySize(); - void LoadInitialProcessBinaryHeaderDeprecated(KPhysicalAddress pool_end); + NOINLINE size_t CopyInitialProcessBinaryToKernelMemory(); + NOINLINE void CreateAndRunInitialProcesses(); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_region.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_region.hpp index f31716e82..77d1a787a 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_region.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_region.hpp @@ -237,12 +237,6 @@ namespace ams::kern { public: NOINLINE void InsertDirectly(uintptr_t address, uintptr_t last_address, u32 attr = 0, u32 type_id = 0); NOINLINE bool 
Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0); - - NOINLINE KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id); - - ALWAYS_INLINE KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, size_t guard_size) { - return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size; - } public: /* Iterator accessors. */ iterator begin() { diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp index 7dc699e46..dcd6be936 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp @@ -346,8 +346,11 @@ namespace ams::kern::board::nintendo::nx { } } - KPhysicalAddress KSystemControl::Init::GetInitialProcessBinaryPhysicalAddress() { - return GetKernelPhysicalBaseAddress(DramPhysicalAddress) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax; + void KSystemControl::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) { + *out = { + .address = GetInteger(GetKernelPhysicalBaseAddress(DramPhysicalAddress)) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax, + ._08 = 0, + }; } bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() { diff --git a/libraries/libmesosphere/source/kern_initial_process.cpp b/libraries/libmesosphere/source/kern_initial_process.cpp index a8e2f2b16..81fff32ea 100644 --- a/libraries/libmesosphere/source/kern_initial_process.cpp +++ b/libraries/libmesosphere/source/kern_initial_process.cpp @@ -25,18 +25,18 @@ namespace ams::kern { s32 priority; }; + constinit KPhysicalAddress g_initial_process_binary_phys_addr = Null<KPhysicalAddress>; constinit KVirtualAddress g_initial_process_binary_address = Null<KVirtualAddress>; constinit InitialProcessBinaryHeader
g_initial_process_binary_header = {}; constinit size_t g_initial_process_secure_memory_size = 0; constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max(); constinit u64 g_initial_process_id_max = std::numeric_limits<u64>::min(); - void LoadInitialProcessBinaryHeader(KVirtualAddress virt_addr = Null<KVirtualAddress>) { + void LoadInitialProcessBinaryHeader() { if (g_initial_process_binary_header.magic != InitialProcessBinaryMagic) { - /* Get the virtual address, if it's not overridden. */ - if (virt_addr == Null<KVirtualAddress>) { - virt_addr = GetInitialProcessBinaryAddress(); - } + /* Get the virtual address. */ + MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr != Null<KPhysicalAddress>); + const KVirtualAddress virt_addr = KMemoryLayout::GetLinearVirtualAddress(g_initial_process_binary_phys_addr); /* Copy and validate the header. */ g_initial_process_binary_header = *GetPointer<InitialProcessBinaryHeader>(virt_addr); @@ -273,10 +273,18 @@ namespace ams::kern { } } - ALWAYS_INLINE KVirtualAddress GetInitialProcessBinaryAddress(KVirtualAddress pool_end) { - return pool_end - InitialProcessBinarySizeMax; - } + } + void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr) { + MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr == Null<KPhysicalAddress>); + + g_initial_process_binary_phys_addr = phys_addr; + } + + KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() { + MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr != Null<KPhysicalAddress>); + + return g_initial_process_binary_phys_addr; } u64 GetInitialProcessIdMin() { @@ -287,15 +295,6 @@ namespace ams::kern { return g_initial_process_id_max; } - KVirtualAddress GetInitialProcessBinaryAddress() { - /* Get, validate the pool region.
*/ - const auto *pool_region = KMemoryLayout::GetVirtualMemoryRegionTree().FindLastDerived(KMemoryRegionType_VirtualDramUserPool); - MESOSPHERE_INIT_ABORT_UNLESS(pool_region != nullptr); - MESOSPHERE_INIT_ABORT_UNLESS(pool_region->GetEndAddress() != 0); - MESOSPHERE_ABORT_UNLESS(pool_region->GetSize() >= InitialProcessBinarySizeMax); - return GetInitialProcessBinaryAddress(pool_region->GetEndAddress()); - } - size_t GetInitialProcessesSecureMemorySize() { LoadInitialProcessBinaryHeader(); @@ -321,10 +320,6 @@ namespace ams::kern { } } - void LoadInitialProcessBinaryHeaderDeprecated(KPhysicalAddress pool_end) { - LoadInitialProcessBinaryHeader(GetInitialProcessBinaryAddress(KMemoryLayout::GetLinearVirtualAddress(pool_end))); - } - void CreateAndRunInitialProcesses() { /* Allocate space for the processes. */ InitialProcessInfo *infos = static_cast<InitialProcessInfo *>(__builtin_alloca(sizeof(InitialProcessInfo) * g_initial_process_binary_header.num_processes)); diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp index 2a2db9319..176467964 100644 --- a/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp @@ -190,12 +190,6 @@ namespace ams::kern { static_assert(KMemoryManager::Pool_Unsafe == KMemoryManager::Pool_Application); static_assert(KMemoryManager::Pool_Secure == KMemoryManager::Pool_System); - /* NOTE: Beginning with 12.0.0 (and always, in mesosphere), the initial process binary is at the end of the pool region. */ - /* However, this is problematic for < 5.0.0, because we require the initial process binary to be parsed in order */ - /* to determine the pool sizes. Hence, we will force an initial binary load with the known pool end directly, so */ - /* that we retain compatibility with lower firmware versions.
*/ - LoadInitialProcessBinaryHeaderDeprecated(pool_end); - /* Get Secure pool size. */ const size_t secure_pool_size = [] ALWAYS_INLINE_LAMBDA (auto target_firmware) -> size_t { constexpr size_t LegacySecureKernelSize = 8_MB; /* KPageBuffer pages, other small kernel allocations. */ diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.cpp index 5171e1acd..7879dee5c 100644 --- a/libraries/libmesosphere/source/kern_k_memory_layout.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_layout.cpp @@ -111,43 +111,6 @@ namespace ams::kern { return true; } - KVirtualAddress KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) { - /* We want to find the total extents of the type id. */ - const auto extents = this->GetDerivedRegionExtents(type_id); - - /* Ensure that our alignment is correct. */ - MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(extents.GetAddress(), alignment)); - - const uintptr_t first_address = extents.GetAddress(); - const uintptr_t last_address = extents.GetLastAddress(); - - const uintptr_t first_index = first_address / alignment; - const uintptr_t last_index = last_address / alignment; - - while (true) { - const uintptr_t candidate = KSystemControl::Init::GenerateRandomRange(first_index, last_index) * alignment; - - /* Ensure that the candidate doesn't overflow with the size. */ - if (!(candidate < candidate + size)) { - continue; - } - - const uintptr_t candidate_last = candidate + size - 1; - - /* Ensure that the candidate fits within the region. */ - if (candidate_last > last_address) { - continue; - } - - /* Locate the candidate region, and ensure it fits and has the correct type id. 
*/ - if (const auto &candidate_region = *this->Find(candidate); !(candidate_last <= candidate_region.GetLastAddress() && candidate_region.GetType() == type_id)) { - continue; - } - - return candidate; - } - } - void KMemoryLayout::InitializeLinearMemoryRegionTrees() { /* Initialize linear trees. */ for (auto ®ion : GetPhysicalMemoryRegionTree()) { diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp index 9c5618e78..3f9a3f1bc 100644 --- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp @@ -107,7 +107,7 @@ namespace ams::kern { /* Free each region to its corresponding heap. */ size_t reserved_sizes[MaxManagerCount] = {}; - const KPhysicalAddress ini_start = KMemoryLayout::GetLinearPhysicalAddress(GetInitialProcessBinaryAddress()); + const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress(); const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax; const KPhysicalAddress ini_last = ini_end - 1; for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) { diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp index 5665a9f1b..90773bfd9 100644 --- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp +++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp @@ -48,16 +48,6 @@ namespace ams::kern::init { constexpr PageTableEntry KernelRwDataUncachedAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemoryNotCacheable, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); - void MapStackForCore(KInitialPageTable &page_table, KMemoryRegionType type, u32 core_id) { - constexpr size_t StackSize = PageSize; - constexpr size_t StackAlign = PageSize; - const KVirtualAddress stack_start_virt = 
KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(StackSize, StackAlign, KMemoryRegionType_KernelMisc, PageSize); - const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate(PageSize); - MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id)); - - page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator); - } - void StoreDataCache(const void *addr, size_t size) { uintptr_t start = util::AlignDown(reinterpret_cast(addr), cpu::DataCacheLineSize); uintptr_t end = reinterpret_cast(addr) + size; @@ -121,14 +111,78 @@ namespace ams::kern::init { StoreDataCache(g_init_arguments, sizeof(g_init_arguments)); } + KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, KInitialPageTable &pt, KMemoryRegionTree &tree, u32 type_id, size_t guard_size) { + /* Check that the size is valid. */ + MESOSPHERE_INIT_ABORT_UNLESS(size > 0); + + /* We want to find the total extents of the type id. */ + const auto extents = tree.GetDerivedRegionExtents(type_id); + + /* Ensure that our alignment is correct. */ + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(extents.GetAddress(), alignment)); + + const uintptr_t first_address = extents.GetAddress(); + const uintptr_t last_address = extents.GetLastAddress(); + + const uintptr_t first_index = first_address / alignment; + const uintptr_t last_index = last_address / alignment; + + while (true) { + const uintptr_t candidate_start = KSystemControl::Init::GenerateRandomRange(first_index, last_index) * alignment; + const uintptr_t candidate_end = candidate_start + size + guard_size; + + /* Ensure that the candidate doesn't overflow with the size/guard. 
*/ + if (!(candidate_start < candidate_end) || !(candidate_start >= guard_size)) { + continue; + } + + const uintptr_t candidate_last = candidate_end - 1; + + /* Ensure that the candidate fits within the region. */ + if (candidate_last > last_address) { + continue; + } + + /* Ensure that the candidate range is free. */ + if (!pt.IsFree(candidate_start, size)) { + continue; + } + + /* Locate the candidate's guard start, and ensure the whole range fits/has the correct type id. */ + if (const auto &candidate_region = *tree.Find(candidate_start - guard_size); !(candidate_last <= candidate_region.GetLastAddress() && candidate_region.GetType() == type_id)) { + continue; + } + + return candidate_start; + } + } + + KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, KInitialPageTable &pt, KMemoryRegionTree &tree, u32 type_id) { + return GetRandomAlignedRegionWithGuard(size, alignment, pt, tree, type_id, 0); + } + + void MapStackForCore(KInitialPageTable &page_table, KMemoryRegionType type, u32 core_id) { + constexpr size_t StackSize = PageSize; + constexpr size_t StackAlign = PageSize; + const KVirtualAddress stack_start_virt = GetRandomAlignedRegionWithGuard(StackSize, StackAlign, page_table, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_KernelMisc, PageSize); + const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate(PageSize); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id)); + + page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator); + } + } - void InitializeCore(uintptr_t misc_unk_debug_phys_addr, uintptr_t initial_page_allocator_state) { + void InitializeCore(uintptr_t misc_unk_debug_phys_addr, void **initial_state) { /* Ensure our first argument is page aligned. 
*/ MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(misc_unk_debug_phys_addr, PageSize)); + /* Decode the initial state. */ + const auto initial_page_allocator_state = *static_cast<KInitialPageAllocator::State *>(initial_state[0]); + const auto initial_process_binary_layout = *static_cast<InitialProcessBinaryLayout *>(initial_state[1]); + /* Restore the page allocator state setup by kernel loader. */ - g_initial_page_allocator.InitializeFromState(initial_page_allocator_state); + g_initial_page_allocator.InitializeFromState(std::addressof(initial_page_allocator_state)); /* Ensure that the T1SZ is correct (and what we expect). */ MESOSPHERE_INIT_ABORT_UNLESS((cpu::TranslationControlRegisterAccessor().GetT1Size() / arch::arm64::L1BlockSize) == arch::arm64::MaxPageTableEntries); @@ -207,13 +261,13 @@ MESOSPHERE_INIT_ABORT_UNLESS(misc_region_size > 0); /* Setup the misc region. */ - const KVirtualAddress misc_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel); + const KVirtualAddress misc_region_start = GetRandomAlignedRegion(misc_region_size, MiscRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_Kernel); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(misc_region_start), misc_region_size, KMemoryRegionType_KernelMisc)); /* Setup the stack region.
*/ constexpr size_t StackRegionSize = 14_MB; constexpr size_t StackRegionAlign = KernelAslrAlignment; - const KVirtualAddress stack_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel); + const KVirtualAddress stack_region_start = GetRandomAlignedRegion(StackRegionSize, StackRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_Kernel); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack)); /* Determine the size of the resource region. */ @@ -230,13 +284,13 @@ namespace ams::kern::init { const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size; constexpr size_t SlabRegionAlign = KernelAslrAlignment; const size_t slab_region_needed_size = util::AlignUp(GetInteger(code_end_phys_addr) + slab_region_size, SlabRegionAlign) - util::AlignDown(GetInteger(code_end_phys_addr), SlabRegionAlign); - const KVirtualAddress slab_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + (GetInteger(code_end_phys_addr) % SlabRegionAlign); + const KVirtualAddress slab_region_start = GetRandomAlignedRegion(slab_region_needed_size, SlabRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_Kernel) + (GetInteger(code_end_phys_addr) % SlabRegionAlign); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(slab_region_start), slab_region_size, KMemoryRegionType_KernelSlab)); /* Setup the temp region. 
*/ constexpr size_t TempRegionSize = 128_MB; constexpr size_t TempRegionAlign = KernelAslrAlignment; - const KVirtualAddress temp_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel); + const KVirtualAddress temp_region_start = GetRandomAlignedRegion(TempRegionSize, TempRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_Kernel); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp)); /* Automatically map in devices that have auto-map attributes, from largest region to smallest region. */ @@ -282,7 +336,7 @@ namespace ams::kern::init { const size_t map_size = util::AlignUp(largest->GetEndAddress(), PageSize) - GetInteger(map_phys_addr); const size_t min_align = std::min(util::GetAlignment(map_size), util::GetAlignment(GetInteger(map_phys_addr))); const size_t map_align = min_align >= KernelAslrAlignment ? 
KernelAslrAlignment : PageSize; - const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, map_align, KMemoryRegionType_KernelMisc, map_align); + const KVirtualAddress map_virt_addr = GetRandomAlignedRegionWithGuard(map_size, map_align, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_KernelMisc, PageSize); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice)); largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr)); @@ -338,7 +392,7 @@ namespace ams::kern::init { const size_t map_size = util::AlignUp(largest->GetEndAddress(), PageSize) - GetInteger(map_phys_addr); const size_t min_align = std::min(util::GetAlignment(map_size), util::GetAlignment(GetInteger(map_phys_addr))); const size_t map_align = min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize; - const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, map_align, KMemoryRegionType_KernelMisc, map_align); + const KVirtualAddress map_virt_addr = GetRandomAlignedRegionWithGuard(map_size, map_align, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_KernelMisc, PageSize); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscUnknownDebug)); largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr)); @@ -388,7 +442,7 @@ namespace ams::kern::init { constexpr size_t LinearRegionAlign = 1_GB; const KPhysicalAddress aligned_linear_phys_start = util::AlignDown(linear_extents.GetAddress(), LinearRegionAlign); const size_t linear_region_size = util::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) - GetInteger(aligned_linear_phys_start); - 
const KVirtualAddress linear_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign); + const KVirtualAddress linear_region_start = GetRandomAlignedRegionWithGuard(linear_region_size, LinearRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_None, LinearRegionAlign); const uintptr_t linear_region_phys_to_virt_diff = GetInteger(linear_region_start) - GetInteger(aligned_linear_phys_start); @@ -471,9 +525,29 @@ /* Set the linear memory offsets, to enable conversion between physical and virtual addresses. */ KMemoryLayout::InitializeLinearMemoryAddresses(aligned_linear_phys_start, linear_region_start); + /* Set the initial process binary physical address. */ + /* NOTE: Nintendo does this after pool partition setup, but it's a requirement that we do it before */ + /* to retain compatibility with < 5.0.0. */ + const KPhysicalAddress ini_address = initial_process_binary_layout.address; + MESOSPHERE_INIT_ABORT_UNLESS(ini_address != Null<KPhysicalAddress>); + SetInitialProcessBinaryPhysicalAddress(ini_address); + /* Setup all other memory regions needed to arrange the pool partitions. */ SetupPoolPartitionMemoryRegions(); + /* Validate the initial process binary address. */ + { + const KMemoryRegion *ini_region = KMemoryLayout::Find(ini_address); + + /* Check that the region is non-kernel dram. */ + MESOSPHERE_INIT_ABORT_UNLESS(ini_region->IsDerivedFrom(KMemoryRegionType_DramUserPool)); + + /* Check that the region contains the ini. */ + MESOSPHERE_INIT_ABORT_UNLESS(ini_region->GetAddress() <= GetInteger(ini_address)); + MESOSPHERE_INIT_ABORT_UNLESS(GetInteger(ini_address) + InitialProcessBinarySizeMax <= ini_region->GetEndAddress()); + MESOSPHERE_INIT_ABORT_UNLESS(ini_region->GetEndAddress() != 0); + } + /* Cache all linear regions in their own trees for faster access, later.
*/ KMemoryLayout::InitializeLinearMemoryRegionTrees(); diff --git a/mesosphere/kernel/source/arch/arm64/init/start.s b/mesosphere/kernel/source/arch/arm64/init/start.s index 281db1509..2ad7b70f7 100644 --- a/mesosphere/kernel/source/arch/arm64/init/start.s +++ b/mesosphere/kernel/source/arch/arm64/init/start.s @@ -124,10 +124,10 @@ core0_el1: /* At this point kernelldr has been invoked, and we are relocated at a random virtual address. */ /* Next thing to do is to set up our memory management and slabheaps -- all the other core initialization. */ - /* Call ams::kern::init::InitializeCore(uintptr_t, uintptr_t) */ - mov x1, x0 /* Kernelldr returns a KInitialPageAllocator state for the kernel to re-use. */ + /* Call ams::kern::init::InitializeCore(uintptr_t, void **) */ + mov x1, x0 /* Kernelldr returns a state object for the kernel to re-use. */ mov x0, xzr /* Official kernel always passes zero, when this is non-zero the address is mapped. */ - bl _ZN3ams4kern4init14InitializeCoreEmm + bl _ZN3ams4kern4init14InitializeCoreEmPPv /* Get the init arguments for core 0. */ mov x0, xzr diff --git a/mesosphere/kernel_ldr/source/arch/arm64/start.s b/mesosphere/kernel_ldr/source/arch/arm64/start.s index 9254d065f..06218e216 100644 --- a/mesosphere/kernel_ldr/source/arch/arm64/start.s +++ b/mesosphere/kernel_ldr/source/arch/arm64/start.s @@ -110,10 +110,10 @@ _main: str x0, [sp, #0x20] - /* Call ams::kern::init::loader::GetFinalPageAllocatorState() */ - bl _ZN3ams4kern4init6loader26GetFinalPageAllocatorStateEv + /* Call ams::kern::init::loader::GetFinalState() */ + bl _ZN3ams4kern4init6loader13GetFinalStateEv - /* X0 is now the saved state for the page allocator. */ + /* X0 is now the saved state. */ /* We will return this to the kernel. */ /* Return to the newly-relocated kernel. 
*/ diff --git a/mesosphere/kernel_ldr/source/kern_init_loader.cpp b/mesosphere/kernel_ldr/source/kern_init_loader.cpp index c283013a5..abcc067c0 100644 --- a/mesosphere/kernel_ldr/source/kern_init_loader.cpp +++ b/mesosphere/kernel_ldr/source/kern_init_loader.cpp @@ -42,9 +42,12 @@ namespace ams::kern::init::loader { static_assert(InitialPageTableRegionSizeMax < KernelPageTableHeapSize + KernelInitialPageHeapSize); /* Global Allocator. */ - KInitialPageAllocator g_initial_page_allocator; + constinit KInitialPageAllocator g_initial_page_allocator; - KInitialPageAllocator::State g_final_page_allocator_state; + constinit KInitialPageAllocator::State g_final_page_allocator_state; + constinit InitialProcessBinaryLayout g_initial_process_binary_layout; + + constinit void *g_final_state[2]; void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout) { KPhysicalAddress correct_base = KSystemControl::Init::GetKernelPhysicalBaseAddress(base_address); @@ -159,19 +162,21 @@ namespace ams::kern::init::loader { /* Determine the size of the resource region. */ const size_t resource_region_size = KMemoryLayout::GetResourceRegionSizeForInit(); + const uintptr_t resource_end_address = base_address + resource_offset + resource_region_size; /* Setup the INI1 header in memory for the kernel. */ - const uintptr_t resource_end_address = base_address + resource_offset + resource_region_size; - const uintptr_t ini_load_address = GetInteger(KSystemControl::Init::GetInitialProcessBinaryPhysicalAddress()); - if (ini_base_address != ini_load_address) { + KSystemControl::Init::GetInitialProcessBinaryLayout(std::addressof(g_initial_process_binary_layout)); + MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_layout.address != 0); + + if (ini_base_address != g_initial_process_binary_layout.address) { /* The INI is not at the correct address, so we need to relocate it. 
*/ const InitialProcessBinaryHeader *ini_header = reinterpret_cast<const InitialProcessBinaryHeader *>(ini_base_address); if (ini_header->magic == InitialProcessBinaryMagic && ini_header->size <= InitialProcessBinarySizeMax) { /* INI is valid, relocate it. */ - std::memmove(reinterpret_cast<void *>(ini_load_address), ini_header, ini_header->size); + std::memmove(reinterpret_cast<void *>(g_initial_process_binary_layout.address), ini_header, ini_header->size); } else { /* INI is invalid. Make the destination header invalid. */ - std::memset(reinterpret_cast<void *>(ini_load_address), 0, sizeof(InitialProcessBinaryHeader)); + std::memset(reinterpret_cast<void *>(g_initial_process_binary_layout.address), 0, sizeof(InitialProcessBinaryHeader)); } } @@ -225,9 +230,15 @@ namespace ams::kern::init::loader { return g_initial_page_allocator.Allocate(PageSize) + PageSize; } - uintptr_t GetFinalPageAllocatorState() { + void **GetFinalState() { + /* Get final page allocator state. */ g_initial_page_allocator.GetFinalState(std::addressof(g_final_page_allocator_state)); - return reinterpret_cast<uintptr_t>(std::addressof(g_final_page_allocator_state)); + + /* Setup final kernel loader state. */ + g_final_state[0] = std::addressof(g_final_page_allocator_state); + g_final_state[1] = std::addressof(g_initial_process_binary_layout); + + return g_final_state; } } \ No newline at end of file