diff --git a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp
index 08dc14cdd..8be5e14b1 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp
@@ -37,4 +37,6 @@ namespace ams::kern {
     KVirtualAddress GetInitialProcessBinaryAddress();
     size_t GetInitialProcessesSecureMemorySize();
 
+    void LoadInitialProcessBinaryHeaderDeprecated(KPhysicalAddress pool_end);
+
 }
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp
index dce0af090..e38a361dc 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp
@@ -175,7 +175,14 @@ namespace ams::kern {
                 return std::make_tuple(total_size, kernel_size);
             }
 
-            static void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start);
+            static void InitializeLinearMemoryAddresses(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start) {
+                /* Set static differences. */
+                s_linear_phys_to_virt_diff = GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
+                s_linear_virt_to_phys_diff = GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);
+            }
+
+            static void InitializeLinearMemoryRegionTrees();
+
             static size_t GetResourceRegionSizeForInit();
 
             static NOINLINE auto GetKernelRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); }
diff --git a/libraries/libmesosphere/source/kern_initial_process.cpp b/libraries/libmesosphere/source/kern_initial_process.cpp
index 6656218bd..2e1507a43 100644
--- a/libraries/libmesosphere/source/kern_initial_process.cpp
+++ b/libraries/libmesosphere/source/kern_initial_process.cpp
@@ -31,10 +31,12 @@ namespace ams::kern {
         constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
         constinit u64 g_initial_process_id_max = std::numeric_limits<u64>::min();
 
-        void LoadInitialProcessBinaryHeader() {
+        void LoadInitialProcessBinaryHeader(KVirtualAddress virt_addr = Null<KVirtualAddress>) {
             if (g_initial_process_binary_header.magic != InitialProcessBinaryMagic) {
-                /* Get the virtual address for the image. */
-                const KVirtualAddress virt_addr = GetInitialProcessBinaryAddress();
+                /* Get the virtual address, if it's not overridden. */
+                if (virt_addr == Null<KVirtualAddress>) {
+                    virt_addr = GetInitialProcessBinaryAddress();
+                }
 
                 /* Copy and validate the header. */
                 g_initial_process_binary_header = *GetPointer<InitialProcessBinaryHeader>(virt_addr);
@@ -54,12 +56,16 @@ namespace ams::kern {
 
                     /* Attach to the current KIP. */
                     KInitialProcessReader reader;
-                    MESOSPHERE_ABORT_UNLESS(reader.Attach(current) != Null<KVirtualAddress>);
+                    KVirtualAddress data = reader.Attach(current);
+                    MESOSPHERE_ABORT_UNLESS(data != Null<KVirtualAddress>);
 
                     /* If the process uses secure memory, account for that. */
                     if (reader.UsesSecureMemory()) {
                         g_initial_process_secure_memory_size += reader.GetSize() + util::AlignUp(reader.GetStackSize(), PageSize);
                     }
+
+                    /* Advance to the next KIP. */
+                    current = data + reader.GetBinarySize();
                 }
             }
         }
@@ -267,6 +273,10 @@ namespace ams::kern {
             }
         }
 
+        ALWAYS_INLINE KVirtualAddress GetInitialProcessBinaryAddress(KVirtualAddress pool_end) {
+            return pool_end - InitialProcessBinarySizeMax;
+        }
+
     }
 
     u64 GetInitialProcessIdMin() {
@@ -283,7 +293,7 @@
         MESOSPHERE_INIT_ABORT_UNLESS(pool_region != nullptr);
         MESOSPHERE_INIT_ABORT_UNLESS(pool_region->GetEndAddress() != 0);
         MESOSPHERE_ABORT_UNLESS(pool_region->GetSize() >= InitialProcessBinarySizeMax);
-        return pool_region->GetEndAddress() - InitialProcessBinarySizeMax;
+        return GetInitialProcessBinaryAddress(pool_region->GetEndAddress());
     }
 
     size_t GetInitialProcessesSecureMemorySize() {
@@ -311,6 +321,10 @@
         }
     }
 
+    void LoadInitialProcessBinaryHeaderDeprecated(KPhysicalAddress pool_end) {
+        LoadInitialProcessBinaryHeader(GetInitialProcessBinaryAddress(KMemoryLayout::GetLinearVirtualAddress(pool_end)));
+    }
+
     void CreateAndRunInitialProcesses() {
         /* Allocate space for the processes. */
         InitialProcessInfo *infos = static_cast<InitialProcessInfo *>(__builtin_alloca(sizeof(InitialProcessInfo) * g_initial_process_binary_header.num_processes));
diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
index 5d972d121..c9b2fd0d7 100644
--- a/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
@@ -185,6 +185,12 @@ namespace ams::kern {
         static_assert(KMemoryManager::Pool_Unsafe == KMemoryManager::Pool_Application);
         static_assert(KMemoryManager::Pool_Secure == KMemoryManager::Pool_System);
 
+        /* NOTE: Beginning with 12.0.0 (and always, in mesosphere), the initial process binary is at the end of the pool region. */
+        /* However, this is problematic for < 5.0.0, because we require the initial process binary to be parsed in order */
+        /* to determine the pool sizes. Hence, we will force an initial binary load with the known pool end directly, so */
+        /* that we retain compatibility with lower firmware versions. */
+        LoadInitialProcessBinaryHeaderDeprecated(pool_end);
+
         /* Get Secure pool size. */
         const size_t secure_pool_size = [] ALWAYS_INLINE_LAMBDA (auto target_firmware) -> size_t {
             constexpr size_t LegacySecureKernelSize = 8_MB; /* KPageBuffer pages, other small kernel allocations. */
diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.cpp
index 8aecb49ca..5171e1acd 100644
--- a/libraries/libmesosphere/source/kern_k_memory_layout.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_layout.cpp
@@ -148,11 +148,7 @@ namespace ams::kern {
         }
     }
 
-    void KMemoryLayout::InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start) {
-        /* Set static differences. */
-        s_linear_phys_to_virt_diff = GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
-        s_linear_virt_to_phys_diff = GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);
-
+    void KMemoryLayout::InitializeLinearMemoryRegionTrees() {
         /* Initialize linear trees. */
         for (auto &region : GetPhysicalMemoryRegionTree()) {
             if (region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
index 490999d0e..39437ced5 100644
--- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
+++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
@@ -467,11 +467,14 @@ namespace ams::kern::init {
             }
         }
 
+        /* Set the linear memory offsets, to enable conversion between physical and virtual addresses. */
+        KMemoryLayout::InitializeLinearMemoryAddresses(aligned_linear_phys_start, linear_region_start);
+
         /* Setup all other memory regions needed to arrange the pool partitions. */
         SetupPoolPartitionMemoryRegions();
 
        /* Cache all linear regions in their own trees for faster access, later. */
-        KMemoryLayout::InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, linear_region_start);
+        KMemoryLayout::InitializeLinearMemoryRegionTrees();
 
         /* Turn on all other cores. */
         TurnOnAllCores(GetInteger(init_pt.GetPhysicalAddress(reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore))));
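
The patch splits linear-mapping setup into two steps: InitializeLinearMemoryAddresses records the physical-to-virtual and virtual-to-physical offsets as soon as the linear region is chosen, while InitializeLinearMemoryRegionTrees only caches the region trees later. Storing both directions as unsigned differences works because unsigned arithmetic wraps, so each conversion is a single addition. The standalone sketch below illustrates that property; the plain uint64_t values, global variables, and example base addresses are simplified stand-ins for the kernel's KPhysicalAddress/KVirtualAddress types and KMemoryLayout statics, not the actual implementation.

#include <cstdint>
#include <cassert>

/* Simplified stand-ins for KMemoryLayout's static difference members. */
static uint64_t g_linear_phys_to_virt_diff;
static uint64_t g_linear_virt_to_phys_diff;

/* Mirrors InitializeLinearMemoryAddresses: both differences are stored as
   unsigned values, relying on wraparound so each direction is one addition. */
void InitializeLinearMemoryAddresses(uint64_t aligned_linear_phys_start, uint64_t linear_virtual_start) {
    g_linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
    g_linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
}

uint64_t GetLinearVirtualAddress(uint64_t phys_addr) {
    return phys_addr + g_linear_phys_to_virt_diff;
}

uint64_t GetLinearPhysicalAddress(uint64_t virt_addr) {
    return virt_addr + g_linear_virt_to_phys_diff;
}

int main() {
    /* Illustrative base addresses only; the real values are chosen at boot. */
    constexpr uint64_t PhysStart = 0x0000000080000000;
    constexpr uint64_t VirtStart = 0xFFFFFF8080000000;
    InitializeLinearMemoryAddresses(PhysStart, VirtStart);

    /* Round-tripping through the linear map recovers the physical address. */
    const uint64_t phys = PhysStart + 0x123000;
    const uint64_t virt = GetLinearVirtualAddress(phys);
    assert(GetLinearPhysicalAddress(virt) == phys);
    return 0;
}

This ordering is also what lets the board-specific code call LoadInitialProcessBinaryHeaderDeprecated(pool_end) with a physical pool end: once the offsets are set (before SetupPoolPartitionMemoryRegions runs), KMemoryLayout::GetLinearVirtualAddress(pool_end) yields the virtual address of the pool end, and subtracting InitialProcessBinarySizeMax locates the initial process binary before the pool partition regions have been established.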