diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
index f9d99aecf..056d5f32b 100644
--- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
+++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
@@ -25,6 +25,7 @@ namespace ams::kern::board::nintendo::nx {
                     /* Initialization. */
                     static size_t GetIntendedMemorySize();
                     static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
+                    static KPhysicalAddress GetInitialProcessBinaryPhysicalAddress();
                     static bool ShouldIncreaseThreadResourceLimit();
                     static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
                     static size_t GetApplicationPoolSize();
diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp
index 3d379ca03..63197ec1e 100644
--- a/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp
+++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp
@@ -27,7 +27,7 @@ namespace ams::kern::init {
         u32 rw_end_offset;
         u32 bss_offset;
         u32 bss_end_offset;
-        u32 ini_load_offset;
+        u32 resource_offset;
         u32 dynamic_offset;
         u32 init_array_offset;
         u32 init_array_end_offset;
diff --git a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp
index bc15b0b88..08dc14cdd 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp
@@ -29,11 +29,12 @@ namespace ams::kern {
         u32 reserved;
     };

-    NOINLINE void CopyInitialProcessBinaryToKernelMemory();
+    NOINLINE size_t CopyInitialProcessBinaryToKernelMemory();
     NOINLINE void CreateAndRunInitialProcesses();

     u64 GetInitialProcessIdMin();
     u64 GetInitialProcessIdMax();
+    KVirtualAddress GetInitialProcessBinaryAddress();
     size_t GetInitialProcessesSecureMemorySize();

 }
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp
index ce534533f..97b7977f9 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp
@@ -91,46 +91,49 @@ namespace ams::kern {

     class KInitialProcessReader {
         private:
-            KInitialProcessHeader *m_kip_header;
+            KInitialProcessHeader m_kip_header;
         public:
             constexpr KInitialProcessReader() : m_kip_header() { /* ... */ }

-            constexpr const u32 *GetCapabilities() const { return m_kip_header->GetCapabilities(); }
-            constexpr size_t GetNumCapabilities() const { return m_kip_header->GetNumCapabilities(); }
+            constexpr const u32 *GetCapabilities() const { return m_kip_header.GetCapabilities(); }
+            constexpr size_t GetNumCapabilities() const { return m_kip_header.GetNumCapabilities(); }

             constexpr size_t GetBinarySize() const {
-                return sizeof(*m_kip_header) + m_kip_header->GetRxCompressedSize() + m_kip_header->GetRoCompressedSize() + m_kip_header->GetRwCompressedSize();
+                return m_kip_header.GetRxCompressedSize() + m_kip_header.GetRoCompressedSize() + m_kip_header.GetRwCompressedSize();
             }

             constexpr size_t GetSize() const {
-                if (const size_t bss_size = m_kip_header->GetBssSize(); bss_size != 0) {
-                    return m_kip_header->GetBssAddress() + m_kip_header->GetBssSize();
+                if (const size_t bss_size = m_kip_header.GetBssSize(); bss_size != 0) {
+                    return util::AlignUp(m_kip_header.GetBssAddress() + m_kip_header.GetBssSize(), PageSize);
                 } else {
-                    return m_kip_header->GetRwAddress() + m_kip_header->GetRwSize();
+                    return util::AlignUp(m_kip_header.GetRwAddress() + m_kip_header.GetRwSize(), PageSize);
                 }
             }

-            constexpr u8 GetPriority() const { return m_kip_header->GetPriority(); }
-            constexpr u8 GetIdealCoreId() const { return m_kip_header->GetIdealCoreId(); }
-            constexpr u32 GetAffinityMask() const { return m_kip_header->GetAffinityMask(); }
-            constexpr u32 GetStackSize() const { return m_kip_header->GetStackSize(); }
+            constexpr u8 GetPriority() const { return m_kip_header.GetPriority(); }
+            constexpr u8 GetIdealCoreId() const { return m_kip_header.GetIdealCoreId(); }
+            constexpr u32 GetAffinityMask() const { return m_kip_header.GetAffinityMask(); }
+            constexpr u32 GetStackSize() const { return m_kip_header.GetStackSize(); }

-            constexpr bool Is64Bit() const { return m_kip_header->Is64Bit(); }
-            constexpr bool Is64BitAddressSpace() const { return m_kip_header->Is64BitAddressSpace(); }
-            constexpr bool UsesSecureMemory() const { return m_kip_header->UsesSecureMemory(); }
-            constexpr bool IsImmortal() const { return m_kip_header->IsImmortal(); }
+            constexpr bool Is64Bit() const { return m_kip_header.Is64Bit(); }
+            constexpr bool Is64BitAddressSpace() const { return m_kip_header.Is64BitAddressSpace(); }
+            constexpr bool UsesSecureMemory() const { return m_kip_header.UsesSecureMemory(); }
+            constexpr bool IsImmortal() const { return m_kip_header.IsImmortal(); }

-            bool Attach(u8 *bin) {
-                if (KInitialProcessHeader *header = reinterpret_cast<KInitialProcessHeader *>(bin); header->IsValid()) {
-                    m_kip_header = header;
-                    return true;
+            KVirtualAddress Attach(KVirtualAddress bin) {
+                /* Copy the header. */
+                m_kip_header = *GetPointer<const KInitialProcessHeader>(bin);
+
+                /* Check that it's valid. */
+                if (m_kip_header.IsValid()) {
+                    return bin + sizeof(KInitialProcessHeader);
                 } else {
-                    return false;
+                    return Null<KVirtualAddress>;
                 }
             }

             Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const;
-            Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const;
+            Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params, KProcessAddress src) const;
             Result SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const;
     };
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
index 5bef180a9..4b17911e4 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
@@ -75,7 +75,7 @@ namespace ams::kern {
                     KVirtualAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
                     void Free(KVirtualAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }

-                    void UpdateUsedHeapSize() { m_heap.UpdateUsedSize(); }
+                    void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); }

                     void InitializeOptimizedMemory() { std::memset(GetVoidPointer(m_management_region), 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); }
@@ -168,6 +168,10 @@ namespace ams::kern {
                 return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
             }

+            const Impl &GetManager(KVirtualAddress address) const {
+                return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
+            }
+
             constexpr Impl *GetFirstManager(Pool pool, Direction dir) {
                 return dir == Direction_FromBack ? m_pool_managers_tail[pool] : m_pool_managers_head[pool];
             }
@@ -197,6 +201,10 @@ namespace ams::kern {
             NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
             NOINLINE Result AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);

+            Pool GetPool(KVirtualAddress address) const {
+                return this->GetManager(address).GetPool();
+            }
+
             void Open(KVirtualAddress address, size_t num_pages) {
                 /* Repeatedly open references until we've done so for all pages. */
                 while (num_pages) {
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
index e14e7e0df..563bcb3be 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
@@ -125,7 +125,7 @@ namespace ams::kern {
         private:
             KVirtualAddress m_heap_address;
             size_t m_heap_size;
-            size_t m_used_size;
+            size_t m_initial_used_size;
             size_t m_num_blocks;
             Block m_blocks[NumMemoryBlockPageShifts];
         private:
@@ -134,7 +134,7 @@ namespace ams::kern {
             void FreeBlock(KVirtualAddress block, s32 index);
         public:
-            KPageHeap() : m_heap_address(), m_heap_size(), m_used_size(), m_num_blocks(), m_blocks() { /* ... */ }
+            KPageHeap() : m_heap_address(), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks() { /* ... */ }

             constexpr KVirtualAddress GetAddress() const { return m_heap_address; }
             constexpr size_t GetSize() const { return m_heap_size; }
@@ -149,8 +149,13 @@ namespace ams::kern {
             size_t GetFreeSize() const { return this->GetNumFreePages() * PageSize; }
             void DumpFreeList() const;

-            void UpdateUsedSize() {
-                m_used_size = m_heap_size - (this->GetNumFreePages() * PageSize);
+            void SetInitialUsedSize(size_t reserved_size) {
+                /* Check that the reserved size is valid. */
+                const size_t free_size = this->GetNumFreePages() * PageSize;
+                MESOSPHERE_ABORT_UNLESS(m_heap_size >= free_size + reserved_size);
+
+                /* Set the initial used size. */
+                m_initial_used_size = m_heap_size - free_size - reserved_size;
             }

             KVirtualAddress AllocateBlock(s32 index, bool random);
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
index 8120957dd..b689d19ea 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
@@ -21,7 +21,8 @@ namespace ams::kern::board::nintendo::nx {

     namespace {

-        constexpr size_t SecureAlignment = 128_KB;
+        constexpr uintptr_t DramPhysicalAddress = 0x80000000;
+        constexpr size_t SecureAlignment        = 128_KB;

         /* Global variables for panic. */
         constinit bool g_call_smc_on_panic;
@@ -348,6 +349,10 @@ namespace ams::kern::board::nintendo::nx {
         }
     }

+    KPhysicalAddress KSystemControl::Init::GetInitialProcessBinaryPhysicalAddress() {
+        return GetKernelPhysicalBaseAddress(DramPhysicalAddress) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax;
+    }
+
     bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
         return GetKernelConfigurationForInit().Get<smc::KernelConfiguration::IncreaseThreadResourceLimit>();
     }
diff --git a/libraries/libmesosphere/source/kern_initial_process.cpp b/libraries/libmesosphere/source/kern_initial_process.cpp
index 24616fbb6..6656218bd 100644
--- a/libraries/libmesosphere/source/kern_initial_process.cpp
+++ b/libraries/libmesosphere/source/kern_initial_process.cpp
@@ -25,101 +25,219 @@ namespace ams::kern {
             s32 priority;
         };

-        KVirtualAddress GetInitialProcessBinaryAddress() {
-            const uintptr_t end_address = KMemoryLayout::GetPageTableHeapRegion().GetEndAddress();
-            MESOSPHERE_ABORT_UNLESS(end_address != 0);
-            return end_address - InitialProcessBinarySizeMax;
-        }
+        constinit KVirtualAddress g_initial_process_binary_address = Null<KVirtualAddress>;
+        constinit InitialProcessBinaryHeader g_initial_process_binary_header = {};
+        constinit size_t g_initial_process_secure_memory_size = 0;
+        constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
+        constinit u64 g_initial_process_id_max = std::numeric_limits<u64>::min();

-        void LoadInitialProcessBinaryHeader(InitialProcessBinaryHeader *header) {
-            if (header->magic != InitialProcessBinaryMagic) {
-                *header = *GetPointer<InitialProcessBinaryHeader>(GetInitialProcessBinaryAddress());
-            }
+        void LoadInitialProcessBinaryHeader() {
+            if (g_initial_process_binary_header.magic != InitialProcessBinaryMagic) {
+                /* Get the virtual address for the image. */
+                const KVirtualAddress virt_addr = GetInitialProcessBinaryAddress();

-            MESOSPHERE_ABORT_UNLESS(header->magic == InitialProcessBinaryMagic);
-            MESOSPHERE_ABORT_UNLESS(header->num_processes <= init::GetSlabResourceCounts().num_KProcess);
-        }
+                /* Copy and validate the header. */
+                g_initial_process_binary_header = *GetPointer<InitialProcessBinaryHeader>(virt_addr);
+                MESOSPHERE_ABORT_UNLESS(g_initial_process_binary_header.magic == InitialProcessBinaryMagic);
+                MESOSPHERE_ABORT_UNLESS(g_initial_process_binary_header.num_processes <= init::GetSlabResourceCounts().num_KProcess);

-        size_t GetProcessesSecureMemorySize(KVirtualAddress binary_address, const InitialProcessBinaryHeader &header) {
-            u8 *current = GetPointer<u8>(binary_address + sizeof(InitialProcessBinaryHeader));
-            const u8 * const end = GetPointer<u8>(binary_address + header.size - sizeof(KInitialProcessHeader));
+                /* Set the image address. */
+                g_initial_process_binary_address = virt_addr;

-            size_t size = 0;
-            const size_t num_processes = header.num_processes;
-            for (size_t i = 0; i < num_processes; i++) {
-                /* Validate that we can read the current KIP. */
-                MESOSPHERE_ABORT_UNLESS(current <= end);
-                KInitialProcessReader reader;
-                MESOSPHERE_ABORT_UNLESS(reader.Attach(current));
+                /* Process/calculate the secure memory size. */
+                KVirtualAddress current = g_initial_process_binary_address + sizeof(InitialProcessBinaryHeader);
+                const KVirtualAddress end = g_initial_process_binary_address + g_initial_process_binary_header.size;
+                const size_t num_processes = g_initial_process_binary_header.num_processes;
+                for (size_t i = 0; i < num_processes; ++i) {
+                    /* Validate that we can read the current KIP. */
+                    MESOSPHERE_ABORT_UNLESS(current <= end - sizeof(KInitialProcessHeader));

-                /* If the process uses secure memory, account for that. */
-                if (reader.UsesSecureMemory()) {
-                    size += util::AlignUp(reader.GetSize(), PageSize);
+                    /* Attach to the current KIP. */
+                    KInitialProcessReader reader;
+                    MESOSPHERE_ABORT_UNLESS(reader.Attach(current) != Null<KVirtualAddress>);
+
+                    /* If the process uses secure memory, account for that. */
+                    if (reader.UsesSecureMemory()) {
+                        g_initial_process_secure_memory_size += reader.GetSize() + util::AlignUp(reader.GetStackSize(), PageSize);
+                    }
                 }
-
-                /* Advance the reader. */
-                current += reader.GetBinarySize();
             }
-
-            return size;
         }

-        void CreateProcesses(InitialProcessInfo *infos, KVirtualAddress binary_address, const InitialProcessBinaryHeader &header) {
-            u8 *current = GetPointer<u8>(binary_address + sizeof(InitialProcessBinaryHeader));
-            const u8 * const end = GetPointer<u8>(binary_address + header.size - sizeof(KInitialProcessHeader));
+        void CreateProcesses(InitialProcessInfo *infos) {
+            /* Determine process image extents. */
+            KVirtualAddress current = g_initial_process_binary_address + sizeof(InitialProcessBinaryHeader);
+            KVirtualAddress end = g_initial_process_binary_address + g_initial_process_binary_header.size;

             /* Decide on pools to use. */
             const auto unsafe_pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetCreateProcessMemoryPool());
             const auto secure_pool = (GetTargetFirmware() >= TargetFirmware_2_0_0) ? KMemoryManager::Pool_Secure : unsafe_pool;

-            const size_t num_processes = header.num_processes;
-            for (size_t i = 0; i < num_processes; i++) {
-                /* Validate that we can read the current KIP. */
-                MESOSPHERE_ABORT_UNLESS(current <= end);
-                KInitialProcessReader reader;
-                MESOSPHERE_ABORT_UNLESS(reader.Attach(current));
+            const size_t num_processes = g_initial_process_binary_header.num_processes;
+            for (size_t i = 0; i < num_processes; ++i) {
+                /* Validate that we can read the current KIP header. */
+                MESOSPHERE_ABORT_UNLESS(current <= end - sizeof(KInitialProcessHeader));

-                /* Parse process parameters and reserve memory. */
+                /* Attach to the current kip. */
+                KInitialProcessReader reader;
+                KVirtualAddress data = reader.Attach(current);
+                MESOSPHERE_ABORT_UNLESS(data != Null<KVirtualAddress>);
+
+                /* Ensure that the remainder of our parse is page aligned. */
+                if (!util::IsAligned(GetInteger(data), PageSize)) {
+                    const KVirtualAddress aligned_data = util::AlignDown(GetInteger(data), PageSize);
+                    std::memmove(GetVoidPointer(aligned_data), GetVoidPointer(data), end - data);
+
+                    data = aligned_data;
+                    end -= (data - aligned_data);
+                }
+
+                /* If we crossed a page boundary, free the pages we're done using. */
+                if (KVirtualAddress aligned_current = util::AlignDown(GetInteger(current), PageSize); aligned_current != data) {
+                    const size_t freed_size = data - aligned_current;
+                    Kernel::GetMemoryManager().Close(aligned_current, freed_size / PageSize);
+                    Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, freed_size);
+                }
+
+                /* Parse process parameters. */
                 ams::svc::CreateProcessParameter params;
                 MESOSPHERE_R_ABORT_UNLESS(reader.MakeCreateProcessParameter(std::addressof(params), true));
-                MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, params.code_num_pages * PageSize));
+
+                /* Get the binary size for the kip. */
+                const size_t binary_size = reader.GetBinarySize();
+                const size_t binary_pages = binary_size / PageSize;
+
+                /* Get the pool for both the current (compressed) image, and the decompressed process. */
+                const auto src_pool = Kernel::GetMemoryManager().GetPool(data);
+                const auto dst_pool = reader.UsesSecureMemory() ? secure_pool : unsafe_pool;
+
+                /* Determine the process size, and how much memory isn't already reserved. */
+                const size_t process_size = params.code_num_pages * PageSize;
+                const size_t unreserved_size = process_size - (src_pool == dst_pool ? util::AlignDown(binary_size, PageSize) : 0);
+
+                /* Reserve however much memory we need to reserve. */
+                MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, unreserved_size));

                 /* Create the process. */
                 KProcess *new_process = nullptr;
                 {
-                    /* Declare page group to use for process memory. */
+                    /* Make page groups to represent the data. */
                     KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager()));
+                    KPageGroup workaround_pg(std::addressof(Kernel::GetBlockInfoManager()));

-                    /* Allocate memory for the process. */
-                    auto &mm = Kernel::GetMemoryManager();
-                    const auto pool = reader.UsesSecureMemory() ? secure_pool : unsafe_pool;
-                    MESOSPHERE_R_ABORT_UNLESS(mm.AllocateAndOpen(std::addressof(pg), params.code_num_pages, KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront)));
-
+                    /* Populate the page group to represent the data. */
                     {
-                        /* Ensure that we do not leak pages. */
-                        ON_SCOPE_EXIT { pg.Close(); };
+                        /* Allocate the previously unreserved pages. */
+                        KPageGroup unreserve_pg(std::addressof(Kernel::GetBlockInfoManager()));
+                        MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));

-                        /* Get the temporary region. */
-                        const auto &temp_region = KMemoryLayout::GetTempRegion();
-                        MESOSPHERE_ABORT_UNLESS(temp_region.GetEndAddress() != 0);
+                        /* Add the previously reserved pages. */
+                        if (src_pool == dst_pool && binary_pages != 0) {
+                            /* NOTE: Nintendo does not check the result of this operation. */
+                            pg.AddBlock(data, binary_pages);
+                        }

-                        /* Map the process's memory into the temporary region. */
-                        KProcessAddress temp_address = Null<KProcessAddress>;
-                        MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
-
-                        /* Load the process. */
-                        MESOSPHERE_R_ABORT_UNLESS(reader.Load(temp_address, params));
-
-                        /* Unmap the temporary mapping. */
-                        MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, pg, KMemoryState_Kernel));
-
-                        /* Create a KProcess object. */
-                        new_process = KProcess::Create();
-                        MESOSPHERE_ABORT_UNLESS(new_process != nullptr);
-
-                        /* Initialize the process. */
-                        MESOSPHERE_R_ABORT_UNLESS(new_process->Initialize(params, pg, reader.GetCapabilities(), reader.GetNumCapabilities(), std::addressof(Kernel::GetSystemResourceLimit()), pool, reader.IsImmortal()));
+                        /* Add the previously unreserved pages. */
+                        for (const auto &block : unreserve_pg) {
+                            /* NOTE: Nintendo does not check the result of this operation. */
+                            pg.AddBlock(block.GetAddress(), block.GetNumPages());
+                        }
                     }
+                    MESOSPHERE_ABORT_UNLESS(pg.GetNumPages() == static_cast<size_t>(params.code_num_pages));
+
+                    /* Ensure that we do not leak pages. */
+                    KPageGroup *process_pg = std::addressof(pg);
+                    ON_SCOPE_EXIT { process_pg->Close(); };
+
+                    /* Get the temporary region. */
+                    const auto &temp_region = KMemoryLayout::GetTempRegion();
+                    MESOSPHERE_ABORT_UNLESS(temp_region.GetEndAddress() != 0);
+
+                    /* Map the process's memory into the temporary region. */
+                    KProcessAddress temp_address = Null<KProcessAddress>;
+                    MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
+
+                    /* Setup the new page group's memory, so that we can load the process. */
+                    {
+                        /* Copy the unaligned ending of the compressed binary. */
+                        if (const size_t unaligned_size = binary_size - util::AlignDown(binary_size, PageSize); unaligned_size != 0) {
+                            std::memcpy(GetVoidPointer(temp_address + process_size - unaligned_size), GetVoidPointer(data + binary_size - unaligned_size), unaligned_size);
+                        }
+
+                        /* Copy the aligned part of the compressed binary. */
+                        if (const size_t aligned_size = util::AlignDown(binary_size, PageSize); aligned_size != 0 && src_pool == dst_pool) {
+                            std::memmove(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(temp_address), aligned_size);
+                        } else {
+                            if (src_pool != dst_pool) {
+                                std::memcpy(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(data), aligned_size);
+                                Kernel::GetMemoryManager().Close(data, aligned_size / PageSize);
+                            }
+                        }
+
+                        /* Clear the first part of the memory. */
+                        std::memset(GetVoidPointer(temp_address), 0, process_size - binary_size);
+                    }
+
+                    /* Load the process. */
+                    MESOSPHERE_R_ABORT_UNLESS(reader.Load(temp_address, params, temp_address + process_size - binary_size));
+
+                    /* Unmap the temporary mapping. */
+                    MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, pg, KMemoryState_Kernel));
+
+                    /* Create a KProcess object. */
+                    new_process = KProcess::Create();
+                    MESOSPHERE_ABORT_UNLESS(new_process != nullptr);
+
+                    /* Ensure the page group is usable for the process. */
+                    /* If the pool is the same, we need to use the workaround page group. */
+                    if (src_pool == dst_pool) {
+                        /* Allocate a new, usable group for the process. */
+                        MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+
+                        /* Copy data from the working page group to the usable one. */
+                        auto work_it = pg.begin();
+                        MESOSPHERE_ABORT_UNLESS(work_it != pg.end());
+                        {
+                            auto work_address = work_it->GetAddress();
+                            auto work_remaining = work_it->GetNumPages();
+                            for (const auto &block : workaround_pg) {
+                                auto block_address = block.GetAddress();
+                                auto block_remaining = block.GetNumPages();
+                                while (block_remaining > 0) {
+                                    if (work_remaining == 0) {
+                                        ++work_it;
+                                        work_address = work_it->GetAddress();
+                                        work_remaining = work_it->GetNumPages();
+                                    }
+
+                                    const size_t cur_pages = std::min(block_remaining, work_remaining);
+                                    const size_t cur_size = cur_pages * PageSize;
+                                    std::memcpy(GetVoidPointer(block_address), GetVoidPointer(work_address), cur_size);
+
+                                    block_address += cur_size;
+                                    work_address += cur_size;
+
+                                    block_remaining -= cur_pages;
+                                    work_remaining -= cur_pages;
+                                }
+                            }
+
+                            ++work_it;
+                        }
+                        MESOSPHERE_ABORT_UNLESS(work_it == pg.end());
+
+                        /* We want to use the new page group. */
+                        process_pg = std::addressof(workaround_pg);
+                        pg.Close();
+                    }
+
+                    /* Initialize the process. */
+                    MESOSPHERE_R_ABORT_UNLESS(new_process->Initialize(params, *process_pg, reader.GetCapabilities(), reader.GetNumCapabilities(), std::addressof(Kernel::GetSystemResourceLimit()), dst_pool, reader.IsImmortal()));
+                }
+
+                /* Release the memory that was previously reserved. */
+                if (const size_t aligned_bin_size = util::AlignDown(binary_size, PageSize); aligned_bin_size != 0 && src_pool != dst_pool) {
+                    Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_bin_size);
                 }

                 /* Set the process's memory permissions. */
@@ -137,15 +255,18 @@ namespace ams::kern {
                 infos[i].priority = reader.GetPriority();

                 /* Advance the reader. */
-                current += reader.GetBinarySize();
+                current = data + binary_size;
+            }
+
+            /* Release remaining memory used by the image. */
+            {
+                const size_t remaining_size = util::AlignUp(GetInteger(g_initial_process_binary_address) + g_initial_process_binary_header.size, PageSize) - util::AlignDown(GetInteger(current), PageSize);
+                const size_t remaining_pages = remaining_size / PageSize;
+                Kernel::GetMemoryManager().Close(util::AlignDown(GetInteger(current), PageSize), remaining_pages);
+                Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, remaining_size);
+            }
         }

-        constinit KVirtualAddress g_initial_process_binary_address = Null<KVirtualAddress>;
-        constinit InitialProcessBinaryHeader g_initial_process_binary_header = {};
-        constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
-        constinit u64 g_initial_process_id_max = std::numeric_limits<u64>::min();
-
     }

     u64 GetInitialProcessIdMin() {
@@ -156,32 +277,37 @@ namespace ams::kern {
         return g_initial_process_id_max;
     }

-    size_t GetInitialProcessesSecureMemorySize() {
-        LoadInitialProcessBinaryHeader(&g_initial_process_binary_header);
-
-        return GetProcessesSecureMemorySize(g_initial_process_binary_address != Null<KVirtualAddress> ? g_initial_process_binary_address : GetInitialProcessBinaryAddress(), g_initial_process_binary_header);
+    KVirtualAddress GetInitialProcessBinaryAddress() {
+        /* Get, validate the pool region. */
+        const auto *pool_region = KMemoryLayout::GetVirtualMemoryRegionTree().FindLastDerived(KMemoryRegionType_VirtualDramUserPool);
+        MESOSPHERE_INIT_ABORT_UNLESS(pool_region != nullptr);
+        MESOSPHERE_INIT_ABORT_UNLESS(pool_region->GetEndAddress() != 0);
+        MESOSPHERE_ABORT_UNLESS(pool_region->GetSize() >= InitialProcessBinarySizeMax);
+        return pool_region->GetEndAddress() - InitialProcessBinarySizeMax;
     }

-    void CopyInitialProcessBinaryToKernelMemory() {
-        LoadInitialProcessBinaryHeader(&g_initial_process_binary_header);
+    size_t GetInitialProcessesSecureMemorySize() {
+        LoadInitialProcessBinaryHeader();
+
+        return g_initial_process_secure_memory_size;
+    }
+
+    size_t CopyInitialProcessBinaryToKernelMemory() {
+        LoadInitialProcessBinaryHeader();

         if (g_initial_process_binary_header.num_processes > 0) {
             /* Reserve pages for the initial process binary from the system resource limit. */
-            auto &mm = Kernel::GetMemoryManager();
             const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
-            const size_t num_pages = total_size / PageSize;
             MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, total_size));

-            /* Allocate memory for the image. */
-            const KMemoryManager::Pool pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetCreateProcessMemoryPool());
-            const auto allocate_option = KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront);
-            KVirtualAddress allocated_memory = mm.AllocateAndOpenContinuous(num_pages, 1, allocate_option);
-            MESOSPHERE_ABORT_UNLESS(allocated_memory != Null<KVirtualAddress>);
+            /* The initial process binary is potentially over-allocated, so free any extra pages. */
+            if (total_size < InitialProcessBinarySizeMax) {
+                Kernel::GetMemoryManager().Close(g_initial_process_binary_address + total_size, (InitialProcessBinarySizeMax - total_size) / PageSize);
+            }

-            /* Relocate the image. */
-            std::memmove(GetVoidPointer(allocated_memory), GetVoidPointer(GetInitialProcessBinaryAddress()), g_initial_process_binary_header.size);
-            std::memset(GetVoidPointer(GetInitialProcessBinaryAddress()), 0, g_initial_process_binary_header.size);
-            g_initial_process_binary_address = allocated_memory;
+            return total_size;
+        } else {
+            return 0;
         }
     }
@@ -190,15 +316,7 @@ namespace ams::kern {
         InitialProcessInfo *infos = static_cast<InitialProcessInfo *>(__builtin_alloca(sizeof(InitialProcessInfo) * g_initial_process_binary_header.num_processes));

         /* Create the processes. */
-        CreateProcesses(infos, g_initial_process_binary_address, g_initial_process_binary_header);
-
-        /* Release the memory used by the image. */
-        {
-            const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
-            const size_t num_pages = total_size / PageSize;
-            Kernel::GetMemoryManager().Close(g_initial_process_binary_address, num_pages);
-            Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, total_size);
-        }
+        CreateProcesses(infos);

         /* Determine the initial process id range. */
         for (size_t i = 0; i < g_initial_process_binary_header.num_processes; i++) {
diff --git a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
index 6665dcdc8..83922182d 100644
--- a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
+++ b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
@@ -77,14 +77,14 @@ namespace ams::kern {

     Result KInitialProcessReader::MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const {
         /* Get and validate addresses/sizes. */
-        const uintptr_t rx_address = m_kip_header->GetRxAddress();
-        const size_t rx_size = m_kip_header->GetRxSize();
-        const uintptr_t ro_address = m_kip_header->GetRoAddress();
-        const size_t ro_size = m_kip_header->GetRoSize();
-        const uintptr_t rw_address = m_kip_header->GetRwAddress();
-        const size_t rw_size = m_kip_header->GetRwSize();
-        const uintptr_t bss_address = m_kip_header->GetBssAddress();
-        const size_t bss_size = m_kip_header->GetBssSize();
+        const uintptr_t rx_address = m_kip_header.GetRxAddress();
+        const size_t rx_size = m_kip_header.GetRxSize();
+        const uintptr_t ro_address = m_kip_header.GetRoAddress();
+        const size_t ro_size = m_kip_header.GetRoSize();
+        const uintptr_t rw_address = m_kip_header.GetRwAddress();
+        const size_t rw_size = m_kip_header.GetRwSize();
+        const uintptr_t bss_address = m_kip_header.GetBssAddress();
+        const size_t bss_size = m_kip_header.GetBssSize();
         R_UNLESS(util::IsAligned(rx_address, PageSize), svc::ResultInvalidAddress());
         R_UNLESS(util::IsAligned(ro_address, PageSize), svc::ResultInvalidAddress());
         R_UNLESS(util::IsAligned(rw_address, PageSize), svc::ResultInvalidAddress());
@@ -115,13 +115,13 @@ namespace ams::kern {
         /* Set fields in parameter. */
         out->code_address = map_start + start_address;
         out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize;
-        out->program_id = m_kip_header->GetProgramId();
-        out->version = m_kip_header->GetVersion();
+        out->program_id = m_kip_header.GetProgramId();
+        out->version = m_kip_header.GetVersion();
         out->flags = 0;
         MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize));

         /* Copy name field. */
-        m_kip_header->GetName(out->name, sizeof(out->name));
+        m_kip_header.GetName(out->name, sizeof(out->name));

         /* Apply ASLR, if needed. */
         if (enable_aslr) {
@@ -146,39 +146,36 @@ namespace ams::kern {
         return ResultSuccess();
     }

-    Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const {
-        /* Clear memory at the address. */
-        std::memset(GetVoidPointer(address), 0, params.code_num_pages * PageSize);
-
+    Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params, KProcessAddress src) const {
         /* Prepare to layout the data. */
-        const KProcessAddress rx_address = address + m_kip_header->GetRxAddress();
-        const KProcessAddress ro_address = address + m_kip_header->GetRoAddress();
-        const KProcessAddress rw_address = address + m_kip_header->GetRwAddress();
-        const u8 *rx_binary = reinterpret_cast<const u8 *>(m_kip_header + 1);
-        const u8 *ro_binary = rx_binary + m_kip_header->GetRxCompressedSize();
-        const u8 *rw_binary = ro_binary + m_kip_header->GetRoCompressedSize();
+        const KProcessAddress rx_address = address + m_kip_header.GetRxAddress();
+        const KProcessAddress ro_address = address + m_kip_header.GetRoAddress();
+        const KProcessAddress rw_address = address + m_kip_header.GetRwAddress();
+        const u8 *rx_binary = GetPointer<const u8>(src);
+        const u8 *ro_binary = rx_binary + m_kip_header.GetRxCompressedSize();
+        const u8 *rw_binary = ro_binary + m_kip_header.GetRoCompressedSize();

         /* Copy text. */
-        if (util::AlignUp(m_kip_header->GetRxSize(), PageSize)) {
-            std::memcpy(GetVoidPointer(rx_address), rx_binary, m_kip_header->GetRxCompressedSize());
-            if (m_kip_header->IsRxCompressed()) {
-                BlzUncompress(GetVoidPointer(rx_address + m_kip_header->GetRxCompressedSize()));
+        if (util::AlignUp(m_kip_header.GetRxSize(), PageSize)) {
+            std::memmove(GetVoidPointer(rx_address), rx_binary, m_kip_header.GetRxCompressedSize());
+            if (m_kip_header.IsRxCompressed()) {
+                BlzUncompress(GetVoidPointer(rx_address + m_kip_header.GetRxCompressedSize()));
             }
         }

         /* Copy rodata. */
-        if (util::AlignUp(m_kip_header->GetRoSize(), PageSize)) {
-            std::memcpy(GetVoidPointer(ro_address), ro_binary, m_kip_header->GetRoCompressedSize());
-            if (m_kip_header->IsRoCompressed()) {
-                BlzUncompress(GetVoidPointer(ro_address + m_kip_header->GetRoCompressedSize()));
+        if (util::AlignUp(m_kip_header.GetRoSize(), PageSize)) {
+            std::memmove(GetVoidPointer(ro_address), ro_binary, m_kip_header.GetRoCompressedSize());
+            if (m_kip_header.IsRoCompressed()) {
+                BlzUncompress(GetVoidPointer(ro_address + m_kip_header.GetRoCompressedSize()));
             }
         }

         /* Copy rwdata. */
-        if (util::AlignUp(m_kip_header->GetRwSize(), PageSize)) {
-            std::memcpy(GetVoidPointer(rw_address), rw_binary, m_kip_header->GetRwCompressedSize());
-            if (m_kip_header->IsRwCompressed()) {
-                BlzUncompress(GetVoidPointer(rw_address + m_kip_header->GetRwCompressedSize()));
+        if (util::AlignUp(m_kip_header.GetRwSize(), PageSize)) {
+            std::memmove(GetVoidPointer(rw_address), rw_binary, m_kip_header.GetRwCompressedSize());
+            if (m_kip_header.IsRwCompressed()) {
+                BlzUncompress(GetVoidPointer(rw_address + m_kip_header.GetRwCompressedSize()));
             }
         }

@@ -192,27 +189,27 @@ namespace ams::kern {
     }

     Result KInitialProcessReader::SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const {
-        const size_t rx_size = m_kip_header->GetRxSize();
-        const size_t ro_size = m_kip_header->GetRoSize();
-        const size_t rw_size = m_kip_header->GetRwSize();
-        const size_t bss_size = m_kip_header->GetBssSize();
+        const size_t rx_size = m_kip_header.GetRxSize();
+        const size_t ro_size = m_kip_header.GetRoSize();
+        const size_t rw_size = m_kip_header.GetRwSize();
+        const size_t bss_size = m_kip_header.GetBssSize();

         /* Set R-X pages. */
         if (rx_size) {
-            const uintptr_t start = m_kip_header->GetRxAddress() + params.code_address;
+            const uintptr_t start = m_kip_header.GetRxAddress() + params.code_address;
             R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(rx_size, PageSize), ams::svc::MemoryPermission_ReadExecute));
         }

         /* Set R-- pages. */
         if (ro_size) {
-            const uintptr_t start = m_kip_header->GetRoAddress() + params.code_address;
+            const uintptr_t start = m_kip_header.GetRoAddress() + params.code_address;
             R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(ro_size, PageSize), ams::svc::MemoryPermission_Read));
         }

         /* Set RW- pages. */
         if (rw_size || bss_size) {
-            const uintptr_t start = (rw_size ? m_kip_header->GetRwAddress() : m_kip_header->GetBssAddress()) + params.code_address;
-            const uintptr_t end = (bss_size ? m_kip_header->GetBssAddress() + bss_size : m_kip_header->GetRwAddress() + rw_size) + params.code_address;
+            const uintptr_t start = (rw_size ? m_kip_header.GetRwAddress() : m_kip_header.GetBssAddress()) + params.code_address;
+            const uintptr_t end = (bss_size ? m_kip_header.GetBssAddress() + bss_size : m_kip_header.GetRwAddress() + rw_size) + params.code_address;
             R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(end - start, PageSize), ams::svc::MemoryPermission_ReadWrite));
         }

diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
index 7143f062c..36434ee36 100644
--- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
@@ -100,19 +100,48 @@ namespace ams::kern {
         }

         /* Free each region to its corresponding heap. */
+        size_t reserved_sizes[MaxManagerCount] = {};
+        const uintptr_t ini_start = GetInteger(GetInitialProcessBinaryAddress());
+        const uintptr_t ini_end = ini_start + InitialProcessBinarySizeMax;
+        const uintptr_t ini_last = ini_end - 1;
         for (const auto &it : KMemoryLayout::GetVirtualMemoryRegionTree()) {
             if (it.IsDerivedFrom(KMemoryRegionType_VirtualDramUserPool)) {
-                /* Check the region. */
-                MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0);
+                /* Get the manager for the region. */
+                auto &manager = m_managers[it.GetAttributes()];

-                /* Free the memory to the heap. */
-                m_managers[it.GetAttributes()].Free(it.GetAddress(), it.GetSize() / PageSize);
+                if (it.GetAddress() <= ini_start && ini_last <= it.GetLastAddress()) {
+                    /* Free memory before the ini to the heap. */
+                    if (it.GetAddress() != ini_start) {
+                        manager.Free(it.GetAddress(), (ini_start - it.GetAddress()) / PageSize);
+                    }
+
+                    /* Open/reserve the ini memory. */
+                    manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
+                    reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
+
+                    /* Free memory after the ini to the heap. */
+                    if (ini_last != it.GetLastAddress()) {
+                        MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0);
+                        manager.Free(ini_end, it.GetEndAddress() - ini_end);
+                    }
+                } else {
+                    /* Ensure there's no partial overlap with the ini image. */
+                    if (it.GetAddress() <= ini_last) {
+                        MESOSPHERE_ABORT_UNLESS(it.GetLastAddress() < ini_start);
+                    } else {
+                        /* Otherwise, check the region for general validity. */
+                        MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0);
+                    }
+
+                    /* Free the memory to the heap. */
+                    manager.Free(it.GetAddress(), it.GetSize() / PageSize);
+                }
             }
         }

         /* Update the used size for all managers. */
         for (size_t i = 0; i < m_num_managers; ++i) {
-            m_managers[i].UpdateUsedHeapSize();
+            m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
         }
     }

diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
index a6c213344..490999d0e 100644
--- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
+++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
@@ -362,10 +362,9 @@ namespace ams::kern::init {
         /* NOTE: Nintendo does this only on 10.0.0+ */
         init_pt.PhysicallyRandomize(slab_region_start, slab_region_size, false);

-        /* Determine size available for kernel page table heaps, requiring > 8 MB. */
+        /* Determine size available for kernel page table heaps. */
         const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
         const size_t page_table_heap_size = GetInteger(resource_end_phys_addr) - GetInteger(slab_end_phys_addr);
-        MESOSPHERE_INIT_ABORT_UNLESS(page_table_heap_size / 4_MB > 2);

         /* Insert a physical region for the kernel page table heap region */
         MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_end_phys_addr), page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
diff --git a/mesosphere/kernel/source/arch/arm64/init/start.s b/mesosphere/kernel/source/arch/arm64/init/start.s
index 398440c7d..6300c50bd 100644
--- a/mesosphere/kernel/source/arch/arm64/init/start.s
+++ b/mesosphere/kernel/source/arch/arm64/init/start.s
@@ -53,7 +53,7 @@ __metadata_kernel_layout:
     .word __bss_start__ - _start /* rw_end_offset */
     .word __bss_start__ - _start /* bss_offset */
     .word __bss_end__ - _start /* bss_end_offset */
-    .word __end__ - _start /* ini_load_offset */
+    .word __end__ - _start /* resource_offset */
    .word _DYNAMIC - _start /* dynamic_offset */
    .word __init_array_start - _start /* init_array_offset */
    .word __init_array_end - _start /* init_array_end_offset */
diff --git a/mesosphere/kernel_ldr/source/kern_init_loader.cpp b/mesosphere/kernel_ldr/source/kern_init_loader.cpp
index 6be53d77b..1eae25f16 100644
--- a/mesosphere/kernel_ldr/source/kern_init_loader.cpp
+++ b/mesosphere/kernel_ldr/source/kern_init_loader.cpp
@@ -172,7 +172,7 @@ namespace ams::kern::init::loader {
         MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rw_offset, PageSize));
         MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(bss_end_offset, PageSize));
         const uintptr_t bss_offset = layout->bss_offset;
-        const uintptr_t ini_load_offset = layout->ini_load_offset;
+        const uintptr_t resource_offset = layout->resource_offset;
         const uintptr_t dynamic_offset = layout->dynamic_offset;
         const uintptr_t init_array_offset = layout->init_array_offset;
         const uintptr_t init_array_end_offset = layout->init_array_end_offset;
@@ -181,8 +181,8 @@ namespace ams::kern::init::loader {
         const size_t resource_region_size = KMemoryLayout::GetResourceRegionSizeForInit();

         /* Setup the INI1 header in memory for the kernel. */
-        const uintptr_t ini_end_address = base_address + ini_load_offset + resource_region_size;
-        const uintptr_t ini_load_address = ini_end_address - InitialProcessBinarySizeMax;
+        const uintptr_t resource_end_address = base_address + resource_offset + resource_region_size;
+        const uintptr_t ini_load_address = GetInteger(KSystemControl::Init::GetInitialProcessBinaryPhysicalAddress());
         if (ini_base_address != ini_load_address) {
             /* The INI is not at the correct address, so we need to relocate it. */
             const InitialProcessBinaryHeader *ini_header = reinterpret_cast<const InitialProcessBinaryHeader *>(ini_base_address);
@@ -195,14 +195,14 @@ namespace ams::kern::init::loader {
             }
         }

-        /* We want to start allocating page tables at ini_end_address. */
-        g_initial_page_allocator.Initialize(ini_end_address);
+        /* We want to start allocating page tables at the end of the resource region. */
+        g_initial_page_allocator.Initialize(resource_end_address);

         /* Make a new page table for TTBR1_EL1. */
         KInitialPageTable init_pt(KernelBaseRangeStart, KernelBaseRangeLast, g_initial_page_allocator);

         /* Setup initial identity mapping. TTBR1 table passed by reference. */
-        SetupInitialIdentityMapping(init_pt, base_address, bss_end_offset, ini_end_address, InitialPageTableRegionSizeMax, g_initial_page_allocator);
+        SetupInitialIdentityMapping(init_pt, base_address, bss_end_offset, resource_end_address, InitialPageTableRegionSizeMax, g_initial_page_allocator);

         /* Generate a random slide for the kernel's base address. */
         const KVirtualAddress virtual_base_address = GetRandomKernelBaseAddress(init_pt, base_address, bss_end_offset);
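
For reference, the address arithmetic the patch introduces works out as follows: KSystemControl::Init::GetInitialProcessBinaryPhysicalAddress() places the INI1 image in the last InitialProcessBinarySizeMax bytes of intended DRAM, directly below the KTrace buffer, GetInitialProcessBinaryAddress() mirrors that window at the tail of the virtual user pool, and KMemoryManager::Initialize() reserves exactly that window (via OpenFirst) before freeing the rest of the pool. A minimal standalone C++ sketch of the same computation follows; it is not part of the patch, and the constants are illustrative stand-ins (DramBase for DramPhysicalAddress, MemorySize for GetIntendedMemorySize(), TraceBufferSize for KTraceBufferSize, IniSizeMax for the 12 MB InitialProcessBinarySizeMax), not values taken from a real device.

    // Illustrative sketch of the INI1 placement arithmetic; all constants are
    // hypothetical stand-ins for values the kernel derives at runtime.
    #include <cstdint>

    constexpr uint64_t DramBase        = 0x80000000;   // stands in for DramPhysicalAddress
    constexpr uint64_t MemorySize      = 4ull << 30;   // e.g. a 4 GB intended memory size
    constexpr uint64_t TraceBufferSize = 0;            // 0 when KTrace is disabled
    constexpr uint64_t IniSizeMax      = 12ull << 20;  // stands in for InitialProcessBinarySizeMax

    // Mirrors GetInitialProcessBinaryPhysicalAddress(): the INI1 image occupies the
    // last IniSizeMax bytes of intended DRAM, just below the (optional) trace buffer.
    constexpr uint64_t IniPhysicalAddress = DramBase + MemorySize - TraceBufferSize - IniSizeMax;

    static_assert(IniPhysicalAddress == 0x17F400000);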