From 92521eed2a75d90e28164d8b10b7c74e819773a4 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Mon, 17 Feb 2020 02:49:21 -0800 Subject: [PATCH] kern: implement through kip decompression --- .../mesosphere/arch/arm64/kern_cpu.hpp | 2 + .../arch/arm64/kern_k_page_table.hpp | 31 +++++- .../arm64/kern_k_supervisor_page_table.hpp | 12 ++ .../kern_k_initial_process_reader.hpp | 1 + .../mesosphere/kern_k_memory_layout.hpp | 87 ++++++++++++++- .../include/mesosphere/kern_k_page_group.hpp | 1 + .../mesosphere/kern_k_page_table_base.hpp | 38 +++++-- .../source/arch/arm64/kern_cpu.cpp | 4 + .../source/arch/arm64/kern_k_page_table.cpp | 62 +++++++++-- .../source/kern_initial_process.cpp | 42 ++++--- .../source/kern_k_initial_process_reader.cpp | 104 +++++++++++++++++- .../source/kern_k_page_table_base.cpp | 85 +++++++++++++- 12 files changed, 427 insertions(+), 42 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp index 7e0af46ef..7d7f657e0 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp @@ -157,6 +157,8 @@ namespace ams::kern::arch::arm64::cpu { void FlushEntireDataCacheSharedForInit(); void FlushEntireDataCacheLocalForInit(); + void FlushEntireDataCache(); + Result InvalidateDataCache(void *addr, size_t size); Result StoreDataCache(const void *addr, size_t size); Result FlushDataCache(const void *addr, size_t size); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp index 0975d658d..a8a8b460b 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -44,7 +44,6 @@ namespace ams::kern::arch::arm64 { BlockType_Count, }; - static_assert(L3BlockSize == PageSize); static constexpr size_t ContiguousPageSize = L3ContiguousBlockSize; @@ -79,6 +78,16 @@ namespace ams::kern::arch::arm64 { MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } } + + static constexpr size_t GetSmallerAlignment(size_t alignment) { + MESOSPHERE_ASSERT(alignment > L3BlockSize); + return KPageTable::GetBlockSize(static_cast<BlockType>(KPageTable::GetBlockType(alignment) - 1)); + } + + static constexpr size_t GetLargerAlignment(size_t alignment) { + MESOSPHERE_ASSERT(alignment < L1BlockSize); + return KPageTable::GetBlockSize(static_cast<BlockType>(KPageTable::GetBlockType(alignment) + 1)); + } protected: virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override; virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override; @@ -164,7 +173,25 @@ namespace ams::kern::arch::arm64 { Result Finalize(); private: Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); - Result Unmap(KProcessAddress virt_addr, size_t num_pages, KPageGroup *pg, PageLinkedList *page_list, bool force, bool reuse_ll); + Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll); + + Result 
Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, size_t page_size, PageLinkedList *page_list, bool reuse_ll) { + switch (page_size) { + case L1BlockSize: +#ifdef ATMOSPHERE_BOARD_NINTENDO_NX + case L2TegraSmmuBlockSize: +#endif + case L2BlockSize: + case L3BlockSize: + break; + case L2ContiguousBlockSize: + case L3ContiguousBlockSize: + entry_template.SetContiguous(true); + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + return this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll); + } Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp index 6596e70fb..4061c78e9 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp @@ -35,6 +35,18 @@ namespace ams::kern::arch::arm64 { return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm); } + Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { + return this->page_table.UnmapPages(address, num_pages, state); + } + + Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { + return this->page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm); + } + + Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) { + return this->page_table.UnmapPageGroup(address, pg, state); + } + bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const { return this->page_table.GetPhysicalAddress(out, address); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp index d70ade490..11ec5ddee 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_initial_process_reader.hpp @@ -127,6 +127,7 @@ namespace ams::kern { } Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const; + Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const; }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index 683ddc284..b90fcfea2 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -180,7 +180,7 @@ namespace ams::kern { } constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const { - return this->GetAddress() <= address && address < this->GetLastAddress(); + return this->GetAddress() <= address && address <= this->GetLastAddress(); } constexpr ALWAYS_INLINE bool IsDerivedFrom(u32 type) const { @@ -231,6 +231,7 @@ namespace ams::kern { }; private: using TreeType = util::IntrusiveRedBlackTreeBaseTraits::TreeType; + public: using value_type = TreeType::value_type; using size_type = TreeType::size_type; using 
difference_type = TreeType::difference_type; @@ -276,7 +277,7 @@ namespace ams::kern { MESOSPHERE_INIT_ABORT(); } - DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) { + DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const { DerivedRegionExtents extents; MESOSPHERE_INIT_ABORT_UNLESS(extents.first_region == nullptr); @@ -479,12 +480,24 @@ namespace ams::kern { return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelStack); } + static NOINLINE KMemoryRegion &GetTempRegion() { + return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelTemp); + } + static NOINLINE KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) { return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address)); } - static NOINLINE bool IsHeapPhysicalAddress(KMemoryRegion **out, KPhysicalAddress address) { - if (auto it = GetPhysicalLinearMemoryRegionTree().FindContainingRegion(GetInteger(address)); it != GetPhysicalLinearMemoryRegionTree().end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) { + static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, const KMemoryRegion *hint = nullptr) { + auto &tree = GetPhysicalLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) { if (out) { *out = std::addressof(*it); } @@ -493,6 +506,72 @@ namespace ams::kern { return false; } + static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, size_t size, const KMemoryRegion *hint = nullptr) { + auto &tree = GetPhysicalLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) { + const uintptr_t last_address = GetInteger(address) + size - 1; + do { + if (last_address <= it->GetLastAddress()) { + if (out) { + *out = std::addressof(*it); + } + return true; + } + it++; + } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)); + } + return false; + } + + static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, const KMemoryRegion *hint = nullptr) { + auto &tree = GetVirtualLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) { + if (out) { + *out = std::addressof(*it); + } + return true; + } + return false; + } + + static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, size_t size, const KMemoryRegion *hint = nullptr) { + auto &tree = GetVirtualLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if 
(it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) { + const uintptr_t last_address = GetInteger(address) + size - 1; + do { + if (last_address <= it->GetLastAddress()) { + if (out) { + *out = std::addressof(*it); + } + return true; + } + it++; + } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)); + } + return false; + } + static NOINLINE std::tuple<size_t, size_t> GetTotalAndKernelMemorySizes() { size_t total_size = 0, kernel_size = 0; for (auto it = GetPhysicalMemoryRegionTree().cbegin(); it != GetPhysicalMemoryRegionTree().cend(); it++) { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp index e70d6cc5f..cf98a9cbd 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp @@ -38,6 +38,7 @@ namespace ams::kern { constexpr size_t GetNumPages() const { return this->num_pages; } constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; } constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); } + constexpr KVirtualAddress GetLastAddress() const { return this->GetEndAddress() - 1; } constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const { return this->address == rhs.address && this->num_pages == rhs.num_pages; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp index 1a85d2107..0056f2452 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp @@ -124,9 +124,9 @@ namespace ams::kern { bool enable_aslr; KMemoryBlockSlabManager *memory_block_slab_manager; KBlockInfoManager *block_info_manager; - KMemoryRegion *cached_physical_linear_region; - KMemoryRegion *cached_physical_heap_region; - KMemoryRegion *cached_virtual_managed_pool_dram_region; + const KMemoryRegion *cached_physical_linear_region; + const KMemoryRegion *cached_physical_heap_region; + const KMemoryRegion *cached_virtual_heap_region; MemoryFillValue heap_fill_value; MemoryFillValue ipc_fill_value; MemoryFillValue stack_fill_value; @@ -137,7 +137,7 @@ namespace ams::kern { kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(), max_heap_size(), max_physical_memory_size(), general_lock(), map_physical_memory_lock(), impl(), memory_block_manager(), allocate_option(), address_space_size(), is_kernel(), enable_aslr(), memory_block_slab_manager(), block_info_manager(), - cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_managed_pool_dram_region(), + cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(), heap_fill_value(), ipc_fill_value(), stack_fill_value() { /* ... 
*/ @@ -172,10 +172,27 @@ namespace ams::kern { bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); } bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) { - if (this->cached_physical_heap_region && this->cached_physical_heap_region->Contains(GetInteger(phys_addr))) { - return true; - } - return KMemoryLayout::IsHeapPhysicalAddress(&this->cached_physical_heap_region, phys_addr); + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, this->cached_physical_heap_region); + } + + bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, size, this->cached_physical_heap_region); + } + + bool IsHeapVirtualAddress(KVirtualAddress virt_addr) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, this->cached_virtual_heap_region); + } + + bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, size, this->cached_virtual_heap_region); } bool ContainsPages(KProcessAddress addr, size_t num_pages) const { @@ -193,6 +210,7 @@ namespace ams::kern { Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const; Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties); + Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll); NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm); public: @@ -203,6 +221,10 @@ namespace ams::kern { Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm); } + + Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); + Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm); + Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state); public: static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress addr) { return KMemoryLayout::GetLinearVirtualAddress(addr); diff --git a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp index 0a28dacf1..482e37915 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp @@ -333,6 +333,10 @@ namespace ams::kern::arch::arm64::cpu { return PerformCacheOperationBySetWayLocal(FlushDataCacheLineBySetWayImpl); } + void FlushEntireDataCache() { + return 
PerformCacheOperationBySetWayShared(FlushDataCacheLineBySetWayImpl); + } + Result InvalidateDataCache(void *addr, size_t size) { KScopedCoreMigrationDisable dm; const uintptr_t start = reinterpret_cast<uintptr_t>(addr); diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp index 581daef31..ffd1f8037 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -58,7 +58,7 @@ namespace ams::kern::arch::arm64 { } if (operation == OperationType_Unmap) { - MESOSPHERE_TODO("operation == OperationType_Unmap"); + return this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll); } else { auto entry_template = this->GetEntryTemplate(properties); @@ -175,7 +175,7 @@ namespace ams::kern::arch::arm64 { return ResultSuccess(); } - Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, KPageGroup *pg, PageLinkedList *page_list, bool force, bool reuse_ll) { + Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) { MESOSPHERE_TODO_IMPLEMENT(); } @@ -188,13 +188,57 @@ namespace ams::kern::arch::arm64 { size_t remaining_pages = num_pages; - if (num_pages < ContiguousPageSize / PageSize) { - auto guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, nullptr, page_list, true, true)); }; - R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll)); - guard.Cancel(); - } else { - MESOSPHERE_TODO("Contiguous mapping"); - (void)remaining_pages; + /* Map the pages, using a guard to ensure we don't leak. */ + { + auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); }; + + if (num_pages < ContiguousPageSize / PageSize) { + R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, L3BlockSize, page_list, reuse_ll)); + remaining_pages -= num_pages; + virt_addr += num_pages * PageSize; + phys_addr += num_pages * PageSize; + } else { + /* Map the fractional part of the pages. */ + size_t alignment; + for (alignment = ContiguousPageSize; (GetInteger(virt_addr) & (alignment - 1)) == (GetInteger(phys_addr) & (alignment - 1)); alignment = GetLargerAlignment(alignment)) { + /* Check if this would be our last map. */ + const size_t pages_to_map = ((alignment - (GetInteger(virt_addr) & (alignment - 1))) & (alignment - 1)) / PageSize; + if (pages_to_map + (alignment / PageSize) > remaining_pages) { + break; + } + + /* Map pages, if we should. */ + if (pages_to_map > 0) { + R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, GetSmallerAlignment(alignment), page_list, reuse_ll)); + remaining_pages -= pages_to_map; + virt_addr += pages_to_map * PageSize; + phys_addr += pages_to_map * PageSize; + } + + /* Don't go further than L1 block. */ + if (alignment == L1BlockSize) { + break; + } + } + + while (remaining_pages > 0) { + /* Select the next smallest alignment. */ + alignment = GetSmallerAlignment(alignment); + MESOSPHERE_ASSERT((GetInteger(virt_addr) & (alignment - 1)) == 0); + MESOSPHERE_ASSERT((GetInteger(phys_addr) & (alignment - 1)) == 0); + + /* Map pages, if we should. 
*/ + const size_t pages_to_map = util::AlignDown(remaining_pages, alignment / PageSize); + if (pages_to_map > 0) { + R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, alignment, page_list, reuse_ll)); + remaining_pages -= pages_to_map; + virt_addr += pages_to_map * PageSize; + phys_addr += pages_to_map * PageSize; + } + } + } + + map_guard.Cancel(); } /* Perform what coalescing we can. */ diff --git a/libraries/libmesosphere/source/kern_initial_process.cpp b/libraries/libmesosphere/source/kern_initial_process.cpp index 54e2b7965..64e7c56ed 100644 --- a/libraries/libmesosphere/source/kern_initial_process.cpp +++ b/libraries/libmesosphere/source/kern_initial_process.cpp @@ -52,27 +52,41 @@ namespace ams::kern { /* Parse process parameters and reserve memory. */ ams::svc::CreateProcessParameter params; MESOSPHERE_R_ABORT_UNLESS(reader.MakeCreateProcessParameter(std::addressof(params), true)); - MESOSPHERE_TODO("Reserve memory"); + MESOSPHERE_LOG("Reserving %zx for process %zu\n", params.code_num_pages * PageSize, i); + MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, params.code_num_pages * PageSize)); - /* Create the process, and ensure we don't leak pages. */ + /* Create the process. */ + KProcess *new_process = nullptr; { + /* Declare page group to use for process memory. */ + KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager())); + /* Allocate memory for the process. */ - MESOSPHERE_TODO("Allocate memory for the process"); + auto &mm = Kernel::GetMemoryManager(); + const auto pool = static_cast<KMemoryManager::Pool>(reader.UsesSecureMemory() ? KMemoryManager::Pool_System : KSystemControl::GetInitialProcessBinaryPool()); + MESOSPHERE_R_ABORT_UNLESS(mm.Allocate(std::addressof(pg), params.code_num_pages, KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront))); - /* Map the process's memory into the temporary region. */ - MESOSPHERE_TODO("Map the process's page group"); + { + /* Ensure that we do not leak pages. */ + KScopedPageGroup spg(pg); - /* Load the process. */ - MESOSPHERE_TODO("Load the process"); + /* Map the process's memory into the temporary region. */ + const auto &temp_region = KMemoryLayout::GetTempRegion(); + KProcessAddress temp_address = Null<KProcessAddress>; + MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite)); - /* Unmap the temporary mapping. */ - MESOSPHERE_TODO("Unmap the process's page group"); + /* Load the process. */ + MESOSPHERE_R_ABORT_UNLESS(reader.Load(temp_address, params)); - /* Create a KProcess object. */ - MESOSPHERE_TODO("Create a KProcess"); + /* Unmap the temporary mapping. */ + MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, pg, KMemoryState_Kernel)); - /* Initialize the process. */ - MESOSPHERE_TODO("Initialize the process"); + /* Create a KProcess object. */ + MESOSPHERE_TODO("Create a KProcess"); + + /* Initialize the process. */ + MESOSPHERE_TODO("Initialize the process"); + } } /* Set the process's memory permissions. */ @@ -82,7 +96,7 @@ namespace ams::kern { MESOSPHERE_TODO("Register the process"); /* Save the process info. 
*/ - infos[i].process = /* TODO */ nullptr; + infos[i].process = new_process; infos[i].stack_size = reader.GetStackSize(); infos[i].priority = reader.GetPriority(); diff --git a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp index 6be4b9629..bf3a66a91 100644 --- a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp +++ b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp @@ -17,6 +17,63 @@ namespace ams::kern { + namespace { + + struct BlzSegmentFlags { + using Offset = util::BitPack16::Field<0, 12, u32>; + using Size = util::BitPack16::Field<12, 4, u32>; + }; + + NOINLINE void BlzUncompress(void *_end) { + /* Parse the footer, endian agnostic. */ + static_assert(sizeof(u32) == 4); + static_assert(sizeof(u16) == 2); + static_assert(sizeof(u8) == 1); + + u8 *end = static_cast<u8 *>(_end); + const u32 total_size = (end[-12] << 0) | (end[-11] << 8) | (end[-10] << 16) | (end[- 9] << 24); + const u32 footer_size = (end[- 8] << 0) | (end[- 7] << 8) | (end[- 6] << 16) | (end[- 5] << 24); + const u32 additional_size = (end[- 4] << 0) | (end[- 3] << 8) | (end[- 2] << 16) | (end[- 1] << 24); + + /* Prepare to decompress. */ + u8 *cmp_start = end - total_size; + u32 cmp_ofs = total_size - footer_size; + u32 out_ofs = total_size + additional_size; + + /* Decompress. */ + while (out_ofs) { + u8 control = cmp_start[--cmp_ofs]; + + /* Each bit in the control byte is a flag indicating compressed or not compressed. */ + for (size_t i = 0; i < 8 && out_ofs; ++i, control <<= 1) { + if (control & 0x80) { + /* NOTE: Nintendo does not check if it's possible to decompress. */ + /* As such, we will leave the following as a debug assertion, and not a release assertion. */ + MESOSPHERE_ASSERT(cmp_ofs >= sizeof(u16)); + cmp_ofs -= sizeof(u16); + + /* Extract segment bounds. */ + const util::BitPack16 seg_flags{static_cast<u16>((cmp_start[cmp_ofs] << 0) | (cmp_start[cmp_ofs + 1] << 8))}; + const u32 seg_ofs = seg_flags.Get<BlzSegmentFlags::Offset>() + 3; + const u32 seg_size = std::min(seg_flags.Get<BlzSegmentFlags::Size>() + 3, out_ofs); + + /* Copy the data. */ + out_ofs -= seg_size; + for (size_t j = 0; j < seg_size; j++) { + cmp_start[out_ofs + j] = cmp_start[out_ofs + seg_ofs + j]; + } + } else { + /* NOTE: Nintendo does not check if it's possible to copy. */ + /* As such, we will leave the following as a debug assertion, and not a release assertion. */ + MESOSPHERE_ASSERT(cmp_ofs >= sizeof(u8)); + cmp_start[--out_ofs] = cmp_start[--cmp_ofs]; + } + } + } + } + + } + Result KInitialProcessReader::MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const { /* Get and validate addresses/sizes. */ const uintptr_t rx_address = this->kip_header->GetRxAddress(); @@ -56,7 +113,7 @@ namespace ams::kern { /* Set fields in parameter. */ out->code_address = map_start + start_address; - out->code_num_pages = util::AlignUp(end_address - start_address, PageSize); + out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize; out->program_id = this->kip_header->GetProgramId(); out->version = this->kip_header->GetVersion(); out->flags = 0; @@ -85,4 +142,49 @@ namespace ams::kern { return ResultSuccess(); } + Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const { + /* Clear memory at the address. */ + std::memset(GetVoidPointer(address), 0, params.code_num_pages * PageSize); + + /* Prepare to layout the data. 
*/ + const KProcessAddress rx_address = address + this->kip_header->GetRxAddress(); + const KProcessAddress ro_address = address + this->kip_header->GetRoAddress(); + const KProcessAddress rw_address = address + this->kip_header->GetRwAddress(); + const u8 *rx_binary = reinterpret_cast<const u8 *>(this->kip_header + 1); + const u8 *ro_binary = rx_binary + this->kip_header->GetRxCompressedSize(); + const u8 *rw_binary = ro_binary + this->kip_header->GetRoCompressedSize(); + + /* Copy text. */ + if (util::AlignUp(this->kip_header->GetRxSize(), PageSize)) { + std::memcpy(GetVoidPointer(rx_address), rx_binary, this->kip_header->GetRxCompressedSize()); + if (this->kip_header->IsRxCompressed()) { + BlzUncompress(GetVoidPointer(rx_address + this->kip_header->GetRxCompressedSize())); + } + } + + /* Copy rodata. */ + if (util::AlignUp(this->kip_header->GetRoSize(), PageSize)) { + std::memcpy(GetVoidPointer(ro_address), ro_binary, this->kip_header->GetRoCompressedSize()); + if (this->kip_header->IsRoCompressed()) { + BlzUncompress(GetVoidPointer(ro_address + this->kip_header->GetRoCompressedSize())); + } + } + + /* Copy rwdata. */ + if (util::AlignUp(this->kip_header->GetRwSize(), PageSize)) { + std::memcpy(GetVoidPointer(rw_address), rw_binary, this->kip_header->GetRwCompressedSize()); + if (this->kip_header->IsRwCompressed()) { + BlzUncompress(GetVoidPointer(rw_address + this->kip_header->GetRwCompressedSize())); + } + } + + /* Flush caches. */ + /* NOTE: official kernel does an entire cache flush by set/way here, which is incorrect as other cores are online. */ + /* We will simply flush by virtual address, since that's what ARM says is correct to do. */ + MESOSPHERE_R_ABORT_UNLESS(cpu::FlushDataCache(GetVoidPointer(address), params.code_num_pages * PageSize)); + cpu::InvalidateEntireInstructionCache(); + + return ResultSuccess(); + } + } diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index 5aa48b2fd..d82f665f4 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -50,9 +50,9 @@ namespace ams::kern { this->ipc_fill_value = MemoryFillValue_Zero; this->stack_fill_value = MemoryFillValue_Zero; - this->cached_physical_linear_region = nullptr; - this->cached_physical_heap_region = nullptr; - this->cached_virtual_managed_pool_dram_region = nullptr; + this->cached_physical_linear_region = nullptr; + this->cached_physical_heap_region = nullptr; + this->cached_virtual_heap_region = nullptr; /* Initialize our implementation. */ this->impl.InitializeForKernel(table, start, end); @@ -285,6 +285,8 @@ namespace ams::kern { } Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + /* Create a page group to hold the pages we allocate. */ KPageGroup pg(this->block_info_manager); @@ -303,6 +305,38 @@ namespace ams::kern { return this->Operate(page_list, address, num_pages, std::addressof(pg), properties, OperationType_MapGroup, false); } + Result KPageTableBase::MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + /* Note the current address, so that we can iterate. 
*/ + const KProcessAddress start_address = address; + KProcessAddress cur_address = address; + + /* Ensure that we clean up on failure. */ + auto mapping_guard = SCOPE_GUARD { + MESOSPHERE_ABORT_UNLESS(!reuse_ll); + if (cur_address != start_address) { + const KPageProperties unmap_properties = {}; + MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, start_address, (cur_address - start_address) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true)); + } + }; + + /* Iterate, mapping all pages in the group. */ + for (const auto &block : pg) { + /* We only allow mapping pages in the heap, and we require we're mapping non-empty blocks. */ + MESOSPHERE_ABORT_UNLESS(block.GetAddress() < block.GetLastAddress()); + MESOSPHERE_ABORT_UNLESS(IsHeapVirtualAddress(block.GetAddress(), block.GetSize())); + + /* Map and advance. */ + R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), GetHeapPhysicalAddress(block.GetAddress()), true, properties, OperationType_Map, reuse_ll)); + cur_address += block.GetSize(); + } + + /* We succeeded! */ + mapping_guard.Cancel(); + return ResultSuccess(); + } + Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { MESOSPHERE_ASSERT(util::IsAligned(alignment, PageSize) && alignment >= PageSize); @@ -318,7 +352,7 @@ namespace ams::kern { R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory()); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), alignment)); MESOSPHERE_ASSERT(this->Contains(addr, num_pages * PageSize, state)); - MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_All, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None)); + MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); /* Create an update allocator. */ KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); @@ -342,4 +376,47 @@ namespace ams::kern { *out_addr = addr; return ResultSuccess(); } + + Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { + MESOSPHERE_TODO_IMPLEMENT(); + } + + Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { + /* Ensure this is a valid map request. */ + const size_t num_pages = pg.GetNumPages(); + R_UNLESS(this->Contains(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory()); + R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Find a random address to map at. */ + KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, 0, this->GetNumGuardPages()); + R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory()); + MESOSPHERE_ASSERT(this->Contains(addr, num_pages * PageSize, state)); + MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + + /* Create an update allocator. 
*/ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Perform mapping operation. */ + const KPageProperties properties = { perm, state == KMemoryState_Io, false, false }; + R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false)); + + /* Update the blocks. */ + this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None); + + /* We successfully mapped the pages. */ + *out_addr = addr; + return ResultSuccess(); + } + + Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) { + MESOSPHERE_TODO_IMPLEMENT(); + } + }
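
For readers unfamiliar with the "BLZ" format that KInitialProcessReader::Load decompresses above: KIP segments are stored as a backwards LZ77 variant whose 12-byte footer (total compressed size, footer size, additional decompressed size) sits at the very end of the compressed data, so a segment can be expanded in place from back to front. The following is a standalone sketch of the same scheme, not part of the patch: it uses plain C++ standard library types instead of util::BitPack16 and the MESOSPHERE_* macros, and the function and helper names are illustrative only.

#include <cstdint>
#include <cstddef>
#include <algorithm>

/* Decompress a backwards-LZ ("BLZ") segment in place. `end` points one byte past  */
/* the compressed data; the buffer must have `additional_size` spare bytes after    */
/* it, as recorded in the footer.                                                    */
void BlzUncompressSketch(uint8_t *end) {
    auto read_u32 = [](const uint8_t *p) {
        return static_cast<uint32_t>(p[0]) | (static_cast<uint32_t>(p[1]) << 8) |
               (static_cast<uint32_t>(p[2]) << 16) | (static_cast<uint32_t>(p[3]) << 24);
    };

    /* Footer layout: [total_size][footer_size][additional_size], little endian. */
    const uint32_t total_size      = read_u32(end - 12);
    const uint32_t footer_size     = read_u32(end -  8);
    const uint32_t additional_size = read_u32(end -  4);

    uint8_t *cmp_start = end - total_size;               /* Start of the compressed segment.         */
    uint32_t cmp_ofs   = total_size - footer_size;       /* Read cursor, moves backwards.             */
    uint32_t out_ofs   = total_size + additional_size;   /* Write cursor (end of decompressed data).  */

    while (out_ofs) {
        uint8_t control = cmp_start[--cmp_ofs];

        /* Each control bit (MSB first) selects a back-reference or a literal byte. */
        for (size_t i = 0; i < 8 && out_ofs; ++i, control <<= 1) {
            if (control & 0x80) {
                /* Back-reference: 16-bit field, low 12 bits = distance - 3, high 4 bits = length - 3. */
                cmp_ofs -= 2;
                const uint32_t seg = static_cast<uint32_t>(cmp_start[cmp_ofs]) |
                                     (static_cast<uint32_t>(cmp_start[cmp_ofs + 1]) << 8);
                const uint32_t seg_ofs  = (seg & 0xFFF) + 3;
                const uint32_t seg_size = std::min((seg >> 12) + 3, out_ofs);

                /* Copy from later (already decompressed) data toward the front. */
                out_ofs -= seg_size;
                for (uint32_t j = 0; j < seg_size; ++j) {
                    cmp_start[out_ofs + j] = cmp_start[out_ofs + seg_ofs + j];
                }
            } else {
                /* Literal byte. */
                cmp_start[--out_ofs] = cmp_start[--cmp_ofs];
            }
        }
    }
}

This mirrors the structure of the kernel's BlzUncompress: Load copies each compressed segment to its final address and then calls the decompressor on the end of the copied data, letting the segment grow in place up to its full uncompressed size before the data cache is flushed and the instruction cache invalidated.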