From f5a9d1d6e8aee2e291df5412af97e600ebd5a7a0 Mon Sep 17 00:00:00 2001
From: Michael Scire <SciresM@gmail.com>
Date: Thu, 28 Mar 2024 02:17:38 -0700
Subject: [PATCH] kern: take alignment argument in
 KMemoryManager::AllocateAndOpen

---
 .../include/mesosphere/kern_k_memory_manager.hpp |  4 ++--
 .../source/kern_initial_process.cpp              |  4 ++--
 .../source/kern_k_memory_manager.cpp             | 16 +++++++++++-----
 .../source/kern_k_page_table_base.cpp            |  8 ++++----
 .../source/kern_k_shared_memory.cpp              |  2 +-
 5 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
index ca23119bc..bd596dfd0 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
@@ -185,7 +185,7 @@ namespace ams::kern {
                 }
             }
 
-            Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random);
+            Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index);
         public:
             KMemoryManager()
                 : m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process()
@@ -199,7 +199,7 @@ namespace ams::kern {
             NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
 
             NOINLINE KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
-            NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
+            NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option);
             NOINLINE Result AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
 
             Pool GetPool(KPhysicalAddress address) const {
diff --git a/libraries/libmesosphere/source/kern_initial_process.cpp b/libraries/libmesosphere/source/kern_initial_process.cpp
index 282bba103..ab38d5774 100644
--- a/libraries/libmesosphere/source/kern_initial_process.cpp
+++ b/libraries/libmesosphere/source/kern_initial_process.cpp
@@ -136,7 +136,7 @@ namespace ams::kern {
             {
                 /* Allocate the previously unreserved pages. */
                 KPageGroup unreserve_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
-                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
 
                 /* Add the previously reserved pages. */
                 if (src_pool == dst_pool && binary_pages != 0) {
@@ -173,7 +173,7 @@ namespace ams::kern {
             /* If the pool is the same, we need to use the workaround page group. */
             if (src_pool == dst_pool) {
                 /* Allocate a new, usable group for the process. */
-                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
 
                 /* Copy data from the working page group to the usable one. */
                 auto work_it = pg.begin();
diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
index 01dcd565a..fa533387c 100644
--- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
@@ -225,7 +225,7 @@ namespace ams::kern {
         return allocated_block;
     }
 
-    Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random) {
+    Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index) {
         /* Choose a heap based on our page size request. */
         const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
         R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
@@ -241,7 +241,7 @@ namespace ams::kern {
         };
 
         /* Keep allocating until we've allocated all our pages. */
-        for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
+        for (s32 index = heap_index; index >= min_heap_index && num_pages > 0; index--) {
             const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
             for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
                 while (num_pages >= pages_per_alloc) {
@@ -274,7 +274,7 @@ namespace ams::kern {
         R_SUCCEED();
     }
 
-    Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option) {
+    Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option) {
         MESOSPHERE_ASSERT(out != nullptr);
         MESOSPHERE_ASSERT(out->GetNumPages() == 0);
 
@@ -285,8 +285,11 @@ namespace ams::kern {
         const auto [pool, dir] = DecodeOption(option);
         KScopedLightLock lk(m_pool_locks[pool]);
 
+        /* Choose a heap based on our alignment size request. */
+        const s32 heap_index = KPageHeap::GetAlignedBlockIndex(align_pages, align_pages);
+
         /* Allocate the page group. */
-        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true));
+        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true, heap_index));
 
         /* Open the first reference to the pages. */
         for (const auto &block : *out) {
@@ -326,8 +329,11 @@ namespace ams::kern {
         const bool has_optimized = m_has_optimized_process[pool];
         const bool is_optimized = m_optimized_process_ids[pool] == process_id;
 
+        /* Always use the minimum alignment size. */
+        const s32 heap_index = 0;
+
         /* Allocate the page group. */
-        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false));
+        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false, heap_index));
 
         /* Set whether we should optimize. */
         optimized = has_optimized && is_optimized;
diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index d58ceb717..136c92333 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -1266,7 +1266,7 @@ namespace ams::kern {
 
         /* Allocate pages for the insecure memory. */
         KPageGroup pg(m_block_info_manager);
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, 1, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));
 
         /* Close the opened pages when we're done with them. */
         /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
@@ -1426,7 +1426,7 @@ namespace ams::kern {
         KPageGroup pg(m_block_info_manager);
 
         /* Allocate the pages. */
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, m_allocate_option));
 
         /* Ensure that the page group is closed when we're done working with it. */
         ON_SCOPE_EXIT { pg.Close(); };
@@ -1931,7 +1931,7 @@ namespace ams::kern {
 
         /* Allocate pages for the heap extension. */
         KPageGroup pg(m_block_info_manager);
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, m_allocate_option));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, 1, m_allocate_option));
 
         /* Close the opened pages when we're done with them. */
         /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
@@ -4844,7 +4844,7 @@ namespace ams::kern {
 
         /* Allocate the new memory. */
         const size_t num_pages = size / PageSize;
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
 
         /* Close the page group when we're done with it. */
         ON_SCOPE_EXIT { pg.Close(); };
diff --git a/libraries/libmesosphere/source/kern_k_shared_memory.cpp b/libraries/libmesosphere/source/kern_k_shared_memory.cpp
index c7850fb1d..95d0dc401 100644
--- a/libraries/libmesosphere/source/kern_k_shared_memory.cpp
+++ b/libraries/libmesosphere/source/kern_k_shared_memory.cpp
@@ -37,7 +37,7 @@ namespace ams::kern {
         R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
 
         /* Allocate the memory. */
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, owner->GetAllocateOption()));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, 1, owner->GetAllocateOption()));
 
         /* Commit our reservation. */
         memory_reservation.Commit();
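
Usage note: every existing call site in this patch passes an alignment of 1 page, which preserves the old behavior (minimum heap index 0, so the allocator may fall back to the smallest block size). A caller that needs every block in the returned group to span at least a given power-of-two number of pages can pass that count as align_pages; since KPageHeap blocks are naturally size-aligned, each block then also starts on that boundary. Below is a minimal sketch of such a caller; the wrapper function and the 64/16 page counts are hypothetical, chosen only for illustration:

    /* Hypothetical caller (not part of this patch): allocate a 64-page   */
    /* group in which every constituent block spans at least 16 pages, by */
    /* passing align_pages = 16 so AllocatePageGroupImpl never falls back */
    /* below that block size.                                             */
    Result AllocateAlignedPageGroupExample(KPageGroup *out) {
        constexpr size_t NumPages   = 64;
        constexpr size_t AlignPages = 16;
        R_RETURN(Kernel::GetMemoryManager().AllocateAndOpen(out, NumPages, AlignPages, KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront)));
    }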