mirror of https://github.com/Atmosphere-NX/Atmosphere
kern: take alignment argument in KMemoryManager::AllocateAndOpen
parent 5e63792a67
commit f5a9d1d6e8
5 changed files with 20 additions and 14 deletions
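
The commit lets callers of KMemoryManager::AllocateAndOpen request a block alignment in pages: the alignment is translated into a minimum KPageHeap block index, and AllocatePageGroupImpl is no longer allowed to fall back to blocks smaller than that. A caller-side sketch of the new signature (the page counts, pool, and direction below are illustrative, not taken from the commit):

    /* Sketch: allocate 16 pages such that every block in the group is at */
    /* least 4 pages large; pool/direction values here are hypothetical.  */
    KPageGroup pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
    R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(
        std::addressof(pg), /* num_pages= */ 16, /* align_pages= */ 4,
        KMemoryManager::EncodeOption(KMemoryManager::Pool_Application, KMemoryManager::Direction_FromFront)));
    ON_SCOPE_EXIT { pg.Close(); };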
@@ -185,7 +185,7 @@ namespace ams::kern {
                 }
             }
 
-            Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random);
+            Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index);
         public:
             KMemoryManager()
                 : m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process()
@@ -199,7 +199,7 @@
             NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
 
             NOINLINE KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
-            NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
+            NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option);
             NOINLINE Result AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
 
             Pool GetPool(KPhysicalAddress address) const {
@@ -136,7 +136,7 @@ namespace ams::kern {
         {
             /* Allocate the previously unreserved pages. */
             KPageGroup unreserve_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
-            MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+            MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
 
             /* Add the previously reserved pages. */
             if (src_pool == dst_pool && binary_pages != 0) {
@@ -173,7 +173,7 @@
             /* If the pool is the same, we need to use the workaround page group. */
             if (src_pool == dst_pool) {
                 /* Allocate a new, usable group for the process. */
-                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
 
                 /* Copy data from the working page group to the usable one. */
                 auto work_it = pg.begin();
@@ -225,7 +225,7 @@
         return allocated_block;
     }
 
-    Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random) {
+    Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index) {
         /* Choose a heap based on our page size request. */
         const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
         R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
@@ -241,7 +241,7 @@
         };
 
         /* Keep allocating until we've allocated all our pages. */
-        for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
+        for (s32 index = heap_index; index >= min_heap_index && num_pages > 0; index--) {
             const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
             for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
                 while (num_pages >= pages_per_alloc) {
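
This bound is the functional core of the change. Previously the loop always fell back to index 0 (single pages) to satisfy any remainder; with min_heap_index it stops early, so the resulting group contains only blocks at least as large as the alignment request, or the allocation fails. A standalone sketch of the bounded loop, using a hypothetical three-entry block-size table (the real table lives in KPageHeap):

    #include <cstddef>
    #include <cstdio>

    /* Hypothetical block sizes in pages, ascending; illustrative only. */
    constexpr size_t BlockNumPages[] = { 1, 16, 512 };

    /* Mimics the bounded loop: consume blocks from the largest usable size
       down to min_heap_index; pages left over below that bound mean failure. */
    bool Allocate(size_t num_pages, int heap_index, int min_heap_index) {
        for (int index = heap_index; index >= min_heap_index && num_pages > 0; index--) {
            const size_t pages_per_alloc = BlockNumPages[index];
            while (num_pages >= pages_per_alloc) {
                num_pages -= pages_per_alloc; /* stand-in for taking a block from the heap */
            }
        }
        return num_pages == 0; /* leftover pages correspond to svc::ResultOutOfMemory() */
    }

    int main() {
        std::printf("%d\n", Allocate(528, 2, 1)); /* 1: 512 + 16, all blocks >= 16 pages */
        std::printf("%d\n", Allocate(520, 2, 1)); /* 0: 8 pages cannot be placed at index >= 1 */
        std::printf("%d\n", Allocate(520, 2, 0)); /* 1: the old behavior, falling back to 1-page blocks */
    }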
@@ -274,7 +274,7 @@
         R_SUCCEED();
     }
 
-    Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option) {
+    Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option) {
         MESOSPHERE_ASSERT(out != nullptr);
         MESOSPHERE_ASSERT(out->GetNumPages() == 0);
 
@@ -285,8 +285,11 @@
         const auto [pool, dir] = DecodeOption(option);
         KScopedLightLock lk(m_pool_locks[pool]);
 
+        /* Choose a heap based on our alignment size request. */
+        const s32 heap_index = KPageHeap::GetAlignedBlockIndex(align_pages, align_pages);
+
         /* Allocate the page group. */
-        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true));
+        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true, heap_index));
 
         /* Open the first reference to the pages. */
         for (const auto &block : *out) {
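
GetAlignedBlockIndex(align_pages, align_pages) selects the smallest heap block that covers the requested alignment; assuming, as with buddy-style page heaps, that blocks are handed out naturally aligned to their own size, bounding AllocatePageGroupImpl at that index is what yields the alignment guarantee. A sketch of that index selection under the same hypothetical block-size table as above:

    #include <cstddef>

    /* Hypothetical ascending block sizes in pages; illustrative only. */
    constexpr size_t BlockNumPages[] = { 1, 16, 512 };

    /* Analogue of KPageHeap::GetAlignedBlockIndex(align_pages, align_pages):
       the smallest index whose block size covers the alignment request. */
    constexpr int GetMinHeapIndex(size_t align_pages) {
        for (int i = 0; i < 3; ++i) {
            if (BlockNumPages[i] >= align_pages) { return i; }
        }
        return -1; /* alignment larger than any block: unsatisfiable */
    }

    static_assert(GetMinHeapIndex(1)  == 0); /* align_pages = 1 keeps the old lower bound */
    static_assert(GetMinHeapIndex(16) == 1);
    static_assert(GetMinHeapIndex(17) == 2); /* rounds up to the next block size */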
@@ -326,8 +329,11 @@
         const bool has_optimized = m_has_optimized_process[pool];
         const bool is_optimized = m_optimized_process_ids[pool] == process_id;
 
+        /* Always use the minimum alignment size. */
+        const s32 heap_index = 0;
+
         /* Allocate the page group. */
-        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false));
+        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false, heap_index));
 
         /* Set whether we should optimize. */
         optimized = has_optimized && is_optimized;
@@ -1266,7 +1266,7 @@
 
         /* Allocate pages for the insecure memory. */
         KPageGroup pg(m_block_info_manager);
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, 1, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));
 
         /* Close the opened pages when we're done with them. */
         /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
@@ -1426,7 +1426,7 @@
         KPageGroup pg(m_block_info_manager);
 
         /* Allocate the pages. */
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, m_allocate_option));
 
         /* Ensure that the page group is closed when we're done working with it. */
         ON_SCOPE_EXIT { pg.Close(); };
@@ -1931,7 +1931,7 @@
 
         /* Allocate pages for the heap extension. */
         KPageGroup pg(m_block_info_manager);
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, m_allocate_option));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, 1, m_allocate_option));
 
         /* Close the opened pages when we're done with them. */
         /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
@@ -4844,7 +4844,7 @@
 
         /* Allocate the new memory. */
         const size_t num_pages = size / PageSize;
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
 
         /* Close the page group when we're done with it. */
         ON_SCOPE_EXIT { pg.Close(); };
@@ -37,7 +37,7 @@ namespace ams::kern {
         R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
 
         /* Allocate the memory. */
-        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, owner->GetAllocateOption()));
+        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, 1, owner->GetAllocateOption()));
 
         /* Commit our reservation. */
         memory_reservation.Commit();
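
The remaining hunks are mechanical call-site updates: every existing caller of AllocateAndOpen now passes align_pages = 1, which maps to the lowest heap index and so reproduces the previous unbounded-fallback behavior unchanged, while AllocateForProcess pins heap_index to 0 explicitly for the same reason.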