kern: implement SvcSetHeapSize

Michael Scire 2020-07-15 03:07:00 -07:00 committed by SciresM
parent 9c4c058307
commit 01a7606f95
5 changed files with 162 additions and 3 deletions

View file

@@ -0,0 +1,23 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>

namespace ams::kern {

    constexpr inline size_t MainMemorySize = 4_GB;

}

View file

@@ -17,6 +17,12 @@
#include <mesosphere/kern_common.hpp>
#include <mesosphere/init/kern_init_page_table_select.hpp>

#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
    #include <mesosphere/board/nintendo/nx/kern_k_memory_layout.board.nintendo_nx.hpp>
#else
    #error "Unknown board for KMemoryLayout"
#endif

namespace ams::kern {

    constexpr size_t KernelAslrAlignment = 2_MB;

View file

@@ -1077,7 +1077,119 @@ namespace ams::kern {
    }

    Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) {
-       MESOSPHERE_UNIMPLEMENTED();
        /* Lock the physical memory mutex. */
        KScopedLightLock map_phys_mem_lk(this->map_physical_memory_lock);

        /* Try to perform a reduction in heap, instead of an extension. */
        KProcessAddress cur_address;
        size_t allocation_size;
        {
            /* Lock the table. */
            KScopedLightLock lk(this->general_lock);

            /* Validate that setting heap size is possible at all. */
            R_UNLESS(!this->is_kernel, svc::ResultOutOfMemory());
            R_UNLESS(size <= static_cast<size_t>(this->heap_region_end - this->heap_region_start), svc::ResultOutOfMemory());
            R_UNLESS(size <= this->max_heap_size, svc::ResultOutOfMemory());

            if (size < static_cast<size_t>(this->current_heap_end - this->heap_region_start)) {
                /* The size being requested is less than the current size, so we need to free the end of the heap. */

                /* Create an update allocator. */
                KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
                R_TRY(allocator.GetResult());

                /* We're going to perform an update, so create a helper. */
                KScopedPageTableUpdater updater(this);

                /* Validate memory state. */
                R_TRY(this->CheckMemoryState(this->heap_region_start + size, (this->heap_region_end - this->heap_region_start) - size,
                                             KMemoryState_All, KMemoryState_Normal,
                                             KMemoryPermission_All, KMemoryPermission_UserReadWrite,
                                             KMemoryAttribute_All, KMemoryAttribute_None));

                /* Unmap the end of the heap. */
                const size_t num_pages = ((this->current_heap_end - this->heap_region_start) - size) / PageSize;
                const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false };
                R_TRY(this->Operate(updater.GetPageList(), this->heap_region_start + size, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));

                /* Release the memory from the resource limit. */
                GetCurrentProcess().ReleaseResource(ams::svc::LimitableResource_PhysicalMemoryMax, num_pages * PageSize);

                /* Apply the memory block update. */
                this->memory_block_manager.Update(std::addressof(allocator), this->heap_region_start + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);

                /* Update the current heap end. */
                this->current_heap_end = this->heap_region_start + size;

                /* Set the output. */
                *out = this->heap_region_start;
                return ResultSuccess();
            } else if (size == static_cast<size_t>(this->current_heap_end - this->heap_region_start)) {
                /* The size requested is exactly the current size. */
                *out = this->heap_region_start;
                return ResultSuccess();
            } else {
                /* We have to allocate memory. Determine how much to allocate and where while the table is locked. */
                cur_address     = this->current_heap_end;
                allocation_size = size - (this->current_heap_end - this->heap_region_start);
            }
        }

        /* Reserve memory for the heap extension. */
        KScopedResourceReservation memory_reservation(GetCurrentProcess().GetResourceLimit(), ams::svc::LimitableResource_PhysicalMemoryMax, allocation_size);
        R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());

        /* Allocate pages for the heap extension. */
        KPageGroup pg(this->block_info_manager);
        R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), allocation_size / PageSize, this->allocate_option));

        /* Open the pages in the group for the duration of the call, and close them at the end. */
        /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
        pg.Open();
        ON_SCOPE_EXIT { pg.Close(); };

        /* Clear all the newly allocated pages. */
        for (const auto &it : pg) {
            std::memset(GetVoidPointer(it.GetAddress()), this->heap_fill_value, it.GetSize());
        }

        /* Map the pages. */
        {
            /* Lock the table. */
            KScopedLightLock lk(this->general_lock);

            /* Create an update allocator. */
            KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
            R_TRY(allocator.GetResult());

            /* We're going to perform an update, so create a helper. */
            KScopedPageTableUpdater updater(this);

            /* Ensure that the heap hasn't changed since we began executing. */
            MESOSPHERE_ABORT_UNLESS(cur_address == this->current_heap_end);

            /* Check the memory state. */
            R_TRY(this->CheckMemoryState(this->current_heap_end, allocation_size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

            /* Map the pages. */
            const size_t num_pages = allocation_size / PageSize;
            const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, false };
            R_TRY(this->Operate(updater.GetPageList(), this->current_heap_end, num_pages, pg, map_properties, OperationType_MapGroup, false));

            /* We succeeded, so commit our memory reservation. */
            memory_reservation.Commit();

            /* Apply the memory block update. */
            this->memory_block_manager.Update(std::addressof(allocator), this->current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None);

            /* Update the current heap end. */
            this->current_heap_end = this->heap_region_start + size;

            /* Set the output. */
            *out = this->heap_region_start;
            return ResultSuccess();
        }
    }

    Result KPageTableBase::SetMaxHeapSize(size_t size) {
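The flow above reduces to three cases, decided while the page table lock is held: shrink (unmap the heap tail and release the PhysicalMemoryMax resource), no change (the requested size equals the current size), and grow (reserve and allocate pages outside the lock, then re-validate and map under the lock). A minimal standalone sketch of that decision, using plain integers in place of KProcessAddress; the names are illustrative and not part of this commit:

/* Illustrative sketch only: models the branch structure of SetHeapSize. */
#include <cstddef>
#include <cstdint>

enum class HeapOp { Shrink, NoChange, Grow };

HeapOp ClassifyHeapRequest(std::uintptr_t heap_region_start, std::uintptr_t current_heap_end, std::size_t requested_size) {
    const std::size_t current_size = current_heap_end - heap_region_start;
    if (requested_size < current_size) {
        return HeapOp::Shrink;    /* Unmap the tail, release the resource limit. */
    } else if (requested_size == current_size) {
        return HeapOp::NoChange;  /* Nothing to do; the heap base is returned as-is. */
    } else {
        return HeapOp::Grow;      /* Reserve, allocate, clear, then map new pages. */
    }
}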

View file

@@ -21,6 +21,20 @@ namespace ams::kern::svc {
    namespace {

        Result SetHeapSize(uintptr_t *out_address, size_t size) {
            /* Validate size. */
            R_UNLESS(util::IsAligned(size, ams::svc::HeapSizeAlignment), svc::ResultInvalidSize());
            R_UNLESS(size < ams::kern::MainMemorySize, svc::ResultInvalidSize());

            /* Set the heap size. */
            KProcessAddress address;
            R_TRY(GetCurrentProcess().GetPageTable().SetHeapSize(std::addressof(address), size));

            /* Set the output. */
            *out_address = GetInteger(address);
            return ResultSuccess();
        }

        Result SetUnsafeLimit(size_t limit) {
            /* Ensure the size is aligned. */
            R_UNLESS(util::IsAligned(limit, PageSize), svc::ResultInvalidSize());
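The SetHeapSize wrapper above accepts only sizes that are multiples of HeapSizeAlignment (2 MB) and strictly below MainMemorySize (4 GB), the two constants introduced elsewhere in this commit. A self-contained sketch of that validation under those assumptions; the constant names here are illustrative, not the kernel's:

/* Illustrative sketch only: mirrors the wrapper's size validation. */
#include <cstdint>

constexpr std::uint64_t ExampleHeapSizeAlignment = 2ull * 1024 * 1024;          /* 2_MB */
constexpr std::uint64_t ExampleMainMemorySize    = 4ull * 1024 * 1024 * 1024;   /* 4_GB */

constexpr bool IsValidHeapSize(std::uint64_t size) {
    return (size % ExampleHeapSizeAlignment) == 0 && size < ExampleMainMemorySize;
}

static_assert( IsValidHeapSize(0),                      "freeing the entire heap is allowed");
static_assert( IsValidHeapSize(32ull * 1024 * 1024),    "32 MB is 2 MB aligned");
static_assert(!IsValidHeapSize(3ull * 1024 * 1024),     "3 MB is not 2 MB aligned");
static_assert(!IsValidHeapSize(ExampleMainMemorySize),  "must be strictly below main memory size");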
@@ -37,7 +51,8 @@ namespace ams::kern::svc {
    /* ============================= 64 ABI ============================= */

    Result SetHeapSize64(ams::svc::Address *out_address, ams::svc::Size size) {
-       MESOSPHERE_PANIC("Stubbed SvcSetHeapSize64 was called.");
        static_assert(sizeof(*out_address) == sizeof(uintptr_t));
        return SetHeapSize(reinterpret_cast<uintptr_t *>(out_address), size);
    }

    Result MapPhysicalMemory64(ams::svc::Address address, ams::svc::Size size) {
@@ -63,7 +78,8 @@ namespace ams::kern::svc {
    /* ============================= 64From32 ABI ============================= */

    Result SetHeapSize64From32(ams::svc::Address *out_address, ams::svc::Size size) {
-       MESOSPHERE_PANIC("Stubbed SvcSetHeapSize64From32 was called.");
        static_assert(sizeof(*out_address) == sizeof(uintptr_t));
        return SetHeapSize(reinterpret_cast<uintptr_t *>(out_address), size);
    }

    Result MapPhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) {
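Both ABI entry points forward to the shared helper through a pointer cast; the static_assert records the assumption that ams::svc::Address and uintptr_t have the same size on the 64-bit kernel, so writing through the reinterpreted pointer stores the full address. A stripped-down sketch of the same pattern with simplified stand-in types (not the actual mesosphere definitions):

/* Illustrative sketch only: the static_assert-then-cast forwarding pattern. */
#include <cstdint>

using Address = std::uint64_t;  /* Stand-in for ams::svc::Address on a 64-bit target. */

void WriteResultAddress(std::uintptr_t *out) {
    *out = 0x20000000;  /* Example output value. */
}

void Wrapper(Address *out_address) {
    /* Only valid because the two types have the same size here; assert it at compile time before casting. */
    static_assert(sizeof(*out_address) == sizeof(std::uintptr_t));
    WriteResultAddress(reinterpret_cast<std::uintptr_t *>(out_address));
}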

View file

@@ -116,6 +116,8 @@ namespace ams::svc {
        MemoryAttribute_Uncached = (1 << 3),
    };

    constexpr inline size_t HeapSizeAlignment = 2_MB;

    struct PageInfo {
        u32 flags;
    };
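From userland the SVC is reached through whatever wrapper the SDK provides; the sketch below assumes the libnx-style wrapper Result svcSetHeapSize(void **out_addr, u64 size), and the chosen size and error handling are illustrative:

/* Illustrative sketch only: requesting a heap from a homebrew application. */
#include <switch.h>  /* libnx: provides svcSetHeapSize and R_FAILED. */

void *RequestHeap32Mb(void) {
    void *heap_addr = nullptr;
    /* The size must be a 2 MB multiple and below 4 GB, or the kernel rejects it with InvalidSize. */
    Result rc = svcSetHeapSize(&heap_addr, 32 * 1024 * 1024);
    if (R_FAILED(rc)) {
        return nullptr;
    }
    /* The returned pointer is the heap region base; it stays constant across later grow/shrink calls. */
    return heap_addr;
}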