kern: implement SvcMapPhysicalMemory

Michael Scire 2020-07-24 08:07:34 -07:00 committed by SciresM
parent 695b82b945
commit 5ecc80a5f6
11 changed files with 559 additions and 13 deletions

View file

@@ -200,6 +200,22 @@ namespace ams::kern::arch::arm64 {
return this->page_table.CleanupForIpcClient(address, size, dst_state);
}
Result MapPhysicalMemory(KProcessAddress address, size_t size) {
return this->page_table.MapPhysicalMemory(address, size);
}
Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
return this->page_table.UnmapPhysicalMemory(address, size);
}
Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
return this->page_table.MapPhysicalMemoryUnsafe(address, size);
}
Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
return this->page_table.UnmapPhysicalMemoryUnsafe(address, size);
}
void DumpTable() const {
return this->page_table.DumpTable();
}
@@ -209,6 +225,9 @@ namespace ams::kern::arch::arm64 {
}
bool Contains(KProcessAddress addr, size_t size) const { return this->page_table.Contains(addr, size); }
bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInAliasRegion(addr, size); }
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->page_table.CanContain(addr, size, state); }
KProcessAddress GetAddressSpaceStart() const { return this->page_table.GetAddressSpaceStart(); }

View file

@@ -224,6 +224,10 @@ namespace ams::kern {
return this->ipc_lock_count;
}
constexpr KMemoryState GetState() const {
return this->state;
}
constexpr KMemoryPermission GetPermission() const {
return this->perm;
}

View file

@@ -96,6 +96,8 @@ namespace ams::kern {
void Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr);
void UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, void (KMemoryBlock::*lock_func)(KMemoryPermission new_perm), KMemoryPermission perm);
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr);
iterator FindIterator(KProcessAddress address) const {
return this->memory_block_tree.find(KMemoryBlock(address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None));
}

View file

@@ -75,7 +75,11 @@ namespace ams::kern {
void Free(KVirtualAddress addr, size_t num_pages) { this->heap.Free(addr, num_pages); }
void InitializeOptimizedMemory() { std::memset(GetVoidPointer(this->metadata_region), 0, CalculateOptimizedProcessOverheadSize(this->heap.GetSize())); }
- void TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages);
+ void TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages);
+ size_t TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages);
+ size_t ProcessOptimizedAllocation(bool *out_any_new, KVirtualAddress block, size_t num_pages, u8 fill_pattern);
constexpr Pool GetPool() const { return this->pool; }
constexpr size_t GetSize() const { return this->heap.GetSize(); }
@@ -161,7 +165,7 @@ namespace ams::kern {
}
}
- Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool optimize, bool random);
+ Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random);
public:
KMemoryManager()
: pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process()
@@ -175,6 +179,7 @@ namespace ams::kern {
NOINLINE KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option);
NOINLINE Result Allocate(KPageGroup *out, size_t num_pages, u32 option);
NOINLINE Result AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
void Open(KVirtualAddress address, size_t num_pages) {
/* Repeatedly open references until we've done so for all pages. */

View file

@@ -140,6 +140,7 @@ namespace ams::kern {
constexpr size_t GetSize() const { return this->heap_size; }
constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; }
constexpr size_t GetPageOffsetToEnd(KVirtualAddress block) const { return (this->GetEndAddress() - block) / PageSize; }
void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size) {
return Initialize(heap_address, heap_size, metadata_address, metadata_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
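
GetPageOffsetToEnd is the clamp the new tracking functions rely on: an allocated block can extend past the heap that one manager covers, so TrackOptimizedAllocation and ProcessOptimizedAllocation handle at most min(num_pages, GetPageOffsetToEnd(block)) pages and return how many they processed, letting the caller advance to the next manager. A tiny worked example of the two offset helpers, with illustrative placeholder values:

#include <cassert>
#include <cstdint>

/* Hypothetical heap: 16 pages of 4 KiB covering [0x80000000, 0x80010000). */
constexpr std::uint64_t PageSize  = 0x1000;
constexpr std::uint64_t HeapStart = 0x80000000;
constexpr std::uint64_t HeapEnd   = HeapStart + 0x10000;

int main() {
    const std::uint64_t block = 0x80004000;
    assert((block - HeapStart) / PageSize == 4); /* GetPageOffset: 4 pages in     */
    assert((HeapEnd - block) / PageSize == 12);  /* GetPageOffsetToEnd: 12 remain */
}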

View file

@@ -180,6 +180,10 @@ namespace ams::kern {
return this->address_space_start <= addr && addr < addr + size && addr + size - 1 <= this->address_space_end - 1;
}
constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
return this->Contains(addr, size) && this->alias_region_start <= addr && addr + size - 1 <= this->alias_region_end - 1;
}
KProcessAddress GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
@@ -336,6 +340,12 @@ namespace ams::kern {
Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state, KProcess *server_process);
Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
Result MapPhysicalMemory(KProcessAddress address, size_t size);
Result UnmapPhysicalMemory(KProcessAddress address, size_t size);
Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
void DumpTable() const {
KScopedLightLock lk(this->general_lock);
this->GetImpl().Dump(GetInteger(this->address_space_start), this->address_space_end - this->address_space_start);
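
Note that Contains and IsInAliasRegion compare inclusive last addresses (addr + size - 1 <= region_end - 1) rather than exclusive ends. A toy demonstration of why, shrunk to an 8-bit address space so the wraparound is visible:

#include <cassert>
#include <cstdint>

int main() {
    /* A region touching the top of a toy 8-bit address space. */
    const std::uint8_t region_start = 0xF0, region_last = 0xFF;
    const std::uint8_t addr = 0xF8, size = 0x08;

    /* An exclusive end wraps to zero: 0xF8 + 0x08 == 0x00 in 8 bits... */
    assert(static_cast<std::uint8_t>(addr + size) == 0x00);

    /* ...but the inclusive last address stays ordered, so a check of the */
    /* IsInAliasRegion form remains correct at the top of the space.      */
    assert(region_start <= addr && static_cast<std::uint8_t>(addr + size - 1) <= region_last);
}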

View file

@@ -954,7 +954,7 @@ namespace ams::kern::arch::arm64 {
}
/* Open references to the L2 table. */
- Kernel::GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize);
+ this->GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize);
/* Replace the L1 entry with one to the new table. */
PteDataSynchronizationBarrier();
@@ -1001,7 +1001,7 @@ namespace ams::kern::arch::arm64 {
}
/* Open references to the L3 table. */
- Kernel::GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize);
+ this->GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize);
/* Replace the L2 entry with one to the new table. */
PteDataSynchronizationBarrier();

View file

@@ -228,6 +228,87 @@ namespace ams::kern {
}
}
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr) {
/* Ensure for auditing that we never end up with an invalid tree. */
KScopedMemoryBlockManagerAuditor auditor(this);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
MESOSPHERE_ASSERT((attr & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)) == 0);
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(test_state, test_perm, test_attr) && !it->HasProperties(state, perm, attr)) {
/* If we need to, create a new block before and insert it. */
if (cur_info.GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = this->memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
/* If we need to, create a new block after and insert it. */
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = this->memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
/* Update block state. */
it->Update(state, perm, attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
/* If we already have the right properties, just advance. */
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
}
it++;
}
/* Find the iterator now that we've updated. */
it = this->FindIterator(address);
if (address != this->start_address) {
it--;
}
/* Coalesce blocks that we can. */
while (true) {
iterator prev = it++;
if (it == this->memory_block_tree.end()) {
break;
}
if (prev->HasSameProperties(*it)) {
KMemoryBlock *block = std::addressof(*it);
const size_t pages = it->GetNumPages();
this->memory_block_tree.erase(it);
allocator->Free(block);
prev->Add(pages);
it = prev;
}
if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
break;
}
}
}
void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, void (KMemoryBlock::*lock_func)(KMemoryPermission new_perm), KMemoryPermission perm) {
/* Ensure for auditing that we never end up with an invalid tree. */
KScopedMemoryBlockManagerAuditor auditor(this);
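
The shape of UpdateIfMatch is: walk the blocks overlapping the range, split at the front and back boundaries where needed, update only the blocks matching the test properties, then coalesce adjacent blocks that ended up identical. A simplified, self-contained sketch of that split/update/coalesce idea on a flat vector (the kernel uses an intrusive tree and slab-allocated blocks, none of which is modeled here):

#include <algorithm>
#include <cstddef>
#include <vector>

struct Blk { std::size_t start, pages; int state; };

void UpdateIfMatch(std::vector<Blk> &blocks, std::size_t start, std::size_t pages, int test_state, int new_state) {
    const std::size_t end = start + pages;
    std::vector<Blk> split;
    for (const Blk &b : blocks) {
        const std::size_t b_end = b.start + b.pages;
        /* Pass through non-matching or non-overlapping blocks unchanged. */
        if (b.state != test_state || b_end <= start || end <= b.start) {
            split.push_back(b);
            continue;
        }
        /* Split off the pieces before and after the target range. */
        if (b.start < start) {
            split.push_back({b.start, start - b.start, b.state});
        }
        const std::size_t mid_start = std::max(b.start, start);
        const std::size_t mid_end = std::min(b_end, end);
        split.push_back({mid_start, mid_end - mid_start, new_state}); /* updated */
        if (end < b_end) {
            split.push_back({end, b_end - end, b.state});
        }
    }
    /* Coalesce neighbors with identical properties, as the final loop above does. */
    std::vector<Blk> merged;
    for (const Blk &b : split) {
        if (!merged.empty() && merged.back().state == b.state && merged.back().start + merged.back().pages == b.start) {
            merged.back().pages += b.pages;
        } else {
            merged.push_back(b);
        }
    }
    blocks = merged;
}

int main() {
    std::vector<Blk> blocks{{0, 16, 0}};  /* one 16-page free block         */
    UpdateIfMatch(blocks, 4, 4, 0, 1);    /* update pages [4, 8) to state 1 */
    /* blocks is now { {0,4,0}, {4,4,1}, {8,8,0} }. */
}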

View file

@@ -143,13 +143,13 @@ namespace ams::kern {
/* Maintain the optimized memory bitmap, if we should. */
if (this->has_optimized_process[pool]) {
- chosen_manager->TrackAllocationForOptimizedProcess(allocated_block, num_pages);
+ chosen_manager->TrackUnoptimizedAllocation(allocated_block, num_pages);
}
return allocated_block;
}
- Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool optimize, bool random) {
+ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random) {
/* Choose a heap based on our page size request. */
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
@@ -183,8 +183,8 @@ namespace ams::kern {
}
/* Maintain the optimized memory bitmap, if we should. */
- if (optimize) {
-     cur_manager->TrackAllocationForOptimizedProcess(allocated_block, pages_per_alloc);
+ if (unoptimized) {
+     cur_manager->TrackUnoptimizedAllocation(allocated_block, pages_per_alloc);
}
num_pages -= pages_per_alloc;
@@ -216,6 +216,91 @@ namespace ams::kern {
return this->AllocatePageGroupImpl(out, num_pages, pool, dir, this->has_optimized_process[pool], true);
}
Result KMemoryManager::AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern) {
MESOSPHERE_ASSERT(out != nullptr);
MESOSPHERE_ASSERT(out->GetNumPages() == 0);
/* Decode the option. */
const auto [pool, dir] = DecodeOption(option);
/* Allocate the memory. */
bool has_optimized, is_optimized;
{
/* Lock the pool that we're allocating from. */
KScopedLightLock lk(this->pool_locks[pool]);
/* Check if we have an optimized process. */
has_optimized = this->has_optimized_process[pool];
is_optimized = this->optimized_process_ids[pool] == process_id;
/* Allocate the page group. */
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false));
}
/* Perform optimized memory tracking, if we should. */
if (has_optimized && is_optimized) {
/* Iterate over the allocated blocks. */
for (const auto &block : *out) {
/* Get the block extents. */
const KVirtualAddress block_address = block.GetAddress();
const size_t block_pages = block.GetNumPages();
/* If it has no pages, we don't need to do anything. */
if (block_pages == 0) {
continue;
}
/* Fill all the pages that we need to fill. */
bool any_new = false;
{
KVirtualAddress cur_address = block_address;
size_t cur_pages = block_pages;
while (cur_pages > 0) {
/* Get the manager for the current address. */
auto &manager = this->GetManager(cur_address);
/* Process part or all of the block. */
const size_t processed_pages = manager.ProcessOptimizedAllocation(std::addressof(any_new), cur_address, cur_pages, fill_pattern);
/* Advance. */
cur_address += processed_pages * PageSize;
cur_pages -= processed_pages;
}
}
/* If there are no new pages, move on to the next block. */
if (!any_new) {
continue;
}
/* Update tracking for the allocation. */
KVirtualAddress cur_address = block_address;
size_t cur_pages = block_pages;
while (cur_pages > 0) {
/* Get the manager for the current address. */
auto &manager = this->GetManager(cur_address);
/* Lock the pool for the manager. */
KScopedLightLock lk(this->pool_locks[manager.GetPool()]);
/* Track some or all of the current pages. */
const size_t processed_pages = manager.TrackOptimizedAllocation(cur_address, cur_pages);
/* Advance. */
cur_address += processed_pages * PageSize;
cur_pages -= processed_pages;
}
}
} else {
/* Set all the allocated memory. */
for (const auto &block : *out) {
std::memset(GetVoidPointer(block.GetAddress()), fill_pattern, block.GetSize());
}
}
return ResultSuccess();
}
size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) {
/* Calculate metadata sizes. */
const size_t ref_count_size = (region->GetSize() / PageSize) * sizeof(u16);
@@ -245,7 +330,7 @@ namespace ams::kern {
return total_metadata_size;
}
- void KMemoryManager::Impl::TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages) {
+ void KMemoryManager::Impl::TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages) {
size_t offset = this->heap.GetPageOffset(block);
const size_t last = offset + num_pages - 1;
u64 *optimize_map = GetPointer<u64>(this->metadata_region);
@@ -255,6 +340,57 @@ namespace ams::kern {
}
}
size_t KMemoryManager::Impl::TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages) {
/* Get the number of tracking pages. */
const size_t cur_pages = std::min(num_pages, this->heap.GetPageOffsetToEnd(block));
/* Get the range we're tracking. */
size_t offset = this->heap.GetPageOffset(block);
const size_t last = offset + cur_pages - 1;
/* Track. */
u64 *optimize_map = GetPointer<u64>(this->metadata_region);
while (offset <= last) {
/* Mark the page as being optimized-allocated. */
optimize_map[offset / BITSIZEOF(u64)] |= (u64(1) << (offset % BITSIZEOF(u64)));
offset++;
}
/* Return the number of pages we tracked. */
return cur_pages;
}
size_t KMemoryManager::Impl::ProcessOptimizedAllocation(bool *out_any_new, KVirtualAddress block, size_t num_pages, u8 fill_pattern) {
/* Get the number of processable pages. */
const size_t cur_pages = std::min(num_pages, this->heap.GetPageOffsetToEnd(block));
/* Clear any new. */
*out_any_new = false;
/* Get the range we're processing. */
size_t offset = this->heap.GetPageOffset(block);
const size_t last = offset + cur_pages - 1;
/* Process. */
u64 *optimize_map = GetPointer<u64>(this->metadata_region);
while (offset <= last) {
/* Check if the page has been optimized-allocated before. */
if ((optimize_map[offset / BITSIZEOF(u64)] & (u64(1) << (offset % BITSIZEOF(u64)))) == 0) {
/* If not, it's new. */
*out_any_new = true;
/* Fill the page. */
std::memset(GetVoidPointer(this->heap.GetAddress() + offset * PageSize), fill_pattern, PageSize);
}
offset++;
}
/* Return the number of pages we processed. */
return cur_pages;
}
size_t KMemoryManager::Impl::CalculateMetadataOverheadSize(size_t region_size) {
const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
const size_t optimize_map_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
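
The optimize map manipulated above is one bit per heap page, packed into u64 words: TrackOptimizedAllocation sets a page's bit, ProcessOptimizedAllocation fills only pages whose bit is still clear, and TrackUnoptimizedAllocation presumably clears bits (its loop body falls outside this hunk). A standalone sketch of the word/bit indexing (offset / BITSIZEOF(u64) selects the word, offset % BITSIZEOF(u64) the bit):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t BitsPerWord = 64; /* BITSIZEOF(u64) in the kernel */

/* Standalone model of the one-bit-per-page optimize map. */
struct OptimizeMap {
    std::vector<std::uint64_t> words;

    explicit OptimizeMap(std::size_t num_pages) : words((num_pages + BitsPerWord - 1) / BitsPerWord) {}

    /* Mirrors TrackOptimizedAllocation: mark pages optimized-allocated. */
    void Set(std::size_t first, std::size_t count) {
        for (std::size_t p = first; p < first + count; ++p) {
            words[p / BitsPerWord] |= (std::uint64_t(1) << (p % BitsPerWord));
        }
    }

    /* Mirrors the test in ProcessOptimizedAllocation: has this page been */
    /* handed to the optimized process before?                            */
    bool Test(std::size_t page) const {
        return (words[page / BitsPerWord] >> (page % BitsPerWord)) & 1;
    }
};

int main() {
    OptimizeMap map(128);
    map.Set(60, 8); /* pages 60-67: spans the u64 word boundary at page 64 */
    assert(map.Test(63) && map.Test(64) && !map.Test(68));
}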

View file

@@ -3119,4 +3119,248 @@ namespace ams::kern {
return ResultSuccess();
}
Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
/* Lock the physical memory lock. */
KScopedLightLock phys_lk(this->map_physical_memory_lock);
/* Calculate the last address for convenience. */
const KProcessAddress last_address = address + size - 1;
/* Define iteration variables. */
KProcessAddress cur_address;
size_t mapped_size;
/* The entire mapping process can be retried. */
while (true) {
/* Check if the memory is already mapped. */
{
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Iterate over the memory. */
cur_address = address;
mapped_size = 0;
auto it = this->memory_block_manager.FindIterator(cur_address);
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != this->memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (info.GetState() != KMemoryState_Free) {
mapped_size += (last_address + 1 - cur_address);
}
break;
}
/* Track the memory if it's mapped. */
if (info.GetState() != KMemoryState_Free) {
mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
}
/* Advance. */
cur_address = info.GetEndAddress();
++it;
}
/* If the size mapped is the size requested, we've nothing to do. */
R_SUCCEED_IF(size == mapped_size);
}
/* Allocate and map the memory. */
{
/* Reserve the memory from the process resource limit. */
KScopedResourceReservation memory_reservation(GetCurrentProcess().GetResourceLimit(), ams::svc::LimitableResource_PhysicalMemoryMax, size - mapped_size);
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
/* Allocate pages for the new memory. */
KPageGroup pg(this->block_info_manager);
R_TRY(Kernel::GetMemoryManager().AllocateForProcess(std::addressof(pg), (size - mapped_size) / PageSize, this->allocate_option, GetCurrentProcess().GetId(), this->heap_fill_value));
/* Open a reference to the pages we allocated, and close our reference when we're done. */
pg.Open();
ON_SCOPE_EXIT { pg.Close(); };
/* Map the memory. */
{
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Verify that nobody has mapped memory since we first checked. */
{
/* Iterate over the memory. */
size_t checked_mapped_size = 0;
cur_address = address;
auto it = this->memory_block_manager.FindIterator(cur_address);
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != this->memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (info.GetState() != KMemoryState_Free) {
checked_mapped_size += (last_address + 1 - cur_address);
}
break;
}
/* Track the memory if it's mapped. */
if (info.GetState() != KMemoryState_Free) {
checked_mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
}
/* Advance. */
cur_address = info.GetEndAddress();
++it;
}
/* If the size now isn't what it was before, somebody mapped or unmapped concurrently. */
/* If this happened, retry. */
if (mapped_size != checked_mapped_size) {
continue;
}
}
/* Create an update allocator. */
KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
R_TRY(allocator.GetResult());
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Reset the current tracking address, and make sure we clean up on failure. */
cur_address = address;
auto unmap_guard = SCOPE_GUARD {
if (cur_address > address) {
const KProcessAddress last_unmap_address = cur_address - 1;
/* Iterate, unmapping the pages. */
cur_address = address;
auto it = this->memory_block_manager.FindIterator(cur_address);
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != this->memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* If the memory state is free, we mapped it and need to unmap it. */
if (info.GetState() == KMemoryState_Free) {
/* Determine the range to unmap. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false };
const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_unmap_address + 1 - cur_address) / PageSize;
/* Unmap. */
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
}
/* Check if we're done. */
if (last_unmap_address <= info.GetLastAddress()) {
break;
}
/* Advance. */
cur_address = info.GetEndAddress();
++it;
}
}
};
/* Iterate over the memory. */
auto pg_it = pg.begin();
KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
size_t pg_pages = pg_it->GetNumPages();
auto it = this->memory_block_manager.FindIterator(cur_address);
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != this->memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* If it's unmapped, we need to map it. */
if (info.GetState() == KMemoryState_Free) {
/* Determine the range to map. */
const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, false };
size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;
/* While we have pages to map, map them. */
while (map_pages > 0) {
/* Check if we're at the end of the physical block. */
if (pg_pages == 0) {
/* Ensure there are more pages to map. */
MESOSPHERE_ASSERT(pg_it != pg.end());
/* Advance our physical block. */
++pg_it;
pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
pg_pages = pg_it->GetNumPages();
}
/* Map whatever we can. */
const size_t cur_pages = std::min(pg_pages, map_pages);
R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_pages, pg_phys_addr, true, map_properties, OperationType_Map, false));
/* Advance. */
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;
pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
}
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
break;
}
/* Advance. */
cur_address = info.GetEndAddress();
++it;
}
/* We succeeded, so commit the memory reservation. */
memory_reservation.Commit();
/* Increase our tracked mapped size. */
this->mapped_physical_memory_size += (size - mapped_size);
/* Update the relevant memory blocks. */
this->memory_block_manager.UpdateIfMatch(std::addressof(allocator), address, size / PageSize,
KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None,
KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None);
/* Cancel our guard. */
unmap_guard.Cancel();
return ResultSuccess();
}
}
}
}
Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
MESOSPHERE_UNIMPLEMENTED();
}
Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
MESOSPHERE_UNIMPLEMENTED();
}
Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
MESOSPHERE_UNIMPLEMENTED();
}
}
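
Structurally, MapPhysicalMemory is an optimistic-retry loop: measure the already-mapped size under the table lock, drop the lock for the slow allocation, retake the lock and re-measure, and if another thread mapped or unmapped in the interim, throw the work away and retry. A generic sketch of that pattern (std::mutex standing in for KScopedLightLock, the measurement reduced to a counter):

#include <cstddef>
#include <mutex>

struct Table {
    std::mutex lock;
    std::size_t mapped = 0;

    std::size_t Measure() const { return mapped; } /* call with lock held */

    bool MapUpTo(std::size_t target) {
        while (true) {
            std::size_t observed;
            {
                std::scoped_lock lk(lock);
                observed = Measure();
                if (observed >= target) {
                    return true; /* everything is already mapped */
                }
            }
            /* Allocate the shortfall without holding the table lock; */
            /* allocation is slow and takes other locks.              */
            const std::size_t allocation = target - observed;
            {
                std::scoped_lock lk(lock);
                if (Measure() != observed) {
                    continue; /* someone raced us; retry from the top */
                }
                mapped += allocation; /* commit under the lock */
                return true;
            }
        }
    }
};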

View file

@ -22,6 +22,8 @@ namespace ams::kern::svc {
namespace {
Result SetHeapSize(uintptr_t *out_address, size_t size) {
MESOSPHERE_LOG("%s: SetHeapSize(%012zx)\n", GetCurrentProcess().GetName(), size);
/* Validate size. */
R_UNLESS(util::IsAligned(size, ams::svc::HeapSizeAlignment), svc::ResultInvalidSize());
R_UNLESS(size < ams::kern::MainMemorySize, svc::ResultInvalidSize());
@@ -46,6 +48,48 @@ namespace ams::kern::svc {
return Kernel::GetUnsafeMemory().SetLimitSize(limit);
}
Result MapPhysicalMemory(uintptr_t address, size_t size) {
/* Validate address / size. */
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
/* Verify that the process has system resource. */
auto &process = GetCurrentProcess();
R_UNLESS(process.GetTotalSystemResourceSize() > 0, svc::ResultInvalidState());
/* Verify that the region is in range. */
auto &page_table = process.GetPageTable();
R_UNLESS(page_table.IsInAliasRegion(address, size), svc::ResultInvalidMemoryRegion());
/* Map the memory. */
R_TRY(page_table.MapPhysicalMemory(address, size));
return ResultSuccess();
}
Result UnmapPhysicalMemory(uintptr_t address, size_t size) {
/* Validate address / size. */
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
/* Verify that the process has system resource. */
auto &process = GetCurrentProcess();
R_UNLESS(process.GetTotalSystemResourceSize() > 0, svc::ResultInvalidState());
/* Verify that the region is in range. */
auto &page_table = process.GetPageTable();
R_UNLESS(page_table.IsInAliasRegion(address, size), svc::ResultInvalidMemoryRegion());
/* Unmap the memory. */
R_TRY(page_table.UnmapPhysicalMemory(address, size));
return ResultSuccess();
}
}
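
From userland, these wrappers are what a call like libnx's svcMapPhysicalMemory(void *address, u64 size) eventually reaches. A hypothetical caller-side sketch of the constraints being validated (the prototype, helper name, and error value are illustrative assumptions, not part of this commit):

#include <cstdint>

/* Assumed libnx-style prototype; not provided by this commit. */
extern "C" std::uint32_t svcMapPhysicalMemory(void *address, std::uint64_t size);

constexpr std::uint64_t PageSize = 0x1000;

std::uint32_t BackAliasRangeWithMemory(void *alias_address, std::uint64_t size) {
    /* The kernel rejects the call unless address and size are page-aligned, */
    /* size is nonzero, the range doesn't wrap, the process has a system     */
    /* resource area, and the range lies within its alias region.            */
    const auto addr = reinterpret_cast<std::uintptr_t>(alias_address);
    if ((addr % PageSize) != 0 || (size % PageSize) != 0 || size == 0 || addr + size <= addr) {
        return 0xBAD; /* placeholder error value */
    }
    return svcMapPhysicalMemory(alias_address, size);
}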
/* ============================= 64 ABI ============================= */
@@ -56,11 +100,11 @@ namespace ams::kern::svc {
}
Result MapPhysicalMemory64(ams::svc::Address address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemory64 was called.");
return MapPhysicalMemory(address, size);
}
Result UnmapPhysicalMemory64(ams::svc::Address address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemory64 was called.");
return UnmapPhysicalMemory(address, size);
}
Result MapPhysicalMemoryUnsafe64(ams::svc::Address address, ams::svc::Size size) {
@@ -83,11 +127,11 @@ namespace ams::kern::svc {
}
Result MapPhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemory64From32 was called.");
return MapPhysicalMemory(address, size);
}
Result UnmapPhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) {
MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemory64From32 was called.");
return UnmapPhysicalMemory(address, size);
}
Result MapPhysicalMemoryUnsafe64From32(ams::svc::Address address, ams::svc::Size size) {