kern: SvcMapProcessCodeMemory, SvcUnmapProcessCodeMemory

Michael Scire 2020-07-23 17:22:27 -07:00 committed by SciresM
parent 240e6227e9
commit 11d5353827
5 changed files with 267 additions and 4 deletions


@@ -76,6 +76,14 @@ namespace ams::kern::arch::arm64 {
return this->page_table.UnmapMemory(dst_address, src_address, size);
}
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
return this->page_table.MapCodeMemory(dst_address, src_address, size);
}
Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
return this->page_table.UnmapCodeMemory(dst_address, src_address, size);
}
Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
return this->page_table.MapIo(phys_addr, size, perm);
}


@@ -285,6 +285,8 @@ namespace ams::kern {
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { return this->QueryMappingImpl(out, address, size, KMemoryState_Io); }
Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);


@@ -785,6 +785,193 @@ namespace ams::kern {
return ResultSuccess();
}
Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
/* Validate the mapping request. */
R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion());
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Verify that the source memory is normal heap. */
KMemoryState src_state;
KMemoryPermission src_perm;
R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr, src_address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));
/* Verify that the destination memory is unmapped. */
R_TRY(this->CheckMemoryState(dst_address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator for the source. */
KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager);
R_TRY(src_allocator.GetResult());
/* Create an update allocator for the destination. */
KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager);
R_TRY(dst_allocator.GetResult());
/* Map the code memory. */
{
/* Determine the number of pages being operated on. */
const size_t num_pages = size / PageSize;
/* Create page groups for the memory being unmapped. */
KPageGroup pg(this->block_info_manager);
/* Create the page group representing the source. */
R_TRY(this->MakePageGroup(pg, src_address, num_pages));
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Reprotect the source as kernel-read/not mapped. */
const KMemoryPermission new_perm = static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped);
const KPageProperties src_properties = { new_perm, false, false, false };
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* Ensure that we unprotect the source pages on failure. */
auto unprot_guard = SCOPE_GUARD {
const KPageProperties unprotect_properties = { src_perm, false, false, false };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, unprotect_properties, OperationType_ChangePermissions, true));
};
/* Map the alias pages. */
const KPageProperties dst_properties = { new_perm, false, false, false };
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
/* We successfully mapped the alias pages, so we don't need to unprotect the src pages on failure. */
unprot_guard.Cancel();
/* Apply the memory block updates. */
this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_perm, static_cast<KMemoryAttribute>(KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked));
this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_AliasCode, new_perm, KMemoryAttribute_None);
}
return ResultSuccess();
}
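
To make the rollback idiom above easier to follow (reprotect the source, attempt the alias mapping, undo the reprotection only on failure), here is a minimal sketch of a cancellable scope guard. It is a simplified stand-in for the SCOPE_GUARD utility used in this commit, and the helper names in the usage comment are hypothetical.

#include <utility>

template<typename F>
class ScopeGuard {
    private:
        F f;
        bool active;
    public:
        explicit ScopeGuard(F f) : f(std::move(f)), active(true) { /* Arm the cleanup action. */ }
        ~ScopeGuard() { if (active) { f(); } }
        void Cancel() { active = false; /* Commit: skip the cleanup. */ }
        ScopeGuard(const ScopeGuard &) = delete;
        ScopeGuard &operator=(const ScopeGuard &) = delete;
};

/* Usage mirroring MapCodeMemory (ReprotectSource/UnprotectSource/MapAlias are hypothetical): */
/*     ReprotectSource();                                                                     */
/*     ScopeGuard unprot_guard([] { UnprotectSource(); });                                    */
/*     if (MapAlias()) { unprot_guard.Cancel(); }                                             */
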
Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
/* Validate the mapping request. */
R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion());
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Verify that the source memory is locked normal heap. */
R_TRY(this->CheckMemoryState(src_address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, static_cast<KMemoryAttribute>(KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked)));
/* Verify the first page of the destination memory is aliasable code, and get its state. */
KMemoryState dst_state;
R_TRY(this->CheckMemoryState(std::addressof(dst_state), nullptr, nullptr, dst_address, PageSize, KMemoryState_FlagCanCodeAlias, KMemoryState_FlagCanCodeAlias, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Verify that the destination memory is contiguous with the same state as the first page. */
R_TRY(this->CheckMemoryStateContiguous(dst_address, size, KMemoryState_All, dst_state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Unmap. */
{
/* Determine the number of pages being operated on. */
const size_t num_pages = size / PageSize;
/* Create page groups for the memory being unmapped. */
KPageGroup pg(this->block_info_manager);
/* Create the page group representing the destination. */
R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
/* Verify that the page group contains the same pages as the source. */
R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), svc::ResultInvalidMemoryRegion());
/* Create an update allocator for the source. */
KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager);
R_TRY(src_allocator.GetResult());
/* Create an update allocator for the destination. */
KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager);
R_TRY(dst_allocator.GetResult());
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Unmap the aliased copy of the pages. */
const KPageProperties dst_unmap_properties = { KMemoryPermission_None, false, false, false };
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));
/* Ensure that we re-map the aliased pages on failure. */
auto remap_guard = SCOPE_GUARD {
/* Cache the last address for convenience. */
const auto last_address = dst_address + size - 1;
/* Iterate over the memory we unmapped. */
auto it = this->memory_block_manager.FindIterator(dst_address);
auto pg_it = pg.begin();
KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
size_t pg_size = pg_it->GetNumPages() * PageSize;
while (true) {
/* Get the memory info for the pages we unmapped, convert to property. */
const KMemoryInfo info = it->GetMemoryInfo();
const KPageProperties prev_properties = { info.GetPermission(), false, false, false };
/* Determine the range to map. */
KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(dst_address));
size_t map_size = std::min(GetInteger(dst_address + size), info.GetEndAddress()) - GetInteger(map_address);
MESOSPHERE_ABORT_UNLESS(map_size != 0);
/* While we have pages to map, map them. */
while (map_size > 0) {
/* Check if we're at the end of the physical block. */
if (pg_size == 0) {
/* Ensure there are more pages to map. */
MESOSPHERE_ABORT_UNLESS(pg_it != pg.end());
/* Advance our physical block. */
++pg_it;
pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
pg_size = pg_it->GetNumPages() * PageSize;
}
/* Map whatever we can. */
const size_t cur_size = std::min(pg_size, map_size);
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), map_address, cur_size / PageSize, pg_phys_addr, true, prev_properties, OperationType_Map, true));
/* Advance. */
map_address += cur_size;
map_size -= cur_size;
pg_phys_addr += cur_size;
pg_size -= cur_size;
}
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
/* Validate that we must have re-mapped exactly what we unmapped. */
MESOSPHERE_ABORT_UNLESS((++pg_it) == pg.end());
break;
}
/* Advance. */
++it;
}
};
/* Try to set the permissions for the source pages back to what they should be. */
const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, false };
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* We successfully changed the permissions for the source pages, so we don't need to re-map the dst pages on failure. */
remap_guard.Cancel();
/* Apply the memory block updates. */
this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None);
this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None);
}
/* If the destination state was alias code, invalidate the entire instruction cache. */
if (dst_state == KMemoryState_AliasCode) {
cpu::InvalidateEntireInstructionCache();
}
return ResultSuccess();
}
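
The remap_guard above replays the original mapping by walking two sequences in lockstep: the virtual ranges recorded by the memory block manager and the physical extents held in the page group. A hedged, standalone sketch of that traversal pattern follows; VirtualRange, PhysicalExtent, and map_pages are hypothetical stand-ins, not kernel types.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct VirtualRange   { uintptr_t address; size_t size; };
struct PhysicalExtent { uintptr_t address; size_t size; };

/* map_pages stands in for KPageTableBase::Operate with OperationType_Map. */
void ReplayMappings(const std::vector<VirtualRange> &virt,
                    const std::vector<PhysicalExtent> &phys,
                    void (*map_pages)(uintptr_t va, uintptr_t pa, size_t size)) {
    size_t    pi      = 0;
    uintptr_t pg_addr = phys.empty() ? 0 : phys[0].address;
    size_t    pg_size = phys.empty() ? 0 : phys[0].size;
    for (const auto &vr : virt) {
        uintptr_t map_address = vr.address;
        size_t    map_size    = vr.size;
        while (map_size > 0) {
            /* Advance to the next physical extent when the current one is exhausted. */
            if (pg_size == 0) {
                assert(pi + 1 < phys.size());
                ++pi;
                pg_addr = phys[pi].address;
                pg_size = phys[pi].size;
            }
            /* Map as much as both the current virtual range and physical extent allow. */
            const size_t cur_size = std::min(pg_size, map_size);
            map_pages(map_address, pg_addr, cur_size);
            map_address += cur_size; map_size -= cur_size;
            pg_addr     += cur_size; pg_size  -= cur_size;
        }
    }
}
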
KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
KProcessAddress address = Null<KProcessAddress>;


@@ -107,6 +107,18 @@ namespace ams::kern::svc {
}
}
break;
case ams::svc::InfoType_DebuggerAttached:
{
/* Verify the input handle is invalid. */
R_UNLESS(handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle());
/* Verify the sub-type is valid. */
R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination());
/* Get whether debugger is attached. */
*out = GetCurrentProcess().GetDebugObject() != nullptr;
}
break;
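
As a purely illustrative userland counterpart, and assuming libnx-style bindings (svcGetInfo, InfoType_DebuggerAttached, INVALID_HANDLE; verify the exact spellings against your SDK), the new info type could be queried roughly as follows.

#include <switch.h>

bool IsDebuggerAttached(void) {
    u64 attached = 0;
    /* Per the kernel checks above, the handle must be the invalid handle and the sub-type 0. */
    if (R_FAILED(svcGetInfo(&attached, InfoType_DebuggerAttached, INVALID_HANDLE, 0))) {
        return false;
    }
    return attached != 0;
}
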
case ams::svc::InfoType_ResourceLimit:
{
/* Verify the input handle is invalid. */


@@ -141,6 +141,60 @@ namespace ams::kern::svc {
return ResultSuccess();
}
Result MapProcessCodeMemory(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
/* Validate the address/size. */
R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS(src_address == static_cast<uintptr_t>(src_address), svc::ResultInvalidCurrentMemory());
R_UNLESS(dst_address == static_cast<uintptr_t>(dst_address), svc::ResultInvalidCurrentMemory());
R_UNLESS(size == static_cast<size_t>(size), svc::ResultInvalidCurrentMemory());
/* Get the process from its handle. */
KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Validate that the mapping is in range. */
auto &page_table = process->GetPageTable();
R_UNLESS(page_table.Contains(src_address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidCurrentMemory());
/* Map the memory. */
R_TRY(page_table.MapCodeMemory(dst_address, src_address, size));
return ResultSuccess();
}
Result UnmapProcessCodeMemory(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
/* Validate the address/size. */
R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS(src_address == static_cast<uintptr_t>(src_address), svc::ResultInvalidCurrentMemory());
R_UNLESS(dst_address == static_cast<uintptr_t>(dst_address), svc::ResultInvalidCurrentMemory());
R_UNLESS(size == static_cast<size_t>(size), svc::ResultInvalidCurrentMemory());
/* Get the process from its handle. */
KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Validate that the mapping is in range. */
auto &page_table = process->GetPageTable();
R_UNLESS(page_table.Contains(src_address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidCurrentMemory());
/* Unmap the memory. */
R_TRY(page_table.UnmapCodeMemory(dst_address, src_address, size));
return ResultSuccess();
}
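
The validation block in both wrappers combines three checks: page alignment, no wraparound of address + size in 64-bit arithmetic, and no truncation when the 64-bit SVC arguments are narrowed to the kernel's native uintptr_t/size_t (relevant on 32-bit targets). A standalone sketch of the same pattern, with a hypothetical helper name:

#include <cstddef>
#include <cstdint>

bool IsValidUserRange(uint64_t address, uint64_t size, size_t page_size) {
    /* Alignment and non-zero size. */
    if (address % page_size != 0 || size % page_size != 0 || size == 0) {
        return false;
    }
    /* Unsigned wraparound: if address + size overflows, it compares <= address. */
    if (!(address < address + size)) {
        return false;
    }
    /* Width: the values must survive narrowing to the kernel's native types. */
    if (address != static_cast<uintptr_t>(address) || size != static_cast<size_t>(size)) {
        return false;
    }
    return true;
}
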
}
/* ============================= 64 ABI ============================= */
@@ -158,11 +212,11 @@ namespace ams::kern::svc {
}
Result MapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
-MESOSPHERE_PANIC("Stubbed SvcMapProcessCodeMemory64 was called.");
+return MapProcessCodeMemory(process_handle, dst_address, src_address, size);
}
Result UnmapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
-MESOSPHERE_PANIC("Stubbed SvcUnmapProcessCodeMemory64 was called.");
+return UnmapProcessCodeMemory(process_handle, dst_address, src_address, size);
}
/* ============================= 64From32 ABI ============================= */
@@ -180,11 +234,11 @@ namespace ams::kern::svc {
}
Result MapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
-MESOSPHERE_PANIC("Stubbed SvcMapProcessCodeMemory64From32 was called.");
+return MapProcessCodeMemory(process_handle, dst_address, src_address, size);
}
Result UnmapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
-MESOSPHERE_PANIC("Stubbed SvcUnmapProcessCodeMemory64From32 was called.");
+return UnmapProcessCodeMemory(process_handle, dst_address, src_address, size);
}
}
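
Finally, a hedged sketch of how a privileged caller (for example, a loader-like sysmodule holding a process handle) might drive these SVCs, assuming libnx-style bindings: svcMapProcessCodeMemory, svcUnmapProcessCodeMemory, svcSetProcessMemoryPermission, and Perm_Rx are assumed spellings, and both src and dst are addresses in the target process's address space, per the Contains/CanContain checks in the wrappers.

#include <switch.h>

/* dst and src must be page-aligned addresses in the target process; size must be a
   non-zero multiple of the page size (see the validation in the wrappers above). */
Result MapAndMakeExecutable(Handle proc, u64 dst, u64 src, u64 size) {
    /* Alias the target's normal heap pages at src into its AliasCode region at dst. */
    Result rc = svcMapProcessCodeMemory(proc, dst, src, size);
    if (R_FAILED(rc)) {
        return rc;
    }
    /* Make the aliased pages read-execute in the target process. */
    rc = svcSetProcessMemoryPermission(proc, dst, size, Perm_Rx);
    if (R_FAILED(rc)) {
        /* Roll back the alias mapping if the permission change fails. */
        svcUnmapProcessCodeMemory(proc, dst, src, size);
    }
    return rc;
}
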