Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-11-10 07:06:34 +00:00)

kern: implement DisableDeviceAddressSpaceMerge

Commit: 6a85f7225d
Parent: f469dfbeb3
11 changed files with 214 additions and 72 deletions
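At a high level, this commit threads one new piece of per-process state through the page-table stack: KProcess::Initialize decodes ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge into an enable_das_merge boolean, forwards it through KProcessPageTable and KPageTable into KPageTableBase, which then uses it to choose which KMemoryBlock merge-tracking updater runs when device mappings are shared or unshared. The sketch below is illustrative only; the flag bit values are stand-ins, not the real ams::svc definitions. It shows the decode-and-invert step performed in the KProcess hunks near the end of this diff (the svc flag disables merging, so the internal boolean is true when the flag is absent).

    #include <cstdint>
    #include <cstdio>

    /* Hypothetical stand-in bit assignments; the real values live in ams::svc. */
    constexpr uint32_t CreateProcessFlag_EnableAslr                     = (1u << 0);
    constexpr uint32_t CreateProcessFlag_DisableDeviceAddressSpaceMerge = (1u << 1);

    int main() {
        const uint32_t flags = CreateProcessFlag_DisableDeviceAddressSpaceMerge;

        /* Mirrors the decode in KProcess::Initialize; note the inversion for the merge flag. */
        const bool enable_aslr      = (flags & CreateProcessFlag_EnableAslr) != 0;
        const bool enable_das_merge = (flags & CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;

        std::printf("enable_aslr=%d enable_das_merge=%d\n", enable_aslr, enable_das_merge);
        return 0;
    }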
@@ -176,7 +176,7 @@ namespace ams::kern::arch::arm64 {
             }
 
             NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
-            NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager);
+            NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager);
             Result Finalize();
         private:
             Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
@@ -30,8 +30,8 @@ namespace ams::kern::arch::arm64 {
                 this->page_table.Activate(id);
             }
 
-            Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
-                return this->page_table.InitializeForProcess(id, as_type, enable_aslr, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
+            Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
+                return this->page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
             }
 
             void Finalize() { this->page_table.Finalize(); }

@@ -152,6 +152,14 @@ namespace ams::kern::arch::arm64 {
                 return this->page_table.UnlockForDeviceAddressSpace(address, size);
             }
 
+            Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
+                return this->page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
+            }
+
+            Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
+                return this->page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
+            }
+
             Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
                 return this->page_table.LockForIpcUserBuffer(out, address, size);
             }
@@ -71,6 +71,10 @@ namespace ams::kern::board::nintendo::nx {
 
             Result Map(size_t *out_mapped_size, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings);
             Result Unmap(const KPageGroup &pg, KDeviceVirtualAddress device_address);
+
+            void Unmap(KDeviceVirtualAddress device_address, size_t size) {
+                return this->UnmapImpl(device_address, size, false);
+            }
         private:
             Result MapDevicePage(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm);
 
@@ -76,6 +76,7 @@ namespace ams::kern {
     class KMemoryBlockManager {
         public:
             using MemoryBlockTree = util::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
+            using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left, bool right);
             using iterator = MemoryBlockTree::iterator;
             using const_iterator = MemoryBlockTree::const_iterator;
         private:

@@ -97,7 +98,7 @@ namespace ams::kern {
             KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const;
 
             void Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr);
-            void UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, void (KMemoryBlock::*lock_func)(KMemoryPermission new_perm, bool left, bool right), KMemoryPermission perm);
+            void UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm);
 
             void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr);
 
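The new MemoryBlockLockFunction alias lets UpdateLock accept any of several KMemoryBlock updaters as a pointer to member function and invoke it per block. The sketch below is a standalone illustration of that dispatch pattern, not kernel code: Block, ShareToDevice, and UnshareToDevice are made-up stand-ins, and treating the (left, right) arguments as range-edge markers is an assumption made for the example.

    #include <cstdio>

    class Block {
        public:
            void ShareToDevice(int perm, bool left, bool right)   { std::printf("share   perm=%d left=%d right=%d\n", perm, left, right); }
            void UnshareToDevice(int perm, bool left, bool right) { std::printf("unshare perm=%d left=%d right=%d\n", perm, left, right); }
    };

    /* Analogous to: using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission, bool, bool); */
    using BlockLockFunction = void (Block::*)(int perm, bool left, bool right);

    void UpdateLock(Block *blocks, int count, BlockLockFunction lock_func, int perm) {
        for (int i = 0; i < count; ++i) {
            /* Invoke the selected updater through the member pointer; here left/right
               mark the first/last block of the range (an assumption for illustration). */
            (blocks[i].*lock_func)(perm, i == 0, i == count - 1);
        }
    }

    int main() {
        Block blocks[3];
        UpdateLock(blocks, 3, &Block::ShareToDevice, 0);
        UpdateLock(blocks, 3, &Block::UnshareToDevice, 0);
        return 0;
    }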
@@ -158,6 +158,7 @@ namespace ams::kern {
             u32 address_space_width;
             bool is_kernel;
             bool enable_aslr;
+            bool enable_device_address_space_merge;
             KMemoryBlockSlabManager *memory_block_slab_manager;
             KBlockInfoManager *block_info_manager;
             const KMemoryRegion *cached_physical_linear_region;

@@ -172,15 +173,15 @@ namespace ams::kern {
                 alias_region_start(), alias_region_end(), stack_region_start(), stack_region_end(), kernel_map_region_start(),
                 kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(),
                 max_heap_size(), mapped_physical_memory_size(), mapped_unsafe_physical_memory(), general_lock(), map_physical_memory_lock(),
-                impl(), memory_block_manager(), allocate_option(), address_space_width(), is_kernel(), enable_aslr(), memory_block_slab_manager(),
-                block_info_manager(), cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
+                impl(), memory_block_manager(), allocate_option(), address_space_width(), is_kernel(), enable_aslr(), enable_device_address_space_merge(),
+                memory_block_slab_manager(), block_info_manager(), cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
                 heap_fill_value(), ipc_fill_value(), stack_fill_value()
             {
                 /* ... */
             }
 
             NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);
-            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager);
+            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_device_address_space_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager);
 
             void Finalize();
 

@@ -353,6 +354,10 @@ namespace ams::kern {
 
             Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned);
             Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
+
+            Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size);
+            Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size);
+
             Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size);
             Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
 
@@ -181,7 +181,7 @@ namespace ams::kern::arch::arm64 {
        return ResultSuccess();
    }

-   Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
+   Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
        /* The input ID isn't actually used. */
        MESOSPHERE_UNUSED(id);


@@ -202,7 +202,7 @@ namespace ams::kern::arch::arm64 {
        const size_t as_width = GetAddressSpaceWidth(as_type);
        const KProcessAddress as_start = 0;
        const KProcessAddress as_end = (1ul << as_width);
-       R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, mem_block_slab_manager, block_info_manager));
+       R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, mem_block_slab_manager, block_info_manager));

        /* We succeeded! */
        table_guard.Cancel();

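For context, the surrounding hunk derives the process address-space extents from the address-space type: as_start is 0 and as_end is 1 << width, an exclusive upper bound. A tiny standalone illustration (the widths below are made-up examples, not the values GetAddressSpaceWidth returns):

    #include <cstdio>

    int main() {
        for (unsigned width : {32u, 36u, 39u}) {                 /* example widths only */
            const unsigned long long as_start = 0;
            const unsigned long long as_end   = 1ull << width;   /* exclusive end: [0, 1 << width) */
            std::printf("width=%u -> [0x%llx, 0x%llx)\n", width, as_start, as_end);
        }
        return 0;
    }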
@@ -79,25 +79,30 @@ namespace ams::kern {
         ON_SCOPE_EXIT { pg.Close(); };
 
         /* Ensure that if we fail, we don't keep unmapped pages locked. */
-        ON_SCOPE_EXIT {
-            if (*out_mapped_size != size) {
-                page_table->UnlockForDeviceAddressSpace(process_address + *out_mapped_size, size - *out_mapped_size);
-            }
-        };
+        auto unlock_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size)); };
 
         /* Map the pages. */
         {
             /* Clear the output size to zero on failure. */
-            auto map_guard = SCOPE_GUARD { *out_mapped_size = 0; };
+            auto mapped_size_guard = SCOPE_GUARD { *out_mapped_size = 0; };
 
             /* Perform the mapping. */
             R_TRY(this->table.Map(out_mapped_size, pg, device_address, device_perm, refresh_mappings));
 
-            /* We succeeded, so cancel our guard. */
+            /* Ensure that we unmap the pages if we fail to update the protections. */
+            /* NOTE: Nintendo does not check the result of this unmap call. */
+            auto map_guard = SCOPE_GUARD { this->table.Unmap(device_address, *out_mapped_size); };
+
+            /* Update the protections in accordance with how much we mapped. */
+            R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size, *out_mapped_size));
+
+            /* We succeeded, so cancel our guards. */
             map_guard.Cancel();
+            mapped_size_guard.Cancel();
         }
 
+        /* We succeeded, so we don't need to unlock our pages. */
+        unlock_guard.Cancel();
         return ResultSuccess();
     }
 

@@ -110,19 +115,23 @@ namespace ams::kern {
 
         /* Make and open a page group for the unmapped region. */
         KPageGroup pg(page_table->GetBlockInfoManager());
-        R_TRY(page_table->MakeAndOpenPageGroupContiguous(std::addressof(pg), process_address, size / PageSize,
-                                                         KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
-                                                         KMemoryPermission_None, KMemoryPermission_None,
-                                                         KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+        R_TRY(page_table->MakePageGroupForUnmapDeviceAddressSpace(std::addressof(pg), process_address, size));
 
         /* Ensure the page group is closed on scope exit. */
         ON_SCOPE_EXIT { pg.Close(); };
 
+        /* If we fail to unmap, we want to do a partial unlock. */
+        {
+            auto unlock_guard = SCOPE_GUARD { page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size, size); };
+
             /* Unmap. */
             R_TRY(this->table.Unmap(pg, device_address));
 
+            unlock_guard.Cancel();
+        }
+
         /* Unlock the pages. */
-        R_TRY(page_table->UnlockForDeviceAddressSpace(process_address, size));
+        MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size));
 
         return ResultSuccess();
     }
 
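The Map/Unmap changes above lean on cancelable scope guards: cleanup (unlocking or unmapping) runs automatically on any early error return and is cancelled on the success path. Below is a minimal standalone sketch of that idiom; it assumes nothing about Atmosphere's actual SCOPE_GUARD/ON_SCOPE_EXIT macros, and the ScopeGuard class and MapSomething function are illustrative stand-ins.

    #include <cstdio>
    #include <utility>

    template<typename F>
    class ScopeGuard {
        public:
            explicit ScopeGuard(F f) : f_(std::move(f)), active_(true) {}
            ~ScopeGuard() { if (active_) { f_(); } }
            void Cancel() { active_ = false; }

            ScopeGuard(const ScopeGuard &) = delete;
            ScopeGuard &operator=(const ScopeGuard &) = delete;
        private:
            F f_;
            bool active_;
    };

    bool MapSomething(int *out_mapped_size, bool fail) {
        *out_mapped_size = 42;  /* pretend something was mapped */

        /* Clear the output on failure, like mapped_size_guard in the diff above. */
        ScopeGuard guard([&] { *out_mapped_size = 0; });

        if (fail) {
            return false;  /* guard fires here and zeroes *out_mapped_size */
        }

        guard.Cancel();    /* success path: keep the result */
        return true;
    }

    int main() {
        int mapped = 0;
        MapSomething(&mapped, true);
        std::printf("failure path: mapped=%d\n", mapped);  /* 0 */
        MapSomething(&mapped, false);
        std::printf("success path: mapped=%d\n", mapped);  /* 42 */
        return 0;
    }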
@@ -140,6 +140,9 @@ namespace ams::kern {
                out->flags |= ams::svc::CreateProcessFlag_AddressSpace32Bit;
            }

+           /* All initial processes should disable device address space merge. */
+           out->flags |= ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge;
+
            return ResultSuccess();
        }

@@ -287,7 +287,7 @@ namespace ams::kern {
        this->CoalesceForUpdate(allocator, address, num_pages);
    }

-   void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, void (KMemoryBlock::*lock_func)(KMemoryPermission new_perm, bool left, bool right), KMemoryPermission perm) {
+   void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm) {
        /* Ensure for auditing that we never end up with an invalid tree. */
        KScopedMemoryBlockManagerAuditor auditor(this);
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));

@@ -25,6 +25,7 @@ namespace ams::kern {
        this->address_space_end = KProcessAddress(GetInteger(end));
        this->is_kernel = true;
        this->enable_aslr = true;
+       this->enable_device_address_space_merge = false;

        this->heap_region_start = 0;
        this->heap_region_end = 0;

@@ -64,7 +65,7 @@ namespace ams::kern {
        return ResultSuccess();
    }

-   Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager) {
+   Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager) {
        /* Validate the region. */
        MESOSPHERE_ABORT_UNLESS(start <= code_address);
        MESOSPHERE_ABORT_UNLESS(code_address < code_address + code_size);

@@ -124,6 +125,7 @@ namespace ams::kern {
 
        /* Set other basic fields. */
        this->enable_aslr = enable_aslr;
+       this->enable_device_address_space_merge = enable_das_merge;
        this->address_space_start = start;
        this->address_space_end = end;
        this->is_kernel = false;

@@ -2356,6 +2358,114 @@ namespace ams::kern {
        return ResultSuccess();
    }

+   Result KPageTableBase::MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
+       /* Lightly validate the range before doing anything else. */
+       const size_t num_pages = size / PageSize;
+       R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
+
+       /* Lock the table. */
+       KScopedLightLock lk(this->general_lock);
+
+       /* Check the memory state. */
+       size_t num_allocator_blocks;
+       R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_allocator_blocks),
+                                              address, size,
+                                              KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap, KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap,
+                                              KMemoryPermission_None, KMemoryPermission_None,
+                                              KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+
+       /* Create an update allocator. */
+       KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+       R_TRY(allocator.Initialize(num_allocator_blocks));
+
+       /* Make the page group. */
+       R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+       /* Update the memory blocks. */
+       const KMemoryBlockManager::MemoryBlockLockFunction lock_func = this->enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+       this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None);
+
+       /* Open a reference to the pages in the page group. */
+       out->Open();
+
+       return ResultSuccess();
+   }
+
+   Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
+       /* Lightly validate the range before doing anything else. */
+       const size_t num_pages = size / PageSize;
+       R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
+
+       /* Lock the table. */
+       KScopedLightLock lk(this->general_lock);
+
+       /* Determine useful extents. */
+       const KProcessAddress mapped_end_address = address + mapped_size;
+       const size_t unmapped_size = size - mapped_size;
+
+       /* Check memory state. */
+       size_t allocator_num_blocks = 0, unmapped_allocator_num_blocks = 0;
+       if (unmapped_size) {
+           if (this->enable_device_address_space_merge) {
+               R_TRY(this->CheckMemoryState(std::addressof(allocator_num_blocks),
+                                            address, size,
+                                            KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
+                                            KMemoryPermission_None, KMemoryPermission_None,
+                                            KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+           }
+           R_TRY(this->CheckMemoryState(std::addressof(unmapped_allocator_num_blocks),
+                                        mapped_end_address, unmapped_size,
+                                        KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
+                                        KMemoryPermission_None, KMemoryPermission_None,
+                                        KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+       } else {
+           R_TRY(this->CheckMemoryState(std::addressof(allocator_num_blocks),
+                                        address, size,
+                                        KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
+                                        KMemoryPermission_None, KMemoryPermission_None,
+                                        KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+       }
+
+       /* Create an update allocator for the region. */
+       KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+       R_TRY(allocator.Initialize(allocator_num_blocks));
+
+       /* Create an update allocator for the unmapped region. */
+       KMemoryBlockManagerUpdateAllocator unmapped_allocator(this->memory_block_slab_manager);
+       R_TRY(unmapped_allocator.Initialize(unmapped_allocator_num_blocks));
+
+       /* Determine parameters for the update lock call. */
+       KMemoryBlockManagerUpdateAllocator *lock_allocator;
+       KProcessAddress lock_address;
+       size_t lock_num_pages;
+       KMemoryBlockManager::MemoryBlockLockFunction lock_func;
+       if (unmapped_size) {
+           /* If device address space merge is enabled, update tracking appropriately. */
+           if (this->enable_device_address_space_merge) {
+               this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareLeft, KMemoryPermission_None);
+           }
+
+           lock_allocator = std::addressof(unmapped_allocator);
+           lock_address = mapped_end_address;
+           lock_num_pages = unmapped_size / PageSize;
+           lock_func = &KMemoryBlock::UnshareToDeviceRight;
+       } else {
+           lock_allocator = std::addressof(allocator);
+           lock_address = address;
+           lock_num_pages = num_pages;
+           if (this->enable_device_address_space_merge) {
+               lock_func = &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare;
+           } else {
+               lock_func = &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight;
+           }
+       }
+
+       /* Update the memory blocks. */
+       this->memory_block_manager.UpdateLock(lock_allocator, lock_address, lock_num_pages, lock_func, KMemoryPermission_None);
+
+       return ResultSuccess();
+   }
+
    Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
        return this->LockMemoryAndOpen(nullptr, out, address, size,
                                       KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer,

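The new UnlockForDeviceAddressSpacePartialMap above splits the locked range into a mapped head and an unmapped tail before deciding which blocks to update and with which lock function. A small standalone illustration of that extent arithmetic (plain integers and an assumed page size, not Atmosphere's types):

    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t PageSize = 0x1000;  /* assumption for the example */

    int main() {
        const std::size_t address     = 0x10000;       /* hypothetical process address */
        const std::size_t size        = 8 * PageSize;  /* size of the originally locked range */
        const std::size_t mapped_size = 5 * PageSize;  /* only part of it actually got mapped */

        const std::size_t mapped_end_address = address + mapped_size;
        const std::size_t unmapped_size      = size - mapped_size;

        if (unmapped_size != 0) {
            /* Only the unmapped tail [mapped_end_address, address + size) has its
               device-share state rolled back; the mapped head keeps its shared state. */
            std::printf("roll back tail at 0x%zx, %zu pages\n", mapped_end_address, unmapped_size / PageSize);
        } else {
            /* Everything mapped: update the whole range instead. */
            std::printf("update whole range, %zu pages\n", size / PageSize);
        }
        return 0;
    }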
@@ -238,12 +238,13 @@ namespace ams::kern {
        /* This goes completely unused, but even so... */
        {
            const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
-           const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr);
-           const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
+           const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
+           const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
+           const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication) != 0;
            auto *mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
            auto *block_info_manager = std::addressof(Kernel::GetBlockInfoManager());
            auto *pt_manager = std::addressof(Kernel::GetPageTableManager());
-           R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager));
+           R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager));
        }
        auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); };


@@ -345,8 +346,9 @@ namespace ams::kern {
        /* This goes completely unused, but even so... */
        {
            const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
-           const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr);
-           R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager));
+           const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
+           const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
+           R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager));
        }
        auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); };
