/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>
#include <mesosphere/kern_select_page_table.hpp>

namespace ams::kern {

    namespace {
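
        /* Helper which acquires a pair of light locks in increasing address order (locking only */
        /* once if both references alias the same lock). The fixed ordering gives all holders a */
        /* single global lock order, so two holders that each need both locks cannot deadlock. */
        /* TryUnlockHalf() allows releasing just one half of the pair early. */
        /* Illustrative use (hypothetical tables): KScopedLightLockPair lk(src.m_general_lock, dst.m_general_lock); */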
        class KScopedLightLockPair {
            NON_COPYABLE(KScopedLightLockPair);
            NON_MOVEABLE(KScopedLightLockPair);
            private:
                KLightLock *m_lower;
                KLightLock *m_upper;
            public:
                ALWAYS_INLINE KScopedLightLockPair(KLightLock &lhs, KLightLock &rhs) {
                    /* Ensure our locks are in a consistent order. */
                    if (std::addressof(lhs) <= std::addressof(rhs)) {
                        m_lower = std::addressof(lhs);
                        m_upper = std::addressof(rhs);
                    } else {
                        m_lower = std::addressof(rhs);
                        m_upper = std::addressof(lhs);
                    }

                    /* Acquire both locks. */
                    m_lower->Lock();
                    if (m_lower != m_upper) {
                        m_upper->Lock();
                    }
                }

                ~KScopedLightLockPair() {
                    /* Unlock the upper lock. */
                    if (m_upper != nullptr && m_upper != m_lower) {
                        m_upper->Unlock();
                    }

                    /* Unlock the lower lock. */
                    if (m_lower != nullptr) {
                        m_lower->Unlock();
                    }
                }
            public:
                /* Utility. */
                ALWAYS_INLINE void TryUnlockHalf(KLightLock &lock) {
                    /* Only allow unlocking if the lock is half the pair. */
                    if (m_lower != m_upper) {
                        /* We want to be sure the lock is one we own. */
                        if (m_lower == std::addressof(lock)) {
                            lock.Unlock();
                            m_lower = nullptr;
                        } else if (m_upper == std::addressof(lock)) {
                            lock.Unlock();
                            m_upper = nullptr;
                        }
                    }
                }
        };

    }

    void KPageTableBase::MemoryRange::Open() {
        /* If the range contains heap pages, open them. */
        if (this->IsHeap()) {
            Kernel::GetMemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize);
        }
    }

    void KPageTableBase::MemoryRange::Close() {
        /* If the range contains heap pages, close them. */
        if (this->IsHeap()) {
            Kernel::GetMemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize);
        }
    }

    Result KPageTableBase::InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end) {
        /* Initialize our members. */
        m_address_space_width = (is_64_bit) ? BITSIZEOF(u64) : BITSIZEOF(u32);
        m_address_space_start = KProcessAddress(GetInteger(start));
        m_address_space_end = KProcessAddress(GetInteger(end));
        m_is_kernel = true;
        m_enable_aslr = true;
        m_enable_device_address_space_merge = false;

        m_heap_region_start = 0;
        m_heap_region_end = 0;
        m_current_heap_end = 0;
        m_alias_region_start = 0;
        m_alias_region_end = 0;
        m_stack_region_start = 0;
        m_stack_region_end = 0;
        m_kernel_map_region_start = 0;
        m_kernel_map_region_end = 0;
        m_alias_code_region_start = 0;
        m_alias_code_region_end = 0;
        m_code_region_start = 0;
        m_code_region_end = 0;
        m_max_heap_size = 0;
        m_mapped_physical_memory_size = 0;
        m_mapped_unsafe_physical_memory = 0;
        m_mapped_insecure_memory = 0;
        m_mapped_ipc_server_memory = 0;

        m_memory_block_slab_manager = Kernel::GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();
        m_block_info_manager = Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer();
        m_resource_limit = std::addressof(Kernel::GetSystemResourceLimit());

        m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
        m_heap_fill_value = MemoryFillValue_Zero;
        m_ipc_fill_value = MemoryFillValue_Zero;
        m_stack_fill_value = MemoryFillValue_Zero;

        m_cached_physical_linear_region = nullptr;
        m_cached_physical_heap_region = nullptr;

        /* Initialize our implementation. */
        m_impl.InitializeForKernel(table, start, end);

        /* Initialize our memory block manager. */
        R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
    }

    Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
        /* Validate the region. */
        MESOSPHERE_ABORT_UNLESS(start <= code_address);
        MESOSPHERE_ABORT_UNLESS(code_address < code_address + code_size);
        MESOSPHERE_ABORT_UNLESS(code_address + code_size - 1 <= end - 1);

        /* Define helpers. */
        auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
            return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
        };
        auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
            return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
        };

        /* Set our width and heap/alias sizes. */
        m_address_space_width = GetAddressSpaceWidth(as_type);
        size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Alias);
        size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Heap);

        /* Adjust heap/alias size if we don't have an alias region. */
        if ((as_type & ams::svc::CreateProcessFlag_AddressSpaceMask) == ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) {
            heap_region_size += alias_region_size;
            alias_region_size = 0;
        }

        /* Set code regions and determine remaining sizes. */
        KProcessAddress process_code_start;
        KProcessAddress process_code_end;
        size_t stack_region_size;
        size_t kernel_map_region_size;
        if (m_address_space_width == 39) {
            alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Alias);
            heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Heap);
            stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Stack);
            kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
            m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_Map39Bit);
            m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Map39Bit);
            m_alias_code_region_start = m_code_region_start;
            m_alias_code_region_end = m_code_region_end;
            process_code_start = util::AlignDown(GetInteger(code_address), RegionAlignment);
            process_code_end = util::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
        } else {
            stack_region_size = 0;
            kernel_map_region_size = 0;
            m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_MapSmall);
            m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
            m_stack_region_start = m_code_region_start;
            m_alias_code_region_start = m_code_region_start;
            m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge) + GetSpaceSize(KAddressSpaceInfo::Type_MapLarge);
            m_stack_region_end = m_code_region_end;
            m_kernel_map_region_start = m_code_region_start;
            m_kernel_map_region_end = m_code_region_end;
            process_code_start = m_code_region_start;
            process_code_end = m_code_region_end;
        }

        /* Set other basic fields. */
        m_enable_aslr = enable_aslr;
        m_enable_device_address_space_merge = enable_das_merge;
        m_address_space_start = start;
        m_address_space_end = end;
        m_is_kernel = false;
        m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
        m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
        m_resource_limit = resource_limit;

        /* Determine the region we can place our undetermineds in. */
        KProcessAddress alloc_start;
        size_t alloc_size;
        if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >= (GetInteger(end) - GetInteger(process_code_end))) {
            alloc_start = m_code_region_start;
            alloc_size = GetInteger(process_code_start) - GetInteger(m_code_region_start);
        } else {
            alloc_start = process_code_end;
            alloc_size = GetInteger(end) - GetInteger(process_code_end);
        }
        const size_t needed_size = (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
        R_UNLESS(alloc_size >= needed_size, svc::ResultOutOfMemory());

        const size_t remaining_size = alloc_size - needed_size;

        /* Determine random placements for each region. */
        size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0;
        if (enable_aslr) {
            alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
            heap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
            stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
            kmap_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
        }
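
        /* Note: each region drew an independent random offset into the leftover space. Below, whenever */
        /* two placements could collide, the region that drew the larger offset is shifted up past the */
        /* other by that region's size, so the randomized regions remain pairwise disjoint. */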

        /* Setup heap and alias regions. */
        m_alias_region_start = alloc_start + alias_rnd;
        m_alias_region_end = m_alias_region_start + alias_region_size;
        m_heap_region_start = alloc_start + heap_rnd;
        m_heap_region_end = m_heap_region_start + heap_region_size;

        if (alias_rnd <= heap_rnd) {
            m_heap_region_start += alias_region_size;
            m_heap_region_end += alias_region_size;
        } else {
            m_alias_region_start += heap_region_size;
            m_alias_region_end += heap_region_size;
        }

        /* Setup stack region. */
        if (stack_region_size) {
            m_stack_region_start = alloc_start + stack_rnd;
            m_stack_region_end = m_stack_region_start + stack_region_size;

            if (alias_rnd < stack_rnd) {
                m_stack_region_start += alias_region_size;
                m_stack_region_end += alias_region_size;
            } else {
                m_alias_region_start += stack_region_size;
                m_alias_region_end += stack_region_size;
            }

            if (heap_rnd < stack_rnd) {
                m_stack_region_start += heap_region_size;
                m_stack_region_end += heap_region_size;
            } else {
                m_heap_region_start += stack_region_size;
                m_heap_region_end += stack_region_size;
            }
        }

        /* Setup kernel map region. */
        if (kernel_map_region_size) {
            m_kernel_map_region_start = alloc_start + kmap_rnd;
            m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size;

            if (alias_rnd < kmap_rnd) {
                m_kernel_map_region_start += alias_region_size;
                m_kernel_map_region_end += alias_region_size;
            } else {
                m_alias_region_start += kernel_map_region_size;
                m_alias_region_end += kernel_map_region_size;
            }

            if (heap_rnd < kmap_rnd) {
                m_kernel_map_region_start += heap_region_size;
                m_kernel_map_region_end += heap_region_size;
            } else {
                m_heap_region_start += kernel_map_region_size;
                m_heap_region_end += kernel_map_region_size;
            }

            if (stack_region_size) {
                if (stack_rnd < kmap_rnd) {
                    m_kernel_map_region_start += stack_region_size;
                    m_kernel_map_region_end += stack_region_size;
                } else {
                    m_stack_region_start += kernel_map_region_size;
                    m_stack_region_end += kernel_map_region_size;
                }
            }
        }

        /* Set heap and fill members. */
        m_current_heap_end = m_heap_region_start;
        m_max_heap_size = 0;
        m_mapped_physical_memory_size = 0;
        m_mapped_unsafe_physical_memory = 0;
        m_mapped_insecure_memory = 0;
        m_mapped_ipc_server_memory = 0;

        const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();
        m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero;
        m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero;
        m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero;

        /* Set allocation option. */
        m_allocate_option = KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction_FromBack : KMemoryManager::Direction_FromFront);

        /* Ensure that our regions are inside our address space. */
        auto IsInAddressSpace = [&](KProcessAddress addr) ALWAYS_INLINE_LAMBDA { return m_address_space_start <= addr && addr <= m_address_space_end; };
        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_alias_region_start));
        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_alias_region_end));
        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_heap_region_start));
        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_heap_region_end));
        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_stack_region_start));
        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_stack_region_end));
        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_kernel_map_region_start));
        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_kernel_map_region_end));

        /* Ensure that we selected regions that don't overlap. */
        const KProcessAddress alias_start = m_alias_region_start;
        const KProcessAddress alias_last = m_alias_region_end - 1;
        const KProcessAddress heap_start = m_heap_region_start;
        const KProcessAddress heap_last = m_heap_region_end - 1;
        const KProcessAddress stack_start = m_stack_region_start;
        const KProcessAddress stack_last = m_stack_region_end - 1;
        const KProcessAddress kmap_start = m_kernel_map_region_start;
        const KProcessAddress kmap_last = m_kernel_map_region_end - 1;
        MESOSPHERE_ABORT_UNLESS(alias_last < heap_start || heap_last < alias_start);
        MESOSPHERE_ABORT_UNLESS(alias_last < stack_start || stack_last < alias_start);
        MESOSPHERE_ABORT_UNLESS(alias_last < kmap_start || kmap_last < alias_start);
        MESOSPHERE_ABORT_UNLESS(heap_last < stack_start || stack_last < heap_start);
        MESOSPHERE_ABORT_UNLESS(heap_last < kmap_start || kmap_last < heap_start);

        /* Initialize our implementation. */
        m_impl.InitializeForProcess(table, GetInteger(start), GetInteger(end));

        /* Initialize our memory block manager. */
        R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
    }

    void KPageTableBase::Finalize() {
        /* Finalize memory blocks. */
        m_memory_block_manager.Finalize(m_memory_block_slab_manager);

        /* Free any unsafe mapped memory. */
        if (m_mapped_unsafe_physical_memory) {
            Kernel::GetUnsafeMemory().Release(m_mapped_unsafe_physical_memory);
        }

        /* Release any insecure mapped memory. */
        if (m_mapped_insecure_memory) {
            if (auto * const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(); insecure_resource_limit != nullptr) {
                insecure_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, m_mapped_insecure_memory);
            }
        }

        /* Release any ipc server memory. */
        if (m_mapped_ipc_server_memory) {
            m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, m_mapped_ipc_server_memory);
        }

        /* Invalidate the entire instruction cache. */
        cpu::InvalidateEntireInstructionCache();
    }
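
    /* GetRegionAddress() and GetRegionSize() map each svc::MemoryState to the region it may occupy. */
    /* The two switches are intentionally parallel and must stay in sync, so that a given state's */
    /* address and size always describe the same region. */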
    KProcessAddress KPageTableBase::GetRegionAddress(ams::svc::MemoryState state) const {
        switch (state) {
            case ams::svc::MemoryState_Free:
            case ams::svc::MemoryState_Kernel:
                return m_address_space_start;
            case ams::svc::MemoryState_Normal:
                return m_heap_region_start;
            case ams::svc::MemoryState_Ipc:
            case ams::svc::MemoryState_NonSecureIpc:
            case ams::svc::MemoryState_NonDeviceIpc:
                return m_alias_region_start;
            case ams::svc::MemoryState_Stack:
                return m_stack_region_start;
            case ams::svc::MemoryState_Static:
            case ams::svc::MemoryState_ThreadLocal:
                return m_kernel_map_region_start;
            case ams::svc::MemoryState_Io:
            case ams::svc::MemoryState_Shared:
            case ams::svc::MemoryState_AliasCode:
            case ams::svc::MemoryState_AliasCodeData:
            case ams::svc::MemoryState_Transfered:
            case ams::svc::MemoryState_SharedTransfered:
            case ams::svc::MemoryState_SharedCode:
            case ams::svc::MemoryState_GeneratedCode:
            case ams::svc::MemoryState_CodeOut:
            case ams::svc::MemoryState_Coverage:
            case ams::svc::MemoryState_Insecure:
                return m_alias_code_region_start;
            case ams::svc::MemoryState_Code:
            case ams::svc::MemoryState_CodeData:
                return m_code_region_start;
            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
        }
    }

    size_t KPageTableBase::GetRegionSize(ams::svc::MemoryState state) const {
        switch (state) {
            case ams::svc::MemoryState_Free:
            case ams::svc::MemoryState_Kernel:
                return m_address_space_end - m_address_space_start;
            case ams::svc::MemoryState_Normal:
                return m_heap_region_end - m_heap_region_start;
            case ams::svc::MemoryState_Ipc:
            case ams::svc::MemoryState_NonSecureIpc:
            case ams::svc::MemoryState_NonDeviceIpc:
                return m_alias_region_end - m_alias_region_start;
            case ams::svc::MemoryState_Stack:
                return m_stack_region_end - m_stack_region_start;
            case ams::svc::MemoryState_Static:
            case ams::svc::MemoryState_ThreadLocal:
                return m_kernel_map_region_end - m_kernel_map_region_start;
            case ams::svc::MemoryState_Io:
            case ams::svc::MemoryState_Shared:
            case ams::svc::MemoryState_AliasCode:
            case ams::svc::MemoryState_AliasCodeData:
            case ams::svc::MemoryState_Transfered:
            case ams::svc::MemoryState_SharedTransfered:
            case ams::svc::MemoryState_SharedCode:
            case ams::svc::MemoryState_GeneratedCode:
            case ams::svc::MemoryState_CodeOut:
            case ams::svc::MemoryState_Coverage:
            case ams::svc::MemoryState_Insecure:
                return m_alias_code_region_end - m_alias_code_region_start;
            case ams::svc::MemoryState_Code:
            case ams::svc::MemoryState_CodeData:
                return m_code_region_end - m_code_region_start;
            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
        }
    }
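
    /* CanContain() checks that [addr, addr + size) lies entirely within the region associated with */
    /* the given state, and that states which may not overlap the heap or alias regions do not do so */
    /* (heap memory is only required to avoid the alias region, and ipc memory the heap region). */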
    bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, ams::svc::MemoryState state) const {
        const KProcessAddress end = addr + size;
        const KProcessAddress last = end - 1;

        const KProcessAddress region_start = this->GetRegionAddress(state);
        const size_t region_size = this->GetRegionSize(state);

        const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1;
        const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || m_heap_region_start == m_heap_region_end);
        const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || m_alias_region_start == m_alias_region_end);
        switch (state) {
            case ams::svc::MemoryState_Free:
            case ams::svc::MemoryState_Kernel:
                return is_in_region;
            case ams::svc::MemoryState_Io:
            case ams::svc::MemoryState_Static:
            case ams::svc::MemoryState_Code:
            case ams::svc::MemoryState_CodeData:
            case ams::svc::MemoryState_Shared:
            case ams::svc::MemoryState_AliasCode:
            case ams::svc::MemoryState_AliasCodeData:
            case ams::svc::MemoryState_Stack:
            case ams::svc::MemoryState_ThreadLocal:
            case ams::svc::MemoryState_Transfered:
            case ams::svc::MemoryState_SharedTransfered:
            case ams::svc::MemoryState_SharedCode:
            case ams::svc::MemoryState_GeneratedCode:
            case ams::svc::MemoryState_CodeOut:
            case ams::svc::MemoryState_Coverage:
            case ams::svc::MemoryState_Insecure:
                return is_in_region && !is_in_heap && !is_in_alias;
            case ams::svc::MemoryState_Normal:
                MESOSPHERE_ASSERT(is_in_heap);
                return is_in_region && !is_in_alias;
            case ams::svc::MemoryState_Ipc:
            case ams::svc::MemoryState_NonSecureIpc:
            case ams::svc::MemoryState_NonDeviceIpc:
                MESOSPHERE_ASSERT(is_in_alias);
                return is_in_region && !is_in_heap;
            default:
                return false;
        }
    }
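
    /* The CheckMemoryState() overloads below verify that every memory block overlapping a range */
    /* satisfies the given state/permission/attribute masks, and report how many additional blocks */
    /* an update would need (one wherever the range's start or end would split an existing block), */
    /* which callers use to size their KMemoryBlockManagerUpdateAllocator. */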
    Result KPageTableBase::CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
        /* Validate the states match expectation. */
        R_UNLESS((info.m_state & state_mask) == state, svc::ResultInvalidCurrentMemory());
        R_UNLESS((info.m_permission & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
        R_UNLESS((info.m_attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory());

        R_SUCCEED();
    }

    Result KPageTableBase::CheckMemoryStateContiguous(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Get information about the first block. */
        const KProcessAddress last_addr = addr + size - 1;
        KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
        KMemoryInfo info = it->GetMemoryInfo();

        /* If the start address isn't aligned, we need a block. */
        const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;

        while (true) {
            /* Validate against the provided masks. */
            R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));

            /* Break once we're done. */
            if (last_addr <= info.GetLastAddress()) {
                break;
            }

            /* Advance our iterator. */
            it++;
            MESOSPHERE_ASSERT(it != m_memory_block_manager.cend());
            info = it->GetMemoryInfo();
        }

        /* If the end address isn't aligned, we need a block. */
        const size_t blocks_for_end_align = (util::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;

        if (out_blocks_needed != nullptr) {
            *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
        }

        R_SUCCEED();
    }

    Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Get information about the first block. */
        KMemoryInfo info = it->GetMemoryInfo();

        /* Validate all blocks in the range have correct state. */
        const KMemoryState first_state = info.m_state;
        const KMemoryPermission first_perm = info.m_permission;
        const KMemoryAttribute first_attr = info.m_attribute;
        while (true) {
            /* Validate the current block. */
            R_UNLESS(info.m_state == first_state, svc::ResultInvalidCurrentMemory());
            R_UNLESS(info.m_permission == first_perm, svc::ResultInvalidCurrentMemory());
            R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory());

            /* Validate against the provided masks. */
            R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));

            /* Break once we're done. */
            if (last_addr <= info.GetLastAddress()) {
                break;
            }

            /* Advance our iterator. */
            it++;
            MESOSPHERE_ASSERT(it != m_memory_block_manager.cend());
            info = it->GetMemoryInfo();
        }

        /* Write output state. */
        if (out_state != nullptr) {
            *out_state = first_state;
        }
        if (out_perm != nullptr) {
            *out_perm = first_perm;
        }
        if (out_attr != nullptr) {
            *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
        }

        /* If the end address isn't aligned, we need a block. */
        if (out_blocks_needed != nullptr) {
            const size_t blocks_for_end_align = (util::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) ? 1 : 0;
            *out_blocks_needed = blocks_for_end_align;
        }

        R_SUCCEED();
    }

    Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Check memory state. */
        const KProcessAddress last_addr = addr + size - 1;
        KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
        R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));

        /* If the start address isn't aligned, we need a block. */
        if (out_blocks_needed != nullptr && util::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
            ++(*out_blocks_needed);
        }

        R_SUCCEED();
    }
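
    /* LockMemoryAndOpen()/UnlockMemory() implement the generic lock protocol: validate the range's */
    /* current state, optionally capture its physical address and page group (opening a reference */
    /* to the pages), reprotect if a new permission was requested, and update the memory blocks */
    /* with the lock (or unlock) attribute. */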
    Result KPageTableBase::LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr) {
        /* Validate basic preconditions. */
        MESOSPHERE_ASSERT((lock_attr & attr) == 0);
        MESOSPHERE_ASSERT((lock_attr & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)) == 0);

        /* Validate the lock request. */
        const size_t num_pages = size / PageSize;
        R_UNLESS(this->Contains(addr, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check that the output page group is empty, if it exists. */
        if (out_pg) {
            MESOSPHERE_ASSERT(out_pg->GetNumPages() == 0);
        }

        /* Check the state. */
        KMemoryState old_state;
        KMemoryPermission old_perm;
        KMemoryAttribute old_attr;
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks), addr, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr));

        /* Get the physical address, if we're supposed to. */
        if (out_paddr != nullptr) {
            MESOSPHERE_ABORT_UNLESS(this->GetPhysicalAddressLocked(out_paddr, addr));
        }

        /* Make the page group, if we're supposed to. */
        if (out_pg != nullptr) {
            R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
        }

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* Decide on new perm and attr. */
        new_perm = (new_perm != KMemoryPermission_None) ? new_perm : old_perm;
        KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);

        /* Update permission, if we need to. */
        if (new_perm != old_perm) {
            /* We're going to perform an update, so create a helper. */
            KScopedPageTableUpdater updater(this);

            const KPageProperties properties = { new_perm, false, (old_attr & KMemoryAttribute_Uncached) != 0, DisableMergeAttribute_DisableHeadBodyTail };
            R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
        }

        /* Apply the memory block updates. */
        m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);

        /* If we have an output group, open. */
        if (out_pg) {
            out_pg->Open();
        }

        R_SUCCEED();
    }

    Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr, const KPageGroup *pg) {
        /* Validate basic preconditions. */
        MESOSPHERE_ASSERT((attr_mask & lock_attr) == lock_attr);
        MESOSPHERE_ASSERT((attr & lock_attr) == lock_attr);

        /* Validate the unlock request. */
        const size_t num_pages = size / PageSize;
        R_UNLESS(this->Contains(addr, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check the state. */
        KMemoryState old_state;
        KMemoryPermission old_perm;
        KMemoryAttribute old_attr;
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks), addr, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr));

        /* Check the page group. */
        if (pg != nullptr) {
            R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), svc::ResultInvalidMemoryRegion());
        }

        /* Decide on new perm and attr. */
        new_perm = (new_perm != KMemoryPermission_None) ? new_perm : old_perm;
        KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* Update permission, if we need to. */
        if (new_perm != old_perm) {
            /* We're going to perform an update, so create a helper. */
            KScopedPageTableUpdater updater(this);

            const KPageProperties properties = { new_perm, false, (old_attr & KMemoryAttribute_Uncached) != 0, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
            R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
        }

        /* Apply the memory block updates. */
        m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);

        R_SUCCEED();
    }

    Result KPageTableBase::QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(out_info != nullptr);
        MESOSPHERE_ASSERT(out_page != nullptr);

        const KMemoryBlock *block = m_memory_block_manager.FindBlock(address);
        R_UNLESS(block != nullptr, svc::ResultInvalidCurrentMemory());

        *out_info = block->GetMemoryInfo();
        out_page->flags = 0;
        R_SUCCEED();
    }
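
    /* QueryMappingImpl() performs a reverse lookup: it walks the page table entries covering the */
    /* region associated with the given state, coalescing physically contiguous runs, and returns */
    /* the process address at which the requested physical range is mapped, if its state permits. */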
    Result KPageTableBase::QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, ams::svc::MemoryState state) const {
        MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(out != nullptr);

        const KProcessAddress region_start = this->GetRegionAddress(state);
        const size_t region_size = this->GetRegionSize(state);

        /* Check that the address/size are potentially valid. */
        R_UNLESS((address < address + size), svc::ResultNotFound());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        auto &impl = this->GetImpl();

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
        bool cur_valid = false;
        TraversalEntry next_entry;
        bool next_valid;
        size_t tot_size = 0;

        next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start);
        next_entry.block_size = (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1)));

        /* Iterate, looking for entry. */
        while (true) {
            if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
                cur_entry.block_size += next_entry.block_size;
            } else {
                if (cur_valid && cur_entry.phys_addr <= address && address + size <= cur_entry.phys_addr + cur_entry.block_size) {
                    /* Check if this region is valid. */
                    const KProcessAddress mapped_address = (region_start + tot_size) + (address - cur_entry.phys_addr);
                    if (R_SUCCEEDED(this->CheckMemoryState(mapped_address, size, KMemoryState_Mask, static_cast<KMemoryState>(util::ToUnderlying(state)), KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) {
                        /* It is! */
                        *out = mapped_address;
                        R_SUCCEED();
                    }
                }

                /* Update tracking variables. */
                tot_size += cur_entry.block_size;
                cur_entry = next_entry;
                cur_valid = next_valid;
            }

            if (cur_entry.block_size + tot_size >= region_size) {
                break;
            }

            next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
        }

        /* Check the last entry. */
        R_UNLESS(cur_valid, svc::ResultNotFound());
        R_UNLESS(cur_entry.phys_addr <= address, svc::ResultNotFound());
        R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, svc::ResultNotFound());

        /* Check if the last region is valid. */
        const KProcessAddress mapped_address = (region_start + tot_size) + (address - cur_entry.phys_addr);
        R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)) {
            R_CONVERT_ALL(svc::ResultNotFound());
        } R_END_TRY_CATCH;

        /* We found the region. */
        *out = mapped_address;
        R_SUCCEED();
    }
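
    /* MapMemory()/UnmapMemory() back svc::MapMemory: the source pages are reprotected to */
    /* kernel-read/not-mapped and marked Locked, and the same physical pages are aliased at the */
    /* destination as user-read/write Stack memory; UnmapMemory() validates and reverses this. */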
    Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Validate that the source address's state is valid. */
        KMemoryState src_state;
        size_t num_src_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState_FlagCanAlias, KMemoryState_FlagCanAlias, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Validate that the dst address's state is valid. */
        size_t num_dst_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator for the source. */
        Result src_allocator_result;
        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), m_memory_block_slab_manager, num_src_allocator_blocks);
        R_TRY(src_allocator_result);

        /* Create an update allocator for the destination. */
        Result dst_allocator_result;
        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), m_memory_block_slab_manager, num_dst_allocator_blocks);
        R_TRY(dst_allocator_result);

        /* Map the memory. */
        {
            /* Determine the number of pages being operated on. */
            const size_t num_pages = size / PageSize;

            /* Create page groups for the memory being unmapped. */
            KPageGroup pg(m_block_info_manager);

            /* Create the page group representing the source. */
            R_TRY(this->MakePageGroup(pg, src_address, num_pages));

            /* We're going to perform an update, so create a helper. */
            KScopedPageTableUpdater updater(this);

            /* Reprotect the source as kernel-read/not mapped. */
            const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped);
            const KMemoryAttribute new_src_attr = KMemoryAttribute_Locked;
            const KPageProperties src_properties = { new_src_perm, false, false, DisableMergeAttribute_DisableHeadBodyTail };
            R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));

            /* Ensure that we unprotect the source pages on failure. */
            ON_RESULT_FAILURE {
                const KPageProperties unprotect_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableHeadBodyTail };
                MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, unprotect_properties, OperationType_ChangePermissions, true));
            };

            /* Map the alias pages. */
            const KPageProperties dst_map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_DisableHead };
            R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, false));

            /* Apply the memory block updates. */
            m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_src_perm, new_src_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);
            m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_Stack, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
        }

        R_SUCCEED();
    }

    Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Validate that the source address's state is valid. */
        KMemoryState src_state;
        size_t num_src_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState_FlagCanAlias, KMemoryState_FlagCanAlias, KMemoryPermission_All, KMemoryPermission_NotMapped | KMemoryPermission_KernelRead, KMemoryAttribute_All, KMemoryAttribute_Locked));

        /* Validate that the dst address's state is valid. */
        KMemoryPermission dst_perm;
        size_t num_dst_allocator_blocks;
        R_TRY(this->CheckMemoryState(nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Stack, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Create an update allocator for the source. */
        Result src_allocator_result;
        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), m_memory_block_slab_manager, num_src_allocator_blocks);
        R_TRY(src_allocator_result);

        /* Create an update allocator for the destination. */
        Result dst_allocator_result;
        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), m_memory_block_slab_manager, num_dst_allocator_blocks);
        R_TRY(dst_allocator_result);

        /* Unmap the memory. */
        {
            /* Determine the number of pages being operated on. */
            const size_t num_pages = size / PageSize;

            /* Create page groups for the memory being unmapped. */
            KPageGroup pg(m_block_info_manager);

            /* Create the page group representing the destination. */
            R_TRY(this->MakePageGroup(pg, dst_address, num_pages));

            /* Ensure the page group is valid for the source. */
            R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), svc::ResultInvalidMemoryRegion());

            /* We're going to perform an update, so create a helper. */
            KScopedPageTableUpdater updater(this);

            /* Unmap the aliased copy of the pages. */
            const KPageProperties dst_unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
            R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));

            /* Ensure that we re-map the aliased pages on failure. */
            ON_RESULT_FAILURE {
                this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
            };

            /* Try to set the permissions for the source pages back to what they should be. */
            const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
            R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));

            /* Apply the memory block updates. */
            m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
            m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
        }

        R_SUCCEED();
    }
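
    /* MapCodeMemory() aliases normal heap pages into the alias-code region: the source is */
    /* reprotected to kernel-read/not-mapped and marked Locked, and the destination is mapped */
    /* with the same pages as AliasCode. UnmapCodeMemory() verifies the alias still matches the */
    /* source before undoing it, invalidating the instruction cache if code pages were unmapped. */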
    Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        /* Validate the mapping request. */
        R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Verify that the source memory is normal heap. */
        KMemoryState src_state;
        KMemoryPermission src_perm;
        size_t num_src_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr, std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Verify that the destination memory is unmapped. */
        size_t num_dst_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator for the source. */
        Result src_allocator_result;
        KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), m_memory_block_slab_manager, num_src_allocator_blocks);
        R_TRY(src_allocator_result);

        /* Create an update allocator for the destination. */
        Result dst_allocator_result;
        KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), m_memory_block_slab_manager, num_dst_allocator_blocks);
        R_TRY(dst_allocator_result);

        /* Map the code memory. */
        {
            /* Determine the number of pages being operated on. */
            const size_t num_pages = size / PageSize;

            /* Create page groups for the memory being unmapped. */
            KPageGroup pg(m_block_info_manager);

            /* Create the page group representing the source. */
            R_TRY(this->MakePageGroup(pg, src_address, num_pages));

            /* We're going to perform an update, so create a helper. */
            KScopedPageTableUpdater updater(this);

            /* Reprotect the source as kernel-read/not mapped. */
            const KMemoryPermission new_perm = static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped);
            const KPageProperties src_properties = { new_perm, false, false, DisableMergeAttribute_DisableHeadBodyTail };
            R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));

            /* Ensure that we unprotect the source pages on failure. */
            ON_RESULT_FAILURE {
                const KPageProperties unprotect_properties = { src_perm, false, false, DisableMergeAttribute_EnableHeadBodyTail };
                MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, unprotect_properties, OperationType_ChangePermissions, true));
            };

            /* Map the alias pages. */
            const KPageProperties dst_properties = { new_perm, false, false, DisableMergeAttribute_DisableHead };
            R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));

            /* Apply the memory block updates. */
            m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);
            m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_AliasCode, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
        }

        R_SUCCEED();
    }
|
|
|
|
|
|
|
|
    Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
        /* Validate the mapping request. */
        R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Verify that the source memory is locked normal heap. */
        size_t num_src_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_Locked));

        /* Verify that the destination memory is aliasable code. */
        size_t num_dst_allocator_blocks;
        R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_FlagCanCodeAlias, KMemoryState_FlagCanCodeAlias, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All & ~KMemoryAttribute_PermissionLocked, KMemoryAttribute_None));

        /* Determine whether any pages being unmapped are code. */
        bool any_code_pages = false;
        {
            KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
            while (true) {
                /* Get the memory info. */
                const KMemoryInfo info = it->GetMemoryInfo();

                /* Check if the memory has code flag. */
                if ((info.GetState() & KMemoryState_FlagCode) != 0) {
                    any_code_pages = true;
                    break;
                }

                /* Check if we're done. */
                if (dst_address + size - 1 <= info.GetLastAddress()) {
                    break;
                }

                /* Advance. */
                ++it;
            }
        }

        /* Ensure that we maintain the instruction cache. */
        bool reprotected_pages = false;
        ON_SCOPE_EXIT {
            if (reprotected_pages && any_code_pages) {
                cpu::InvalidateEntireInstructionCache();
            }
        };

        /* Unmap. */
        {
            /* Determine the number of pages being operated on. */
            const size_t num_pages = size / PageSize;

            /* Create page groups for the memory being unmapped. */
            KPageGroup pg(m_block_info_manager);

            /* Create the page group representing the destination. */
            R_TRY(this->MakePageGroup(pg, dst_address, num_pages));

            /* Verify that the page group contains the same pages as the source. */
            R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), svc::ResultInvalidMemoryRegion());

            /* Create an update allocator for the source. */
            Result src_allocator_result;
            KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), m_memory_block_slab_manager, num_src_allocator_blocks);
            R_TRY(src_allocator_result);

            /* Create an update allocator for the destination. */
            Result dst_allocator_result;
            KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), m_memory_block_slab_manager, num_dst_allocator_blocks);
            R_TRY(dst_allocator_result);

            /* We're going to perform an update, so create a helper. */
            KScopedPageTableUpdater updater(this);

            /* Unmap the aliased copy of the pages. */
            const KPageProperties dst_unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
            R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));

            /* Ensure that we re-map the aliased pages on failure. */
            ON_RESULT_FAILURE {
                this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
            };

            /* Try to set the permissions for the source pages back to what they should be. */
            const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
            R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));

            /* Apply the memory block updates. */
            m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
            m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);

            /* Note that we reprotected pages. */
            reprotected_pages = true;
        }

        R_SUCCEED();
    }

    Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
        /* Get the insecure memory resource limit and pool. */
        auto * const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit();
        const auto insecure_pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool());

        /* Reserve the insecure memory. */
        /* NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached. */
        KScopedResourceReservation memory_reservation(insecure_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, size);
        R_UNLESS(memory_reservation.Succeeded(), svc::ResultOutOfMemory());

        /* Allocate pages for the insecure memory. */
        KPageGroup pg(m_block_info_manager);
        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));

        /* Close the opened pages when we're done with them. */
        /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
        ON_SCOPE_EXIT { pg.Close(); };

        /* Clear all the newly allocated pages. */
        for (const auto &it : pg) {
            std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize());
        }

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Validate that the address's state is valid. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Map the pages. */
        const size_t num_pages = size / PageSize;
        const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_DisableHead };
        R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, OperationType_MapGroup, false));

        /* Apply the memory block update. */
        m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Insecure, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

        /* Update our mapped insecure size. */
        m_mapped_insecure_memory += size;

        /* Commit the memory reservation. */
        memory_reservation.Commit();

        /* We succeeded. */
        R_SUCCEED();
    }

    Result KPageTableBase::UnmapInsecureMemory(KProcessAddress address, size_t size) {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check the memory state. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Insecure, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Unmap the memory. */
        const size_t num_pages = size / PageSize;
        const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
        R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));

        /* Apply the memory block update. */
        m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);

        /* Update our mapped insecure size. */
        m_mapped_insecure_memory -= size;

        /* Release the insecure memory from the insecure limit. */
        if (auto * const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(); insecure_resource_limit != nullptr) {
            insecure_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, size);
        }

        R_SUCCEED();
    }

    KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
        KProcessAddress address = Null<KProcessAddress>;

        if (num_pages <= region_num_pages) {
            if (this->IsAslrEnabled()) {
                /* Try to directly find a free area up to 8 times. */
                for (size_t i = 0; i < 8; i++) {
                    const size_t random_offset = KSystemControl::GenerateRandomRange(0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * alignment;
                    const KProcessAddress candidate = util::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;

                    KMemoryInfo info;
                    ams::svc::PageInfo page_info;
                    MESOSPHERE_R_ABORT_UNLESS(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), candidate));
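
                    /* Only use the candidate if it lies in a free block and the mapping plus its guard pages fits within both that block and the requested region. */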
                    if (info.m_state != KMemoryState_Free) { continue; }
                    if (!(region_start <= candidate)) { continue; }
                    if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; }
                    if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= info.GetLastAddress())) { continue; }
                    if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= region_start + region_num_pages * PageSize - 1)) { continue; }

                    address = candidate;
                    break;
                }
                /* Fall back to finding the first free area with a random offset. */
                if (address == Null<KProcessAddress>) {
                    /* NOTE: Nintendo does not account for guard pages here. */
                    /* This may theoretically cause an offset to be chosen that cannot be mapped. */
                    /* We will account for guard pages. */
                    const size_t offset_pages = KSystemControl::GenerateRandomRange(0, region_num_pages - num_pages - guard_pages);
                    address = m_memory_block_manager.FindFreeArea(region_start + offset_pages * PageSize, region_num_pages - offset_pages, num_pages, alignment, offset, guard_pages);
                }
            }
            /* Find the first free area. */
            if (address == Null<KProcessAddress>) {
                address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, alignment, offset, guard_pages);
            }
        }

        return address;
    }

    size_t KPageTableBase::GetSize(KMemoryState state) const {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Iterate, counting blocks with the desired state. */
        size_t total_size = 0;
        for (KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(m_address_space_start); it != m_memory_block_manager.end(); ++it) {
            /* Get the memory info. */
            const KMemoryInfo info = it->GetMemoryInfo();
            if (info.GetState() == state) {
                total_size += info.GetSize();
            }
        }

        return total_size;
    }

    size_t KPageTableBase::GetCodeSize() const {
        return this->GetSize(KMemoryState_Code);
    }

    size_t KPageTableBase::GetCodeDataSize() const {
        return this->GetSize(KMemoryState_CodeData);
    }

    size_t KPageTableBase::GetAliasCodeSize() const {
        return this->GetSize(KMemoryState_AliasCode);
    }

    size_t KPageTableBase::GetAliasCodeDataSize() const {
        return this->GetSize(KMemoryState_AliasCodeData);
    }

    Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, KMemoryPermission perm) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Create a page group to hold the pages we allocate. */
        KPageGroup pg(m_block_info_manager);

        /* Allocate the pages. */
        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));

        /* Ensure that the page group is closed when we're done working with it. */
        ON_SCOPE_EXIT { pg.Close(); };

        /* Clear all pages. */
        for (const auto &it : pg) {
            std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize());
        }

        /* Map the pages. */
        const KPageProperties properties = { perm, false, false, DisableMergeAttribute_None };
        R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false));
    }

    Result KPageTableBase::MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Note the current address, so that we can iterate. */
        const KProcessAddress start_address = address;
        KProcessAddress cur_address = address;

        /* Ensure that we clean up on failure. */
        ON_RESULT_FAILURE {
            MESOSPHERE_ABORT_UNLESS(!reuse_ll);
            if (cur_address != start_address) {
                const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
                MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, start_address, (cur_address - start_address) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
            }
        };

        /* Iterate, mapping all pages in the group. */
        for (const auto &block : pg) {
            /* Map and advance. */
            const KPageProperties cur_properties = (cur_address == start_address) ? properties : KPageProperties{ properties.perm, properties.io, properties.uncached, DisableMergeAttribute_None };
            R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true, cur_properties, OperationType_Map, reuse_ll));
            cur_address += block.GetSize();
        }

        /* We succeeded! */
        R_SUCCEED();
    }

    void KPageTableBase::RemapPageGroup(PageLinkedList *page_list, KProcessAddress address, size_t size, const KPageGroup &pg) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Note the current address, so that we can iterate. */
        const KProcessAddress start_address = address;
        const KProcessAddress last_address = start_address + size - 1;
        const KProcessAddress end_address = last_address + 1;

        /* Iterate over the memory. */
        auto pg_it = pg.begin();
        MESOSPHERE_ABORT_UNLESS(pg_it != pg.end());
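
        /* Track our current position within the page group's physical blocks. */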
        KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
        size_t pg_pages = pg_it->GetNumPages();

        auto it = m_memory_block_manager.FindIterator(start_address);
        while (true) {
            /* Check that the iterator is valid. */
            MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

            /* Get the memory info. */
            const KMemoryInfo info = it->GetMemoryInfo();

            /* Determine the range to map. */
            KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address));
            const KProcessAddress map_end_address = std::min(info.GetEndAddress(), GetInteger(end_address));
            MESOSPHERE_ABORT_UNLESS(map_end_address != map_address);

            /* Determine if we should disable head merge. */
            const bool disable_head_merge = info.GetAddress() >= GetInteger(start_address) && (info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Normal) != 0;
            const KPageProperties map_properties = { info.GetPermission(), false, false, disable_head_merge ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };

            /* While we have pages to map, map them. */
            size_t map_pages = (map_end_address - map_address) / PageSize;
            while (map_pages > 0) {
                /* Check if we're at the end of the physical block. */
                if (pg_pages == 0) {
                    /* Ensure there are more pages to map. */
                    MESOSPHERE_ABORT_UNLESS(pg_it != pg.end());

                    /* Advance our physical block. */
                    ++pg_it;
                    pg_phys_addr = pg_it->GetAddress();
                    pg_pages = pg_it->GetNumPages();
                }

                /* Map whatever we can. */
                const size_t cur_pages = std::min(pg_pages, map_pages);
                MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, map_address, map_pages, pg_phys_addr, true, map_properties, OperationType_Map, true));

                /* Advance. */
                map_address += cur_pages * PageSize;
                map_pages -= cur_pages;

                pg_phys_addr += cur_pages * PageSize;
                pg_pages -= cur_pages;
            }

            /* Check if we're done. */
            if (last_address <= info.GetLastAddress()) {
                break;
            }

            /* Advance. */
            ++it;
        }

        /* Check that we re-mapped precisely the page group. */
        MESOSPHERE_ABORT_UNLESS((++pg_it) == pg.end());
    }

    Result KPageTableBase::MakePageGroup(KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        const size_t size = num_pages * PageSize;

        /* We're making a new group, not adding to an existing one. */
        R_UNLESS(pg.empty(), svc::ResultInvalidCurrentMemory());

        auto &impl = this->GetImpl();

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry next_entry;
        R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr), svc::ResultInvalidCurrentMemory());

        /* Prepare tracking variables. */
        KPhysicalAddress cur_addr = next_entry.phys_addr;
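        /* The first entry may begin partway into its block; only count the bytes from the mapped address onward. */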
        size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
        size_t tot_size = cur_size;

        /* Iterate, adding to group as we go. */
        while (tot_size < size) {
            R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)), svc::ResultInvalidCurrentMemory());
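
            /* When the traversal stops being physically contiguous, add the completed run to the group and begin a new one. */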
            if (next_entry.phys_addr != (cur_addr + cur_size)) {
                const size_t cur_pages = cur_size / PageSize;

                R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
                R_TRY(pg.AddBlock(cur_addr, cur_pages));

                cur_addr = next_entry.phys_addr;
                cur_size = next_entry.block_size;
            } else {
                cur_size += next_entry.block_size;
            }

            tot_size += next_entry.block_size;
        }

        /* Ensure we add the right amount for the last block. */
        if (tot_size > size) {
            cur_size -= (tot_size - size);
        }

        /* Add the last block. */
        const size_t cur_pages = cur_size / PageSize;
        R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
        R_TRY(pg.AddBlock(cur_addr, cur_pages));

        R_SUCCEED();
    }

    bool KPageTableBase::IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        const size_t size = num_pages * PageSize;

        /* Empty groups are necessarily invalid. */
        if (pg.empty()) {
            return false;
        }

        auto &impl = this->GetImpl();

        /* We're going to validate that the group we'd expect is the group we see. */
        auto cur_it = pg.begin();
        KPhysicalAddress cur_block_address = cur_it->GetAddress();
        size_t cur_block_pages = cur_it->GetNumPages();
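
        /* Helper which advances to the group's next block once the current one has been fully consumed. */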
        auto UpdateCurrentIterator = [&]() ALWAYS_INLINE_LAMBDA {
            if (cur_block_pages == 0) {
                if ((++cur_it) == pg.end()) {
                    return false;
                }

                cur_block_address = cur_it->GetAddress();
                cur_block_pages = cur_it->GetNumPages();
            }
            return true;
        };

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry next_entry;
        if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) {
            return false;
        }

        /* Prepare tracking variables. */
        KPhysicalAddress cur_addr = next_entry.phys_addr;
        size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
        size_t tot_size = cur_size;

        /* Iterate, comparing expected to actual. */
        while (tot_size < size) {
            if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) {
                return false;
            }
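
            /* On a physical discontinuity, validate the completed run against the expected block before starting a new run. */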
            if (next_entry.phys_addr != (cur_addr + cur_size)) {
                const size_t cur_pages = cur_size / PageSize;

                if (!IsHeapPhysicalAddress(cur_addr)) {
                    return false;
                }

                if (!UpdateCurrentIterator()) {
                    return false;
                }

                if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
                    return false;
                }

                cur_block_address += cur_size;
                cur_block_pages -= cur_pages;
                cur_addr = next_entry.phys_addr;
                cur_size = next_entry.block_size;
            } else {
                cur_size += next_entry.block_size;
            }

            tot_size += next_entry.block_size;
        }

        /* Ensure we compare the right amount for the last block. */
        if (tot_size > size) {
            cur_size -= (tot_size - size);
        }

        if (!IsHeapPhysicalAddress(cur_addr)) {
            return false;
        }

        if (!UpdateCurrentIterator()) {
            return false;
        }

        return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
    }

    Result KPageTableBase::GetContiguousMemoryRangeWithState(MemoryRange *out, KProcessAddress address, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        auto &impl = this->GetImpl();

        /* Begin a traversal. */
        TraversalContext context;
        TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
        R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), svc::ResultInvalidCurrentMemory());

        /* Traverse until we have enough size or we aren't contiguous any more. */
        const KPhysicalAddress phys_address = cur_entry.phys_addr;
        size_t contig_size;
        for (contig_size = cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); contig_size < size; contig_size += cur_entry.block_size) {
            if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
                break;
            }
            if (cur_entry.phys_addr != phys_address + contig_size) {
                break;
            }
        }

        /* Take the minimum size for our region. */
        size = std::min(size, contig_size);

        /* Check that the memory is contiguous (modulo the reference count bit). */
        const u32 test_state_mask = state_mask | KMemoryState_FlagReferenceCounted;
        const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, test_state_mask, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr));
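        /* If the range is not reference-counted heap, check again without requiring the reference-counted flag. */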
        if (!is_heap) {
            R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask, perm, attr_mask, attr));
        }

        /* The memory is contiguous, so set the output range. */
        out->Set(phys_address, size, is_heap);
        R_SUCCEED();
    }

    Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
        const size_t num_pages = size / PageSize;

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Verify we can change the memory permission. */
        KMemoryState old_state;
        KMemoryPermission old_perm;
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, std::addressof(num_allocator_blocks), addr, size, KMemoryState_FlagCanReprotect, KMemoryState_FlagCanReprotect, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Determine new perm. */
        const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
        R_SUCCEED_IF(old_perm == new_perm);

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform mapping operation. */
        const KPageProperties properties = { new_perm, false, false, DisableMergeAttribute_None };
        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);

        R_SUCCEED();
    }

    Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
        const size_t num_pages = size / PageSize;

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Verify we can change the memory permission. */
        KMemoryState old_state;
        KMemoryPermission old_perm;
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, std::addressof(num_allocator_blocks), addr, size, KMemoryState_FlagCode, KMemoryState_FlagCode, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Make a new page group for the region. */
        KPageGroup pg(m_block_info_manager);

        /* Determine new perm/state. */
        const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
        KMemoryState new_state = old_state;
        const bool is_w  = (new_perm & KMemoryPermission_UserWrite)   == KMemoryPermission_UserWrite;
        const bool is_x  = (new_perm & KMemoryPermission_UserExecute) == KMemoryPermission_UserExecute;
        const bool was_x = (old_perm & KMemoryPermission_UserExecute) == KMemoryPermission_UserExecute;
        MESOSPHERE_ASSERT(!(is_w && is_x));

        if (is_w) {
            switch (old_state) {
                case KMemoryState_Code:      new_state = KMemoryState_CodeData;      break;
                case KMemoryState_AliasCode: new_state = KMemoryState_AliasCodeData; break;
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }
        }

        /* Create a page group, if we're setting execute permissions. */
        if (is_x) {
            R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages));
        }

        /* Succeed if there's nothing to do. */
        R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform mapping operation. */
        const KPageProperties properties = { new_perm, false, false, DisableMergeAttribute_None };
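        /* If the pages were previously executable, use the operation that also refreshes the mappings and flushes caches. */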
        const auto operation = was_x ? OperationType_ChangePermissionsAndRefreshAndFlush : OperationType_ChangePermissions;
        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, operation, false));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);

        /* Ensure cache coherency, if we're setting pages as executable. */
        if (is_x) {
            for (const auto &block : pg) {
                cpu::StoreDataCache(GetVoidPointer(GetHeapVirtualAddress(block.GetAddress())), block.GetSize());
            }
            cpu::InvalidateEntireInstructionCache();
        }

        R_SUCCEED();
    }

    Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
        const size_t num_pages = size / PageSize;
        MESOSPHERE_ASSERT((mask | KMemoryAttribute_SetMask) == KMemoryAttribute_SetMask);

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Verify we can change the memory attribute. */
        KMemoryState old_state;
        KMemoryPermission old_perm;
        KMemoryAttribute old_attr;
        size_t num_allocator_blocks;
        constexpr u32 AttributeTestMask = ~(KMemoryAttribute_SetMask | KMemoryAttribute_DeviceShared);
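        /* Changing the uncached attribute requires FlagCanChangeAttribute; changing the permission-locked attribute requires FlagCanPermissionLock. */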
        const u32 state_test_mask = ((mask & KMemoryAttribute_Uncached) ? static_cast<u32>(KMemoryState_FlagCanChangeAttribute) : 0) | ((mask & KMemoryAttribute_PermissionLocked) ? static_cast<u32>(KMemoryState_FlagCanPermissionLock) : 0);
        R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks),
                                     addr, size,
                                     state_test_mask, state_test_mask,
                                     KMemoryPermission_None, KMemoryPermission_None,
                                     AttributeTestMask, KMemoryAttribute_None, ~AttributeTestMask));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* If we need to, perform a change attribute operation. */
        if ((mask & KMemoryAttribute_Uncached) != 0) {
            /* Determine the new attribute. */
            const KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask)));

            /* Perform operation. */
            const KPageProperties properties = { old_perm, false, (new_attr & KMemoryAttribute_Uncached) != 0, DisableMergeAttribute_None };
            R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissionsAndRefreshAndFlush, false));
        }

        /* Update the blocks. */
        m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr);

        R_SUCCEED();
    }

    Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) {
        /* Lock the physical memory mutex. */
        KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);

        /* Try to perform a reduction in heap, instead of an extension. */
        KProcessAddress cur_address;
        size_t allocation_size;
        {
            /* Lock the table. */
            KScopedLightLock lk(m_general_lock);

            /* Validate that setting heap size is possible at all. */
            R_UNLESS(!m_is_kernel, svc::ResultOutOfMemory());
            R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), svc::ResultOutOfMemory());
            R_UNLESS(size <= m_max_heap_size, svc::ResultOutOfMemory());

            if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
                /* The size being requested is less than the current size, so we need to free the end of the heap. */

                /* Validate memory state. */
                size_t num_allocator_blocks;
                R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
                                             m_heap_region_start + size, (m_current_heap_end - m_heap_region_start) - size,
                                             KMemoryState_All, KMemoryState_Normal,
                                             KMemoryPermission_All, KMemoryPermission_UserReadWrite,
                                             KMemoryAttribute_All, KMemoryAttribute_None));

                /* Create an update allocator. */
                Result allocator_result;
                KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
                R_TRY(allocator_result);

                /* We're going to perform an update, so create a helper. */
                KScopedPageTableUpdater updater(this);

                /* Unmap the end of the heap. */
                const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize;
                const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
                R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));

                /* Release the memory from the resource limit. */
                m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, num_pages * PageSize);

                /* Apply the memory block update. */
                m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, size == 0 ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None);

                /* Update the current heap end. */
                m_current_heap_end = m_heap_region_start + size;

                /* Set the output. */
                *out = m_heap_region_start;
                R_SUCCEED();
            } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
                /* The size requested is exactly the current size. */
                *out = m_heap_region_start;
                R_SUCCEED();
            } else {
                /* We have to allocate memory. Determine how much to allocate and where while the table is locked. */
                cur_address     = m_current_heap_end;
                allocation_size = size - (m_current_heap_end - m_heap_region_start);
            }
        }

        /* Reserve memory for the heap extension. */
        KScopedResourceReservation memory_reservation(m_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, allocation_size);
        R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());

        /* Allocate pages for the heap extension. */
        KPageGroup pg(m_block_info_manager);
        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, m_allocate_option));

        /* Close the opened pages when we're done with them. */
        /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
        ON_SCOPE_EXIT { pg.Close(); };

        /* Clear all the newly allocated pages. */
        for (const auto &it : pg) {
            std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize());
        }

        /* Map the pages. */
        {
            /* Lock the table. */
            KScopedLightLock lk(m_general_lock);

            /* Ensure that the heap hasn't changed since we began executing. */
            MESOSPHERE_ABORT_UNLESS(cur_address == m_current_heap_end);

            /* Check the memory state. */
            size_t num_allocator_blocks;
            R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, allocation_size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

            /* Create an update allocator. */
            Result allocator_result;
            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
            R_TRY(allocator_result);

            /* We're going to perform an update, so create a helper. */
            KScopedPageTableUpdater updater(this);

            /* Map the pages. */
            const size_t num_pages = allocation_size / PageSize;
            const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, (m_current_heap_end == m_heap_region_start) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
            R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg, map_properties, OperationType_MapGroup, false));

            /* We succeeded, so commit our memory reservation. */
            memory_reservation.Commit();

            /* Apply the memory block update. */
            m_memory_block_manager.Update(std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);

            /* Update the current heap end. */
            m_current_heap_end = m_heap_region_start + size;

            /* Set the output. */
            *out = m_heap_region_start;
            R_SUCCEED();
        }
    }

    Result KPageTableBase::SetMaxHeapSize(size_t size) {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Only process page tables are allowed to set heap size. */
        MESOSPHERE_ASSERT(!this->IsKernel());

        m_max_heap_size = size;

        R_SUCCEED();
    }

    Result KPageTableBase::QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
        /* If the address is invalid, create a fake block. */
        if (!this->Contains(addr, 1)) {
            *out_info = {
                .m_address                          = GetInteger(m_address_space_end),
                .m_size                             = 0 - GetInteger(m_address_space_end),
                .m_state                            = static_cast<KMemoryState>(ams::svc::MemoryState_Inaccessible),
                .m_device_disable_merge_left_count  = 0,
                .m_device_disable_merge_right_count = 0,
                .m_ipc_lock_count                   = 0,
                .m_device_use_count                 = 0,
                .m_ipc_disable_merge_count          = 0,
                .m_permission                       = KMemoryPermission_None,
                .m_attribute                        = KMemoryAttribute_None,
                .m_original_permission              = KMemoryPermission_None,
                .m_disable_merge_attribute          = KMemoryBlockDisableMergeAttribute_None,
            };
            out_page_info->flags = 0;

            R_SUCCEED();
        }

        /* Otherwise, lock the table and query. */
        KScopedLightLock lk(m_general_lock);
        R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
    }

    Result KPageTableBase::QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Align the address down to page size. */
        address = util::AlignDown(GetInteger(address), PageSize);

        /* Verify that we can query the address. */
        KMemoryInfo info;
        ams::svc::PageInfo page_info;
        R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address));

        /* Check the memory state. */
        R_TRY(this->CheckMemoryState(info, KMemoryState_FlagCanQueryPhysical, KMemoryState_FlagCanQueryPhysical, KMemoryPermission_UserReadExecute, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Prepare to traverse. */
        KPhysicalAddress phys_addr;
        size_t phys_size;

        KProcessAddress virt_addr = info.GetAddress();
        KProcessAddress end_addr  = info.GetEndAddress();

        /* Perform traversal. */
        {
            /* Begin traversal. */
            TraversalContext context;
            TraversalEntry next_entry;
            bool traverse_valid = m_impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
            R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());

            /* Set tracking variables. */
            phys_addr = next_entry.phys_addr;
            phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));

            /* Iterate. */
            while (true) {
                /* Continue the traversal. */
                traverse_valid = m_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
                if (!traverse_valid) {
                    break;
                }
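
                /* On a physical discontinuity, stop if the queried address lies in the current run; otherwise begin tracking a new run. */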
                if (next_entry.phys_addr != (phys_addr + phys_size)) {
                    /* Check if we're done. */
                    if (virt_addr <= address && address <= virt_addr + phys_size - 1) {
                        break;
                    }

                    /* Advance. */
                    phys_addr  = next_entry.phys_addr;
                    virt_addr += next_entry.block_size;
                    phys_size  = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
                } else {
                    phys_size += next_entry.block_size;
                }

                /* Check if we're done. */
                if (end_addr < virt_addr + phys_size) {
                    break;
                }
            }
            MESOSPHERE_ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1);

            /* Ensure we use the right size. */
            if (end_addr < virt_addr + phys_size) {
                phys_size = end_addr - virt_addr;
            }
        }

        /* Set the output. */
        out->physical_address = GetInteger(phys_addr);
        out->virtual_address  = GetInteger(virt_addr);
        out->size             = phys_size;
        R_SUCCEED();
    }

    Result KPageTableBase::MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryState state, KMemoryPermission perm) {
        /* Check pre-conditions. */
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
        MESOSPHERE_ASSERT(size > 0);

        R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress());
        const size_t num_pages = size / PageSize;
        const KPhysicalAddress last = phys_addr + size - 1;

        /* Get region extents. */
        const KProcessAddress region_start     = m_kernel_map_region_start;
        const size_t          region_size      = m_kernel_map_region_end - m_kernel_map_region_start;
        const size_t          region_num_pages = region_size / PageSize;

        MESOSPHERE_ASSERT(this->CanContain(region_start, region_size, state));

        /* Locate the memory region. */
        const KMemoryRegion *region = KMemoryLayout::Find(phys_addr);
        R_UNLESS(region != nullptr, svc::ResultInvalidAddress());

        MESOSPHERE_ASSERT(region->Contains(GetInteger(phys_addr)));

        /* Ensure that the region is mappable. */
        const bool is_rw = perm == KMemoryPermission_UserReadWrite;
        while (true) {
            /* Check that the region exists. */
            R_UNLESS(region != nullptr, svc::ResultInvalidAddress());

            /* Check the region attributes. */
            R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram),                      svc::ResultInvalidAddress());
            R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, svc::ResultInvalidAddress());
            R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap),              svc::ResultInvalidAddress());

            /* Check if we're done. */
            if (GetInteger(last) <= region->GetLastAddress()) {
                break;
            }

            /* Advance. */
            region = region->GetNext();
        };

        /* Select an address to map at. */
        KProcessAddress addr = Null<KProcessAddress>;
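        /* Prefer the largest block size whose alignment can be satisfied within the physical range and still yields a free virtual area. */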
        for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
            const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));

            const KPhysicalAddress aligned_phys = util::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
            if (aligned_phys <= phys_addr) {
                continue;
            }

            const KPhysicalAddress last_aligned_paddr = util::AlignDown(GetInteger(last) + 1, alignment) - 1;
            if (!(last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr)) {
                continue;
            }

            addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
            if (addr != Null<KProcessAddress>) {
                break;
            }
        }
        R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());

        /* Check that we can map IO here. */
        MESOSPHERE_ASSERT(this->CanContain(addr, size, state));
        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Perform mapping operation. */
        const KPageProperties properties = { perm, state == KMemoryState_IoRegister, false, DisableMergeAttribute_DisableHead };
        R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType_Map, false));

        /* Set the output address. */
        *out = addr;

        R_SUCCEED();
    }

    Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Map the io memory. */
        KProcessAddress addr;
        R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size, KMemoryState_IoRegister, perm));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize, KMemoryState_IoRegister, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

        /* We successfully mapped the pages. */
        R_SUCCEED();
    }

    Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission svc_perm) {
        const size_t num_pages = size / PageSize;

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Validate the memory state. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_None, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform mapping operation. */
        const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm);
        const KPageProperties properties = { perm, mapping == ams::svc::MemoryMapping_IoRegister, mapping == ams::svc::MemoryMapping_Uncached, DisableMergeAttribute_DisableHead };
        R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties, OperationType_Map, false));

        /* Update the blocks. */
        const auto state = mapping == ams::svc::MemoryMapping_Memory ? KMemoryState_IoMemory : KMemoryState_IoRegister;
        m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

        /* We successfully mapped the pages. */
        R_SUCCEED();
    }

    Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping) {
        const size_t num_pages = size / PageSize;

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Validate the memory state. */
        KMemoryState old_state;
        KMemoryPermission old_perm;
        KMemoryAttribute old_attr;
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks),
                                     dst_address, size,
                                     KMemoryState_All, mapping == ams::svc::MemoryMapping_Memory ? KMemoryState_IoMemory : KMemoryState_IoRegister,
                                     KMemoryPermission_None, KMemoryPermission_None,
                                     KMemoryAttribute_All, KMemoryAttribute_Locked));

        /* Validate that the region being unmapped corresponds to the physical range described. */
        {
            /* Get the impl. */
            auto &impl = this->GetImpl();

            /* Begin traversal. */
            TraversalContext context;
            TraversalEntry   next_entry;
            MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address));

            /* Check that the physical region matches. */
            R_UNLESS(next_entry.phys_addr == phys_addr, svc::ResultInvalidMemoryRegion());

            /* Iterate. */
            for (size_t checked_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); checked_size < size; checked_size += next_entry.block_size) {
                /* Continue the traversal. */
                MESOSPHERE_ABORT_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));

                /* Check that the physical region matches. */
                R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, svc::ResultInvalidMemoryRegion());
            }
        }

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* If the region being unmapped is Memory, synchronize. */
        if (mapping == ams::svc::MemoryMapping_Memory) {
            /* Change the region to be uncached. */
            const KPageProperties properties = { old_perm, false, true, DisableMergeAttribute_None };
            MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissionsAndRefresh, false));

            /* Temporarily unlock ourselves, so that other operations can occur while we flush the region. */
            m_general_lock.Unlock();
            ON_SCOPE_EXIT { m_general_lock.Lock(); };

            /* Flush the region. */
            MESOSPHERE_R_ABORT_UNLESS(cpu::FlushDataCache(GetVoidPointer(dst_address), size));
        }

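        /* Note: the uncache-then-flush sequence above ensures no dirty cache lines for a cacheable
         * (MemoryMapping_Memory) mapping can be written back after the translation entries are
         * removed; the flush runs with the table lock dropped so other table operations can
         * proceed while the (potentially lengthy) cache maintenance completes. */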
        /* Perform the unmap. */
        const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
        MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);

        R_SUCCEED();
    }

    Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
        MESOSPHERE_ASSERT(size > 0);
        R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress());
        const size_t num_pages = size / PageSize;
        const KPhysicalAddress last = phys_addr + size - 1;

        /* Get region extents. */
        const KProcessAddress region_start     = this->GetRegionAddress(KMemoryState_Static);
        const size_t          region_size      = this->GetRegionSize(KMemoryState_Static);
        const size_t          region_num_pages = region_size / PageSize;

        /* Locate the memory region. */
        const KMemoryRegion *region = KMemoryLayout::Find(phys_addr);
        R_UNLESS(region != nullptr, svc::ResultInvalidAddress());

        MESOSPHERE_ASSERT(region->Contains(GetInteger(phys_addr)));
        R_UNLESS(GetInteger(last) <= region->GetLastAddress(), svc::ResultInvalidAddress());

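        /* Note: unlike MapIoImpl above, which rejects DRAM-derived regions, MapStatic requires the
         * target physical region to be DRAM-derived; both reject regions marked NoUserMap, and
         * both reject user-read-only regions when a read-write mapping is requested. */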
        /* Check the region attributes. */
        const bool is_rw = perm == KMemoryPermission_UserReadWrite;
        R_UNLESS( region->IsDerivedFrom(KMemoryRegionType_Dram),                      svc::ResultInvalidAddress());
        R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap),              svc::ResultInvalidAddress());
        R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, svc::ResultInvalidAddress());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Select an address to map at. */
        KProcessAddress addr = Null<KProcessAddress>;
        for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
            const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));

            const KPhysicalAddress aligned_phys = util::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
            if (aligned_phys <= phys_addr) {
                continue;
            }

            const KPhysicalAddress last_aligned_paddr = util::AlignDown(GetInteger(last) + 1, alignment) - 1;
            if (!(last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr)) {
                continue;
            }

            addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
            if (addr != Null<KProcessAddress>) {
                break;
            }
        }
        R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());

        /* Check that we can map static here. */
        MESOSPHERE_ASSERT(this->CanContain(addr, size, KMemoryState_Static));
        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform mapping operation. */
        const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
        R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState_Static, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

        /* We successfully mapped the pages. */
        R_SUCCEED();
    }

    Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
        /* Get the memory region. */
        const KMemoryRegion *region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(region_type);
        R_UNLESS(region != nullptr, svc::ResultOutOfRange());

        /* Check that the region is valid. */
        MESOSPHERE_ABORT_UNLESS(region->GetEndAddress() != 0);

        /* Map the region. */
        R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)) {
            R_CONVERT(svc::ResultInvalidAddress, svc::ResultOutOfRange())
        } R_END_TRY_CATCH;

        R_SUCCEED();
    }

    Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
        MESOSPHERE_ASSERT(util::IsAligned(alignment, PageSize) && alignment >= PageSize);

        /* Ensure this is a valid map request. */
        R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
        R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Find a random address to map at. */
        KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
        R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), alignment));
        MESOSPHERE_ASSERT(this->CanContain(addr, num_pages * PageSize, state));
        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform mapping operation. */
        if (is_pa_valid) {
            const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
            R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
        } else {
            R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
        }

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

        /* We successfully mapped the pages. */
        *out_addr = addr;
        R_SUCCEED();
    }

    Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
        /* Check that the map is in range. */
        const size_t size = num_pages * PageSize;
        R_UNLESS(this->CanContain(address, size, state), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check the memory state. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Map the pages. */
        R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

        R_SUCCEED();
    }

    Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
        /* Check that the unmap is in range. */
        const size_t size = num_pages * PageSize;
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check the memory state. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform the unmap. */
        const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
        R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);

        R_SUCCEED();
    }

    Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
        MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());

        /* Ensure this is a valid map request. */
        const size_t num_pages = pg.GetNumPages();
        R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
        R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Find a random address to map at. */
        KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, 0, this->GetNumGuardPages());
        R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
        MESOSPHERE_ASSERT(this->CanContain(addr, num_pages * PageSize, state));
        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform mapping operation. */
        const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

        /* We successfully mapped the pages. */
        *out_addr = addr;
        R_SUCCEED();
    }

    Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
        MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());

        /* Ensure this is a valid map request. */
        const size_t num_pages = pg.GetNumPages();
        const size_t size = num_pages * PageSize;
        R_UNLESS(this->CanContain(addr, size, state), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check if state allows us to map. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform mapping operation. */
        const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

        /* We successfully mapped the pages. */
        R_SUCCEED();
    }

    Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
        MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());

        /* Ensure this is a valid unmap request. */
        const size_t num_pages = pg.GetNumPages();
        const size_t size = num_pages * PageSize;
        R_UNLESS(this->CanContain(address, size, state), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check if state allows us to unmap. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));

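        /* Note: IsValidPageGroup() is what ties the caller-supplied page group to this request;
         * the unmap only proceeds if the group's physical pages match what is actually mapped
         * at the given address range. */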
        /* Check that the page group is valid. */
        R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), svc::ResultInvalidCurrentMemory());

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Perform unmapping operation. */
        const KPageProperties properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
        R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_Unmap, false));

        /* Update the blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);

        R_SUCCEED();
    }

    Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
        /* Ensure that the page group isn't null. */
        MESOSPHERE_ASSERT(out != nullptr);

        /* Make sure that the region we're mapping is valid for the table. */
        const size_t size = num_pages * PageSize;
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check if state allows us to create the group. */
        R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr));

        /* Create a new page group for the region. */
        R_TRY(this->MakePageGroup(*out, address, num_pages));

        /* Open a new reference to the pages in the group. */
        out->Open();

        R_SUCCEED();
    }

    Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) {
        /* Check that the region is in range. */
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check the memory state. */
        R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_Uncached, KMemoryAttribute_None));

        /* Get the impl. */
        auto &impl = this->GetImpl();

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry   next_entry;
        bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
        R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());

        /* Prepare tracking variables. */
        KPhysicalAddress cur_addr = next_entry.phys_addr;
        size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
        size_t tot_size = cur_size;

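        /* Note on the loop below: physically contiguous traversal entries are merged into a single
         * run (cur_addr/cur_size), so the data cache is invalidated once per contiguous physical
         * extent rather than once per page-table entry. */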
        /* Iterate. */
        while (tot_size < size) {
            /* Continue the traversal. */
            traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
            R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());

            if (next_entry.phys_addr != (cur_addr + cur_size)) {
                /* Check that the pages are linearly mapped. */
                R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());

                /* Invalidate the block. */
                if (cur_size > 0) {
                    /* NOTE: Nintendo does not check the result of invalidation. */
                    cpu::InvalidateDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
                }

                /* Advance. */
                cur_addr = next_entry.phys_addr;
                cur_size = next_entry.block_size;
            } else {
                cur_size += next_entry.block_size;
            }

            tot_size += next_entry.block_size;
        }

        /* Ensure we use the right size for the last block. */
        if (tot_size > size) {
            cur_size -= (tot_size - size);
        }

        /* Check that the last block is linearly mapped. */
        R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());

        /* Invalidate the last block. */
        if (cur_size > 0) {
            /* NOTE: Nintendo does not check the result of invalidation. */
            cpu::InvalidateDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
        }

        R_SUCCEED();
    }

    Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
        /* Lightly validate the region is in range. */
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Require that the memory either be user readable or debuggable. */
        const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
        if (!can_read) {
            const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
            R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory());
        }

        /* Get the impl. */
        auto &impl = this->GetImpl();

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry   next_entry;
        bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
        R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());

        /* Prepare tracking variables. */
        KPhysicalAddress cur_addr = next_entry.phys_addr;
        size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
        size_t tot_size = cur_size;

        auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
            /* Ensure the address is linear mapped. */
            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());

            /* Copy as much aligned data as we can. */
            if (cur_size >= sizeof(u32)) {
                const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
                const void * copy_src  = GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr));
                cpu::FlushDataCache(copy_src, copy_size);
                R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(buffer, copy_src, copy_size), svc::ResultInvalidPointer());
                buffer    = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + copy_size);
                cur_addr += copy_size;
                cur_size -= copy_size;
            }

            /* Copy remaining data. */
            if (cur_size > 0) {
                const void * copy_src = GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr));
                cpu::FlushDataCache(copy_src, cur_size);
                R_UNLESS(UserspaceAccess::CopyMemoryToUser(buffer, copy_src, cur_size), svc::ResultInvalidPointer());
            }

            R_SUCCEED();
        };

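        /* Note: PerformCopy() flushes the data cache for the source run, copies the 4-byte-aligned
         * prefix with the aligned userspace copy routine, and falls back to a byte-wise copy for
         * any unaligned tail. The loop below batches physically contiguous blocks so each copy
         * covers as large a run as possible. */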
        /* Iterate. */
        while (tot_size < size) {
            /* Continue the traversal. */
            traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
            MESOSPHERE_ASSERT(traverse_valid);

            if (next_entry.phys_addr != (cur_addr + cur_size)) {
                /* Perform copy. */
                R_TRY(PerformCopy());

                /* Advance. */
                buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + cur_size);

                cur_addr = next_entry.phys_addr;
                cur_size = next_entry.block_size;
            } else {
                cur_size += next_entry.block_size;
            }

            tot_size += next_entry.block_size;
        }

        /* Ensure we use the right size for the last block. */
        if (tot_size > size) {
            cur_size -= (tot_size - size);
        }

        /* Perform copy for the last block. */
        R_TRY(PerformCopy());

        R_SUCCEED();
    }

    Result KPageTableBase::WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
        /* Lightly validate the region is in range. */
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Require that the memory either be user writable or debuggable. */
        const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None));
        if (!can_read) {
            const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
            R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory());
        }

        /* Get the impl. */
        auto &impl = this->GetImpl();

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry   next_entry;
        bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
        R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());

        /* Prepare tracking variables. */
        KPhysicalAddress cur_addr = next_entry.phys_addr;
        size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
        size_t tot_size = cur_size;

        auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
            /* Ensure the address is linear mapped. */
            R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());

            /* Copy as much aligned data as we can. */
            if (cur_size >= sizeof(u32)) {
                const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
                R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), buffer, copy_size), svc::ResultInvalidCurrentMemory());
                cpu::StoreDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), copy_size);

                buffer    = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + copy_size);
                cur_addr += copy_size;
                cur_size -= copy_size;
            }

            /* Copy remaining data. */
            if (cur_size > 0) {
                R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), buffer, cur_size), svc::ResultInvalidCurrentMemory());
                cpu::StoreDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
            }

            R_SUCCEED();
        };

        /* Iterate. */
        while (tot_size < size) {
            /* Continue the traversal. */
            traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
            MESOSPHERE_ASSERT(traverse_valid);

            if (next_entry.phys_addr != (cur_addr + cur_size)) {
                /* Perform copy. */
                R_TRY(PerformCopy());

                /* Advance. */
                buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + cur_size);

                cur_addr = next_entry.phys_addr;
                cur_size = next_entry.block_size;
            } else {
                cur_size += next_entry.block_size;
            }

            tot_size += next_entry.block_size;
        }

        /* Ensure we use the right size for the last block. */
        if (tot_size > size) {
            cur_size -= (tot_size - size);
        }

        /* Perform copy for the last block. */
        R_TRY(PerformCopy());

        /* Invalidate the entire instruction cache, as this svc allows modifying executable pages. */
        cpu::InvalidateEntireInstructionCache();

        R_SUCCEED();
    }

    Result KPageTableBase::ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size, KMemoryState state) {
        /* Check pre-conditions. */
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Determine the mapping extents. */
        const KPhysicalAddress map_start = util::AlignDown(GetInteger(phys_addr), PageSize);
        const KPhysicalAddress map_end   = util::AlignUp(GetInteger(phys_addr) + size, PageSize);
        const size_t map_size            = map_end - map_start;

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Temporarily map the io memory. */
        KProcessAddress io_addr;
        R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, state, KMemoryPermission_UserRead));

        /* Ensure we unmap the io memory when we're done with it. */
        ON_SCOPE_EXIT {
            const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
            MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
        };

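        /* Note: the switch below selects the widest access unit that both the io address and the
         * length allow: 32-bit accesses when (addr | size) is 4-byte aligned, 16-bit when only
         * 2-byte aligned, and byte accesses otherwise, since device registers generally require
         * accesses of their exact width. */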
        /* Read the memory. */
        const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
        switch ((GetInteger(read_addr) | size) & 3) {
            case 0:
                {
                    R_UNLESS(UserspaceAccess::ReadIoMemory32Bit(buffer, GetVoidPointer(read_addr), size), svc::ResultInvalidPointer());
                }
                break;
            case 2:
                {
                    R_UNLESS(UserspaceAccess::ReadIoMemory16Bit(buffer, GetVoidPointer(read_addr), size), svc::ResultInvalidPointer());
                }
                break;
            default:
                {
                    R_UNLESS(UserspaceAccess::ReadIoMemory8Bit(buffer, GetVoidPointer(read_addr), size), svc::ResultInvalidPointer());
                }
                break;
        }

        R_SUCCEED();
    }

    Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size, KMemoryState state) {
        /* Check pre-conditions. */
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Determine the mapping extents. */
        const KPhysicalAddress map_start = util::AlignDown(GetInteger(phys_addr), PageSize);
        const KPhysicalAddress map_end   = util::AlignUp(GetInteger(phys_addr) + size, PageSize);
        const size_t map_size            = map_end - map_start;

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Temporarily map the io memory. */
        KProcessAddress io_addr;
        R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, state, KMemoryPermission_UserReadWrite));

        /* Ensure we unmap the io memory when we're done with it. */
        ON_SCOPE_EXIT {
            const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
            MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
        };

        /* Write the memory. */
        const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
        switch ((GetInteger(write_addr) | size) & 3) {
            case 0:
                {
                    R_UNLESS(UserspaceAccess::WriteIoMemory32Bit(GetVoidPointer(write_addr), buffer, size), svc::ResultInvalidPointer());
                }
                break;
            case 2:
                {
                    R_UNLESS(UserspaceAccess::WriteIoMemory16Bit(GetVoidPointer(write_addr), buffer, size), svc::ResultInvalidPointer());
                }
                break;
            default:
                {
                    R_UNLESS(UserspaceAccess::WriteIoMemory8Bit(GetVoidPointer(write_addr), buffer, size), svc::ResultInvalidPointer());
                }
                break;
        }

        R_SUCCEED();
    }

    Result KPageTableBase::ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state) {
        /* Lightly validate the range before doing anything else. */
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* We need to lock both this table, and the current process's table, so set up some aliases. */
        KPageTableBase &src_page_table = *this;
        KPageTableBase &dst_page_table = GetCurrentProcess().GetPageTable().GetBasePageTable();

        /* Acquire the table locks. */
        KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);

        /* Check that the desired range is readable io memory. */
        R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));

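        /* Note on the loop below: the copy proceeds one page at a time because the io range need
         * not be physically contiguous; each iteration resolves the page's physical address in
         * this (the debugged) table, then maps and reads it through the current process's table
         * via ReadIoMemoryImpl. */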
        /* Read the memory. */
        u8 *dst = static_cast<u8 *>(buffer);
        const KProcessAddress last_address = address + size - 1;
        while (address <= last_address) {
            /* Get the current physical address. */
            KPhysicalAddress phys_addr;
            MESOSPHERE_ABORT_UNLESS(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), address));

            /* Determine the current read size. */
            const size_t cur_size = std::min<size_t>(last_address - address + 1, util::AlignDown(GetInteger(address) + PageSize, PageSize) - GetInteger(address));

            /* Read. */
            R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));

            /* Advance. */
            address += cur_size;
            dst += cur_size;
        }

        R_SUCCEED();
    }

    Result KPageTableBase::WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size, KMemoryState state) {
        /* Lightly validate the range before doing anything else. */
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* We need to lock both this table, and the current process's table, so set up some aliases. */
        KPageTableBase &src_page_table = *this;
        KPageTableBase &dst_page_table = GetCurrentProcess().GetPageTable().GetBasePageTable();

        /* Acquire the table locks. */
        KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);

        /* Check that the desired range is writable io memory. */
        R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_All, state, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None));

        /* Write the memory. */
        const u8 *src = static_cast<const u8 *>(buffer);
        const KProcessAddress last_address = address + size - 1;
        while (address <= last_address) {
            /* Get the current physical address. */
            KPhysicalAddress phys_addr;
            MESOSPHERE_ABORT_UNLESS(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), address));

            /* Determine the current write size. */
            const size_t cur_size = std::min<size_t>(last_address - address + 1, util::AlignDown(GetInteger(address) + PageSize, PageSize) - GetInteger(address));

            /* Write. */
            R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));

            /* Advance. */
            address += cur_size;
            src += cur_size;
        }

        R_SUCCEED();
    }

    Result KPageTableBase::LockForMapDeviceAddressSpace(bool *out_is_io, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned, bool check_heap) {
        /* Lightly validate the range before doing anything else. */
        const size_t num_pages = size / PageSize;
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check the memory state. */
        const u32 test_state = (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap) | (check_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_None);
        size_t num_allocator_blocks;
        KMemoryState old_state;
        R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, std::addressof(num_allocator_blocks), address, size, test_state, test_state, perm, perm, KMemoryAttribute_IpcLocked | KMemoryAttribute_Locked, KMemoryAttribute_None, KMemoryAttribute_DeviceShared));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

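        /* Note: ShareToDevice sets the DeviceShared attribute on the affected blocks and bumps
         * their device share count; other page table operations in this file test for a clear
         * DeviceShared attribute, so the range cannot be unmapped or repermissioned until it is
         * unshared again. */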
        /* Update the memory blocks. */
        m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None);

        /* Set whether the locked memory was io. */
        *out_is_io = static_cast<ams::svc::MemoryState>(old_state & KMemoryState_Mask) == ams::svc::MemoryState_Io;

        R_SUCCEED();
    }

    Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
        /* Lightly validate the range before doing anything else. */
        const size_t num_pages = size / PageSize;
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check the memory state. */
        const u32 test_state = KMemoryState_FlagCanDeviceMap | (check_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_None);
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_allocator_blocks),
                                               address, size,
                                               test_state, test_state,
                                               KMemoryPermission_None, KMemoryPermission_None,
                                               KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* Update the memory blocks. */
        const KMemoryBlockManager::MemoryBlockLockFunction lock_func = m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
        m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None);

        R_SUCCEED();
    }

    Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
        /* Lightly validate the range before doing anything else. */
        const size_t num_pages = size / PageSize;
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check the memory state. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_allocator_blocks),
                                               address, size,
                                               KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
                                               KMemoryPermission_None, KMemoryPermission_None,
                                               KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* Update the memory blocks. */
        m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UnshareToDevice, KMemoryPermission_None);

        R_SUCCEED();
    }

    Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
        /* Lightly validate the range before doing anything else. */
        const size_t num_pages = size / PageSize;
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check memory state. */
        size_t allocator_num_blocks = 0;
        R_TRY(this->CheckMemoryStateContiguous(std::addressof(allocator_num_blocks),
                                               address, size,
                                               KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
                                               KMemoryPermission_None, KMemoryPermission_None,
                                               KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));

        /* Create an update allocator for the region. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, allocator_num_blocks);
        R_TRY(allocator_result);

        /* Update the memory blocks. */
        m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight, KMemoryPermission_None);

        R_SUCCEED();
    }

Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
|
|
|
|
/* Lock the table. */
|
|
|
|
KScopedLightLock lk(m_general_lock);
|
|
|
|
|
|
|
|
/* Get the range. */
|
2022-10-12 05:22:11 +00:00
|
|
|
const u32 test_state = (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap);
|
2021-04-08 00:07:01 +00:00
|
|
|
R_TRY(this->GetContiguousMemoryRangeWithState(out,
|
|
|
|
address, size,
|
|
|
|
test_state, test_state,
|
|
|
|
perm, perm,
|
|
|
|
KMemoryAttribute_IpcLocked | KMemoryAttribute_Locked, KMemoryAttribute_None));
|
|
|
|
|
|
|
|
/* We got the range, so open it. */
|
2023-04-30 23:50:53 +00:00
|
|
|
out->Open();
|
2021-04-08 00:07:01 +00:00
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2021-04-08 00:07:01 +00:00
|
|
|
}

    Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange *out, KProcessAddress address, size_t size) {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Get the range. */
        R_TRY(this->GetContiguousMemoryRangeWithState(out,
                                                      address, size,
                                                      KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
                                                      KMemoryPermission_None, KMemoryPermission_None,
                                                      KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));

        /* We got the range, so open it. */
        out->Open();

        R_SUCCEED();
    }
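
    /* Editorial note (not in the original source): both OpenMemoryRangeFor...DeviceAddressSpace      */
    /* helpers hand back a range that has just been Open()ed; the caller is expected to balance that  */
    /* with a Close() once the device-side work is finished. A hypothetical caller sketch, for        */
    /* illustration only (the calling context and default-constructed MemoryRange are assumptions):   */
    #if 0
        Result ExampleUnmapFromDeviceAddressSpace(KPageTableBase &pt, KProcessAddress address, size_t size) {
            /* Look up and pin the contiguous range backing the device mapping. */
            KPageTableBase::MemoryRange range;
            R_TRY(pt.OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(range), address, size));

            /* Balance the Open() from the call above, success or failure. */
            ON_SCOPE_EXIT { range.Close(); };

            /* Detach the physical range from the device's address space here (elided). */
            R_SUCCEED();
        }
    #endif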

    Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
        R_RETURN(this->LockMemoryAndOpen(nullptr, out, address, size,
                                         KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer,
                                         KMemoryPermission_All, KMemoryPermission_UserReadWrite,
                                         KMemoryAttribute_All, KMemoryAttribute_None,
                                         static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
                                         KMemoryAttribute_Locked));
    }

    Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
        R_RETURN(this->UnlockMemory(address, size,
                                    KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer,
                                    KMemoryPermission_None, KMemoryPermission_None,
                                    KMemoryAttribute_All, KMemoryAttribute_Locked,
                                    KMemoryPermission_UserReadWrite,
                                    KMemoryAttribute_Locked, nullptr));
    }

    Result KPageTableBase::LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) {
        R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size,
                                         KMemoryState_FlagCanTransfer, KMemoryState_FlagCanTransfer,
                                         KMemoryPermission_All, KMemoryPermission_UserReadWrite,
                                         KMemoryAttribute_All, KMemoryAttribute_None,
                                         perm,
                                         KMemoryAttribute_Locked));
    }

    Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
        R_RETURN(this->UnlockMemory(address, size,
                                    KMemoryState_FlagCanTransfer, KMemoryState_FlagCanTransfer,
                                    KMemoryPermission_None, KMemoryPermission_None,
                                    KMemoryAttribute_All, KMemoryAttribute_Locked,
                                    KMemoryPermission_UserReadWrite,
                                    KMemoryAttribute_Locked, std::addressof(pg)));
    }

    Result KPageTableBase::LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) {
        R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size,
                                         KMemoryState_FlagCanCodeMemory, KMemoryState_FlagCanCodeMemory,
                                         KMemoryPermission_All, KMemoryPermission_UserReadWrite,
                                         KMemoryAttribute_All, KMemoryAttribute_None,
                                         static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
                                         KMemoryAttribute_Locked));
    }

    Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
        R_RETURN(this->UnlockMemory(address, size,
                                    KMemoryState_FlagCanCodeMemory, KMemoryState_FlagCanCodeMemory,
                                    KMemoryPermission_None, KMemoryPermission_None,
                                    KMemoryAttribute_All, KMemoryAttribute_Locked,
                                    KMemoryPermission_UserReadWrite,
                                    KMemoryAttribute_Locked, std::addressof(pg)));
    }
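
    /* Editorial note (not in the original source): the lock/unlock pairs above differ only in the    */
    /* state flag they test, the permission applied while the range is locked, and whether the pinned */
    /* pages are captured into a KPageGroup. A hypothetical end-to-end use of the transfer-memory     */
    /* pair, for illustration only (the calling context is an assumption):                            */
    #if 0
        Result ExampleTransferMemoryLifetime(KPageTableBase &pt, KPageGroup &pg, KProcessAddress address, size_t size, KMemoryPermission perm) {
            /* Lock the region: the state must carry FlagCanTransfer, and the locked pages are recorded in pg. */
            R_TRY(pt.LockForTransferMemory(std::addressof(pg), address, size, perm));

            /* A KTransferMemory object would normally own the locked range while it is alive (elided). */

            /* Unlock with the same page group, restoring UserReadWrite and clearing the Locked attribute. */
            R_RETURN(pt.UnlockForTransferMemory(address, size, pg));
        }
    #endif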

    Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange *out, KProcessAddress address, size_t size) {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Get the range. */
        R_TRY(this->GetContiguousMemoryRangeWithState(out,
                                                      address, size,
                                                      KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
                                                      KMemoryPermission_UserRead, KMemoryPermission_UserRead,
                                                      KMemoryAttribute_Uncached, KMemoryAttribute_None));

        /* We got the range, so open it. */
        out->Open();

        R_SUCCEED();
    }
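
    /* Editorial note (not in the original source): a cache-operation range must be reference counted, */
    /* user-readable, and not marked Uncached, and opening it keeps the pages alive while maintenance  */
    /* runs against the physical range. A hypothetical caller sketch, for illustration only (the       */
    /* cache-maintenance helper named below does not exist in this codebase):                          */
    #if 0
        Result ExampleProcessCacheOperation(KPageTableBase &pt, KProcessAddress address, size_t size) {
            /* Open (and thereby pin) the contiguous range. */
            KPageTableBase::MemoryRange range;
            R_TRY(pt.OpenMemoryRangeForProcessCacheOperation(std::addressof(range), address, size));
            ON_SCOPE_EXIT { range.Close(); };

            /* Perform the cache operation for the pinned range (helper name and signature are hypothetical). */
            HypotheticalCacheOperation(range.GetAddress(), range.GetSize());
            R_SUCCEED();
        }
    #endif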

    Result KPageTableBase::CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
        /* Lightly validate the range before doing anything else. */
        R_UNLESS(this->Contains(src_addr, size), svc::ResultInvalidCurrentMemory());

        /* Copy the memory. */
        {
            /* Lock the table. */
            KScopedLightLock lk(m_general_lock);

            /* Check memory state. */
            R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));

            auto &impl = this->GetImpl();

            /* Begin traversal. */
            TraversalContext context;
            TraversalEntry next_entry;
            bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
            MESOSPHERE_ABORT_UNLESS(traverse_valid);

            /* Prepare tracking variables. */
            KPhysicalAddress cur_addr = next_entry.phys_addr;
            size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
            size_t tot_size = cur_size;

            auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
                /* Ensure the address is linear mapped. */
                R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());

                /* Copy as much aligned data as we can. */
                if (cur_size >= sizeof(u32)) {
                    const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
                    R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), copy_size), svc::ResultInvalidCurrentMemory());
                    dst_addr += copy_size;
                    cur_addr += copy_size;
                    cur_size -= copy_size;
                }

                /* Copy remaining data. */
                if (cur_size > 0) {
                    R_UNLESS(UserspaceAccess::CopyMemoryToUser(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size), svc::ResultInvalidCurrentMemory());
                }

                R_SUCCEED();
            };

            /* Iterate. */
            while (tot_size < size) {
                /* Continue the traversal. */
                traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
                MESOSPHERE_ASSERT(traverse_valid);

                if (next_entry.phys_addr != (cur_addr + cur_size)) {
                    /* Perform copy. */
                    R_TRY(PerformCopy());

                    /* Advance. */
                    dst_addr += cur_size;

                    cur_addr = next_entry.phys_addr;
                    cur_size = next_entry.block_size;
                } else {
                    cur_size += next_entry.block_size;
                }

                tot_size += next_entry.block_size;
            }

            /* Ensure we use the right size for the last block. */
            if (tot_size > size) {
                cur_size -= (tot_size - size);
            }

            /* Perform copy for the last block. */
            R_TRY(PerformCopy());
        }

        R_SUCCEED();
    }
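
    /* Editorial note (not in the original source): the initial cur_size in the copy routines is the  */
    /* distance from the physical address to the end of the naturally-aligned block containing it:    */
    /*     cur_size = block_size - (phys_addr & (block_size - 1))                                      */
    /* A worked example with made-up numbers (2 MiB block, arbitrary physical address), verified at    */
    /* compile time:                                                                                   */
    static_assert((0x200000u - (0x80143000u & (0x200000u - 1))) == 0xBD000u, "first-block size for an address 0x143000 bytes into a 2 MiB block");
    static_assert((0x1000u   - (0x80143000u & (0x1000u   - 1))) == 0x1000u,  "a block-aligned address may use the whole block");
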
Result KPageTableBase::CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
2020-07-11 03:09:06 +00:00
|
|
|
/* Lightly validate the range before doing anything else. */
|
|
|
|
R_UNLESS(this->Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy the memory. */
|
|
|
|
{
|
|
|
|
/* Lock the table. */
|
2020-12-18 01:18:47 +00:00
|
|
|
KScopedLightLock lk(m_general_lock);
|
2020-07-11 03:09:06 +00:00
|
|
|
|
|
|
|
/* Check memory state. */
|
|
|
|
R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));
|
|
|
|
|
|
|
|
auto &impl = this->GetImpl();
|
|
|
|
|
|
|
|
/* Begin traversal. */
|
|
|
|
TraversalContext context;
|
|
|
|
TraversalEntry next_entry;
|
|
|
|
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
|
|
|
|
MESOSPHERE_ABORT_UNLESS(traverse_valid);
|
|
|
|
|
|
|
|
/* Prepare tracking variables. */
|
|
|
|
KPhysicalAddress cur_addr = next_entry.phys_addr;
|
|
|
|
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
|
|
|
|
size_t tot_size = cur_size;
|
|
|
|
|
2021-11-07 01:19:34 +00:00
|
|
|
auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
|
2020-07-11 03:09:06 +00:00
|
|
|
/* Ensure the address is linear mapped. */
|
|
|
|
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy the data. */
|
|
|
|
std::memcpy(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-11 03:09:06 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Iterate. */
|
|
|
|
while (tot_size < size) {
|
|
|
|
/* Continue the traversal. */
|
|
|
|
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
|
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
if (next_entry.phys_addr != (cur_addr + cur_size)) {
|
|
|
|
/* Perform copy. */
|
|
|
|
R_TRY(PerformCopy());
|
|
|
|
|
|
|
|
/* Advance. */
|
|
|
|
dst_addr += cur_size;
|
|
|
|
|
|
|
|
cur_addr = next_entry.phys_addr;
|
|
|
|
cur_size = next_entry.block_size;
|
|
|
|
} else {
|
|
|
|
cur_size += next_entry.block_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
tot_size += next_entry.block_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Ensure we use the right size for the last block. */
|
|
|
|
if (tot_size > size) {
|
|
|
|
cur_size -= (tot_size - size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Perform copy for the last block. */
|
|
|
|
R_TRY(PerformCopy());
|
|
|
|
}
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-10 08:15:14 +00:00
|
|
|
}
Result KPageTableBase::CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
|
2020-07-11 03:09:06 +00:00
|
|
|
/* Lightly validate the range before doing anything else. */
|
|
|
|
R_UNLESS(this->Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy the memory. */
|
|
|
|
{
|
|
|
|
/* Lock the table. */
|
2020-12-18 01:18:47 +00:00
|
|
|
KScopedLightLock lk(m_general_lock);
|
2020-07-11 03:09:06 +00:00
|
|
|
|
|
|
|
/* Check memory state. */
|
|
|
|
R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr));
|
|
|
|
|
|
|
|
auto &impl = this->GetImpl();
|
|
|
|
|
|
|
|
/* Begin traversal. */
|
|
|
|
TraversalContext context;
|
|
|
|
TraversalEntry next_entry;
|
|
|
|
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
|
|
|
|
MESOSPHERE_ABORT_UNLESS(traverse_valid);
|
|
|
|
|
|
|
|
/* Prepare tracking variables. */
|
|
|
|
KPhysicalAddress cur_addr = next_entry.phys_addr;
|
|
|
|
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
|
|
|
|
size_t tot_size = cur_size;
|
|
|
|
|
2021-11-07 01:19:34 +00:00
|
|
|
auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
|
2020-07-11 03:09:06 +00:00
|
|
|
/* Ensure the address is linear mapped. */
|
|
|
|
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy as much aligned data as we can. */
|
|
|
|
if (cur_size >= sizeof(u32)) {
|
|
|
|
const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
|
|
|
|
R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), copy_size), svc::ResultInvalidCurrentMemory());
|
|
|
|
src_addr += copy_size;
|
|
|
|
cur_addr += copy_size;
|
|
|
|
cur_size -= copy_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Copy remaining data. */
|
|
|
|
if (cur_size > 0) {
|
|
|
|
R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size), svc::ResultInvalidCurrentMemory());
|
|
|
|
}
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-11 03:09:06 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Iterate. */
|
|
|
|
while (tot_size < size) {
|
|
|
|
/* Continue the traversal. */
|
|
|
|
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
|
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
if (next_entry.phys_addr != (cur_addr + cur_size)) {
|
|
|
|
/* Perform copy. */
|
|
|
|
R_TRY(PerformCopy());
|
|
|
|
|
|
|
|
/* Advance. */
|
|
|
|
src_addr += cur_size;
|
|
|
|
|
|
|
|
cur_addr = next_entry.phys_addr;
|
|
|
|
cur_size = next_entry.block_size;
|
|
|
|
} else {
|
|
|
|
cur_size += next_entry.block_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
tot_size += next_entry.block_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Ensure we use the right size for the last block. */
|
|
|
|
if (tot_size > size) {
|
|
|
|
cur_size -= (tot_size - size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Perform copy for the last block. */
|
|
|
|
R_TRY(PerformCopy());
|
|
|
|
}
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-10 08:15:14 +00:00
|
|
|
}
Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
|
2020-07-11 03:09:06 +00:00
|
|
|
/* Lightly validate the range before doing anything else. */
|
|
|
|
R_UNLESS(this->Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy the memory. */
|
|
|
|
{
|
|
|
|
/* Lock the table. */
|
2020-12-18 01:18:47 +00:00
|
|
|
KScopedLightLock lk(m_general_lock);
|
2020-07-11 03:09:06 +00:00
|
|
|
|
|
|
|
/* Check memory state. */
|
|
|
|
R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr));
|
|
|
|
|
|
|
|
auto &impl = this->GetImpl();
|
|
|
|
|
|
|
|
/* Begin traversal. */
|
|
|
|
TraversalContext context;
|
|
|
|
TraversalEntry next_entry;
|
|
|
|
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
|
|
|
|
MESOSPHERE_ABORT_UNLESS(traverse_valid);
|
|
|
|
|
|
|
|
/* Prepare tracking variables. */
|
|
|
|
KPhysicalAddress cur_addr = next_entry.phys_addr;
|
|
|
|
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
|
|
|
|
size_t tot_size = cur_size;
|
|
|
|
|
2021-11-07 01:19:34 +00:00
|
|
|
auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
|
2020-07-11 03:09:06 +00:00
|
|
|
/* Ensure the address is linear mapped. */
|
|
|
|
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy the data. */
|
|
|
|
std::memcpy(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size);
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-11 03:09:06 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Iterate. */
|
|
|
|
while (tot_size < size) {
|
|
|
|
/* Continue the traversal. */
|
|
|
|
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
|
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
if (next_entry.phys_addr != (cur_addr + cur_size)) {
|
|
|
|
/* Perform copy. */
|
|
|
|
R_TRY(PerformCopy());
|
|
|
|
|
|
|
|
/* Advance. */
|
|
|
|
src_addr += cur_size;
|
|
|
|
|
|
|
|
cur_addr = next_entry.phys_addr;
|
|
|
|
cur_size = next_entry.block_size;
|
|
|
|
} else {
|
|
|
|
cur_size += next_entry.block_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
tot_size += next_entry.block_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Ensure we use the right size for the last block. */
|
|
|
|
if (tot_size > size) {
|
|
|
|
cur_size -= (tot_size - size);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Perform copy for the last block. */
|
|
|
|
R_TRY(PerformCopy());
|
|
|
|
}
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-10 08:15:14 +00:00
|
|
|
}
Result KPageTableBase::CopyMemoryFromHeapToHeap(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
|
|
|
/* For convenience, alias this. */
|
|
|
|
KPageTableBase &src_page_table = *this;
|
|
|
|
|
|
|
|
/* Lightly validate the ranges before doing anything else. */
|
|
|
|
R_UNLESS(src_page_table.Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
R_UNLESS(dst_page_table.Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy the memory. */
|
|
|
|
{
|
2021-04-07 16:57:32 +00:00
|
|
|
/* Acquire the table locks. */
|
|
|
|
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
|
2020-07-11 04:37:56 +00:00
|
|
|
|
|
|
|
/* Check memory state. */
|
|
|
|
R_TRY(src_page_table.CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));
|
|
|
|
R_TRY(dst_page_table.CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr));
|
|
|
|
|
|
|
|
/* Get implementations. */
|
|
|
|
auto &src_impl = src_page_table.GetImpl();
|
|
|
|
auto &dst_impl = dst_page_table.GetImpl();
|
|
|
|
|
|
|
|
/* Prepare for traversal. */
|
|
|
|
TraversalContext src_context;
|
|
|
|
TraversalContext dst_context;
|
|
|
|
TraversalEntry src_next_entry;
|
|
|
|
TraversalEntry dst_next_entry;
|
|
|
|
bool traverse_valid;
|
|
|
|
|
|
|
|
/* Begin traversal. */
|
|
|
|
traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), std::addressof(src_context), src_addr);
|
|
|
|
MESOSPHERE_ABORT_UNLESS(traverse_valid);
|
|
|
|
traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), std::addressof(dst_context), dst_addr);
|
|
|
|
MESOSPHERE_ABORT_UNLESS(traverse_valid);
|
|
|
|
|
|
|
|
/* Prepare tracking variables. */
|
|
|
|
KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
|
|
|
|
KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
|
|
|
|
size_t cur_src_size = src_next_entry.block_size - (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
|
|
|
|
size_t cur_dst_size = dst_next_entry.block_size - (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
|
|
|
|
|
|
|
|
/* Adjust the initial block sizes. */
|
|
|
|
src_next_entry.block_size = cur_src_size;
|
|
|
|
dst_next_entry.block_size = cur_dst_size;
|
|
|
|
|
|
|
|
/* Before we get any crazier, succeed if there's nothing to do. */
|
|
|
|
R_SUCCEED_IF(size == 0);
|
|
|
|
|
|
|
|
/* We're going to manage dual traversal via an offset against the total size. */
|
|
|
|
KPhysicalAddress cur_src_addr = cur_src_block_addr;
|
|
|
|
KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
|
|
|
|
size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
|
|
|
|
|
|
|
|
/* Iterate. */
|
|
|
|
size_t ofs = 0;
|
|
|
|
while (ofs < size) {
|
|
|
|
/* Determine how much we can copy this iteration. */
|
|
|
|
const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
|
|
|
|
|
|
|
|
/* If we need to advance the traversals, do so. */
|
|
|
|
bool updated_src = false, updated_dst = false, skip_copy = false;
|
|
|
|
if (ofs + cur_copy_size != size) {
|
|
|
|
if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
|
|
|
|
/* Continue the src traversal. */
|
|
|
|
traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), std::addressof(src_context));
|
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
/* Update source. */
|
|
|
|
updated_src = cur_src_addr + cur_min_size != GetInteger(src_next_entry.phys_addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cur_dst_addr + cur_min_size == dst_next_entry.phys_addr + dst_next_entry.block_size) {
|
|
|
|
/* Continue the dst traversal. */
|
|
|
|
traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), std::addressof(dst_context));
|
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
/* Update destination. */
|
|
|
|
updated_dst = cur_dst_addr + cur_min_size != GetInteger(dst_next_entry.phys_addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If we didn't update either of source/destination, skip the copy this iteration. */
|
|
|
|
if (!updated_src && !updated_dst) {
|
|
|
|
skip_copy = true;
|
|
|
|
|
|
|
|
/* Update the source block address. */
|
|
|
|
cur_src_block_addr = src_next_entry.phys_addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Do the copy, unless we're skipping it. */
|
|
|
|
if (!skip_copy) {
|
|
|
|
/* We need both ends of the copy to be heap blocks. */
|
|
|
|
R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), svc::ResultInvalidCurrentMemory());
|
|
|
|
R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy the data. */
|
|
|
|
std::memcpy(GetVoidPointer(GetHeapVirtualAddress(cur_dst_addr)), GetVoidPointer(GetHeapVirtualAddress(cur_src_addr)), cur_copy_size);
|
|
|
|
|
|
|
|
/* Update. */
|
|
|
|
cur_src_block_addr = src_next_entry.phys_addr;
|
|
|
|
cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
|
|
|
|
cur_dst_block_addr = dst_next_entry.phys_addr;
|
|
|
|
cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
|
|
|
|
|
|
|
|
/* Advance offset. */
|
|
|
|
ofs += cur_copy_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update min size. */
|
|
|
|
cur_src_size = src_next_entry.block_size;
|
|
|
|
cur_dst_size = dst_next_entry.block_size;
|
|
|
|
cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size, cur_dst_block_addr - cur_dst_addr + cur_dst_size);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-10 08:15:14 +00:00
|
|
|
}
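
    /* Editorial note (not in the original source): the heap-to-heap copy advances the source and     */
    /* destination traversals independently; each iteration copies at most the smaller of the two     */
    /* remaining contiguous physical runs, clamped to the bytes still outstanding. A worked example   */
    /* with made-up run lengths, verified at compile time:                                             */
    static_assert(std::min<size_t>(std::min<size_t>(0x3000, 0x1800), 0x10000 - 0x0000) == 0x1800, "the copy is limited by the shorter (destination) run");
    static_assert(std::min<size_t>(std::min<size_t>(0x3000, 0x1800), 0x10000 - 0xF000) == 0x1000, "and never exceeds the bytes left to copy");
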
Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
|
|
|
/* For convenience, alias this. */
|
|
|
|
KPageTableBase &src_page_table = *this;
|
|
|
|
|
|
|
|
/* Lightly validate the ranges before doing anything else. */
|
|
|
|
R_UNLESS(src_page_table.Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
R_UNLESS(dst_page_table.Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy the memory. */
|
|
|
|
{
|
2021-04-07 16:57:32 +00:00
|
|
|
/* Acquire the table locks. */
|
|
|
|
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
|
2020-07-11 04:37:56 +00:00
|
|
|
|
2020-08-17 21:20:24 +00:00
|
|
|
/* Check memory state for source. */
|
2020-07-11 04:37:56 +00:00
|
|
|
R_TRY(src_page_table.CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));
|
|
|
|
|
2020-08-17 21:20:24 +00:00
|
|
|
/* Destination state is intentionally unchecked. */
|
|
|
|
MESOSPHERE_UNUSED(dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr);
|
|
|
|
|
2020-07-11 04:37:56 +00:00
|
|
|
/* Get implementations. */
|
|
|
|
auto &src_impl = src_page_table.GetImpl();
|
|
|
|
auto &dst_impl = dst_page_table.GetImpl();
|
|
|
|
|
|
|
|
/* Prepare for traversal. */
|
|
|
|
TraversalContext src_context;
|
|
|
|
TraversalContext dst_context;
|
|
|
|
TraversalEntry src_next_entry;
|
|
|
|
TraversalEntry dst_next_entry;
|
|
|
|
bool traverse_valid;
|
|
|
|
|
|
|
|
/* Begin traversal. */
|
|
|
|
traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), std::addressof(src_context), src_addr);
|
|
|
|
MESOSPHERE_ABORT_UNLESS(traverse_valid);
|
|
|
|
traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), std::addressof(dst_context), dst_addr);
|
|
|
|
MESOSPHERE_ABORT_UNLESS(traverse_valid);
|
|
|
|
|
|
|
|
/* Prepare tracking variables. */
|
|
|
|
KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
|
|
|
|
KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
|
|
|
|
size_t cur_src_size = src_next_entry.block_size - (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
|
|
|
|
size_t cur_dst_size = dst_next_entry.block_size - (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
|
|
|
|
|
|
|
|
/* Adjust the initial block sizes. */
|
|
|
|
src_next_entry.block_size = cur_src_size;
|
|
|
|
dst_next_entry.block_size = cur_dst_size;
|
|
|
|
|
|
|
|
/* Before we get any crazier, succeed if there's nothing to do. */
|
|
|
|
R_SUCCEED_IF(size == 0);
|
|
|
|
|
|
|
|
/* We're going to manage dual traversal via an offset against the total size. */
|
|
|
|
KPhysicalAddress cur_src_addr = cur_src_block_addr;
|
|
|
|
KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
|
|
|
|
size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
|
|
|
|
|
|
|
|
/* Iterate. */
|
|
|
|
size_t ofs = 0;
|
|
|
|
while (ofs < size) {
|
|
|
|
/* Determine how much we can copy this iteration. */
|
|
|
|
const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
|
|
|
|
|
|
|
|
/* If we need to advance the traversals, do so. */
|
|
|
|
bool updated_src = false, updated_dst = false, skip_copy = false;
|
|
|
|
if (ofs + cur_copy_size != size) {
|
|
|
|
if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
|
|
|
|
/* Continue the src traversal. */
|
|
|
|
traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), std::addressof(src_context));
|
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
/* Update source. */
|
|
|
|
updated_src = cur_src_addr + cur_min_size != GetInteger(src_next_entry.phys_addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cur_dst_addr + cur_min_size == dst_next_entry.phys_addr + dst_next_entry.block_size) {
|
|
|
|
/* Continue the dst traversal. */
|
|
|
|
traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), std::addressof(dst_context));
|
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
/* Update destination. */
|
|
|
|
updated_dst = cur_dst_addr + cur_min_size != GetInteger(dst_next_entry.phys_addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If we didn't update either of source/destination, skip the copy this iteration. */
|
|
|
|
if (!updated_src && !updated_dst) {
|
|
|
|
skip_copy = true;
|
|
|
|
|
|
|
|
/* Update the source block address. */
|
|
|
|
cur_src_block_addr = src_next_entry.phys_addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Do the copy, unless we're skipping it. */
|
|
|
|
if (!skip_copy) {
|
|
|
|
/* We need both ends of the copy to be heap blocks. */
|
|
|
|
R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), svc::ResultInvalidCurrentMemory());
|
|
|
|
R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Copy the data. */
|
|
|
|
std::memcpy(GetVoidPointer(GetHeapVirtualAddress(cur_dst_addr)), GetVoidPointer(GetHeapVirtualAddress(cur_src_addr)), cur_copy_size);
|
|
|
|
|
|
|
|
/* Update. */
|
|
|
|
cur_src_block_addr = src_next_entry.phys_addr;
|
|
|
|
cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
|
|
|
|
cur_dst_block_addr = dst_next_entry.phys_addr;
|
|
|
|
cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
|
|
|
|
|
|
|
|
/* Advance offset. */
|
|
|
|
ofs += cur_copy_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update min size. */
|
|
|
|
cur_src_size = src_next_entry.block_size;
|
|
|
|
cur_dst_size = dst_next_entry.block_size;
|
|
|
|
cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size, cur_dst_block_addr - cur_dst_addr + cur_dst_size);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-10 08:15:14 +00:00
|
|
|
}

#pragma GCC push_options
#pragma GCC optimize ("-O3")

Result KPageTableBase::SetupForIpcClient(PageLinkedList *page_list, size_t *out_blocks_needed, KProcessAddress address, size_t size, KMemoryPermission test_perm, KMemoryState dst_state) {
|
2020-07-12 22:42:47 +00:00
|
|
|
/* Validate pre-conditions. */
|
|
|
|
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
|
|
|
MESOSPHERE_ASSERT(test_perm == KMemoryPermission_UserReadWrite || test_perm == KMemoryPermission_UserRead);
|
|
|
|
|
|
|
|
/* Check that the address is in range. */
|
|
|
|
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Get the source permission. */
|
|
|
|
const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped : KMemoryPermission_UserRead);
|
|
|
|
|
|
|
|
/* Get aligned extents. */
|
|
|
|
const KProcessAddress aligned_src_start = util::AlignDown(GetInteger(address), PageSize);
|
|
|
|
const KProcessAddress aligned_src_end = util::AlignUp(GetInteger(address) + size, PageSize);
|
|
|
|
const KProcessAddress mapping_src_start = util::AlignUp(GetInteger(address), PageSize);
|
|
|
|
const KProcessAddress mapping_src_end = util::AlignDown(GetInteger(address) + size, PageSize);
|
|
|
|
|
|
|
|
const auto aligned_src_last = GetInteger(aligned_src_end) - 1;
|
|
|
|
const auto mapping_src_last = GetInteger(mapping_src_end) - 1;
|
|
|
|
|
|
|
|
/* Get the test state and attribute mask. */
|
|
|
|
u32 test_state;
|
|
|
|
u32 test_attr_mask;
|
|
|
|
switch (dst_state) {
|
|
|
|
case KMemoryState_Ipc:
|
|
|
|
test_state = KMemoryState_FlagCanUseIpc;
|
2020-12-01 12:33:46 +00:00
|
|
|
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked;
|
2020-07-12 22:42:47 +00:00
|
|
|
break;
|
|
|
|
case KMemoryState_NonSecureIpc:
|
|
|
|
test_state = KMemoryState_FlagCanUseNonSecureIpc;
|
2020-12-01 12:33:46 +00:00
|
|
|
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
|
2020-07-12 22:42:47 +00:00
|
|
|
break;
|
|
|
|
case KMemoryState_NonDeviceIpc:
|
|
|
|
test_state = KMemoryState_FlagCanUseNonDeviceIpc;
|
2020-12-01 12:33:46 +00:00
|
|
|
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
|
2020-07-12 22:42:47 +00:00
|
|
|
break;
|
|
|
|
default:
|
2022-02-14 22:45:32 +00:00
|
|
|
R_THROW(svc::ResultInvalidCombination());
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Ensure that on failure, we roll back appropriately. */
|
|
|
|
size_t mapped_size = 0;
|
2022-02-14 22:45:32 +00:00
|
|
|
ON_RESULT_FAILURE {
|
2020-07-12 22:42:47 +00:00
|
|
|
if (mapped_size > 0) {
|
2020-12-01 11:33:46 +00:00
|
|
|
this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, src_perm);
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-12-01 12:24:43 +00:00
|
|
|
size_t blocks_needed = 0;
|
|
|
|
|
2020-07-12 22:42:47 +00:00
|
|
|
/* Iterate, mapping as needed. */
|
2020-12-18 01:18:47 +00:00
|
|
|
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
|
2020-07-12 22:42:47 +00:00
|
|
|
while (true) {
|
|
|
|
const KMemoryInfo info = it->GetMemoryInfo();
|
|
|
|
|
|
|
|
/* Validate the current block. */
|
|
|
|
R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, test_attr_mask, KMemoryAttribute_None));
|
|
|
|
|
|
|
|
if (mapping_src_start < mapping_src_end && GetInteger(mapping_src_start) < info.GetEndAddress() && info.GetAddress() < GetInteger(mapping_src_end)) {
|
|
|
|
const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) ? info.GetAddress() : GetInteger(mapping_src_start);
|
2020-12-01 12:24:43 +00:00
|
|
|
const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() : GetInteger(mapping_src_end);
|
2020-07-12 22:42:47 +00:00
|
|
|
const size_t cur_size = cur_end - cur_start;
|
|
|
|
|
2020-12-01 12:24:43 +00:00
|
|
|
if (info.GetAddress() < GetInteger(mapping_src_start)) {
|
|
|
|
++blocks_needed;
|
|
|
|
}
|
|
|
|
if (mapping_src_last < info.GetLastAddress()) {
|
|
|
|
++blocks_needed;
|
|
|
|
}
|
|
|
|
|
2020-07-12 22:42:47 +00:00
|
|
|
/* Set the permissions on the block, if we need to. */
|
|
|
|
if ((info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != src_perm) {
|
2020-12-01 11:33:46 +00:00
|
|
|
const DisableMergeAttribute head_body_attr = (GetInteger(mapping_src_start) >= info.GetAddress()) ? DisableMergeAttribute_DisableHeadAndBody : DisableMergeAttribute_None;
|
|
|
|
const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end)) ? DisableMergeAttribute_DisableTail : DisableMergeAttribute_None;
|
|
|
|
const KPageProperties properties = { src_perm, false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
|
2020-07-12 22:42:47 +00:00
|
|
|
R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Note that we mapped this part. */
|
|
|
|
mapped_size += cur_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the block is at the end, we're done. */
|
|
|
|
if (aligned_src_last <= info.GetLastAddress()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Advance. */
|
|
|
|
++it;
|
2020-12-18 01:18:47 +00:00
|
|
|
MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end());
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
|
2020-12-01 12:24:43 +00:00
|
|
|
if (out_blocks_needed != nullptr) {
|
|
|
|
MESOSPHERE_ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
|
|
|
|
*out_blocks_needed = blocks_needed;
|
|
|
|
}
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-12 22:42:47 +00:00
|
|
|
}
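
    /* Editorial note (not in the original source): four extents drive the IPC mapping logic above.   */
    /* aligned_src_start/aligned_src_end round the client buffer outward to whole pages, while        */
    /* mapping_src_start/mapping_src_end round it inward to the pages that can be aliased directly;   */
    /* anything between the two on either side becomes a partial page. Worked example for a buffer at */
    /* 0x10001234 of size 0x3000 with a 0x1000 page size, verified at compile time:                   */
    static_assert((0x10001234u & ~0xFFFu) == 0x10001000u,                     "aligned_src_start = AlignDown(addr, PageSize)");
    static_assert(((0x10001234u + 0x3000u + 0xFFFu) & ~0xFFFu) == 0x10005000u, "aligned_src_end   = AlignUp(addr + size, PageSize)");
    static_assert(((0x10001234u + 0xFFFu) & ~0xFFFu) == 0x10002000u,          "mapping_src_start = AlignUp(addr, PageSize); a head partial page precedes it");
    static_assert(((0x10001234u + 0x3000u) & ~0xFFFu) == 0x10004000u,         "mapping_src_end   = AlignDown(addr + size, PageSize); a tail partial page follows it");
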
Result KPageTableBase::SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send) {
|
|
|
|
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
|
2020-07-29 01:10:23 +00:00
|
|
|
MESOSPHERE_ASSERT(src_page_table.IsLockedByCurrentThread());
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Check that we can theoretically map. */
|
2020-12-18 01:18:47 +00:00
|
|
|
const KProcessAddress region_start = m_alias_region_start;
|
|
|
|
const size_t region_size = m_alias_region_end - m_alias_region_start;
|
2020-07-12 22:42:47 +00:00
|
|
|
R_UNLESS(size < region_size, svc::ResultOutOfAddressSpace());
|
|
|
|
|
|
|
|
/* Get aligned source extents. */
|
|
|
|
const KProcessAddress src_start = src_addr;
|
|
|
|
const KProcessAddress src_end = src_addr + size;
|
|
|
|
const KProcessAddress aligned_src_start = util::AlignDown(GetInteger(src_start), PageSize);
|
|
|
|
const KProcessAddress aligned_src_end = util::AlignUp(GetInteger(src_start) + size, PageSize);
|
|
|
|
const KProcessAddress mapping_src_start = util::AlignUp(GetInteger(src_start), PageSize);
|
|
|
|
const KProcessAddress mapping_src_end = util::AlignDown(GetInteger(src_start) + size, PageSize);
|
|
|
|
const size_t aligned_src_size = aligned_src_end - aligned_src_start;
|
|
|
|
const size_t mapping_src_size = (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
|
|
|
|
|
|
|
|
/* Select a random address to map at. */
|
|
|
|
KProcessAddress dst_addr = Null<KProcessAddress>;
|
|
|
|
for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
|
|
|
|
const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
|
|
|
|
const size_t offset = GetInteger(aligned_src_start) & (alignment - 1);
|
|
|
|
|
|
|
|
dst_addr = this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, alignment, offset, this->GetNumGuardPages());
|
|
|
|
if (dst_addr != Null<KProcessAddress>) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
R_UNLESS(dst_addr != Null<KProcessAddress>, svc::ResultOutOfAddressSpace());
|
|
|
|
|
|
|
|
/* Check that we can perform the operation we're about to perform. */
|
|
|
|
MESOSPHERE_ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
|
|
|
|
|
|
|
|
/* Create an update allocator. */
|
2021-04-07 15:46:06 +00:00
|
|
|
Result allocator_result;
|
|
|
|
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
|
|
|
|
R_TRY(allocator_result);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* We're going to perform an update, so create a helper. */
|
|
|
|
KScopedPageTableUpdater updater(this);
|
|
|
|
|
|
|
|
/* Reserve space for any partial pages we allocate. */
|
|
|
|
const size_t unmapped_size = aligned_src_size - mapping_src_size;
|
2021-04-07 16:48:25 +00:00
|
|
|
KScopedResourceReservation memory_reservation(m_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, unmapped_size);
|
2020-07-12 22:42:47 +00:00
|
|
|
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
|
|
|
|
|
2020-12-01 14:01:44 +00:00
|
|
|
/* Ensure that we manage page references correctly. */
|
2021-09-18 07:11:10 +00:00
|
|
|
KPhysicalAddress start_partial_page = Null<KPhysicalAddress>;
|
|
|
|
KPhysicalAddress end_partial_page = Null<KPhysicalAddress>;
|
|
|
|
KProcessAddress cur_mapped_addr = dst_addr;
|
2020-07-12 22:42:47 +00:00
|
|
|
|
2020-12-01 14:01:44 +00:00
|
|
|
/* If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll free on scope exit. */
|
|
|
|
ON_SCOPE_EXIT {
|
2021-09-18 07:11:10 +00:00
|
|
|
if (start_partial_page != Null<KPhysicalAddress>) {
|
2020-07-12 22:42:47 +00:00
|
|
|
Kernel::GetMemoryManager().Close(start_partial_page, 1);
|
|
|
|
}
|
2021-09-18 07:11:10 +00:00
|
|
|
if (end_partial_page != Null<KPhysicalAddress>) {
|
2020-07-12 22:42:47 +00:00
|
|
|
Kernel::GetMemoryManager().Close(end_partial_page, 1);
|
|
|
|
}
|
2020-12-01 14:01:44 +00:00
|
|
|
};
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
ON_RESULT_FAILURE {
|
2020-07-12 22:42:47 +00:00
|
|
|
if (cur_mapped_addr != dst_addr) {
|
2020-12-01 11:33:46 +00:00
|
|
|
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
|
2020-07-12 22:42:47 +00:00
|
|
|
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_addr, (cur_mapped_addr - dst_addr) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Allocate the start page as needed. */
|
|
|
|
if (aligned_src_start < mapping_src_start) {
|
2022-03-22 22:29:55 +00:00
|
|
|
start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
|
2021-09-18 07:11:10 +00:00
|
|
|
R_UNLESS(start_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate the end page as needed. */
|
|
|
|
if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
|
2022-03-22 22:29:55 +00:00
|
|
|
end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
|
2021-09-18 07:11:10 +00:00
|
|
|
R_UNLESS(end_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the implementation. */
|
2020-07-18 05:25:28 +00:00
|
|
|
auto &src_impl = src_page_table.GetImpl();
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Get the fill value for partial pages. */
|
2020-12-18 01:18:47 +00:00
|
|
|
const auto fill_val = m_ipc_fill_value;
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Begin traversal. */
|
|
|
|
TraversalContext context;
|
|
|
|
TraversalEntry next_entry;
|
2020-07-18 05:25:28 +00:00
|
|
|
bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), aligned_src_start);
|
2020-07-12 22:42:47 +00:00
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
2021-01-08 10:13:36 +00:00
|
|
|
MESOSPHERE_UNUSED(traverse_valid);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Prepare tracking variables. */
|
|
|
|
KPhysicalAddress cur_block_addr = next_entry.phys_addr;
|
|
|
|
size_t cur_block_size = next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1));
|
|
|
|
size_t tot_block_size = cur_block_size;
|
|
|
|
|
|
|
|
/* Map the start page, if we have one. */
|
2021-09-18 07:11:10 +00:00
|
|
|
if (start_partial_page != Null<KPhysicalAddress>) {
|
2020-07-12 22:42:47 +00:00
|
|
|
/* Ensure the page holds correct data. */
|
2021-09-18 07:11:10 +00:00
|
|
|
const KVirtualAddress start_partial_virt = GetHeapVirtualAddress(start_partial_page);
|
2020-07-12 22:42:47 +00:00
|
|
|
if (send) {
|
|
|
|
const size_t partial_offset = src_start - aligned_src_start;
|
|
|
|
size_t copy_size, clear_size;
|
|
|
|
if (src_end < mapping_src_start) {
|
|
|
|
copy_size = size;
|
|
|
|
clear_size = mapping_src_start - src_end;
|
|
|
|
} else {
|
|
|
|
copy_size = mapping_src_start - src_start;
|
|
|
|
clear_size = 0;
|
|
|
|
}
|
|
|
|
|
2021-09-18 07:11:10 +00:00
|
|
|
std::memset(GetVoidPointer(start_partial_virt), fill_val, partial_offset);
|
|
|
|
std::memcpy(GetVoidPointer(start_partial_virt + partial_offset), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr) + partial_offset), copy_size);
|
2020-07-12 22:42:47 +00:00
|
|
|
if (clear_size > 0) {
|
2021-09-18 07:11:10 +00:00
|
|
|
std::memset(GetVoidPointer(start_partial_virt + partial_offset + copy_size), fill_val, clear_size);
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
} else {
|
2021-09-18 07:11:10 +00:00
|
|
|
std::memset(GetVoidPointer(start_partial_virt), fill_val, PageSize);
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Map the page. */
|
2020-12-01 11:33:46 +00:00
|
|
|
const KPageProperties start_map_properties = { test_perm, false, false, DisableMergeAttribute_DisableHead };
|
2021-09-18 07:11:10 +00:00
|
|
|
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true, start_map_properties, OperationType_Map, false));
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Update tracking extents. */
|
|
|
|
cur_mapped_addr += PageSize;
|
|
|
|
cur_block_addr += PageSize;
|
|
|
|
cur_block_size -= PageSize;
|
|
|
|
|
|
|
|
/* If the block's size was one page, we may need to continue traversal. */
|
|
|
|
if (cur_block_size == 0 && aligned_src_size > PageSize) {
|
2020-07-18 05:25:28 +00:00
|
|
|
traverse_valid = src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
|
2020-07-12 22:42:47 +00:00
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
cur_block_addr = next_entry.phys_addr;
|
|
|
|
cur_block_size = next_entry.block_size;
|
|
|
|
tot_block_size += next_entry.block_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Map the remaining pages. */
|
|
|
|
while (aligned_src_start + tot_block_size < mapping_src_end) {
|
|
|
|
/* Continue the traversal. */
|
2020-07-18 05:25:28 +00:00
|
|
|
traverse_valid = src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
|
2020-07-12 22:42:47 +00:00
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
/* Process the block. */
|
|
|
|
if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
|
|
|
|
/* Map the block we've been processing so far. */
|
2020-12-01 11:33:46 +00:00
|
|
|
const KPageProperties map_properties = { test_perm, false, false, (cur_mapped_addr == dst_addr) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
|
|
|
|
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize, cur_block_addr, true, map_properties, OperationType_Map, false));
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Update tracking extents. */
|
|
|
|
cur_mapped_addr += cur_block_size;
|
|
|
|
cur_block_addr = next_entry.phys_addr;
|
|
|
|
cur_block_size = next_entry.block_size;
|
|
|
|
} else {
|
|
|
|
cur_block_size += next_entry.block_size;
|
|
|
|
}
|
|
|
|
tot_block_size += next_entry.block_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle the last direct-mapped page. */
|
2020-07-13 01:17:29 +00:00
|
|
|
if (const KProcessAddress mapped_block_end = aligned_src_start + tot_block_size - cur_block_size; mapped_block_end < mapping_src_end) {
|
|
|
|
const size_t last_block_size = mapping_src_end - mapped_block_end;
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Map the last block. */
|
2020-12-01 11:33:46 +00:00
|
|
|
const KPageProperties map_properties = { test_perm, false, false, (cur_mapped_addr == dst_addr) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
|
|
|
|
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize, cur_block_addr, true, map_properties, OperationType_Map, false));
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Update tracking extents. */
|
|
|
|
cur_mapped_addr += last_block_size;
|
|
|
|
cur_block_addr += last_block_size;
|
2020-07-13 01:17:29 +00:00
|
|
|
if (mapped_block_end + cur_block_size < aligned_src_end && cur_block_size == last_block_size) {
|
2020-07-18 05:25:28 +00:00
|
|
|
traverse_valid = src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
|
2020-07-12 22:42:47 +00:00
|
|
|
MESOSPHERE_ASSERT(traverse_valid);
|
|
|
|
|
|
|
|
cur_block_addr = next_entry.phys_addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Map the end page, if we have one. */
|
2021-09-18 07:11:10 +00:00
|
|
|
if (end_partial_page != Null<KPhysicalAddress>) {
|
2020-07-12 22:42:47 +00:00
|
|
|
/* Ensure the page holds correct data. */
|
2021-09-18 07:11:10 +00:00
|
|
|
const KVirtualAddress end_partial_virt = GetHeapVirtualAddress(end_partial_page);
|
2020-07-12 22:42:47 +00:00
|
|
|
if (send) {
|
|
|
|
const size_t copy_size = src_end - mapping_src_end;
|
2021-09-18 07:11:10 +00:00
|
|
|
std::memcpy(GetVoidPointer(end_partial_virt), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr)), copy_size);
|
|
|
|
std::memset(GetVoidPointer(end_partial_virt + copy_size), fill_val, PageSize - copy_size);
|
2020-07-12 22:42:47 +00:00
|
|
|
} else {
|
2021-09-18 07:11:10 +00:00
|
|
|
std::memset(GetVoidPointer(end_partial_virt), fill_val, PageSize);
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Map the page. */
|
2020-12-01 11:33:46 +00:00
|
|
|
const KPageProperties map_properties = { test_perm, false, false, (cur_mapped_addr == dst_addr) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
|
2021-09-18 07:11:10 +00:00
|
|
|
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true, map_properties, OperationType_Map, false));
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Update memory blocks to reflect our changes */
|
2020-12-18 01:18:47 +00:00
|
|
|
m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, dst_state, test_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Set the output address. */
|
2020-07-13 01:53:45 +00:00
|
|
|
*out_addr = dst_addr + (src_start - aligned_src_start);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* We succeeded. */
|
|
|
|
memory_reservation.Commit();
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-12 22:42:47 +00:00
|
|
|
}
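
    /* Editorial note (not in the original source): the head and tail partial pages above are fresh   */
    /* allocations; only the caller's bytes are copied into them and every other byte is set to the   */
    /* IPC fill value, so unrelated data sharing the client's first or last page is never exposed to  */
    /* the partner. Continuing the worked example (buffer at 0x10001234, size 0x3000, send case), the */
    /* head partial page holds 0x234 bytes of fill followed by 0xDCC bytes copied from the client:    */
    static_assert(0x10001234u - 0x10001000u == 0x234u, "partial_offset = src_start - aligned_src_start");
    static_assert(0x10002000u - 0x10001234u == 0xDCCu, "copy_size = mapping_src_start - src_start");
    static_assert(0x234u + 0xDCCu == 0x1000u,          "fill plus copied bytes exactly cover the head partial page");
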
Result KPageTableBase::SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KPageTableBase &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
|
2020-07-12 22:42:47 +00:00
|
|
|
/* For convenience, alias this. */
|
|
|
|
KPageTableBase &dst_page_table = *this;
|
|
|
|
|
2021-04-07 16:57:32 +00:00
|
|
|
/* Acquire the table locks. */
|
|
|
|
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* We're going to perform an update, so create a helper. */
|
|
|
|
KScopedPageTableUpdater updater(std::addressof(src_page_table));
|
|
|
|
|
|
|
|
/* Perform client setup. */
|
2020-12-01 12:24:43 +00:00
|
|
|
size_t num_allocator_blocks;
|
|
|
|
R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), std::addressof(num_allocator_blocks), src_addr, size, test_perm, dst_state));
|
|
|
|
|
|
|
|
/* Create an update allocator. */
|
2021-04-07 15:46:06 +00:00
|
|
|
Result allocator_result;
|
|
|
|
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), src_page_table.m_memory_block_slab_manager, num_allocator_blocks);
|
|
|
|
R_TRY(allocator_result);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
2020-12-01 11:33:46 +00:00
|
|
|
/* Get the mapped extents. */
|
|
|
|
const KProcessAddress src_map_start = util::AlignUp(GetInteger(src_addr), PageSize);
|
|
|
|
const KProcessAddress src_map_end = util::AlignDown(GetInteger(src_addr) + size, PageSize);
|
|
|
|
const size_t src_map_size = src_map_end - src_map_start;
|
|
|
|
|
2020-07-12 22:42:47 +00:00
|
|
|
/* Ensure that we clean up appropriately if we fail after this. */
|
2020-12-01 11:33:46 +00:00
|
|
|
const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped : KMemoryPermission_UserRead);
|
2022-02-14 22:45:32 +00:00
|
|
|
ON_RESULT_FAILURE {
|
2020-12-01 11:33:46 +00:00
|
|
|
if (src_map_end > src_map_start) {
|
|
|
|
src_page_table.CleanupForIpcClientOnServerSetupFailure(updater.GetPageList(), src_map_start, src_map_size, src_perm);
|
|
|
|
}
|
|
|
|
};
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Perform server setup. */
|
|
|
|
R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, src_page_table, send));
|
|
|
|
|
|
|
|
/* If anything was mapped, ipc-lock the pages. */
|
|
|
|
if (src_map_start < src_map_end) {
|
2020-07-13 05:22:54 +00:00
|
|
|
/* Get the source permission. */
|
2020-12-18 01:18:47 +00:00
|
|
|
src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, (src_map_end - src_map_start) / PageSize, &KMemoryBlock::LockForIpc, src_perm);
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-10 15:49:10 +00:00
|
|
|
}
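
    /* Editorial note (not in the original source): SetupForIpc is the entry point that ties the two  */
    /* halves together: the client table reprotects and ipc-locks the source pages, and the server    */
    /* table maps them (plus any partial pages) into its alias region, with ON_RESULT_FAILURE blocks  */
    /* rolling either half back. A hypothetical send-direction call, for illustration only (the       */
    /* calling context is an assumption):                                                             */
    #if 0
        Result ExampleSendBuffer(KPageTableBase &server_pt, KPageTableBase &client_pt, KProcessAddress client_addr, size_t size, KProcessAddress *out_server_addr) {
            /* Map a read-only view of the client buffer into the server's alias region. */
            R_RETURN(server_pt.SetupForIpc(out_server_addr, size, client_addr, client_pt, KMemoryPermission_UserRead, KMemoryState_Ipc, true));
        }
    #endif
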
Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
|
2020-07-12 22:42:47 +00:00
|
|
|
/* Validate the address. */
|
|
|
|
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Lock the table. */
|
2020-12-18 01:18:47 +00:00
|
|
|
KScopedLightLock lk(m_general_lock);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Validate the memory state. */
|
2020-12-01 12:24:43 +00:00
|
|
|
size_t num_allocator_blocks;
|
|
|
|
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, dst_state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_All, KMemoryAttribute_None));
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Create an update allocator. */
|
2021-04-07 15:46:06 +00:00
|
|
|
Result allocator_result;
|
|
|
|
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
|
|
|
|
R_TRY(allocator_result);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* We're going to perform an update, so create a helper. */
|
|
|
|
KScopedPageTableUpdater updater(this);
|
|
|
|
|
|
|
|
/* Get aligned extents. */
|
|
|
|
const KProcessAddress aligned_start = util::AlignDown(GetInteger(address), PageSize);
|
|
|
|
const KProcessAddress aligned_end = util::AlignUp(GetInteger(address) + size, PageSize);
|
2020-07-13 01:17:29 +00:00
|
|
|
const size_t aligned_size = aligned_end - aligned_start;
|
2020-07-12 22:42:47 +00:00
|
|
|
const size_t aligned_num_pages = aligned_size / PageSize;
|
|
|
|
|
|
|
|
/* Unmap the pages. */
|
2020-12-01 11:33:46 +00:00
|
|
|
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
|
2020-07-13 01:17:29 +00:00
|
|
|
R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Update memory blocks. */
|
2020-12-18 01:18:47 +00:00
|
|
|
m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* Release from the resource limit as relevant. */
|
2021-04-07 16:48:25 +00:00
|
|
|
const KProcessAddress mapping_start = util::AlignUp(GetInteger(address), PageSize);
|
|
|
|
const KProcessAddress mapping_end = util::AlignDown(GetInteger(address) + size, PageSize);
|
|
|
|
const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
|
|
|
|
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_size - mapping_size);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
2022-02-14 22:45:32 +00:00
|
|
|
R_SUCCEED();
|
2020-07-10 15:49:10 +00:00
|
|
|
}
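
    /* Editorial note (not in the original source): on server-side cleanup, only the partial pages    */
    /* were charged against the resource limit at setup time, so the release above is the aligned     */
    /* size minus the directly-aliased (fully mapped) size. Continuing the worked example (a mapping  */
    /* with the same page offsets as a buffer at offset 0x234, size 0x3000): aligned size 0x4000,     */
    /* aliased size 0x2000, so 0x2000 bytes (the two partial pages) are returned to the limit:        */
    static_assert(0x10005000u - 0x10001000u == 0x4000u, "aligned_size");
    static_assert(0x10004000u - 0x10002000u == 0x2000u, "mapping_size");
    static_assert(0x4000u - 0x2000u == 0x2000u,         "released back to LimitableResource_PhysicalMemoryMax");
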
Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
|
2020-07-12 22:42:47 +00:00
|
|
|
/* Validate the address. */
|
|
|
|
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
|
|
|
|
|
|
|
|
/* Get aligned source extents. */
|
|
|
|
const KProcessAddress mapping_start = util::AlignUp(GetInteger(address), PageSize);
|
|
|
|
const KProcessAddress mapping_end = util::AlignDown(GetInteger(address) + size, PageSize);
|
|
|
|
const KProcessAddress mapping_last = mapping_end - 1;
|
|
|
|
const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
|
|
|
|
|
|
|
|
/* If nothing was mapped, we're actually done immediately. */
|
|
|
|
R_SUCCEED_IF(mapping_size == 0);
|
|
|
|
|
|
|
|
/* Get the test state and attribute mask. */
|
|
|
|
u32 test_state;
|
|
|
|
u32 test_attr_mask;
|
|
|
|
switch (dst_state) {
|
|
|
|
case KMemoryState_Ipc:
|
|
|
|
test_state = KMemoryState_FlagCanUseIpc;
|
2020-12-01 12:33:46 +00:00
|
|
|
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked;
|
2020-07-12 22:42:47 +00:00
|
|
|
break;
|
|
|
|
case KMemoryState_NonSecureIpc:
|
|
|
|
test_state = KMemoryState_FlagCanUseNonSecureIpc;
|
2020-12-01 12:33:46 +00:00
|
|
|
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
|
2020-07-12 22:42:47 +00:00
|
|
|
break;
|
|
|
|
case KMemoryState_NonDeviceIpc:
|
|
|
|
test_state = KMemoryState_FlagCanUseNonDeviceIpc;
|
2020-12-01 12:33:46 +00:00
|
|
|
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
|
2020-07-12 22:42:47 +00:00
|
|
|
break;
|
|
|
|
default:
|
2022-02-14 22:45:32 +00:00
|
|
|
R_THROW(svc::ResultInvalidCombination());
|
2020-07-12 22:42:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Lock the table. */
|
|
|
|
/* NOTE: Nintendo does this *after* creating the updater below, but this does not follow convention elsewhere in KPageTableBase. */
|
2020-12-18 01:18:47 +00:00
|
|
|
KScopedLightLock lk(m_general_lock);
|
2020-07-12 22:42:47 +00:00
|
|
|
|
|
|
|
/* We're going to perform an update, so create a helper. */
|
|
|
|
KScopedPageTableUpdater updater(this);
|
|
|
|
|
|
|
|
/* Ensure that on failure, we roll back appropriately. */
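        /* NOTE: The rollback below re-applies the permissions still recorded in the block manager (UpdateLock has not run yet), restoring any blocks already reprotected by the loop further down; the rollback uses abort-on-failure operates, since the rollback itself must not fail. */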
        size_t mapped_size = 0;
        ON_RESULT_FAILURE {
            if (mapped_size > 0) {
                /* Determine where the mapping ends. */
                const auto mapped_end = GetInteger(mapping_start) + mapped_size;
                const auto mapped_last = mapped_end - 1;

                /* Get current and next iterators. */
                KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start);
                KMemoryBlockManager::const_iterator next_it = start_it;
                ++next_it;

                /* Get the current block info. */
                KMemoryInfo cur_info = start_it->GetMemoryInfo();

                /* Create tracking variables. */
                KProcessAddress cur_address = cur_info.GetAddress();
                size_t cur_size = cur_info.GetSize();
                bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
                bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
                bool first = cur_info.GetIpcDisableMergeCount() == 1 && (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;

                while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
                    /* Check that we have a next block. */
                    MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());

                    /* Get the next info. */
                    const KMemoryInfo next_info = next_it->GetMemoryInfo();

                    /* Check if we can consolidate the next block's permission set with the current one. */
                    const bool next_perm_eq = next_info.GetPermission() == next_info.GetOriginalPermission();
                    const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
                    if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
                        /* We can consolidate the reprotection for the current and next block into a single call. */
                        cur_size += next_info.GetSize();
                    } else {
                        /* We have to operate on the current block. */
                        if ((cur_needs_set_perm || first) && !cur_perm_eq) {
                            const KPageProperties properties = { cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
                            MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
                        }

                        /* Advance. */
                        cur_address = next_info.GetAddress();
                        cur_size = next_info.GetSize();
                        first = false;
                    }

                    /* Advance. */
                    cur_info = next_info;
                    cur_perm_eq = next_perm_eq;
                    cur_needs_set_perm = next_needs_set_perm;
                    ++next_it;
                }

                /* Process the last block. */
                if ((first || cur_needs_set_perm) && !cur_perm_eq) {
                    const KPageProperties properties = { cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
                    MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
                }
            }
        };

        /* Iterate, reprotecting as needed. */
        {
            /* Get current and next iterators. */
            KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start);
            KMemoryBlockManager::const_iterator next_it = start_it;
            ++next_it;

            /* Validate the current block. */
            KMemoryInfo cur_info = start_it->GetMemoryInfo();
            MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));

            /* Create tracking variables. */
            KProcessAddress cur_address = cur_info.GetAddress();
            size_t cur_size = cur_info.GetSize();
            bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
            bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
            bool first = cur_info.GetIpcDisableMergeCount() == 1 && (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;

            while ((cur_address + cur_size - 1) < mapping_last) {
                /* Check that we have a next block. */
                MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());

                /* Get the next info. */
                const KMemoryInfo next_info = next_it->GetMemoryInfo();

                /* Validate the next block. */
                MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(next_info, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));

                /* Check if we can consolidate the next block's permission set with the current one. */
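                /* NOTE: Blocks whose reprotection requirements match are coalesced so that each ChangePermissions operation below covers the largest possible contiguous range, rather than issuing one page-table operation per memory block. */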
                const bool next_perm_eq = next_info.GetPermission() == next_info.GetOriginalPermission();
                const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
                if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
                    /* We can consolidate the reprotection for the current and next block into a single call. */
                    cur_size += next_info.GetSize();
                } else {
                    /* We have to operate on the current block. */
                    if ((cur_needs_set_perm || first) && !cur_perm_eq) {
                        const KPageProperties properties = { cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None };
                        R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
                    }

                    /* Mark that we mapped the block. */
                    mapped_size += cur_size;

                    /* Advance. */
                    cur_address = next_info.GetAddress();
                    cur_size = next_info.GetSize();
                    first = false;
                }

                /* Advance. */
                cur_info = next_info;
                cur_perm_eq = next_perm_eq;
                cur_needs_set_perm = next_needs_set_perm;
                ++next_it;
            }

            /* Process the last block. */
            const auto lock_count = cur_info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
            if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
                const DisableMergeAttribute head_body_attr = first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
                const DisableMergeAttribute tail_attr = lock_count == 1 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
                const KPageProperties properties = { cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
                R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
            }
        }

        /* Create an update allocator. */
        /* NOTE: Guaranteed zero blocks needed here. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, 0);
        R_TRY(allocator_result);

        /* Unlock the pages. */
        m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, KMemoryPermission_None);

        R_SUCCEED();
    }

    void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList *page_list, KProcessAddress address, size_t size, KMemoryPermission prot_perm) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));

        /* Get the mapped extents. */
        const KProcessAddress src_map_start = address;
        const KProcessAddress src_map_end = address + size;
        const KProcessAddress src_map_last = src_map_end - 1;

        /* This function is only invoked when there's something to do. */
        MESOSPHERE_ASSERT(src_map_end > src_map_start);

        /* Iterate over blocks, fixing permissions. */
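        /* NOTE: This runs after the server-side setup failed partway through; only blocks whose effective (or, when IPC-locked, original) permission no longer matches prot_perm are reprotected back, while blocks that were never touched are left alone. */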
        KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
        while (true) {
            const KMemoryInfo info = it->GetMemoryInfo();

            const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) ? info.GetAddress() : GetInteger(src_map_start);
            const auto cur_end = src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();

            /* If we can, fix the protections on the block. */
            if ((info.GetIpcLockCount() == 0 && (info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) ||
                (info.GetIpcLockCount() != 0 && (info.GetOriginalPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm))
            {
                /* Check if we actually need to fix the protections on the block. */
                if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) || (info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) {
                    const bool start_nc = (info.GetAddress() == GetInteger(src_map_start)) ? ((info.GetDisableMergeAttribute() & (KMemoryBlockDisableMergeAttribute_Locked | KMemoryBlockDisableMergeAttribute_IpcLeft)) == 0) : info.GetAddress() <= GetInteger(src_map_start);

                    const DisableMergeAttribute head_body_attr = start_nc ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
                    DisableMergeAttribute tail_attr;
                    if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) {
                        auto next_it = it;
                        ++next_it;

                        const auto lock_count = info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
                        tail_attr = lock_count == 0 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
                    } else {
                        tail_attr = DisableMergeAttribute_None;
                    }

                    const KPageProperties properties = { info.GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
                    MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
                }
            }

            /* If we're past the end of the region, we're done. */
            if (src_map_last <= info.GetLastAddress()) {
                break;
            }

            /* Advance. */
            ++it;
            MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end());
        }
    }

    #pragma GCC pop_options

    Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
        /* Lock the physical memory lock. */
        KScopedLightLock phys_lk(m_map_physical_memory_lock);

        /* Calculate the last address for convenience. */
        const KProcessAddress last_address = address + size - 1;

        /* Define iteration variables. */
        KProcessAddress cur_address;
        size_t mapped_size;

        /* The entire mapping process can be retried. */
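        /* NOTE: The general lock is dropped between the "already mapped" scan and the allocation below, so after reacquiring it the scan is repeated; if another thread mapped or unmapped memory in that window, the freshly allocated pages are released and the whole process restarts. */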
        while (true) {
            /* Check if the memory is already mapped. */
            {
                /* Lock the table. */
                KScopedLightLock lk(m_general_lock);

                /* Iterate over the memory. */
                cur_address = address;
                mapped_size = 0;

                auto it = m_memory_block_manager.FindIterator(cur_address);
                while (true) {
                    /* Check that the iterator is valid. */
                    MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

                    /* Get the memory info. */
                    const KMemoryInfo info = it->GetMemoryInfo();

                    /* Check if we're done. */
                    if (last_address <= info.GetLastAddress()) {
                        if (info.GetState() != KMemoryState_Free) {
                            mapped_size += (last_address + 1 - cur_address);
                        }
                        break;
                    }

                    /* Track the memory if it's mapped. */
                    if (info.GetState() != KMemoryState_Free) {
                        mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
                    }

                    /* Advance. */
                    cur_address = info.GetEndAddress();
                    ++it;
                }

                /* If the size mapped is the size requested, we've nothing to do. */
                R_SUCCEED_IF(size == mapped_size);
            }

            /* Allocate and map the memory. */
            {
                /* Reserve the memory from the process resource limit. */
                KScopedResourceReservation memory_reservation(m_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, size - mapped_size);
                R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());

                /* Allocate pages for the new memory. */
                KPageGroup pg(m_block_info_manager);
                R_TRY(Kernel::GetMemoryManager().AllocateForProcess(std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option, GetCurrentProcess().GetId(), m_heap_fill_value));

                /* If we fail in the next bit (or retry), we need to cleanup the pages. */
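                /* NOTE: The pages come back from AllocateForProcess without any references held, so the guard takes the first reference and immediately drops it, which returns them to the allocator. */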
                auto pg_guard = SCOPE_GUARD {
                    pg.OpenFirst();
                    pg.Close();
                };

                /* Map the memory. */
                {
                    /* Lock the table. */
                    KScopedLightLock lk(m_general_lock);

                    size_t num_allocator_blocks = 0;

                    /* Verify that nobody has mapped memory since we first checked. */
                    {
                        /* Iterate over the memory. */
                        size_t checked_mapped_size = 0;
                        cur_address = address;

                        auto it = m_memory_block_manager.FindIterator(cur_address);
                        while (true) {
                            /* Check that the iterator is valid. */
                            MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

                            /* Get the memory info. */
                            const KMemoryInfo info = it->GetMemoryInfo();

                            const bool is_free = info.GetState() == KMemoryState_Free;
                            if (is_free) {
                                if (info.GetAddress() < GetInteger(address)) {
                                    ++num_allocator_blocks;
                                }
                                if (last_address < info.GetLastAddress()) {
                                    ++num_allocator_blocks;
                                }
                            }

                            /* Check if we're done. */
                            if (last_address <= info.GetLastAddress()) {
                                if (!is_free) {
                                    checked_mapped_size += (last_address + 1 - cur_address);
                                }
                                break;
                            }

                            /* Track the memory if it's mapped. */
                            if (!is_free) {
                                checked_mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
                            }

                            /* Advance. */
                            cur_address = info.GetEndAddress();
                            ++it;
                        }

                        /* If the size now isn't what it was before, somebody mapped or unmapped concurrently. */
                        /* If this happened, retry. */
                        if (mapped_size != checked_mapped_size) {
                            continue;
                        }
                    }

                    /* Create an update allocator. */
                    MESOSPHERE_ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
                    Result allocator_result;
                    KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
                    R_TRY(allocator_result);

                    /* We're going to perform an update, so create a helper. */
                    KScopedPageTableUpdater updater(this);

                    /* Prepare to iterate over the memory. */
                    auto pg_it = pg.begin();
                    KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
                    size_t pg_pages = pg_it->GetNumPages();

                    /* Reset the current tracking address, and make sure we clean up on failure. */
                    pg_guard.Cancel();
                    cur_address = address;
                    ON_RESULT_FAILURE {
                        if (cur_address > address) {
                            const KProcessAddress last_unmap_address = cur_address - 1;

                            /* Iterate, unmapping the pages. */
                            cur_address = address;

                            auto it = m_memory_block_manager.FindIterator(cur_address);
                            while (true) {
                                /* Check that the iterator is valid. */
                                MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

                                /* Get the memory info. */
                                const KMemoryInfo info = it->GetMemoryInfo();

                                /* If the memory state is free, we mapped it and need to unmap it. */
                                if (info.GetState() == KMemoryState_Free) {
                                    /* Determine the range to unmap. */
                                    const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
                                    const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_unmap_address + 1 - cur_address) / PageSize;

                                    /* Unmap. */
                                    MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
                                }

                                /* Check if we're done. */
                                if (last_unmap_address <= info.GetLastAddress()) {
                                    break;
                                }

                                /* Advance. */
                                cur_address = info.GetEndAddress();
                                ++it;
                            }
                        }

                        /* Release any remaining unmapped memory. */
                        Kernel::GetMemoryManager().OpenFirst(pg_phys_addr, pg_pages);
                        Kernel::GetMemoryManager().Close(pg_phys_addr, pg_pages);
                        for (++pg_it; pg_it != pg.end(); ++pg_it) {
                            Kernel::GetMemoryManager().OpenFirst(pg_it->GetAddress(), pg_it->GetNumPages());
                            Kernel::GetMemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages());
                        }
                    };

                    auto it = m_memory_block_manager.FindIterator(cur_address);
                    while (true) {
                        /* Check that the iterator is valid. */
                        MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

                        /* Get the memory info. */
                        const KMemoryInfo info = it->GetMemoryInfo();

                        /* If it's unmapped, we need to map it. */
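                        /* NOTE: Only ranges that are still KMemoryState_Free get mapped; regions inside [address, last_address] that were already mapped are skipped, which is why only (size - mapped_size) worth of pages was allocated above. */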
                        if (info.GetState() == KMemoryState_Free) {
                            /* Determine the range to map. */
                            const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_None };
                            size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;

                            /* While we have pages to map, map them. */
                            while (map_pages > 0) {
                                /* Check if we're at the end of the physical block. */
                                if (pg_pages == 0) {
                                    /* Ensure there are more pages to map. */
                                    MESOSPHERE_ASSERT(pg_it != pg.end());

                                    /* Advance our physical block. */
                                    ++pg_it;
                                    pg_phys_addr = pg_it->GetAddress();
                                    pg_pages = pg_it->GetNumPages();
                                }

                                /* Map whatever we can. */
                                const size_t cur_pages = std::min(pg_pages, map_pages);
                                R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_pages, pg_phys_addr, true, map_properties, OperationType_MapFirst, false));

                                /* Advance. */
                                cur_address += cur_pages * PageSize;
                                map_pages -= cur_pages;

                                pg_phys_addr += cur_pages * PageSize;
                                pg_pages -= cur_pages;
                            }
                        }

                        /* Check if we're done. */
                        if (last_address <= info.GetLastAddress()) {
                            break;
                        }

                        /* Advance. */
                        cur_address = info.GetEndAddress();
                        ++it;
                    }

                    /* We succeeded, so commit the memory reservation. */
                    memory_reservation.Commit();

                    /* Increase our tracked mapped size. */
                    m_mapped_physical_memory_size += (size - mapped_size);

                    /* Update the relevant memory blocks. */
                    m_memory_block_manager.UpdateIfMatch(std::addressof(allocator), address, size / PageSize,
                                                         KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None,
                                                         KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None,
                                                         address == this->GetAliasRegionStart() ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None,
                                                         KMemoryBlockDisableMergeAttribute_None);

                    R_SUCCEED();
                }
            }
        }
    }

    Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
        /* Lock the physical memory lock. */
        KScopedLightLock phys_lk(m_map_physical_memory_lock);

        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Calculate the last address for convenience. */
        const KProcessAddress last_address = address + size - 1;

        /* Define iteration variables. */
        KProcessAddress map_start_address = Null<KProcessAddress>;
        KProcessAddress map_last_address = Null<KProcessAddress>;

        KProcessAddress cur_address;
        size_t mapped_size;
        size_t num_allocator_blocks = 0;

        /* Check if the memory is mapped. */
        {
            /* Iterate over the memory. */
            cur_address = address;
            mapped_size = 0;

            auto it = m_memory_block_manager.FindIterator(cur_address);
            while (true) {
                /* Check that the iterator is valid. */
                MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

                /* Get the memory info. */
                const KMemoryInfo info = it->GetMemoryInfo();

                /* Verify the memory's state. */
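                /* NOTE: Every block in the range must be either Normal with no attributes set (i.e. mapped by MapPhysicalMemory and untouched since) or still Free; anything else makes the range invalid to unmap. */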
                const bool is_normal = info.GetState() == KMemoryState_Normal && info.GetAttribute() == 0;
                const bool is_free = info.GetState() == KMemoryState_Free;
                R_UNLESS(is_normal || is_free, svc::ResultInvalidCurrentMemory());

                if (is_normal) {
                    R_UNLESS(info.GetAttribute() == KMemoryAttribute_None, svc::ResultInvalidCurrentMemory());

                    if (map_start_address == Null<KProcessAddress>) {
                        map_start_address = cur_address;
                    }
                    map_last_address = (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;

                    if (info.GetAddress() < GetInteger(address)) {
                        ++num_allocator_blocks;
                    }
                    if (last_address < info.GetLastAddress()) {
                        ++num_allocator_blocks;
                    }

                    mapped_size += (map_last_address + 1 - cur_address);
                }

                /* Check if we're done. */
                if (last_address <= info.GetLastAddress()) {
                    break;
                }

                /* Advance. */
                cur_address = info.GetEndAddress();
                ++it;
            }

            /* If there's nothing mapped, we've nothing to do. */
            R_SUCCEED_IF(mapped_size == 0);
        }

        /* Create an update allocator. */
        MESOSPHERE_ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Separate the mapping. */
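        /* NOTE: The Separate operation is understood to split any larger page-table mappings that straddle the boundaries of [map_start_address, map_last_address], so that the per-block unmaps below always operate on exact block extents. */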
        const KPageProperties sep_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
        R_TRY(this->Operate(updater.GetPageList(), map_start_address, (map_last_address + 1 - map_start_address) / PageSize, Null<KPhysicalAddress>, false, sep_properties, OperationType_Separate, false));

        /* Reset the current tracking address, and make sure we clean up on failure. */
        cur_address = address;

        /* Iterate over the memory, unmapping as we go. */
        auto it = m_memory_block_manager.FindIterator(cur_address);

        const auto clear_merge_attr = (it->GetState() == KMemoryState_Normal && it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address) ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None;

        while (true) {
            /* Check that the iterator is valid. */
            MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

            /* Get the memory info. */
            const KMemoryInfo info = it->GetMemoryInfo();

            /* If the memory state is normal, we need to unmap it. */
            if (info.GetState() == KMemoryState_Normal) {
                /* Determine the range to unmap. */
                const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
                const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;

                /* Unmap. */
                MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
            }

            /* Check if we're done. */
            if (last_address <= info.GetLastAddress()) {
                break;
            }

            /* Advance. */
            cur_address = info.GetEndAddress();
            ++it;
        }

        /* Release the memory resource. */
        m_mapped_physical_memory_size -= mapped_size;
        m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, mapped_size);

        /* Update memory blocks. */
        m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, clear_merge_attr);

        /* We succeeded. */
        R_SUCCEED();
    }

    Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
        /* Try to reserve the unsafe memory. */
        R_UNLESS(Kernel::GetUnsafeMemory().TryReserve(size), svc::ResultLimitReached());

        /* Ensure we release our reservation on failure. */
        ON_RESULT_FAILURE { Kernel::GetUnsafeMemory().Release(size); };

        /* Create a page group for the new memory. */
        KPageGroup pg(m_block_info_manager);

        /* Allocate the new memory. */
        const size_t num_pages = size / PageSize;
        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));

        /* Close the page group when we're done with it. */
        ON_SCOPE_EXIT { pg.Close(); };

        /* Clear the new memory. */
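        /* NOTE: The newly allocated unsafe-pool pages are filled with the heap fill value through their kernel linear mapping before they become visible to the process. */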
        for (const auto &block : pg) {
            std::memset(GetVoidPointer(GetHeapVirtualAddress(block.GetAddress())), m_heap_fill_value, block.GetSize());
        }

        /* Map the new memory. */
        {
            /* Lock the table. */
            KScopedLightLock lk(m_general_lock);

            /* Check the memory state. */
            size_t num_allocator_blocks;
            R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

            /* Create an update allocator. */
            Result allocator_result;
            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
            R_TRY(allocator_result);

            /* We're going to perform an update, so create a helper. */
            KScopedPageTableUpdater updater(this);

            /* Map the pages. */
            const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_DisableHead };
            R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, OperationType_MapGroup, false));

            /* Apply the memory block update. */
            m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);

            /* Update our mapped unsafe size. */
            m_mapped_unsafe_physical_memory += size;

            /* We succeeded. */
            R_SUCCEED();
        }
    }

    Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
        /* Lock the table. */
        KScopedLightLock lk(m_general_lock);

        /* Check whether we can unmap this much unsafe physical memory. */
        R_UNLESS(size <= m_mapped_unsafe_physical_memory, svc::ResultInvalidCurrentMemory());

        /* Check the memory state. */
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Unmap the memory. */
        const size_t num_pages = size / PageSize;
        const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
        R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));

        /* Apply the memory block update. */
        m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);

        /* Release the unsafe memory from the limit. */
        Kernel::GetUnsafeMemory().Release(size);

        /* Update our mapped unsafe size. */
        m_mapped_unsafe_physical_memory -= size;

        R_SUCCEED();
    }

    Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase &src_page_table, KProcessAddress src_address) {
        /* We need to lock both this table, and the current process's table, so set up an alias. */
        KPageTableBase &dst_page_table = *this;

        /* Acquire the table locks. */
        KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);

        /* Check that the memory is mapped in the destination process. */
        size_t num_allocator_blocks;
        R_TRY(dst_page_table.CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_SharedCode, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Check that the memory is mapped in the source process. */
        R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState_FlagCanMapProcess, KMemoryState_FlagCanMapProcess, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));

        /* Validate that the memory ranges are compatible. */
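        /* NOTE: Both page tables are walked in lockstep over physically contiguous runs; the unmap is only permitted when the destination range aliases exactly the same physical pages, in the same order and with the same run sizes, as the source range. */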
        {
            /* Define a helper type. */
            struct ContiguousRangeInfo {
                public:
                    KPageTableBase &m_pt;
                    TraversalContext m_context;
                    TraversalEntry m_entry;
                    KPhysicalAddress m_phys_addr;
                    size_t m_cur_size;
                    size_t m_remaining_size;
                public:
                    ContiguousRangeInfo(KPageTableBase &pt, KProcessAddress address, size_t size) : m_pt(pt), m_remaining_size(size) {
                        /* Begin a traversal. */
                        MESOSPHERE_ABORT_UNLESS(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry), std::addressof(m_context), address));

                        /* Setup tracking fields. */
                        m_phys_addr = m_entry.phys_addr;
                        m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1)));

                        /* Consume the whole contiguous block. */
                        this->DetermineContiguousBlockExtents();
                    }

                    void ContinueTraversal() {
                        /* Update our remaining size. */
                        m_remaining_size = m_remaining_size - m_cur_size;

                        /* Update our tracking fields. */
                        if (m_remaining_size > 0) {
                            m_phys_addr = m_entry.phys_addr;
                            m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size);

                            /* Consume the whole contiguous block. */
                            this->DetermineContiguousBlockExtents();
                        }
                    }
                private:
                    void DetermineContiguousBlockExtents() {
                        /* Continue traversing until we're not contiguous, or we have enough. */
                        while (m_cur_size < m_remaining_size) {
                            MESOSPHERE_ABORT_UNLESS(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry), std::addressof(m_context)));

                            /* If we're not contiguous, we're done. */
                            if (m_entry.phys_addr != m_phys_addr + m_cur_size) {
                                break;
                            }

                            /* Update our current size. */
                            m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size);
                        }
                    }
            };

            /* Create ranges for both tables. */
            ContiguousRangeInfo src_range(src_page_table, src_address, size);
            ContiguousRangeInfo dst_range(dst_page_table, dst_address, size);

            /* Validate the ranges. */
            while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) {
                R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, svc::ResultInvalidMemoryRegion());
                R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, svc::ResultInvalidMemoryRegion());

                src_range.ContinueTraversal();
                dst_range.ContinueTraversal();
            }
        }

        /* We no longer need to hold our lock on the source page table. */
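        /* NOTE: All remaining work only touches this (the destination) table, so the source table's lock can be dropped early; TryUnlockHalf releases just that half of the pair. */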
        lk.TryUnlockHalf(src_page_table.m_general_lock);

        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
        R_TRY(allocator_result);

        /* We're going to perform an update, so create a helper. */
        KScopedPageTableUpdater updater(this);

        /* Unmap the memory. */
        const size_t num_pages = size / PageSize;
        const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
        R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));

        /* Apply the memory block update. */
        m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);

        R_SUCCEED();
    }

}