kern: add KPageTableBase::CopyMemory Linear <-> User and Linear <-> Kernel

Michael Scire 2020-07-10 20:09:06 -07:00
parent 4a767c9082
commit c72bdec328
6 changed files with 379 additions and 15 deletions


@@ -546,6 +546,48 @@ namespace ams::kern {
return false;
}
static NOINLINE bool IsLinearMappedPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, const KMemoryRegion *hint = nullptr) {
auto &tree = GetPhysicalLinearMemoryRegionTree();
KMemoryRegionTree::const_iterator it = tree.end();
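/* Start from the hint region, if one was provided. */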
if (hint != nullptr) {
it = tree.iterator_to(*hint);
}
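/* Fall back to a tree search if the hint does not contain the address. */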
if (it == tree.end() || !it->Contains(GetInteger(address))) {
it = tree.FindContainingRegion(GetInteger(address));
}
if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionAttr_LinearMapped)) {
if (out) {
*out = std::addressof(*it);
}
return true;
}
return false;
}
static NOINLINE bool IsLinearMappedPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, size_t size, const KMemoryRegion *hint = nullptr) {
auto &tree = GetPhysicalLinearMemoryRegionTree();
KMemoryRegionTree::const_iterator it = tree.end();
if (hint != nullptr) {
it = tree.iterator_to(*hint);
}
if (it == tree.end() || !it->Contains(GetInteger(address))) {
it = tree.FindContainingRegion(GetInteger(address));
}
if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionAttr_LinearMapped)) {
const uintptr_t last_address = GetInteger(address) + size - 1;
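/* Walk forward through consecutive linear-mapped regions until the end of the range is covered. */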
do {
if (last_address <= it->GetLastAddress()) {
if (out) {
*out = std::addressof(*it);
}
return true;
}
it++;
} while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionAttr_LinearMapped));
}
return false;
}
static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, const KMemoryRegion *hint = nullptr) {
auto &tree = GetVirtualLinearMemoryRegionTree();
KMemoryRegionTree::const_iterator it = tree.end();


@@ -193,6 +193,18 @@ namespace ams::kern {
bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); }
bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
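/* Use the cached region as a lookup hint; the layout query updates the cache when it finds a match. */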
return KMemoryLayout::IsLinearMappedPhysicalAddress(std::addressof(this->cached_physical_linear_region), phys_addr, this->cached_physical_linear_region);
}
bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
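/* As above, but verify that the entire range is linear mapped. */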
return KMemoryLayout::IsLinearMappedPhysicalAddress(std::addressof(this->cached_physical_linear_region), phys_addr, size, this->cached_physical_linear_region);
}
bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
@@ -224,6 +236,8 @@ namespace ams::kern {
constexpr size_t GetNumGuardPages() const { return this->IsKernel() ? 1 : 4; }
ALWAYS_INLINE KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const;
Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
@@ -313,28 +327,28 @@ namespace ams::kern {
return this->GetHeapRegionSize() + this->mapped_physical_memory_size;
}
public:
- static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress addr) {
+ static ALWAYS_INLINE KVirtualAddress GetLinearMappedVirtualAddress(KPhysicalAddress addr) {
return KMemoryLayout::GetLinearVirtualAddress(addr);
}
- static ALWAYS_INLINE KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress addr) {
+ static ALWAYS_INLINE KPhysicalAddress GetLinearMappedPhysicalAddress(KVirtualAddress addr) {
return KMemoryLayout::GetLinearPhysicalAddress(addr);
}
static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
- return GetLinearVirtualAddress(addr);
+ return GetLinearMappedVirtualAddress(addr);
}
static ALWAYS_INLINE KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
- return GetLinearPhysicalAddress(addr);
+ return GetLinearMappedPhysicalAddress(addr);
}
static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
- return GetLinearVirtualAddress(addr);
+ return GetLinearMappedVirtualAddress(addr);
}
static ALWAYS_INLINE KPhysicalAddress GetPageTablePhysicalAddress(KVirtualAddress addr) {
- return GetLinearPhysicalAddress(addr);
+ return GetLinearMappedPhysicalAddress(addr);
}
};


@@ -20,7 +20,7 @@
namespace ams::kern {
template<typename... ArgTypes>
- ALWAYS_INLINE void UnusedImpl(ArgTypes... args) {
+ ALWAYS_INLINE void UnusedImpl(ArgTypes &&... args) {
(static_cast<void>(args), ...);
}


@@ -172,7 +172,7 @@ namespace ams::kern::arch::arm64 {
const KVirtualAddress page = this->manager->Allocate();
MESOSPHERE_ASSERT(page != Null<KVirtualAddress>);
cpu::ClearPageToZero(GetVoidPointer(page));
- this->ttbr = GetInteger(KPageTableBase::GetLinearPhysicalAddress(page)) | asid_tag;
+ this->ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag;
/* Initialize the base page table. */
MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));
@@ -619,7 +619,7 @@ namespace ams::kern::arch::arm64 {
if (num_pages < ContiguousPageSize / PageSize) {
for (const auto &block : pg) {
- const KPhysicalAddress block_phys_addr = GetLinearPhysicalAddress(block.GetAddress());
+ const KPhysicalAddress block_phys_addr = GetLinearMappedPhysicalAddress(block.GetAddress());
const size_t cur_pages = block.GetNumPages();
R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, L3BlockSize, page_list, reuse_ll));
@@ -631,7 +631,7 @@ namespace ams::kern::arch::arm64 {
AlignedMemoryBlock virt_block(GetInteger(virt_addr), num_pages, L1BlockSize);
for (const auto &block : pg) {
/* Create a block representing this physical group, synchronize its alignment to our virtual block. */
- const KPhysicalAddress block_phys_addr = GetLinearPhysicalAddress(block.GetAddress());
+ const KPhysicalAddress block_phys_addr = GetLinearMappedPhysicalAddress(block.GetAddress());
size_t cur_pages = block.GetNumPages();
AlignedMemoryBlock phys_block(GetInteger(block_phys_addr), cur_pages, virt_block.GetAlignment());


@@ -34,7 +34,7 @@ namespace ams::kern::arch::arm64 {
const u64 ttbr1 = cpu::GetTtbr1El1() & 0xFFFFFFFFFFFFul;
const u64 kernel_vaddr_start = 0xFFFFFF8000000000ul;
const u64 kernel_vaddr_end = 0xFFFFFFFFFFE00000ul;
- void *table = GetVoidPointer(KPageTableBase::GetLinearVirtualAddress(ttbr1));
+ void *table = GetVoidPointer(KPageTableBase::GetLinearMappedVirtualAddress(ttbr1));
this->page_table.InitializeForKernel(table, kernel_vaddr_start, kernel_vaddr_end);
}
}


@@ -394,6 +394,32 @@ namespace ams::kern {
return ResultSuccess();
}
Result KPageTableBase::CheckMemoryStateContiguous(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Get information about the first block. */
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
while (true) {
/* Validate against the provided masks. */
R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
/* Break once we're done. */
if (last_addr <= info.GetLastAddress()) {
break;
}
/* Advance our iterator. */
it++;
MESOSPHERE_ASSERT(it != this->memory_block_manager.cend());
info = it->GetMemoryInfo();
}
return ResultSuccess();
}
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
@@ -1293,19 +1319,301 @@ namespace ams::kern {
}
Result KPageTableBase::CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
- MESOSPHERE_UNIMPLEMENTED();
+ /* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Check memory state. */
R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
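/* The traversal may begin partway into a block, so only the bytes from cur_addr to the end of that block are contiguous. */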
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy as much aligned data as we can. */
if (cur_size >= sizeof(u32)) {
const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), copy_size), svc::ResultInvalidCurrentMemory());
dst_addr += copy_size;
cur_addr += copy_size;
cur_size -= copy_size;
}
/* Copy remaining data. */
if (cur_size > 0) {
R_UNLESS(UserspaceAccess::CopyMemoryToUser(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size), svc::ResultInvalidCurrentMemory());
}
return ResultSuccess();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
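/* If the next block is not physically contiguous with the current range, flush the pending copy before starting a new one. */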
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
dst_addr += cur_size;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
}
return ResultSuccess();
}
Result KPageTableBase::CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
- MESOSPHERE_UNIMPLEMENTED();
+ /* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Check memory state. */
R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy the data. */
std::memcpy(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
return ResultSuccess();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
dst_addr += cur_size;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
}
return ResultSuccess();
}
Result KPageTableBase::CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
- MESOSPHERE_UNIMPLEMENTED();
+ /* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Check memory state. */
R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr));
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy as much aligned data as we can. */
if (cur_size >= sizeof(u32)) {
const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), copy_size), svc::ResultInvalidCurrentMemory());
src_addr += copy_size;
cur_addr += copy_size;
cur_size -= copy_size;
}
/* Copy remaining data. */
if (cur_size > 0) {
R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size), svc::ResultInvalidCurrentMemory());
}
return ResultSuccess();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
src_addr += cur_size;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
}
return ResultSuccess();
}
Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
- MESOSPHERE_UNIMPLEMENTED();
+ /* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Lock the table. */
KScopedLightLock lk(this->general_lock);
/* Check memory state. */
R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr));
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy the data. */
std::memcpy(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size);
return ResultSuccess();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
src_addr += cur_size;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
}
return ResultSuccess();
}
Result KPageTableBase::CopyMemoryFromLinearToLinear(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {