Atmosphere-NX/Atmosphere (https://github.com/Atmosphere-NX/Atmosphere)
kern: fix more page table refactor bugs
commit c911420d6a (parent 570989384b)
3 changed files with 30 additions and 9 deletions

@@ -146,6 +146,14 @@ namespace ams::kern::arch::arm64 {
             static bool MergePages(KVirtualAddress *out, TraversalContext *context);
 
             void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const;
 
+            KProcessAddress GetAddressForContext(const TraversalContext *context) const {
+                KProcessAddress addr = m_is_kernel ? static_cast<uintptr_t>(-GetBlockSize(EntryLevel_L1)) * m_num_entries : 0;
+                for (u32 level = context->level; level <= EntryLevel_L1; ++level) {
+                    addr += ((reinterpret_cast<uintptr_t>(context->level_entries[level]) / sizeof(PageTableEntry)) & (BlocksPerTable - 1)) << (PageBits + LevelBits * level);
+                }
+                return addr;
+            }
+
     };
 }
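The new GetAddressForContext helper rebuilds the virtual address that a traversal context currently points at: it starts from the kernel or user base and, for each level from the context's current level up to L1, adds the entry's index within its table shifted to that level's position. The standalone sketch below is a simplified model of that arithmetic, not Atmosphere code; it assumes a 4 KiB granule (PageBits = 12), 9 index bits per level (LevelBits = 9), 512 entries per table, and a user-space table whose base is 0 (the kernel case would start from the negative base instead). All names in it are hypothetical.

    // Simplified model of GetAddressForContext (hypothetical names, not Atmosphere code).
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr unsigned PageBits  = 12;                                  // assumed 4 KiB granule
    constexpr unsigned LevelBits = 9;                                   // assumed 9 index bits per level
    constexpr std::size_t BlocksPerTable = std::size_t(1) << LevelBits; // 512 entries per table

    // Index of an entry within its page-aligned table, recovered from the entry's
    // address the same way the helper does: pointer / entry size, masked to the table size.
    static std::size_t EntryIndex(const std::uint64_t *entry) {
        return (reinterpret_cast<std::uintptr_t>(entry) / sizeof(std::uint64_t)) & (BlocksPerTable - 1);
    }

    // Rebuild the virtual address a traversal context points at from its per-level
    // entry pointers (level 0 = smallest granule, level 2 = the top-level table).
    static std::uintptr_t AddressForContext(const std::uint64_t *const level_entries[3], unsigned start_level) {
        std::uintptr_t addr = 0;  // user-space table; a kernel table would start from a negative base
        for (unsigned level = start_level; level <= 2; ++level) {
            addr += static_cast<std::uintptr_t>(EntryIndex(level_entries[level])) << (PageBits + LevelBits * level);
        }
        return addr;
    }

    int main() {
        // Entry 7 of a leaf table, entry 5 of a middle table, and entry 3 of a top-level
        // table reconstruct 3 GiB + 5 * 2 MiB + 7 * 4 KiB = 0xc0a07000.
        alignas(4096) static std::uint64_t l1[512], l2[512], l3[512];
        const std::uint64_t *entries[3] = { &l3[7], &l2[5], &l1[3] };
        std::printf("0x%llx\n", static_cast<unsigned long long>(AddressForContext(entries, 0)));
        return 0;
    }
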
@@ -226,6 +226,9 @@ namespace ams::kern::arch::arm64 {
                         /* If we cleared a table, we need to note that we updated and free the table. */
                         if (freeing_table) {
                             KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
+                            if (table == Null<KVirtualAddress>) {
+                                break;
+                            }
                             ClearPageTable(table);
                             this->GetPageTableManager().Free(table);
                         }
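The new Null check presumably covers a traversal context that has no entry pointer recorded for the parent level: aligning a null entry pointer down to its owning page yields address zero, which compares equal to Null<KVirtualAddress>, so the loop must break instead of clearing and freeing that nonexistent table. A tiny sketch of the condition being tested, with hypothetical names and an assumed 4 KiB page size:

    // Sketch of the guarded condition (hypothetical names, not Atmosphere code).
    #include <cassert>
    #include <cstdint>

    constexpr std::uintptr_t PageSize = 0x1000;  // assumed 4 KiB pages

    constexpr std::uintptr_t AlignDown(std::uintptr_t value, std::uintptr_t align) {
        return value & ~(align - 1);
    }

    int main() {
        // No entry pointer recorded for the parent level (i.e. a null pointer).
        const std::uintptr_t parent_entry_addr = 0;
        // Aligning it down to its owning page gives address 0: a "null table".
        const std::uintptr_t table = AlignDown(parent_entry_addr, PageSize);
        assert(table == 0);  // this is the case in which the new check breaks out
        return 0;
    }
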
@@ -243,11 +246,14 @@ namespace ams::kern::arch::arm64 {
                         context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
                         freeing_table = true;
                     }
 
                 }
 
                 /* Continue the traversal. */
                 cur_valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
+                if (entry.block_size == 0) {
+                    break;
+                }
             }
 
             /* Free any remaining pages. */
@@ -266,7 +272,6 @@ namespace ams::kern::arch::arm64 {
             KPageTableBase::Finalize();
         }
 
-
         R_SUCCEED();
     }
 
@@ -379,6 +384,7 @@ namespace ams::kern::arch::arm64 {
 
             /* Unmap the block. */
             bool freeing_table = false;
+            bool need_recalculate_virt_addr = false;
             while (true) {
                 /* Clear the entries. */
                 const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1;
@@ -394,8 +400,14 @@ namespace ams::kern::arch::arm64 {
 
                 /* If we cleared a table, we need to note that we updated and free the table. */
                 if (freeing_table) {
+                    /* If there's no table, we also don't need to do a free. */
+                    const KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
+                    if (table == Null<KVirtualAddress>) {
+                        break;
+                    }
                     this->NoteUpdated();
-                    this->FreePageTable(page_list, KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize)));
+                    this->FreePageTable(page_list, table);
+                    need_recalculate_virt_addr = true;
                 }
 
                 /* Advance; we're no longer contiguous. */
@@ -424,9 +436,10 @@ namespace ams::kern::arch::arm64 {
 
                 /* Advance. */
                 size_t freed_size = next_entry.block_size;
-                if (freeing_table) {
+                if (need_recalculate_virt_addr) {
                     /* We advanced more than by the block, so we need to calculate the actual advanced size. */
-                    const KProcessAddress new_virt_addr = util::AlignUp(GetInteger(virt_addr), impl.GetBlockSize(context.level, context.is_contiguous));
+                    const size_t block_size = impl.GetBlockSize(context.level, context.is_contiguous);
+                    const KProcessAddress new_virt_addr = util::AlignDown(GetInteger(impl.GetAddressForContext(std::addressof(context))) + block_size, block_size);
                     MESOSPHERE_ABORT_UNLESS(new_virt_addr >= virt_addr + next_entry.block_size);
 
                     freed_size = std::min<size_t>(new_virt_addr - virt_addr, remaining_pages * PageSize);
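The advance is now recomputed from the traversal context rather than from virt_addr itself. The old expression rounded virt_addr up to the block size for the current context; when virt_addr is already aligned to that size, util::AlignUp returns it unchanged, so the computed new address does not move past the freed block and cannot satisfy the MESOSPHERE_ABORT_UNLESS check that follows. The new expression takes the address the context points at, adds one block, and aligns down, which lands at the end of the entry that was just freed. A minimal arithmetic sketch of the difference, assuming a 2 MiB block at the current level, a virt_addr that is already 2 MiB aligned, and a context address equal to the base of the freed entry (all values hypothetical):

    // Old vs. new advance computation (hypothetical values, not Atmosphere code).
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    constexpr std::uintptr_t AlignUp(std::uintptr_t value, std::uintptr_t align)   { return (value + align - 1) & ~(align - 1); }
    constexpr std::uintptr_t AlignDown(std::uintptr_t value, std::uintptr_t align) { return value & ~(align - 1); }

    int main() {
        constexpr std::uintptr_t block_size   = std::uintptr_t(2) << 20;  // assumed 2 MiB block at the current level
        constexpr std::uintptr_t virt_addr    = 0x40200000;               // already 2 MiB aligned (assumed)
        constexpr std::uintptr_t context_addr = 0x40200000;               // base address of the freed entry (assumed)

        // Old formula: rounding an already-aligned address up is a no-op, so the advance is zero.
        constexpr std::uintptr_t old_next = AlignUp(virt_addr, block_size);                    // 0x40200000
        // New formula: end of the freed entry, derived from the traversal context.
        constexpr std::uintptr_t new_next = AlignDown(context_addr + block_size, block_size);  // 0x40400000

        std::printf("old advance: 0x%" PRIxPTR ", new advance: 0x%" PRIxPTR "\n",
                    old_next - virt_addr, new_next - virt_addr);
        return 0;
    }

As in the hunk above, freed_size is then clamped to remaining_pages * PageSize exactly as before.
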
@@ -451,8 +464,8 @@ namespace ams::kern::arch::arm64 {
 
     Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
         MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-        /* MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); */
-        /* MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); */
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
 
         auto &impl = this->GetImpl();
         u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);
@@ -176,7 +176,7 @@ namespace ams::kern::arch::arm64 {
 
             /* We want to upgrade a contiguous mapping in a table to a block. */
             PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerTable * sizeof(PageTableEntry)));
-            const KPhysicalAddress phys_addr = GetBlock(pte, context->level);
+            const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(static_cast<EntryLevel>(context->level + 1), false));
 
             /* First, check that all entries are valid for us to merge. */
             const u64 entry_template = pte->GetEntryTemplateForMerge();
@@ -208,7 +208,7 @@ namespace ams::kern::arch::arm64 {
         } else {
             /* We want to upgrade a non-contiguous mapping to a contiguous mapping. */
             PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
-            const KPhysicalAddress phys_addr = GetBlock(pte, context->level);
+            const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(context->level, true));
 
             /* First, check that all entries are valid for us to merge. */
             const u64 entry_template = pte->GetEntryTemplateForMerge();