From 9dc3e025fc87a90c3738763f6516c22584561647 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Mon, 27 Jul 2020 03:00:04 -0700
Subject: [PATCH] kern: Map L1/L2 blocks when possible

---
 .../arch/arm64/kern_k_page_table.hpp          | 18 +++--
 .../source/arch/arm64/kern_k_page_table.cpp   | 77 ++++++++++++++++++-
 2 files changed, 88 insertions(+), 7 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
index 9d8376530..3905bc0ca 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
@@ -182,25 +182,31 @@ namespace ams::kern::arch::arm64 {
             NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager);
             Result Finalize();
         private:
-            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
+            Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
+            Result MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
+            Result MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
+
             Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);
 
             Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
                 switch (page_size) {
                     case L1BlockSize:
+                        return this->MapL1Blocks(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
+                    case L2ContiguousBlockSize:
+                        entry_template.SetContiguous(true);
+                        [[fallthrough]];
                     #ifdef ATMOSPHERE_BOARD_NINTENDO_NX
                     case L2TegraSmmuBlockSize:
                     #endif
                     case L2BlockSize:
-                    case L3BlockSize:
-                        break;
-                    case L2ContiguousBlockSize:
+                        return this->MapL2Blocks(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
                     case L3ContiguousBlockSize:
                         entry_template.SetContiguous(true);
-                        break;
+                        [[fallthrough]];
+                    case L3BlockSize:
+                        return this->MapL3Blocks(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
                     MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
                 }
-                return this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
             }
 
             Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index 9ad471b85..18e89bfad 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -358,7 +358,82 @@ namespace ams::kern::arch::arm64 {
         }
     }
 
-    Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
+    Result KPageTable::MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L1BlockSize));
+        MESOSPHERE_ASSERT(util::IsAligned(num_pages * PageSize, L1BlockSize));
+
+        auto &impl = this->GetImpl();
+
+        /* Iterate, mapping each block. */
+        for (size_t i = 0; i < num_pages; i += L1BlockSize / PageSize) {
+            /* Map the block. */
+            *impl.GetL1Entry(virt_addr) = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false);
+            virt_addr += L1BlockSize;
+            phys_addr += L1BlockSize;
+        }
+
+        return ResultSuccess();
+    }
+
+    Result KPageTable::MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L2BlockSize));
+        MESOSPHERE_ASSERT(util::IsAligned(num_pages * PageSize, L2BlockSize));
+
+        auto &impl = this->GetImpl();
+        KVirtualAddress l2_virt = Null<KVirtualAddress>;
+        int l2_open_count = 0;
+
+        /* Iterate, mapping each block. */
+        for (size_t i = 0; i < num_pages; i += L2BlockSize / PageSize) {
+            KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
+
+            /* If we have no L2 table, we should get or allocate one. */
+            if (l2_virt == Null<KVirtualAddress>) {
+                if (L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); !l1_entry->GetTable(l2_phys)) {
+                    /* Allocate table. */
+                    l2_virt = AllocatePageTable(page_list, reuse_ll);
+                    R_UNLESS(l2_virt != Null<KVirtualAddress>, svc::ResultOutOfResource());
+
+                    /* Set the entry. */
+                    l2_phys = GetPageTablePhysicalAddress(l2_virt);
+                    PteDataSynchronizationBarrier();
+                    *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
+                    PteDataSynchronizationBarrier();
+                } else {
+                    l2_virt = GetPageTableVirtualAddress(l2_phys);
+                }
+            }
+            MESOSPHERE_ASSERT(l2_virt != Null<KVirtualAddress>);
+
+            /* Map the block. */
+            *impl.GetL2EntryFromTable(l2_virt, virt_addr) = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false);
+            l2_open_count++;
+            virt_addr += L2BlockSize;
+            phys_addr += L2BlockSize;
+
+            /* Account for hitting end of table. */
+            if (util::IsAligned(GetInteger(virt_addr), L1BlockSize)) {
+                if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
+                    this->GetPageTableManager().Open(l2_virt, l2_open_count);
+                }
+                l2_virt = Null<KVirtualAddress>;
+                l2_open_count = 0;
+            }
+        }
+
+        /* Perform any remaining opens. */
+        if (l2_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
+            this->GetPageTableManager().Open(l2_virt, l2_open_count);
+        }
+
+        return ResultSuccess();
+    }
+
+    Result KPageTable::MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
         MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
         MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
         MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
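
Note on the sizes involved: with the ARMv8-A 4 KiB translation granule, an L1 block entry maps 1 GiB, an L2 block entry maps 2 MiB, and an L3 entry maps a 4 KiB page; the contiguous hint that entry_template.SetContiguous(true) selects groups 16 adjacent entries, giving the 64 KiB and 32 MiB contiguous sizes the switch above dispatches on. The sketch below is not part of the patch and not mesosphere's API; it is a standalone illustration, using a hypothetical helper named ChooseBlockSize and plain integer addresses, of how a caller might pick the largest block size that the current virtual/physical alignment and remaining length permit, which is the situation in which MapL1Blocks or MapL2Blocks applies instead of per-page L3 mappings.

#include <cstddef>
#include <cstdint>

/* Block/page sizes for the ARMv8-A 4 KiB translation granule. The       */
/* *Contiguous sizes correspond to 16 adjacent entries sharing the       */
/* contiguous hint bit. (The Tegra SMMU special case is omitted here.)   */
constexpr std::size_t L3BlockSize           = 0x1000;     /*   4 KiB */
constexpr std::size_t L3ContiguousBlockSize = 0x10000;    /*  64 KiB */
constexpr std::size_t L2BlockSize           = 0x200000;   /*   2 MiB */
constexpr std::size_t L2ContiguousBlockSize = 0x2000000;  /*  32 MiB */
constexpr std::size_t L1BlockSize           = 0x40000000; /*   1 GiB */

/* Hypothetical helper (not mesosphere code): return the largest block   */
/* size whose alignment both addresses satisfy and which fits in the     */
/* remaining length; a caller would map one block of that size and then  */
/* advance both addresses by it.                                         */
constexpr std::size_t ChooseBlockSize(std::uintptr_t virt_addr, std::uintptr_t phys_addr, std::size_t remaining) {
    constexpr std::size_t BlockSizes[] = {
        L1BlockSize, L2ContiguousBlockSize, L2BlockSize, L3ContiguousBlockSize, L3BlockSize,
    };
    for (std::size_t block_size : BlockSizes) {
        if ((virt_addr % block_size) == 0 && (phys_addr % block_size) == 0 && remaining >= block_size) {
            return block_size;
        }
    }
    return L3BlockSize;
}

/* A 1 GiB-aligned, 1 GiB-long range can use a single L1 block...        */
static_assert(ChooseBlockSize(0x40000000, 0x80000000, 0x40000000) == L1BlockSize);
/* ...a 2 MiB-aligned, 6 MiB-long range is covered by L2 blocks...       */
static_assert(ChooseBlockSize(0x00400000, 0x00600000, 0x00600000) == L2BlockSize);
/* ...and a range that is only page-aligned falls back to L3 pages.      */
static_assert(ChooseBlockSize(0x00001000, 0x00002000, 0x00003000) == L3BlockSize);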