From 104be247dadb5887545758e9532a7a2178179674 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 10 Oct 2024 12:58:15 -0700 Subject: [PATCH] kern: continue page table refactor, implement separate/unmap --- .../arch/arm64/kern_k_page_table.hpp | 7 +- .../arch/arm64/kern_k_page_table_entry.hpp | 69 +++- .../arch/arm64/kern_k_page_table_impl.hpp | 9 +- libraries/libmesosphere/libmesosphere.mk | 2 +- .../source/arch/arm64/kern_k_page_table.cpp | 359 +++++++----------- .../arch/arm64/kern_k_page_table_impl.cpp | 138 +++---- .../libmesosphere/source/kern_k_thread.cpp | 3 + 7 files changed, 265 insertions(+), 322 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp index 2ccd4b72a..8446b6f1c 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -233,8 +233,11 @@ namespace ams::kern::arch::arm64 { bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list); - ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll); - Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll); + void MergePages(TraversalContext *context, PageLinkedList *page_list); + void MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list); + + Result SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll); + Result SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll); Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp index 8941928ed..a87c7b5c5 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp @@ -20,18 +20,22 @@ namespace ams::kern::arch::arm64 { + constexpr size_t BlocksPerContiguousBlock = 0x10; + constexpr size_t BlocksPerTable = PageSize / sizeof(u64); + constexpr size_t L1BlockSize = 1_GB; - constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize; + constexpr size_t L1ContiguousBlockSize = BlocksPerContiguousBlock * L1BlockSize; constexpr size_t L2BlockSize = 2_MB; - constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize; + constexpr size_t L2ContiguousBlockSize = BlocksPerContiguousBlock * L2BlockSize; constexpr size_t L3BlockSize = PageSize; - constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize; + constexpr size_t L3ContiguousBlockSize = BlocksPerContiguousBlock * L3BlockSize; class PageTableEntry { public: struct InvalidTag{}; struct TableTag{}; struct BlockTag{}; + struct SeparateContiguousTag{}; enum Permission : u64 { Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)), @@ -122,6 +126,25 @@ namespace ams::kern::arch::arm64 { { /* ... */ } + + /* Construct a table. 
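+               (Refactor note: num_blocks appears to seed the entry count that the kernel now tracks in the table descriptor's IGNORED bits [11:2]; see GetTableNumEntries()/AddTableEntries() below.)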
+            */
+            constexpr explicit ALWAYS_INLINE PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn, size_t num_blocks)
+                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | (num_blocks << 2) | 0x3)
+            {
+                /* ... */
+            }
+
+            /* Construct a block. */
+            constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig, bool page)
+                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | (page ? ExtensionFlag_TestTableMask : ExtensionFlag_Valid))
+            {
+                /* ... */
+            }
+            constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, SeparateContiguousTag)
+                : PageTableEntry(attr, GetInteger(phys_addr))
+            {
+                /* ... */
+            }
         protected:
             constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
                 return (m_attributes >> offset) & ((1ul << count) - 1);
@@ -165,7 +188,7 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->SelectBits(8, 2)); }
             constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->SelectBits(2, 3)); }
             constexpr ALWAYS_INLINE int GetAccessFlagInteger() const { return static_cast<int>(this->GetBits(10, 1)); }
-            constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
+            constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
             constexpr ALWAYS_INLINE int GetPageAttributeInteger() const { return static_cast<int>(this->GetBits(2, 3)); }
             constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
@@ -194,6 +217,12 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
             constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast<u64>(MappingFlag_Mapped == (1 << 0))); this->SetBit(0, m); return *this; }
 
+            constexpr ALWAYS_INLINE size_t GetTableNumEntries() const { return this->GetBits(2, 10); }
+            constexpr ALWAYS_INLINE decltype(auto) SetTableNumEntries(size_t num) { this->SetBits(2, 10, num); }
+
+            constexpr ALWAYS_INLINE decltype(auto) AddTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() + num); }
+            constexpr ALWAYS_INLINE decltype(auto) RemoveTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() - num); }
+
             constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
                 constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
                 return m_attributes & BaseMask;
@@ -204,6 +233,38 @@ namespace ams::kern::arch::arm64 {
                 return (m_attributes & BaseMaskForMerge) == attr;
             }
 
+            static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguousMask(size_t idx) {
+                constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+                if (idx == 0) {
+                    return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < BlocksPerContiguousBlock - 1) {
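+                    /* Interior entries of the contiguous run keep none of the disable-merge software flags. */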
+ return BaseMask; + } else { + return BaseMask | ExtensionFlag_DisableMergeTail; + } + } + + constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguous(size_t idx) const { + return m_attributes & GetEntryTemplateForSeparateContiguousMask(idx); + } + + static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateMask(size_t idx) { + constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail)); + if (idx == 0) { + return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody; + } else if (idx < BlocksPerContiguousBlock) { + return BaseMask | ExtensionFlag_DisableMergeHeadAndBody; + } else if (idx < BlocksPerTable - 1) { + return BaseMask; + } else { + return BaseMask | ExtensionFlag_DisableMergeTail; + } + } + + constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparate(size_t idx) const { + return m_attributes & GetEntryTemplateForSeparateMask(idx); + } + constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafe() const { return m_attributes; } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp index 8616ac0dd..68b324f6e 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp @@ -45,7 +45,7 @@ namespace ams::kern::arch::arm64 { }; struct TraversalContext { - const PageTableEntry *level_entries[EntryLevel_Count]; + PageTableEntry *level_entries[EntryLevel_Count]; EntryLevel level; bool is_contiguous; }; @@ -125,6 +125,10 @@ namespace ams::kern::arch::arm64 { ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) const { return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address); } + + static constexpr size_t GetBlockSize(EntryLevel level, bool contiguous = false) { + return 1 << (PageBits + LevelBits * level + 4 * contiguous); + } public: constexpr explicit KPageTableImpl(util::ConstantInitializeTag) : m_table(), m_is_kernel(), m_num_entries() { /* ... 
*/ } @@ -141,6 +145,9 @@ namespace ams::kern::arch::arm64 { bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const; bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const; + + static bool MergePages(KVirtualAddress *out, TraversalContext *context); + void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const; }; } diff --git a/libraries/libmesosphere/libmesosphere.mk b/libraries/libmesosphere/libmesosphere.mk index 32db70664..79c2c13ed 100644 --- a/libraries/libmesosphere/libmesosphere.mk +++ b/libraries/libmesosphere/libmesosphere.mk @@ -20,7 +20,7 @@ endif DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE SETTINGS := $(ATMOSPHERE_SETTINGS) $(ATMOSPHERE_OPTIMIZATION_FLAG) -mgeneral-regs-only -ffixed-x18 -Wextra -Werror -fno-non-call-exceptions CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) -CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit -flto +CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) SOURCES += $(foreach v,$(call ALL_SOURCE_DIRS,../libvapours/source),$(if $(findstring ../libvapours/source/sdmmc,$v),,$v)) diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp index c1b0c76b4..381f6c880 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -280,18 +280,7 @@ namespace ams::kern::arch::arm64 { if (operation == OperationType_Unmap) { R_RETURN(this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll)); } else if (operation == OperationType_Separate) { - const size_t size = num_pages * PageSize; - R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll)); - ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); }; - - if (num_pages > 1) { - const auto end_page = virt_addr + size; - const auto last_page = end_page - PageSize; - - R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll)); - } - - R_SUCCEED(); + R_RETURN(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll)); } else { auto entry_template = this->GetEntryTemplate(properties); @@ -519,16 +508,7 @@ namespace ams::kern::arch::arm64 { /* If we're not forcing an unmap, separate pages immediately. */ if (!force) { - const size_t size = num_pages * PageSize; - R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll)); - ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); }; - - if (num_pages > 1) { - const auto end_page = virt_addr + size; - const auto last_page = end_page - PageSize; - - R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll)); - } + R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll)); } /* Cache initial addresses for use on cleanup. */ @@ -558,10 +538,7 @@ namespace ams::kern::arch::arm64 { /* Handle the case where the block is bigger than it should be. 
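                    (That is, the current mapping extends past the range being unmapped; this is only expected when force is set, so the block is split in place before it is cleared.)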
*/ if (next_entry.block_size > remaining_pages * PageSize) { MESOSPHERE_ABORT_UNLESS(force); - MESOSPHERE_R_ABORT_UNLESS(this->SeparatePages(virt_addr, remaining_pages * PageSize, page_list, reuse_ll)); - const bool new_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); - MESOSPHERE_ASSERT(new_valid); - MESOSPHERE_UNUSED(new_valid); + MESOSPHERE_R_ABORT_UNLESS(this->SeparatePagesImpl(std::addressof(next_entry), std::addressof(context), virt_addr, remaining_pages * PageSize, page_list, reuse_ll)); } /* Check that our state is coherent. */ @@ -569,87 +546,38 @@ namespace ams::kern::arch::arm64 { MESOSPHERE_ASSERT(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size)); /* Unmap the block. */ - L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); - switch (next_entry.block_size) { - case L1BlockSize: - { - /* Clear the entry. */ - *l1_entry = InvalidL1PageTableEntry; - } + bool freeing_table = false; + while (true) { + /* Clear the entries. */ + const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1; + auto *pte = reinterpret_cast(context.is_contiguous ? util::AlignDown(reinterpret_cast(context.level_entries[context.level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)) : reinterpret_cast(context.level_entries[context.level])); + for (size_t i = 0; i < num_to_clear; ++i) { + pte[i] = InvalidPageTableEntry; + } + + /* Remove the entries from the previous table. */ + if (context.level != KPageTableImpl::EntryLevel_L1) { + context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear); + } + + /* If we cleared a table, we need to note that we updated and free the table. */ + if (freeing_table) { + this->NoteUpdated(); + this->FreePageTable(page_list, KVirtualAddress(util::AlignDown(reinterpret_cast(context.level_entries[context.level - 1]), PageSize))); + } + + /* Advance; we're no longer contiguous. */ + context.is_contiguous = false; + context.level_entries[context.level] = pte + num_to_clear - 1; + + /* We may have removed the last entries in a table, in which case we can free an unmap the tables. */ + if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) { break; - case L2ContiguousBlockSize: - case L2BlockSize: - { - /* Get the number of L2 blocks. */ - const size_t num_l2_blocks = next_entry.block_size / L2BlockSize; + } - /* Get the L2 entry. */ - KPhysicalAddress l2_phys = Null; - MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys)); - const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys); - - /* Clear the entry. */ - for (size_t i = 0; i < num_l2_blocks; i++) { - *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry; - } - PteDataMemoryBarrier(); - - /* Close references to the L2 table. */ - if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) { - if (this->GetPageTableManager().Close(l2_virt, num_l2_blocks)) { - *l1_entry = InvalidL1PageTableEntry; - this->NoteUpdated(); - this->FreePageTable(page_list, l2_virt); - pages_to_close.CloseAndReset(); - } - } - } - break; - case L3ContiguousBlockSize: - case L3BlockSize: - { - /* Get the number of L3 blocks. */ - const size_t num_l3_blocks = next_entry.block_size / L3BlockSize; - - /* Get the L2 entry. 
*/ - KPhysicalAddress l2_phys = Null; - MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys)); - const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys); - L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr); - - /* Get the L3 entry. */ - KPhysicalAddress l3_phys = Null; - MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys)); - const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys); - - /* Clear the entry. */ - for (size_t i = 0; i < num_l3_blocks; i++) { - *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry; - } - PteDataMemoryBarrier(); - - /* Close references to the L3 table. */ - if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) { - if (this->GetPageTableManager().Close(l3_virt, num_l3_blocks)) { - *l2_entry = InvalidL2PageTableEntry; - this->NoteUpdated(); - - /* Close reference to the L2 table. */ - if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) { - if (this->GetPageTableManager().Close(l2_virt, 1)) { - *l1_entry = InvalidL1PageTableEntry; - this->NoteUpdated(); - this->FreePageTable(page_list, l2_virt); - } - } - - this->FreePageTable(page_list, l3_virt); - pages_to_close.CloseAndReset(); - } - } - } - break; - MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + /* Advance; we will not be working with blocks any more. */ + context.level = static_cast(util::ToUnderlying(context.level) + 1); + freeing_table = true; } /* Close the blocks. */ @@ -663,8 +591,19 @@ namespace ams::kern::arch::arm64 { } /* Advance. */ - virt_addr += next_entry.block_size; - remaining_pages -= next_entry.block_size / PageSize; + size_t freed_size = next_entry.block_size; + if (freeing_table) { + /* We advanced more than by the block, so we need to calculate the actual advanced size. */ + const KProcessAddress new_virt_addr = util::AlignUp(GetInteger(virt_addr), impl.GetBlockSize(context.level, context.is_contiguous)); + MESOSPHERE_ABORT_UNLESS(new_virt_addr >= virt_addr + next_entry.block_size); + + freed_size = std::min(new_virt_addr - virt_addr, remaining_pages * PageSize); + } + + /* We can just advance by the block size. */ + virt_addr += freed_size; + remaining_pages -= freed_size / PageSize; + next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); } @@ -1032,141 +971,116 @@ namespace ams::kern::arch::arm64 { return merged; } - Result KPageTable::SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) { + void KPageTable::MergePages(TraversalContext *context, PageLinkedList *page_list) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); auto &impl = this->GetImpl(); - /* First, try to separate an L1 block into contiguous L2 blocks. */ - L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); - if (l1_entry->IsBlock()) { - /* If our block size is too big, don't bother. */ - R_SUCCEED_IF(block_size >= L1BlockSize); - - /* Get the addresses we're working with. */ - const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize); - const KPhysicalAddress block_phys_addr = l1_entry->GetBlock(); - - /* Allocate a new page for the L2 table. */ - const KVirtualAddress l2_table = this->AllocatePageTable(page_list, reuse_ll); - R_UNLESS(l2_table != Null, svc::ResultOutOfResource()); - const KPhysicalAddress l2_phys = GetPageTablePhysicalAddress(l2_table); - - /* Set the entries in the L2 table. 
*/ - for (size_t i = 0; i < L1BlockSize / L2BlockSize; i++) { - const u64 entry_template = l1_entry->GetEntryTemplateForL2Block(i); - *(impl.GetL2EntryFromTable(l2_table, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, true); + /* Iteratively merge, until we can't. */ + while (true) { + /* Try to merge. */ + KVirtualAddress freed_table = Null; + if (!impl.MergePages(std::addressof(freed_table), context)) { + break; } - /* Open references to the L2 table. */ - this->GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize); + /* Note that we updated. */ + this->NoteUpdated(); - /* Replace the L1 entry with one to the new table. */ - PteDataMemoryBarrier(); - *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true); + /* Free the page. */ + ClearPageTable(freed_table); + this->FreePageTable(page_list, freed_table); + } + } + + void KPageTable::MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry entry; + MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr)); + + /* Merge start of the range. */ + this->MergePages(std::addressof(context), page_list); + + /* If we have more than one page, do the same for the end of the range. */ + if (num_pages > 1) { + /* Begin traversal for end of range. */ + const size_t size = num_pages * PageSize; + const auto end_page = virt_addr + size; + const auto last_page = end_page - PageSize; + MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), last_page)); + + /* Merge. */ + this->MergePages(std::addressof(context), page_list); + } + } + + Result KPageTable::SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + auto &impl = this->GetImpl(); + + /* If at any point we fail, we want to merge. */ + ON_RESULT_FAILURE { this->MergePages(context, page_list); }; + + /* Iterate, separating until our block size is small enough. */ + while (entry->block_size > block_size) { + /* If necessary, allocate a table. */ + KVirtualAddress table = Null; + if (!context->is_contiguous) { + table = this->AllocatePageTable(page_list, reuse_ll); + R_UNLESS(table != Null, svc::ResultOutOfResource()); + } + + /* Separate. */ + impl.SeparatePages(entry, context, virt_addr, nullptr); this->NoteUpdated(); } - /* If we don't have an l1 table, we're done. */ - MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable() || l1_entry->IsEmpty()); - R_SUCCEED_IF(!l1_entry->IsTable()); - - /* We want to separate L2 contiguous blocks into L2 blocks, so check that our size permits that. */ - R_SUCCEED_IF(block_size >= L2ContiguousBlockSize); - - L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr); - if (l2_entry->IsBlock()) { - /* If we're contiguous, try to separate. */ - if (l2_entry->IsContiguous()) { - const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize); - const KPhysicalAddress block_phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize); - - /* Mark the entries as non-contiguous. 
*/ - for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { - L2PageTableEntry *target = impl.GetL2Entry(l1_entry, block_virt_addr + L2BlockSize * i); - const u64 entry_template = target->GetEntryTemplateForL2Block(i); - *target = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false); - } - this->NoteUpdated(); - } - - /* We want to separate L2 blocks into L3 contiguous blocks, so check that our size permits that. */ - R_SUCCEED_IF(block_size >= L2BlockSize); - - /* Get the addresses we're working with. */ - const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize); - const KPhysicalAddress block_phys_addr = l2_entry->GetBlock(); - - /* Allocate a new page for the L3 table. */ - const KVirtualAddress l3_table = this->AllocatePageTable(page_list, reuse_ll); - R_UNLESS(l3_table != Null, svc::ResultOutOfResource()); - const KPhysicalAddress l3_phys = GetPageTablePhysicalAddress(l3_table); - - /* Set the entries in the L3 table. */ - for (size_t i = 0; i < L2BlockSize / L3BlockSize; i++) { - const u64 entry_template = l2_entry->GetEntryTemplateForL3Block(i); - *(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, true); - } - - /* Open references to the L3 table. */ - this->GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize); - - /* Replace the L2 entry with one to the new table. */ - PteDataMemoryBarrier(); - *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true); - this->NoteUpdated(); - } - - /* If we don't have an L3 table, we're done. */ - MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable() || l2_entry->IsEmpty()); - R_SUCCEED_IF(!l2_entry->IsTable()); - - /* We want to separate L3 contiguous blocks into L2 blocks, so check that our size permits that. */ - R_SUCCEED_IF(block_size >= L3ContiguousBlockSize); - - /* If we're contiguous, try to separate. */ - L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr); - if (l3_entry->IsBlock() && l3_entry->IsContiguous()) { - const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize); - const KPhysicalAddress block_phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize); - - /* Mark the entries as non-contiguous. */ - for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { - L3PageTableEntry *target = impl.GetL3Entry(l2_entry, block_virt_addr + L3BlockSize * i); - const u64 entry_template = target->GetEntryTemplateForL3Block(i); - *target = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false); - } - this->NoteUpdated(); - } - - /* We're done! */ R_SUCCEED(); } - Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) { + Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - /* If we fail while separating, re-merge. */ - ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); }; + auto &impl = this->GetImpl(); - /* Try to separate pages. 
*/ - R_RETURN(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll)); + /* Begin traversal. */ + TraversalContext start_context; + TraversalEntry entry; + MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(start_context), virt_addr)); + + /* Separate pages at the start of the range. */ + const size_t size = num_pages * PageSize; + R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(start_context), virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll)); + + /* If necessary, separate pages at the end of the range. */ + if (num_pages > 1) { + const auto end_page = virt_addr + size; + const auto last_page = end_page - PageSize; + + /* Begin traversal. */ + TraversalContext end_context; + MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(end_context), last_page)); + + + ON_RESULT_FAILURE { this->MergePages(std::addressof(start_context), page_list); }; + + R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(end_context), last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll)); + } + + R_SUCCEED(); } Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); /* Separate pages before we change permissions. */ - const size_t size = num_pages * PageSize; - R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll)); - if (num_pages > 1) { - const auto end_page = virt_addr + size; - const auto last_page = end_page - PageSize; - - ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); }; - - R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll)); - } + R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll)); /* ===================================================== */ @@ -1376,10 +1290,7 @@ namespace ams::kern::arch::arm64 { } /* We've succeeded, now perform what coalescing we can. */ - this->MergePages(virt_addr, page_list); - if (num_pages > 1) { - this->MergePages(virt_addr + (num_pages - 1) * PageSize, page_list); - } + this->MergePages(virt_addr, num_pages, page_list); R_SUCCEED(); } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp index 5bc775f66..5f03bb83c 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp @@ -33,94 +33,6 @@ namespace ams::kern::arch::arm64 { return m_table; } - // bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const { - // /* Set the L3 entry. */ - // out_context->l3_entry = l3_entry; - // - // if (l3_entry->IsBlock()) { - // /* Set the output entry. 
*/ - // out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1)); - // if (l3_entry->IsContiguous()) { - // out_entry->block_size = L3ContiguousBlockSize; - // } else { - // out_entry->block_size = L3BlockSize; - // } - // out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits(); - // out_entry->attr = 0; - // - // return true; - // } else { - // out_entry->phys_addr = Null; - // out_entry->block_size = L3BlockSize; - // out_entry->sw_reserved_bits = 0; - // out_entry->attr = 0; - // return false; - // } - // } - // - // bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const { - // /* Set the L2 entry. */ - // out_context->l2_entry = l2_entry; - // - // if (l2_entry->IsBlock()) { - // /* Set the output entry. */ - // out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1)); - // if (l2_entry->IsContiguous()) { - // out_entry->block_size = L2ContiguousBlockSize; - // } else { - // out_entry->block_size = L2BlockSize; - // } - // out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits(); - // out_entry->attr = 0; - // - // /* Set the output context. */ - // out_context->l3_entry = nullptr; - // return true; - // } else if (l2_entry->IsTable()) { - // return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr); - // } else { - // out_entry->phys_addr = Null; - // out_entry->block_size = L2BlockSize; - // out_entry->sw_reserved_bits = 0; - // out_entry->attr = 0; - // - // out_context->l3_entry = nullptr; - // return false; - // } - // } - // - // bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const { - // /* Set the L1 entry. */ - // out_context->level_entries[EntryLevel_L1] = l1_entry; - // - // if (l1_entry->IsBlock()) { - // /* Set the output entry. */ - // out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1)); - // if (l1_entry->IsContiguous()) { - // out_entry->block_size = L1ContiguousBlockSize; - // } else { - // out_entry->block_size = L1BlockSize; - // } - // out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits(); - // - // /* Set the output context. */ - // out_context->l2_entry = nullptr; - // out_context->l3_entry = nullptr; - // return true; - // } else if (l1_entry->IsTable()) { - // return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr); - // } else { - // out_entry->phys_addr = Null; - // out_entry->block_size = L1BlockSize; - // out_entry->sw_reserved_bits = 0; - // out_entry->attr = 0; - // - // out_context->l2_entry = nullptr; - // out_context->l3_entry = nullptr; - // return false; - // } - // } - bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const { /* Setup invalid defaults. */ *out_entry = {}; @@ -176,9 +88,8 @@ namespace ams::kern::arch::arm64 { bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const { /* Advance entry. */ - auto *cur_pte = context->level_entries[context->level]; - auto *next_pte = reinterpret_cast(context->is_contiguous ? 
util::AlignDown(reinterpret_cast(cur_pte), 0x10 * sizeof(PageTableEntry)) + 0x10 * sizeof(PageTableEntry) : reinterpret_cast(cur_pte) + sizeof(PageTableEntry));
+        auto *next_pte = reinterpret_cast<PageTableEntry *>(context->is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(cur_pte), BlocksPerContiguousBlock * sizeof(PageTableEntry)) + BlocksPerContiguousBlock * sizeof(PageTableEntry) : reinterpret_cast<uintptr_t>(cur_pte) + sizeof(PageTableEntry));
 
         /* Set the pte. */
         context->level_entries[context->level] = next_pte;
@@ -255,6 +166,53 @@ namespace ams::kern::arch::arm64 {
         return is_block;
     }
 
+    bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context) {
+        /* TODO */
+        MESOSPHERE_UNUSED(out, context);
+        MESOSPHERE_PANIC("page tables");
+    }
+
+    void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const {
+        /* We want to downgrade the pages by one step. */
+        if (context->is_contiguous) {
+            /* We want to downgrade a contiguous mapping to a non-contiguous mapping. */
+            pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
+
+            auto * const first = pte;
+            const KPhysicalAddress block = this->GetBlock(first, context->level);
+            for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
+                pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * context->level)), PageTableEntry(first->GetEntryTemplateForSeparateContiguous(i)), PageTableEntry::SeparateContiguousTag{});
+            }
+
+            context->is_contiguous = false;
+
+            context->level_entries[context->level] = pte + (this->GetLevelIndex(address, context->level) & (BlocksPerContiguousBlock - 1));
+        } else {
+            /* We want to downgrade a block into a table. */
+            auto * const first = context->level_entries[context->level];
+            const KPhysicalAddress block = this->GetBlock(first, context->level);
+            for (size_t i = 0; i < BlocksPerTable; ++i) {
+                pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * (context->level - 1))), PageTableEntry(first->GetEntryTemplateForSeparate(i)), PageTableEntry::SoftwareReservedBit_None, true, context->level - 1 == EntryLevel_L3);
+            }
+
+            context->is_contiguous = true;
+            context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
+
+            /* Wait for pending stores to complete. */
+            cpu::DataSynchronizationBarrierInnerShareableStore();
+
+            /* Update the block entry to be a table entry, pointing at the newly populated table. */
+            *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(pte)), m_is_kernel, true, BlocksPerTable);
+
+            context->level_entries[context->level] = pte + this->GetLevelIndex(address, context->level);
+        }
+
+        entry->sw_reserved_bits = 0;
+        entry->attr = 0;
+        entry->phys_addr = this->GetBlock(context->level_entries[context->level], context->level) + this->GetOffset(address, context->level);
+        entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);
+    }
+
     void KPageTableImpl::Dump(uintptr_t start, size_t size) const {
         /* If zero size, there's nothing to dump. 
*/ if (size == 0) { diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp index 4ec7aa1dd..74b39acd3 100644 --- a/libraries/libmesosphere/source/kern_k_thread.cpp +++ b/libraries/libmesosphere/source/kern_k_thread.cpp @@ -1438,7 +1438,10 @@ namespace ams::kern { this->SetState(ThreadState_Waiting); /* Set our wait queue. */ + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdangling-pointer" m_wait_queue = queue; + #pragma GCC diagnostic pop } void KThread::NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result) {