From 4892ffae1553db40dbccff3c55212b998bb37c52 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 17 Jun 2021 13:03:46 -0700
Subject: [PATCH] kern: implement improved [new page tables are zero] invariant

---
 .../mesosphere/kern_k_dynamic_page_manager.hpp        |  8 +++++++-
 .../include/mesosphere/kern_k_dynamic_slab_heap.hpp   |  9 ++++++++-
 .../include/mesosphere/kern_k_page_table_base.hpp     |  7 +++++--
 .../include/mesosphere/kern_k_page_table_manager.hpp  |  9 ++++-----
 .../source/arch/arm64/kern_k_page_table.cpp           | 12 +++++++++---
 5 files changed, 33 insertions(+), 12 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp
index 64bd4ca98..114895a0e 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp
@@ -64,8 +64,11 @@ namespace ams::kern {
                 m_page_bitmap.Initialize(management_ptr, m_count);

                 /* Free the pages to the bitmap. */
-                std::memset(GetPointer<PageBuffer>(m_address), 0, m_count * sizeof(PageBuffer));
                 for (size_t i = 0; i < m_count; i++) {
+                    /* Ensure the freed page is all-zero. */
+                    cpu::ClearPageToZero(GetPointer<PageBuffer>(m_address) + i);
+
+                    /* Set the bit for the free page. */
                     m_page_bitmap.SetBit(i);
                 }

@@ -99,6 +102,9 @@
             }

             void Free(PageBuffer *pb) {
+                /* Ensure all pages in the heap are zero. */
+                cpu::ClearPageToZero(pb);
+
                 /* Take the lock. */
                 KScopedInterruptDisable di;
                 KScopedSpinLock lk(m_lock);
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp
index 3dc14f704..7665c4de7 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp
@@ -22,7 +22,7 @@

 namespace ams::kern {

-    template<typename T>
+    template<typename T, bool ClearNode = false>
     class KDynamicSlabHeap {
         NON_COPYABLE(KDynamicSlabHeap);
         NON_MOVEABLE(KDynamicSlabHeap);
@@ -97,6 +97,13 @@
             T *Allocate() {
                 T *allocated = reinterpret_cast<T *>(this->GetImpl()->Allocate());

+                /* If we successfully allocated and we should clear the node, do so. */
+                if constexpr (ClearNode) {
+                    if (AMS_LIKELY(allocated != nullptr)) {
+                        reinterpret_cast<impl::KSlabHeapImpl::Node *>(allocated)->next = nullptr;
+                    }
+                }
+
                 /* If we fail to allocate, try to get a new page from our next allocator. */
                 if (AMS_UNLIKELY(allocated == nullptr)) {
                     if (m_page_allocator != nullptr) {
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp
index 6ec7498d3..4a9116f1f 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp
@@ -107,8 +107,11 @@
                     Node *Peek() const { return m_root; }

                     Node *Pop() {
-                        Node *r = m_root;
-                        m_root = m_root->m_next;
+                        Node * const r = m_root;
+
+                        m_root = r->m_next;
+                        r->m_next = nullptr;
+
                         return r;
                     }
            };
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp
index ca8edf5b5..fc5652ed1 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp
@@ -25,18 +25,20 @@ namespace ams::kern {
         class PageTablePage {
             private:
                 u8 m_buffer[PageSize];
+            public:
+                ALWAYS_INLINE PageTablePage() { /* Do not initialize anything. */ }
         };
         static_assert(sizeof(PageTablePage) == PageSize);

     }

-    class KPageTableManager : public KDynamicSlabHeap<impl::PageTablePage> {
+    class KPageTableManager : public KDynamicSlabHeap<impl::PageTablePage, true> {
         public:
             using RefCount = u16;
             static constexpr size_t PageTableSize = sizeof(impl::PageTablePage);
             static_assert(PageTableSize == PageSize);
         private:
-            using BaseHeap = KDynamicSlabHeap<impl::PageTablePage>;
+            using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>;
         private:
             RefCount *m_ref_counts;
         public:
@@ -72,9 +74,6 @@
             }

             void Free(KVirtualAddress addr) {
-                /* Ensure all pages in the heap are zero. */
-                cpu::ClearPageToZero(GetVoidPointer(addr));
-
                 /* Free the page. */
                 BaseHeap::Free(GetPointer<impl::PageTablePage>(addr));
             }
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index 6df32849b..e6a08d0e2 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -279,9 +279,10 @@
                 if (l1_entry->IsTable()) {
                     L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, cur_address);
                     if (l2_entry->IsTable()) {
-                        KVirtualAddress l3_table = GetPageTableVirtualAddress(l2_entry->GetTable());
+                        const KVirtualAddress l3_table = GetPageTableVirtualAddress(l2_entry->GetTable());
                         if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
                             while (!this->GetPageTableManager().Close(l3_table, 1)) { /* ... */ }
+                            ClearPageTable(l3_table);
                             this->GetPageTableManager().Free(l3_table);
                         }
                     }
@@ -292,16 +293,21 @@
             for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L1BlockSize) {
                 L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address);
                 if (l1_entry->IsTable()) {
-                    KVirtualAddress l2_table = GetPageTableVirtualAddress(l1_entry->GetTable());
+                    const KVirtualAddress l2_table = GetPageTableVirtualAddress(l1_entry->GetTable());
                     if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
                         while (!this->GetPageTableManager().Close(l2_table, 1)) { /* ... */ }
+                        ClearPageTable(l2_table);
                         this->GetPageTableManager().Free(l2_table);
                     }
                 }
             }

             /* Free the L1 table. */
-            this->GetPageTableManager().Free(reinterpret_cast<uintptr_t>(impl.Finalize()));
+            {
+                const KVirtualAddress l1_table = reinterpret_cast<uintptr_t>(impl.Finalize());
+                ClearPageTable(l1_table);
+                this->GetPageTableManager().Free(l1_table);
+            }

             /* Perform inherited finalization. */
             KPageTableBase::Finalize();
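
The hunks above combine into one contract: the freeing side scrubs the page (cpu::ClearPageToZero in KDynamicPageManager::Free, ClearPageTable before KPageTableManager::Free), and the single link word that the intrusive free list stores inside a free page is nulled again when the page is handed out (the new ClearNode path, mirrored by PageLinkedList::Pop), so a freshly allocated page table already reads as all-zero entries with no memset on the allocation path. The stand-alone sketch below models that contract; it is illustrative only and not part of the patch, and ZeroedPagePool, Page, and the check in main are hypothetical names standing in for KDynamicPageManager, KDynamicSlabHeap<..., true>, and the kernel's page-table allocation path.

/*
 * Illustrative sketch, not part of the patch: a minimal model of the
 * "free pages are zero" invariant the commit establishes.
 */
#include <cassert>
#include <cstddef>
#include <cstring>

constexpr size_t PageSize = 0x1000;

struct alignas(PageSize) Page {
    unsigned char bytes[PageSize];
};

class ZeroedPagePool {
    private:
        /* Intrusive free list: the link lives inside the free page itself. */
        struct Node { Node *next; };
        Node *m_head = nullptr;
    public:
        void Free(Page *page) {
            /* Scrub on free, so every page resting in the pool is all-zero... */
            std::memset(page, 0, sizeof(*page));

            /* ...except for the link word written to chain it into the list. */
            Node *node = reinterpret_cast<Node *>(page);
            node->next = m_head;
            m_head = node;
        }

        Page *Allocate() {
            Node *node = m_head;
            if (node == nullptr) {
                return nullptr;
            }
            m_head = node->next;

            /* Analogue of ClearNode: erase the link so the page really is all-zero. */
            node->next = nullptr;
            return reinterpret_cast<Page *>(node);
        }
};

int main() {
    static Page backing[4];
    ZeroedPagePool pool;

    /* Dirty the pages first so the scrub in Free() is doing real work. */
    for (Page &page : backing) {
        page.bytes[128] = 0xff;
        pool.Free(&page);
    }

    /* A freshly allocated "page table" already reads as all-zero entries. */
    Page *table = pool.Allocate();
    for (size_t i = 0; i < PageSize; ++i) {
        assert(table->bytes[i] == 0);
    }
    return 0;
}

This also explains the ordering in kern_k_page_table.cpp above: each L2/L3 table is cleared via ClearPageTable before being handed back, so the page already satisfies the all-zero expectation by the time it re-enters the pool.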