kern: implement improved [new page tables are zero] invariant

Michael Scire 2021-06-17 13:03:46 -07:00
parent 25305257d6
commit 4892ffae15
5 changed files with 33 additions and 12 deletions
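
For orientation: the invariant this commit establishes is that every page owned by the page-table heap is all-zero while it is free, so a freshly allocated page can be installed as a page table without any clearing at allocation time. The diff gets there in three steps: pages are zeroed once at heap initialization, zeroed again on every Free(), and the single word the intrusive free list writes into a free page (its next link) is re-cleared as the page is handed back out. A compilable model of the resulting guarantee (illustrative only; PageSize and CheckAllZero here are stand-ins, not mesosphere APIs):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t PageSize = 4096;

    /* What callers may rely on after this commit: any page returned by
     * the page-table heap is already all-zero, with no memset needed. */
    void CheckAllZero(const void *page) {
        const auto *words = static_cast<const std::uint64_t *>(page);
        for (std::size_t i = 0; i < PageSize / sizeof(*words); ++i) {
            assert(words[i] == 0);
        }
    }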

==== File 1 of 5 ====

@@ -64,8 +64,11 @@ namespace ams::kern {
             m_page_bitmap.Initialize(management_ptr, m_count);
 
             /* Free the pages to the bitmap. */
-            std::memset(GetPointer<PageBuffer>(m_address), 0, m_count * sizeof(PageBuffer));
             for (size_t i = 0; i < m_count; i++) {
+                /* Ensure the freed page is all-zero. */
+                cpu::ClearPageToZero(GetPointer<PageBuffer>(m_address) + i);
+
                 /* Set the bit for the free page. */
                 m_page_bitmap.SetBit(i);
             }
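
cpu::ClearPageToZero is architecture-specific; on arm64 it can zero a cache line at a time (e.g. via dc zva) rather than issuing ordinary stores, but its observable effect is simply "zero one page". A portable stand-in, assuming 4 KiB pages (a sketch, not the kernel routine):

    #include <cstddef>
    #include <cstring>

    constexpr std::size_t PageSize = 4096;

    /* Model of cpu::ClearPageToZero: functionally a page-sized memset. */
    inline void ClearPageToZeroModel(void *page) {
        std::memset(page, 0, PageSize);
    }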
@@ -99,6 +102,9 @@ namespace ams::kern {
             }
 
             void Free(PageBuffer *pb) {
+                /* Ensure all pages in the heap are zero. */
+                cpu::ClearPageToZero(pb);
+
                 /* Take the lock. */
                 KScopedInterruptDisable di;
                 KScopedSpinLock lk(m_lock);
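
Note that this heap tracks free pages with an out-of-band bitmap, so nothing is ever written into a page while it sits in the free pool; zeroing in Free() (and once at initialization) is enough on its own. A minimal model of that discipline, with hypothetical names (not mesosphere code):

    #include <bitset>
    #include <cstddef>
    #include <cstring>

    constexpr std::size_t PageSize  = 4096;
    constexpr std::size_t PageCount = 8;

    struct BitmapPoolModel {
        alignas(PageSize) unsigned char m_pages[PageCount][PageSize];
        std::bitset<PageCount> m_free;

        void Free(std::size_t i) {
            std::memset(m_pages[i], 0, PageSize); /* Scrub before marking free. */
            m_free.set(i);                        /* Metadata lives outside the page. */
        }
    };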

==== File 2 of 5 ====

@@ -22,7 +22,7 @@
 namespace ams::kern {
 
-    template<typename T>
+    template<typename T, bool ClearNode = false>
     class KDynamicSlabHeap {
         NON_COPYABLE(KDynamicSlabHeap);
         NON_MOVEABLE(KDynamicSlabHeap);
@@ -97,6 +97,13 @@ namespace ams::kern {
             T *Allocate() {
                 T *allocated = reinterpret_cast<T *>(this->GetImpl()->Allocate());
 
+                /* If we successfully allocated and we should clear the node, do so. */
+                if constexpr (ClearNode) {
+                    if (AMS_LIKELY(allocated != nullptr)) {
+                        reinterpret_cast<Impl::Node *>(allocated)->next = nullptr;
+                    }
+                }
+
                 /* If we fail to allocate, try to get a new page from our next allocator. */
                 if (AMS_UNLIKELY(allocated == nullptr)) {
                     if (m_page_allocator != nullptr) {
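
The new ClearNode parameter exists because the heap's free list is intrusive: the impl keeps its next pointer in the first word of each free object, so that link is the only part of an otherwise-zero page that gets dirtied. Clearing it on allocation restores the all-zero state, and the default of false plus if constexpr means heaps that do not opt in compile to exactly the old code. A self-contained model of the gate (illustrative):

    #include <cstddef>

    /* Intrusive free list with a compile-time "clear the link on
     * allocate" switch, in the spirit of the ClearNode parameter above. */
    template<typename T, bool ClearNode = false>
    struct SlabModel {
        struct Node { Node *next; };

        Node *m_head = nullptr;

        T *Allocate() {
            Node * const n = m_head;
            if (n != nullptr) {
                m_head = n->next;
                if constexpr (ClearNode) {
                    n->next = nullptr; /* Erase the one word the list dirtied. */
                }
            }
            return reinterpret_cast<T *>(n);
        }
    };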

==== File 3 of 5 ====

@@ -107,8 +107,11 @@ namespace ams::kern {
             Node *Peek() const { return m_root; }
 
             Node *Pop() {
-                Node *r = m_root;
-                m_root = m_root->m_next;
+                Node * const r = m_root;
+
+                m_root = r->m_next;
+                r->m_next = nullptr;
+
                 return r;
             }
     };
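
This Pop() applies the same discipline at the free-list level: nulling r->m_next as the node leaves the list means a page that was zeroed when freed comes back all-zero, because the link was the only word the list wrote into it. A runnable end-to-end model (illustrative; FreeListModel and Push are hypothetical stand-ins, not kernel code):

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    constexpr std::size_t PageSize = 4096;

    struct Node { Node *m_next; };

    struct FreeListModel {
        Node *m_root = nullptr;

        void Push(void *page) {
            std::memset(page, 0, PageSize);  /* Zero on free. */
            Node * const n = static_cast<Node *>(page);
            n->m_next = m_root;              /* The only nonzero word in the page. */
            m_root = n;
        }

        Node *Pop() {
            Node * const r = m_root;
            m_root = r->m_next;
            r->m_next = nullptr;             /* Node leaves the list fully zero. */
            return r;
        }
    };

    int main() {
        alignas(PageSize) static unsigned char page[PageSize];
        FreeListModel list;
        list.Push(page);
        const unsigned char * const p = reinterpret_cast<const unsigned char *>(list.Pop());
        for (std::size_t i = 0; i < PageSize; ++i) {
            assert(p[i] == 0);               /* The invariant holds end to end. */
        }
        return 0;
    }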

==== File 4 of 5 ====

@@ -25,18 +25,20 @@ namespace ams::kern {
         class PageTablePage {
             private:
                 u8 m_buffer[PageSize];
+            public:
+                ALWAYS_INLINE PageTablePage() { /* Do not initialize anything. */ }
         };
         static_assert(sizeof(PageTablePage) == PageSize);
 
     }
 
-    class KPageTableManager : public KDynamicSlabHeap<impl::PageTablePage> {
+    class KPageTableManager : public KDynamicSlabHeap<impl::PageTablePage, true> {
         public:
             using RefCount = u16;
             static constexpr size_t PageTableSize = sizeof(impl::PageTablePage);
             static_assert(PageTableSize == PageSize);
         private:
-            using BaseHeap = KDynamicSlabHeap<impl::PageTablePage>;
+            using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>;
         private:
             RefCount *m_ref_counts;
         public:
@@ -72,9 +74,6 @@ namespace ams::kern {
             }
 
             void Free(KVirtualAddress addr) {
-                /* Ensure all pages in the heap are zero. */
-                cpu::ClearPageToZero(GetVoidPointer(addr));
-
                 /* Free the page. */
                 BaseHeap::Free(GetPointer<impl::PageTablePage>(addr));
             }
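
Two details here are easy to miss. First, the explicit do-nothing constructor keeps construction of a PageTablePage (e.g. via placement new in the slab heap) from value-initializing, and therefore re-zeroing, a 4 KiB buffer the heap already guarantees is zero. Second, Free() drops its local ClearPageToZero call because zeroing has moved into the shared heap and free-list path. A short illustration of the constructor point (my reading of the in-diff comment, not quoted code):

    #include <cstddef>

    constexpr std::size_t PageSize = 4096;

    struct ImplicitPage {
        unsigned char m_buffer[PageSize];
        /* No user-provided constructor: "new (mem) ImplicitPage()"
         * value-initializes, i.e. zeroes the whole buffer. */
    };

    struct ExplicitPage {
        unsigned char m_buffer[PageSize];
        ExplicitPage() { /* Do not initialize anything. */ }
        /* User-provided empty constructor: the same new-expression now
         * runs this no-op instead, leaving the already-zero bytes alone. */
    };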

==== File 5 of 5 ====

@@ -279,9 +279,10 @@ namespace ams::kern::arch::arm64 {
                 if (l1_entry->IsTable()) {
                     L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, cur_address);
                     if (l2_entry->IsTable()) {
-                        KVirtualAddress l3_table = GetPageTableVirtualAddress(l2_entry->GetTable());
+                        const KVirtualAddress l3_table = GetPageTableVirtualAddress(l2_entry->GetTable());
                         if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
                             while (!this->GetPageTableManager().Close(l3_table, 1)) { /* ... */ }
+                            ClearPageTable(l3_table);
                             this->GetPageTableManager().Free(l3_table);
                         }
                     }
@@ -292,16 +293,21 @@ namespace ams::kern::arch::arm64 {
             for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L1BlockSize) {
                 L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address);
                 if (l1_entry->IsTable()) {
-                    KVirtualAddress l2_table = GetPageTableVirtualAddress(l1_entry->GetTable());
+                    const KVirtualAddress l2_table = GetPageTableVirtualAddress(l1_entry->GetTable());
                     if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
                         while (!this->GetPageTableManager().Close(l2_table, 1)) { /* ... */ }
+                        ClearPageTable(l2_table);
                         this->GetPageTableManager().Free(l2_table);
                     }
                 }
             }
 
             /* Free the L1 table. */
-            this->GetPageTableManager().Free(reinterpret_cast<uintptr_t>(impl.Finalize()));
+            {
+                const KVirtualAddress l1_table = reinterpret_cast<uintptr_t>(impl.Finalize());
+                ClearPageTable(l1_table);
+                this->GetPageTableManager().Free(l1_table);
+            }
 
             /* Perform inherited finalization. */
             KPageTableBase::Finalize();
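
ClearPageTable is introduced by this commit, but its definition falls outside the quoted hunks. A plausible reading, consistent with how the rest of the commit zeroes pages (an assumption, not quoted from the diff), is a thin wrapper over cpu::ClearPageToZero:

    namespace {

        /* Assumed helper: zero one page table page via its virtual address. */
        ALWAYS_INLINE void ClearPageTable(KVirtualAddress table) {
            cpu::ClearPageToZero(GetVoidPointer(table));
        }

    }

With tables scrubbed before Free(), every page re-enters the heap already satisfying the all-zero invariant, which is exactly what lets allocation skip clearing.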