From 2f0470ff1cf50b92d814b51e140d8e3f7d772e7f Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Tue, 1 Dec 2020 04:14:58 -0800 Subject: [PATCH] kern: implement new software-reserved page table bits --- .../arm64/init/kern_k_init_page_table.hpp | 30 +-- .../arch/arm64/kern_k_page_table.hpp | 20 +- .../arch/arm64/kern_k_page_table_entry.hpp | 126 +++++++++-- .../arch/arm64/kern_k_page_table_impl.hpp | 5 + .../source/arch/arm64/kern_k_page_table.cpp | 207 ++++++++++++++---- .../arch/arm64/kern_k_page_table_impl.cpp | 30 ++- .../source/kern_k_page_table_base.cpp | 2 +- 7 files changed, 315 insertions(+), 105 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index 357a2ae5f..98050baae 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -305,7 +305,7 @@ namespace ams::kern::arch::arm64::init { /* Can we make an L1 block? */ if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) { - *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, false); + *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, false); cpu::DataSynchronizationBarrierInnerShareable(); virt_addr += L1BlockSize; @@ -327,7 +327,7 @@ namespace ams::kern::arch::arm64::init { /* Can we make a contiguous L2 block? */ if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) { for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { - l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, true); + l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, true); cpu::DataSynchronizationBarrierInnerShareable(); virt_addr += L2BlockSize; @@ -339,7 +339,7 @@ namespace ams::kern::arch::arm64::init { /* Can we make an L2 block? */ if (util::IsAligned(GetInteger(virt_addr), L2BlockSize) && util::IsAligned(GetInteger(phys_addr), L2BlockSize) && size >= L2BlockSize) { - *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, false); + *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, false); cpu::DataSynchronizationBarrierInnerShareable(); virt_addr += L2BlockSize; @@ -361,7 +361,7 @@ namespace ams::kern::arch::arm64::init { /* Can we make a contiguous L3 block? */ if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) { for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { - l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, true); + l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, true); cpu::DataSynchronizationBarrierInnerShareable(); virt_addr += L3BlockSize; @@ -372,7 +372,7 @@ namespace ams::kern::arch::arm64::init { } /* Make an L3 block. 
*/ - *l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, false); + *l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, false); cpu::DataSynchronizationBarrierInnerShareable(); virt_addr += L3BlockSize; phys_addr += L3BlockSize; @@ -542,7 +542,7 @@ namespace ams::kern::arch::arm64::init { const KPhysicalAddress block = l1_entry->GetBlock(); MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); MESOSPHERE_INIT_ABORT_UNLESS(size >= L1BlockSize); - MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false)); + MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, false)); /* Invalidate the existing L1 block. */ *static_cast(l1_entry) = InvalidPageTableEntry; @@ -550,7 +550,7 @@ namespace ams::kern::arch::arm64::init { cpu::InvalidateEntireTlb(); /* Create new L1 block. */ - *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, false); + *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, PageTableEntry::SoftwareReservedBit_None, false); virt_addr += L1BlockSize; size -= L1BlockSize; @@ -573,7 +573,7 @@ namespace ams::kern::arch::arm64::init { /* Invalidate the existing contiguous L2 block. */ for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { /* Ensure that the entry is valid. */ - MESOSPHERE_INIT_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true)); + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, true)); static_cast(l2_entry)[i] = InvalidPageTableEntry; } cpu::DataSynchronizationBarrierInnerShareable(); @@ -581,7 +581,7 @@ namespace ams::kern::arch::arm64::init { /* Create a new contiguous L2 block. */ for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { - l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, block + L2BlockSize * i, attr_after, true); + l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, block + L2BlockSize * i, attr_after, PageTableEntry::SoftwareReservedBit_None, true); } virt_addr += L2ContiguousBlockSize; @@ -591,7 +591,7 @@ namespace ams::kern::arch::arm64::init { MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize)); MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize)); MESOSPHERE_INIT_ABORT_UNLESS(size >= L2BlockSize); - MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false)); + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, false)); /* Invalidate the existing L2 block. */ *static_cast(l2_entry) = InvalidPageTableEntry; @@ -599,7 +599,7 @@ namespace ams::kern::arch::arm64::init { cpu::InvalidateEntireTlb(); /* Create new L2 block. */ - *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, false); + *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, PageTableEntry::SoftwareReservedBit_None, false); virt_addr += L2BlockSize; size -= L2BlockSize; @@ -625,7 +625,7 @@ namespace ams::kern::arch::arm64::init { /* Invalidate the existing contiguous L3 block. */ for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { /* Ensure that the entry is valid. 
*/ - MESOSPHERE_INIT_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true)); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, true)); static_cast(l3_entry)[i] = InvalidPageTableEntry; } cpu::DataSynchronizationBarrierInnerShareable(); @@ -633,7 +633,7 @@ namespace ams::kern::arch::arm64::init { /* Create a new contiguous L3 block. */ for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { - l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, block + L3BlockSize * i, attr_after, true); + l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, block + L3BlockSize * i, attr_after, PageTableEntry::SoftwareReservedBit_None, true); } virt_addr += L3ContiguousBlockSize; @@ -643,7 +643,7 @@ namespace ams::kern::arch::arm64::init { MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize)); MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize)); MESOSPHERE_INIT_ABORT_UNLESS(size >= L3BlockSize); - MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false)); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, false)); /* Invalidate the existing L3 block. */ *static_cast(l3_entry) = InvalidPageTableEntry; @@ -651,7 +651,7 @@ namespace ams::kern::arch::arm64::init { cpu::InvalidateEntireTlb(); /* Create new L3 block. */ - *l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, false); + *l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, PageTableEntry::SoftwareReservedBit_None, false); virt_addr += L3BlockSize; size -= L3BlockSize; diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp index da0e2102d..119f93093 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -179,16 +179,16 @@ namespace ams::kern::arch::arm64 { NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager); Result Finalize(); private: - Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); - Result MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); - Result MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); + Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll); + Result MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll); + Result MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, 
PageLinkedList *page_list, bool reuse_ll); Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll); - Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, size_t page_size, PageLinkedList *page_list, bool reuse_ll) { + Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) { switch (page_size) { case L1BlockSize: - return this->MapL1Blocks(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll); + return this->MapL1Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll); case L2ContiguousBlockSize: entry_template.SetContiguous(true); [[fallthrough]]; @@ -196,25 +196,25 @@ namespace ams::kern::arch::arm64 { case L2TegraSmmuBlockSize: #endif case L2BlockSize: - return this->MapL2Blocks(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll); + return this->MapL2Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll); case L3ContiguousBlockSize: entry_template.SetContiguous(true); [[fallthrough]]; case L3BlockSize: - return this->MapL3Blocks(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll); + return this->MapL3Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll); MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } } - Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); - Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); + Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll); + Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll); bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list); ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll); Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll); - Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll); + Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll); static void PteDataSynchronizationBarrier() { cpu::DataSynchronizationBarrierInnerShareable(); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp index 23b47d402..ffcad8fc7 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp @@ -69,11 +69,23 @@ namespace ams::kern::arch::arm64 { MappingFlag_Mapped = (1 << 0), }; + enum SoftwareReservedBit : 
u8 {
+                SoftwareReservedBit_None                    = 0,
+                SoftwareReservedBit_DisableMergeHead        = (1u << 0),
+                SoftwareReservedBit_DisableMergeHeadAndBody = (1u << 1),
+                SoftwareReservedBit_DisableMergeHeadTail    = (1u << 2),
+                SoftwareReservedBit_Valid                   = (1u << 3),
+            };
+
+            static constexpr ALWAYS_INLINE std::underlying_type<SoftwareReservedBit>::type EncodeSoftwareReservedBits(bool head, bool head_body, bool tail) {
+                return (head ? SoftwareReservedBit_DisableMergeHead : SoftwareReservedBit_None) | (head_body ? SoftwareReservedBit_DisableMergeHeadAndBody : SoftwareReservedBit_None) | (tail ? SoftwareReservedBit_DisableMergeHeadTail : SoftwareReservedBit_None);
+            }
+
             enum ExtensionFlag : u64 {
-                ExtensionFlag_DisableMergeHead        = (1ul << 55),
-                ExtensionFlag_DisableMergeHeadAndBody = (1ul << 56),
-                ExtensionFlag_DisableMergeTail        = (1ul << 57),
-                ExtensionFlag_Valid                   = (1ul << 58),
+                ExtensionFlag_DisableMergeHead        = (static_cast<u64>(SoftwareReservedBit_DisableMergeHead)        << 55),
+                ExtensionFlag_DisableMergeHeadAndBody = (static_cast<u64>(SoftwareReservedBit_DisableMergeHeadAndBody) << 55),
+                ExtensionFlag_DisableMergeTail        = (static_cast<u64>(SoftwareReservedBit_DisableMergeHeadTail)    << 55),
+                ExtensionFlag_Valid                   = (static_cast<u64>(SoftwareReservedBit_Valid)                   << 55),
 
                 ExtensionFlag_ValidAndMapped = (ExtensionFlag_Valid | MappingFlag_Mapped),
                 ExtensionFlag_TestTableMask  = (ExtensionFlag_Valid | (1ul << 1)),
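The three merge-disable flags and the software valid flag now share one 4-bit field instead of four ad-hoc bit positions. ARMv8 translation descriptors reserve bits [58:55] for software use, so the MMU ignores whatever the kernel stores there. Below is a minimal standalone sketch of the round-trip through that field; PackSoftwareBits and UnpackSoftwareBits are hypothetical stand-ins for illustration, not names from the patch.

    /* Sketch: packing a 4-bit software field into descriptor bits [58:55].
     * The MMU ignores these bits; only kernel code reads them back. */
    #include <cstdint>

    constexpr uint64_t PackSoftwareBits(uint64_t pte, uint8_t sw_bits) {
        return (pte & ~(0xFull << 55)) | (static_cast<uint64_t>(sw_bits & 0xF) << 55);
    }

    constexpr uint8_t UnpackSoftwareBits(uint64_t pte) {
        /* Mirrors GetSoftwareReservedBits() below, which reads only the three merge bits. */
        return static_cast<uint8_t>((pte >> 55) & 0x7);
    }

    static_assert(UnpackSoftwareBits(PackSoftwareBits(0, 0b0101)) == 0b101);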
@@ -140,9 +152,10 @@ namespace ams::kern::arch::arm64 {
             }
         public:
-            constexpr ALWAYS_INLINE bool IsMergeAllowedForTail() const { return this->GetBits(57, 1) == 0; }
-            constexpr ALWAYS_INLINE bool IsMergeAllowedForHeadAndBody() const { return this->GetBits(56, 1) == 0; }
-            constexpr ALWAYS_INLINE bool IsMergeAllowedForHead() const { return this->GetBits(55, 1) == 0; }
+            constexpr ALWAYS_INLINE u8 GetSoftwareReservedBits() const { return this->GetBits(55, 3); }
+            constexpr ALWAYS_INLINE bool IsHeadMergeDisabled() const { return (this->GetSoftwareReservedBits() & SoftwareReservedBit_DisableMergeHead) != 0; }
+            constexpr ALWAYS_INLINE bool IsHeadAndBodyMergeDisabled() const { return (this->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_DisableMergeHeadAndBody) != 0; }
+            constexpr ALWAYS_INLINE bool IsTailMergeDisabled() const { return (this->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_DisableMergeHeadTail) != 0; }
             constexpr ALWAYS_INLINE bool IsUserExecuteNever() const { return this->GetBits(54, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
@@ -170,13 +183,14 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
             constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast<u64>(MappingFlag_Mapped) == (1 << 0)); this->SetBit(0, m); return *this; }
 
-            constexpr ALWAYS_INLINE u64 GetEntryTemplate() const {
-                constexpr u64 Mask = (0xFFF0000000000FFFul & ~u64((0x1ul << 52) | ExtensionFlag_TestTableMask));
-                return this->attributes & Mask;
+            constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
+                constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+                return this->attributes & BaseMask;
             }
 
-            constexpr ALWAYS_INLINE bool Is(u64 attr) const {
-                return this->attributes == attr;
+            constexpr ALWAYS_INLINE bool IsForMerge(u64 attr) const {
+                constexpr u64 BaseMaskForMerge = ~static_cast<u64>(ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail);
+                return (this->attributes & BaseMaskForMerge) == attr;
             }
 
             constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
@@ -211,8 +225,8 @@ namespace ams::kern::arch::arm64 {
                 /* ... */
             }
 
-            constexpr explicit ALWAYS_INLINE L1PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
+            constexpr explicit ALWAYS_INLINE L1PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
+                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
             {
                 /* ... */
             }
@@ -234,9 +248,26 @@ namespace ams::kern::arch::arm64 {
                 }
             }
 
-            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
+            static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2BlockMask(size_t idx) {
+                constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+                if (idx == 0) {
+                    return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < L2ContiguousBlockSize / L2BlockSize) {
+                    return BaseMask | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < (L1BlockSize - L2ContiguousBlockSize) / L2BlockSize) {
+                    return BaseMask;
+                } else {
+                    return BaseMask | ExtensionFlag_DisableMergeTail;
+                }
+            }
+
+            constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
+                return this->attributes & GetEntryTemplateForL2BlockMask(idx);
+            }
+
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
                 /* Check whether this has the same permission/etc as the desired attributes. */
-                return L1PageTableEntry(BlockTag{}, this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
+                return L1PageTableEntry(BlockTag{}, this->GetBlock(), rhs, sw_reserved_bits, contig).GetRawAttributes() == this->GetRawAttributes();
             }
     };
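GetEntryTemplateForL2BlockMask encodes the splitting policy: when a 1 GiB L1 block is fractured into 2 MiB L2 entries, only the first entry keeps the head markers, the remainder of the first contiguous run keeps the head-and-body marker, the middle keeps nothing, and the final contiguous run keeps the tail marker. A standalone sketch with the ratios spelled out; the size constants are the usual 4 KiB-granule values and are assumptions here rather than values quoted from the patch.

    #include <cstddef>
    #include <cstdint>

    constexpr size_t L2BlockSize           = 0x200000;   /* 2 MiB, assumed  */
    constexpr size_t L2ContiguousBlockSize = 0x2000000;  /* 32 MiB, assumed */
    constexpr size_t L1BlockSize           = 0x40000000; /* 1 GiB, assumed  */

    enum SwBit : uint8_t { Head = 1 << 0, HeadBody = 1 << 1, Tail = 1 << 2 };

    /* Which merge-disable markers survive the mask for the L2 entry at index idx. */
    constexpr uint8_t KeptBitsForL2Index(size_t idx) {
        if (idx == 0) {
            return Head | HeadBody;      /* head of the original mapping */
        } else if (idx < L2ContiguousBlockSize / L2BlockSize) {
            return HeadBody;             /* rest of the first contiguous run */
        } else if (idx < (L1BlockSize - L2ContiguousBlockSize) / L2BlockSize) {
            return 0;                    /* body entries carry nothing */
        } else {
            return Tail;                 /* final contiguous run keeps the tail */
        }
    }

    static_assert(KeptBitsForL2Index(0)   == (Head | HeadBody));
    static_assert(KeptBitsForL2Index(15)  == HeadBody);
    static_assert(KeptBitsForL2Index(16)  == 0);
    static_assert(KeptBitsForL2Index(511) == Tail);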
@@ -256,8 +287,8 @@ namespace ams::kern::arch::arm64 {
                 /* ... */
             }
 
-            constexpr explicit ALWAYS_INLINE L2PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
+            constexpr explicit ALWAYS_INLINE L2PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
+                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
             {
                 /* ... */
             }
@@ -279,9 +310,41 @@ namespace ams::kern::arch::arm64 {
                 }
             }
 
-            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
+            static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2BlockMask(size_t idx) {
+                constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+                if (idx == 0) {
+                    return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < (L2ContiguousBlockSize / L2BlockSize) - 1) {
+                    return BaseMask;
+                } else {
+                    return BaseMask | ExtensionFlag_DisableMergeTail;
+                }
+            }
+
+            constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
+                return this->attributes & GetEntryTemplateForL2BlockMask(idx);
+            }
+
+            static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3BlockMask(size_t idx) {
+                constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+                if (idx == 0) {
+                    return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < L3ContiguousBlockSize / L3BlockSize) {
+                    return BaseMask | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < (L2BlockSize - L3ContiguousBlockSize) / L3BlockSize) {
+                    return BaseMask;
+                } else {
+                    return BaseMask | ExtensionFlag_DisableMergeTail;
+                }
+            }
+
+            constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
+                return this->attributes & GetEntryTemplateForL3BlockMask(idx);
+            }
+
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
                 /* Check whether this has the same permission/etc as the desired attributes. */
-                return L2PageTableEntry(BlockTag{}, this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
+                return L2PageTableEntry(BlockTag{}, this->GetBlock(), rhs, sw_reserved_bits, contig).GetRawAttributes() == this->GetRawAttributes();
             }
     };
 
@@ -289,8 +352,8 @@ namespace ams::kern::arch::arm64 {
         public:
            constexpr explicit ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
 
-            constexpr explicit ALWAYS_INLINE L3PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | static_cast<u64>(ExtensionFlag_TestTableMask))
+            constexpr explicit ALWAYS_INLINE L3PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
+                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | static_cast<u64>(ExtensionFlag_TestTableMask))
             {
                 /* ... */
             }
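One design note on the block constructors: the L1 and L2 variants OR in PageTableEntry::ExtensionFlag_Valid, while the L3 variant ORs in ExtensionFlag_TestTableMask, defined earlier as (ExtensionFlag_Valid | (1ul << 1)). The extra bit is the hardware descriptor-type bit: at levels 1 and 2 a block encodes 0b01 in descriptor bits [1:0] and a table encodes 0b11, but a valid level 3 page must itself encode 0b11, so the L3 template has to carry bit 1 in addition to the software valid flag at bit 58.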
@@ -301,9 +364,24 @@ namespace ams::kern::arch::arm64 {
                 return this->SelectBits(12, 36);
             }
 
-            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
+            static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3BlockMask(size_t idx) {
+                constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+                if (idx == 0) {
+                    return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < (L3ContiguousBlockSize / L3BlockSize) - 1) {
+                    return BaseMask;
+                } else {
+                    return BaseMask | ExtensionFlag_DisableMergeTail;
+                }
+            }
+
+            constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
+                return this->attributes & GetEntryTemplateForL3BlockMask(idx);
+            }
+
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
                 /* Check whether this has the same permission/etc as the desired attributes. */
-                return L3PageTableEntry(BlockTag{}, this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
+                return L3PageTableEntry(BlockTag{}, this->GetBlock(), rhs, sw_reserved_bits, contig).GetRawAttributes() == this->GetRawAttributes();
             }
     };
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
index e88d5cd19..95c94dced 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
@@ -29,6 +29,11 @@ namespace ams::kern::arch::arm64 {
         struct TraversalEntry {
             KPhysicalAddress phys_addr;
             size_t block_size;
+            u8 sw_reserved_bits;
+
+            constexpr bool IsHeadMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHead) != 0; }
+            constexpr bool IsHeadAndBodyMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadAndBody) != 0; }
+            constexpr bool IsTailMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadTail) != 0; }
         };
 
         struct TraversalContext {
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index 22f03c415..a9c9bf80f 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -334,11 +334,11 @@ namespace ams::kern::arch::arm64 {
 
             switch (operation) {
                 case OperationType_Map:
-                    return this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
+                    return this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll);
                 case OperationType_ChangePermissions:
-                    return this->ChangePermissions(virt_addr, num_pages, entry_template, false, page_list, reuse_ll);
+                    return this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, page_list, reuse_ll);
                 case OperationType_ChangePermissionsAndRefresh:
-                    return this->ChangePermissions(virt_addr, num_pages, entry_template, true, page_list, reuse_ll);
+                    return this->ChangePermissions(virt_addr, num_pages, 
entry_template, properties.disable_merge_attributes, true, page_list, reuse_ll); MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } } @@ -355,12 +355,12 @@ namespace ams::kern::arch::arm64 { auto entry_template = this->GetEntryTemplate(properties); switch (operation) { case OperationType_MapGroup: - return this->MapGroup(virt_addr, page_group, num_pages, entry_template, page_list, reuse_ll); + return this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll); MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } } - Result KPageTable::MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + Result KPageTable::MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L1BlockSize)); @@ -371,10 +371,13 @@ namespace ams::kern::arch::arm64 { auto &impl = this->GetImpl(); + u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false); + /* Iterate, mapping each block. */ for (size_t i = 0; i < num_pages; i += L1BlockSize / PageSize) { /* Map the block. */ - *impl.GetL1Entry(virt_addr) = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false); + *impl.GetL1Entry(virt_addr) = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false); + sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead); virt_addr += L1BlockSize; phys_addr += L1BlockSize; } @@ -382,7 +385,7 @@ namespace ams::kern::arch::arm64 { return ResultSuccess(); } - Result KPageTable::MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + Result KPageTable::MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L2BlockSize)); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L2BlockSize)); @@ -392,6 +395,8 @@ namespace ams::kern::arch::arm64 { KVirtualAddress l2_virt = Null; int l2_open_count = 0; + u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false); + /* Iterate, mapping each block. */ for (size_t i = 0; i < num_pages; i += L2BlockSize / PageSize) { KPhysicalAddress l2_phys = Null; @@ -415,7 +420,8 @@ namespace ams::kern::arch::arm64 { MESOSPHERE_ASSERT(l2_virt != Null); /* Map the block. 
*/ - *impl.GetL2EntryFromTable(l2_virt, virt_addr) = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false); + *impl.GetL2EntryFromTable(l2_virt, virt_addr) = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false); + sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead); l2_open_count++; virt_addr += L2BlockSize; phys_addr += L2BlockSize; @@ -438,7 +444,7 @@ namespace ams::kern::arch::arm64 { return ResultSuccess(); } - Result KPageTable::MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + Result KPageTable::MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); @@ -449,6 +455,8 @@ namespace ams::kern::arch::arm64 { int l2_open_count = 0; int l3_open_count = 0; + u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false); + /* Iterate, mapping each page. */ for (size_t i = 0; i < num_pages; i++) { KPhysicalAddress l3_phys = Null; @@ -505,7 +513,8 @@ namespace ams::kern::arch::arm64 { MESOSPHERE_ASSERT(l3_virt != Null); /* Map the page. */ - *impl.GetL3EntryFromTable(l3_virt, virt_addr) = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false); + *impl.GetL3EntryFromTable(l3_virt, virt_addr) = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false); + sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead); l3_open_count++; virt_addr += PageSize; phys_addr += PageSize; @@ -702,7 +711,7 @@ namespace ams::kern::arch::arm64 { return ResultSuccess(); } - Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); /* Cache initial addresses for use on cleanup. */ @@ -716,7 +725,7 @@ namespace ams::kern::arch::arm64 { auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); }; if (num_pages < ContiguousPageSize / PageSize) { - R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, L3BlockSize, page_list, reuse_ll)); + R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll)); remaining_pages -= num_pages; virt_addr += num_pages * PageSize; phys_addr += num_pages * PageSize; @@ -732,7 +741,7 @@ namespace ams::kern::arch::arm64 { /* Map pages, if we should. 
*/ if (pages_to_map > 0) { - R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, GetSmallerAlignment(alignment), page_list, reuse_ll)); + R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, disable_head_merge && virt_addr == orig_virt_addr, GetSmallerAlignment(alignment), page_list, reuse_ll)); remaining_pages -= pages_to_map; virt_addr += pages_to_map * PageSize; phys_addr += pages_to_map * PageSize; @@ -753,7 +762,7 @@ namespace ams::kern::arch::arm64 { /* Map pages, if we should. */ const size_t pages_to_map = util::AlignDown(remaining_pages, alignment / PageSize); if (pages_to_map > 0) { - R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, alignment, page_list, reuse_ll)); + R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, disable_head_merge && virt_addr == orig_virt_addr, alignment, page_list, reuse_ll)); remaining_pages -= pages_to_map; virt_addr += pages_to_map * PageSize; phys_addr += pages_to_map * PageSize; @@ -779,7 +788,7 @@ namespace ams::kern::arch::arm64 { return ResultSuccess(); } - Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); /* We want to maintain a new reference to every page in the group. */ @@ -798,7 +807,7 @@ namespace ams::kern::arch::arm64 { for (const auto &block : pg) { const KPhysicalAddress block_phys_addr = GetLinearMappedPhysicalAddress(block.GetAddress()); const size_t cur_pages = block.GetNumPages(); - R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, L3BlockSize, page_list, reuse_ll)); + R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll)); virt_addr += cur_pages * PageSize; mapped_pages += cur_pages; @@ -846,7 +855,7 @@ namespace ams::kern::arch::arm64 { } /* Map! */ - R_TRY(this->Map(virt_choice, phys_choice, virt_pages, entry_template, virt_block.GetAlignment(), page_list, reuse_ll)); + R_TRY(this->Map(virt_choice, phys_choice, virt_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, virt_block.GetAlignment(), page_list, reuse_ll)); /* Advance. */ phys_choice += virt_pages * PageSize; @@ -893,26 +902,39 @@ namespace ams::kern::arch::arm64 { if (l2_entry->IsTable()) { /* We have an L3 entry. */ L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr); - if (!l3_entry->IsBlock() || !(/* TODO l3_entry->IsContiguousAllowed() */false)) { + if (!l3_entry->IsBlock()) { return merged; } /* If it's not contiguous, try to make it so. */ if (!l3_entry->IsContiguous()) { virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize); - KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize); - const u64 entry_template = l3_entry->GetEntryTemplate(); + const KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize); + const u64 entry_template = l3_entry->GetEntryTemplateForMerge(); /* Validate that we can merge. 
*/ for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { - if (!impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L3BlockSize * i) | PageTableEntry::Type_L3Block)) { + const L3PageTableEntry *check_entry = impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i); + if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L3BlockSize * i) | PageTableEntry::Type_L3Block)) { + return merged; + } + if (i > 0 && (check_entry->IsHeadMergeDisabled() || check_entry->IsHeadAndBodyMergeDisabled())) { + return merged; + } + if ((i < (L3ContiguousBlockSize / L3BlockSize) - 1) && check_entry->IsTailMergeDisabled()) { return merged; } } + /* Determine the new software reserved bits. */ + const L3PageTableEntry *head_entry = impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * 0); + const L3PageTableEntry *tail_entry = impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * ((L3ContiguousBlockSize / L3BlockSize) - 1)); + auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled()); + /* Merge! */ for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { - impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->SetContiguous(true); + *impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i) = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + L3BlockSize * i, PageTableEntry(entry_template), sw_reserved_bits, true); + sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead); } /* Note that we updated. */ @@ -923,18 +945,30 @@ namespace ams::kern::arch::arm64 { /* We might be able to upgrade a contiguous set of L3 entries into an L2 block. */ virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize); KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L2BlockSize); - const u64 entry_template = l3_entry->GetEntryTemplate(); + const u64 entry_template = l3_entry->GetEntryTemplateForMerge(); /* Validate that we can merge. */ for (size_t i = 0; i < L2BlockSize / L3ContiguousBlockSize; i++) { - if (!impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * i)->Is(entry_template | GetInteger(phys_addr + L3ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L3Block)) { + const L3PageTableEntry *check_entry = impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * i); + if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L3ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L3Block)) { + return merged; + } + if (i > 0 && (check_entry->IsHeadMergeDisabled() || check_entry->IsHeadAndBodyMergeDisabled())) { + return merged; + } + if ((i < (L2BlockSize / L3ContiguousBlockSize) - 1) && check_entry->IsTailMergeDisabled()) { return merged; } } + /* Determine the new software reserved bits. */ + const L3PageTableEntry *head_entry = impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * 0); + const L3PageTableEntry *tail_entry = impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * ((L2BlockSize / L3ContiguousBlockSize) - 1)); + auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled()); + /* Merge! 
*/ PteDataSynchronizationBarrier(); - *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false); + *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false); /* Note that we updated. */ this->NoteUpdated(); @@ -950,7 +984,7 @@ namespace ams::kern::arch::arm64 { } /* If the l2 entry is not a block or we can't make it contiguous, we're done. */ - if (!l2_entry->IsBlock() || !(/* TODO l2_entry->IsContiguousAllowed() */ false)) { + if (!l2_entry->IsBlock()) { return merged; } @@ -958,18 +992,31 @@ namespace ams::kern::arch::arm64 { if (!l2_entry->IsContiguous()) { virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize); KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize); - const u64 entry_template = l2_entry->GetEntryTemplate(); + const u64 entry_template = l2_entry->GetEntryTemplateForMerge(); /* Validate that we can merge. */ for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { - if (!impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2BlockSize * i) | PageTableEntry::Type_L2Block)) { + const L2PageTableEntry *check_entry = impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i); + if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L2BlockSize * i) | PageTableEntry::Type_L2Block)) { + return merged; + } + if (i > 0 && (check_entry->IsHeadMergeDisabled() || check_entry->IsHeadAndBodyMergeDisabled())) { + return merged; + } + if ((i < (L2ContiguousBlockSize / L2BlockSize) - 1) && check_entry->IsTailMergeDisabled()) { return merged; } } + /* Determine the new software reserved bits. */ + const L2PageTableEntry *head_entry = impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * 0); + const L2PageTableEntry *tail_entry = impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * ((L2ContiguousBlockSize / L2BlockSize) - 1)); + auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled()); + /* Merge! */ for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { - impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->SetContiguous(true); + *impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i) = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + L2BlockSize * i, PageTableEntry(entry_template), sw_reserved_bits, true); + sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead); } /* Note that we updated. */ @@ -980,18 +1027,30 @@ namespace ams::kern::arch::arm64 { /* We might be able to upgrade a contiguous set of L2 entries into an L1 block. */ virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize); KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L1BlockSize); - const u64 entry_template = l2_entry->GetEntryTemplate(); + const u64 entry_template = l2_entry->GetEntryTemplateForMerge(); /* Validate that we can merge. 
*/ for (size_t i = 0; i < L1BlockSize / L2ContiguousBlockSize; i++) { - if (!impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L2Block)) { + const L2PageTableEntry *check_entry = impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * i); + if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L2ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L2Block)) { + return merged; + } + if (i > 0 && (check_entry->IsHeadMergeDisabled() || check_entry->IsHeadAndBodyMergeDisabled())) { + return merged; + } + if ((i < (L1ContiguousBlockSize / L2ContiguousBlockSize) - 1) && check_entry->IsTailMergeDisabled()) { return merged; } } + /* Determine the new software reserved bits. */ + const L2PageTableEntry *head_entry = impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * 0); + const L2PageTableEntry *tail_entry = impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * ((L1BlockSize / L2ContiguousBlockSize) - 1)); + auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled()); + /* Merge! */ PteDataSynchronizationBarrier(); - *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false); + *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false); /* Note that we updated. */ this->NoteUpdated(); @@ -1029,9 +1088,9 @@ namespace ams::kern::arch::arm64 { const KPhysicalAddress l2_phys = GetPageTablePhysicalAddress(l2_table); /* Set the entries in the L2 table. */ - const u64 entry_template = l1_entry->GetEntryTemplate(); for (size_t i = 0; i < L1BlockSize / L2BlockSize; i++) { - *(impl.GetL2EntryFromTable(l2_table, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), true); + const u64 entry_template = l1_entry->GetEntryTemplateForL2Block(i); + *(impl.GetL2EntryFromTable(l2_table, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, true); } /* Open references to the L2 table. */ @@ -1055,10 +1114,12 @@ namespace ams::kern::arch::arm64 { /* If we're contiguous, try to separate. */ if (l2_entry->IsContiguous()) { const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize); + const KPhysicalAddress block_phys_addr = l2_entry->GetBlock(); /* Mark the entries as non-contiguous. */ for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { - impl.GetL2Entry(l1_entry, block_virt_addr + L2BlockSize * i)->SetContiguous(false); + const u64 entry_template = l2_entry->GetEntryTemplateForL2Block(i); + *(impl.GetL2Entry(l1_entry, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false); } this->NoteUpdated(); } @@ -1076,9 +1137,9 @@ namespace ams::kern::arch::arm64 { const KPhysicalAddress l3_phys = GetPageTablePhysicalAddress(l3_table); /* Set the entries in the L3 table. 
*/ - const u64 entry_template = l2_entry->GetEntryTemplate(); for (size_t i = 0; i < L2BlockSize / L3BlockSize; i++) { - *(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), true); + const u64 entry_template = l2_entry->GetEntryTemplateForL3Block(i); + *(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, true); } /* Open references to the L3 table. */ @@ -1101,10 +1162,12 @@ namespace ams::kern::arch::arm64 { L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr); if (l3_entry->IsBlock() && l3_entry->IsContiguous()) { const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize); + const KPhysicalAddress block_phys_addr = l3_entry->GetBlock(); /* Mark the entries as non-contiguous. */ for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { - impl.GetL3Entry(l2_entry, block_virt_addr + L3BlockSize * i)->SetContiguous(false); + const u64 entry_template = l3_entry->GetEntryTemplateForL3Block(i); + *(impl.GetL3Entry(l2_entry, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false); } this->NoteUpdated(); } @@ -1124,7 +1187,7 @@ namespace ams::kern::arch::arm64 { return ResultSuccess(); } - Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll) { + Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); /* Separate pages before we change permissions. */ @@ -1149,23 +1212,75 @@ namespace ams::kern::arch::arm64 { ApplyOption_MergeMappings = (1u << 1), }; - auto ApplyEntryTemplate = [this, virt_addr, num_pages, page_list](PageTableEntry entry_template, u32 apply_option) -> void { + auto ApplyEntryTemplate = [this, virt_addr, disable_merge_attr, num_pages, page_list](PageTableEntry entry_template, u32 apply_option) -> void { /* Create work variables for us to use. */ + const KProcessAddress orig_virt_addr = virt_addr; + const KProcessAddress end_virt_addr = orig_virt_addr + (num_pages * PageSize); KProcessAddress cur_virt_addr = virt_addr; size_t remaining_pages = num_pages; auto &impl = this->GetImpl(); + /* Parse the disable merge attrs. */ + const bool attr_disable_head = (disable_merge_attr & DisableMergeAttribute_DisableHead) != 0; + const bool attr_disable_head_body = (disable_merge_attr & DisableMergeAttribute_DisableHeadAndBody) != 0; + const bool attr_enable_head_body = (disable_merge_attr & DisableMergeAttribute_EnableHeadAndBody) != 0; + const bool attr_disable_tail = (disable_merge_attr & DisableMergeAttribute_DisableTail) != 0; + const bool attr_enable_tail = (disable_merge_attr & DisableMergeAttribute_EnableTail) != 0; + const bool attr_enable_and_merge = (disable_merge_attr & DisableMergeAttribute_EnableAndMergeHeadBodyTail) != 0; + /* Begin traversal. 
*/ TraversalContext context; TraversalEntry next_entry; MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr)); /* Continue changing properties until we've changed them for all pages. */ + bool cleared_disable_merge_bits = false; while (remaining_pages > 0) { MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size)); MESOSPHERE_ABORT_UNLESS(next_entry.block_size <= remaining_pages * PageSize); + /* Determine if we're at the start. */ + const bool is_start = (cur_virt_addr == orig_virt_addr); + const bool is_end = ((cur_virt_addr + next_entry.block_size) == end_virt_addr); + + /* Determine the relevant merge attributes. */ + bool disable_head_merge, disable_head_body_merge, disable_tail_merge; + if (next_entry.IsHeadMergeDisabled()) { + disable_head_merge = true; + } else if (attr_disable_head) { + disable_head_merge = is_start; + } else { + disable_head_merge = false; + } + if (is_start) { + if (attr_disable_head_body) { + disable_head_body_merge = true; + } else if (attr_enable_head_body) { + disable_head_body_merge = false; + } else { + disable_head_body_merge = (!attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled()); + } + } else { + disable_head_body_merge = (!attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled()); + cleared_disable_merge_bits |= (attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled()); + } + if (is_end) { + if (attr_disable_tail) { + disable_tail_merge = true; + } else if (attr_enable_tail) { + disable_tail_merge = false; + } else { + disable_tail_merge = (!attr_enable_and_merge && next_entry.IsTailMergeDisabled()); + } + } else { + disable_tail_merge = (!attr_enable_and_merge && next_entry.IsTailMergeDisabled()); + cleared_disable_merge_bits |= (attr_enable_and_merge && next_entry.IsTailMergeDisabled()); + } + + /* Encode the merge disable flags into the software reserved bits. */ + u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, disable_head_body_merge, disable_tail_merge); + /* If we should flush entries, do so. */ if ((apply_option & ApplyOption_FlushDataCache) != 0) { if (IsHeapPhysicalAddress(next_entry.phys_addr)) { @@ -1179,7 +1294,7 @@ namespace ams::kern::arch::arm64 { case L1BlockSize: { /* Write the updated entry. */ - *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr, entry_template, false); + *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr, entry_template, sw_reserved_bits, false); } break; case L2ContiguousBlockSize: @@ -1196,7 +1311,8 @@ namespace ams::kern::arch::arm64 { /* Write the updated entry. */ const bool contig = next_entry.block_size == L2ContiguousBlockSize; for (size_t i = 0; i < num_l2_blocks; i++) { - *impl.GetL2EntryFromTable(l2_virt, cur_virt_addr + L2BlockSize * i) = L2PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L2BlockSize * i, entry_template, contig); + *impl.GetL2EntryFromTable(l2_virt, cur_virt_addr + L2BlockSize * i) = L2PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L2BlockSize * i, entry_template, sw_reserved_bits, contig); + sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead); } } break; @@ -1220,7 +1336,8 @@ namespace ams::kern::arch::arm64 { /* Write the updated entry. 
*/
                                 const bool contig = next_entry.block_size == L3ContiguousBlockSize;
                                 for (size_t i = 0; i < num_l3_blocks; i++) {
-                                    *impl.GetL3EntryFromTable(l3_virt, cur_virt_addr + L3BlockSize * i) = L3PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L3BlockSize * i, entry_template, contig);
+                                    *impl.GetL3EntryFromTable(l3_virt, cur_virt_addr + L3BlockSize * i) = L3PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L3BlockSize * i, entry_template, sw_reserved_bits, contig);
+                                    sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
                                 }
                             }
                             break;
                         MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
                     }
 
                     /* If our option asks us to, try to merge mappings. */
-                    bool merge = ((apply_option & ApplyOption_MergeMappings) != 0) && next_entry.block_size < L1BlockSize;
+                    bool merge = ((apply_option & ApplyOption_MergeMappings) != 0 || cleared_disable_merge_bits) && next_entry.block_size < L1BlockSize;
                     if (merge) {
                         const size_t larger_align = GetLargerAlignment(next_entry.block_size);
                         if (util::IsAligned(GetInteger(cur_virt_addr) + next_entry.block_size, larger_align)) {
                             const uintptr_t aligned_start = util::AlignDown(GetInteger(cur_virt_addr), larger_align);
-                            if (virt_addr <= aligned_start && aligned_start + larger_align - 1 < GetInteger(virt_addr) + (num_pages * PageSize) - 1) {
+                            if (orig_virt_addr <= aligned_start && aligned_start + larger_align - 1 < GetInteger(orig_virt_addr) + (num_pages * PageSize) - 1) {
                                 merge = this->MergePages(cur_virt_addr, page_list);
                             } else {
                                 merge = false;
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
index 1ab6f0e91..f2c8a8bdc 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
@@ -45,11 +45,13 @@ namespace ams::kern::arch::arm64 {
             } else {
                 out_entry->block_size = L3BlockSize;
             }
+            out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
 
             return true;
         } else {
-            out_entry->phys_addr  = Null<KPhysicalAddress>;
-            out_entry->block_size = L3BlockSize;
+            out_entry->phys_addr        = Null<KPhysicalAddress>;
+            out_entry->block_size       = L3BlockSize;
+            out_entry->sw_reserved_bits = 0;
             return false;
         }
     }
@@ -66,14 +68,17 @@ namespace ams::kern::arch::arm64 {
             } else {
                 out_entry->block_size = L2BlockSize;
             }
+            out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
+
             /* Set the output context. */
             out_context->l3_entry = nullptr;
             return true;
         } else if (l2_entry->IsTable()) {
             return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr);
         } else {
-            out_entry->phys_addr  = Null<KPhysicalAddress>;
-            out_entry->block_size = L2BlockSize;
+            out_entry->phys_addr        = Null<KPhysicalAddress>;
+            out_entry->block_size       = L2BlockSize;
+            out_entry->sw_reserved_bits = 0;
             out_context->l3_entry = nullptr;
             return false;
         }
     }
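The extraction helpers now surface the packed field through TraversalEntry, and every invalid path zeroes it, so a stale value cannot leak into callers such as ChangePermissions, which queries next_entry's merge flags unconditionally. A standalone model of the struct's role, with kernel types replaced by plain integers; this is a restatement for illustration, not kernel code.

    #include <cstddef>
    #include <cstdint>

    enum : uint8_t { DisableMergeHead = 1 << 0, DisableMergeHeadAndBody = 1 << 1, DisableMergeHeadTail = 1 << 2 };

    struct TraversalEntryModel {
        uint64_t phys_addr;        /* stands in for KPhysicalAddress */
        size_t   block_size;
        uint8_t  sw_reserved_bits; /* zeroed on every invalid path */

        constexpr bool IsHeadMergeDisabled() const { return (sw_reserved_bits & DisableMergeHead) != 0; }
        constexpr bool IsHeadAndBodyMergeDisabled() const { return (sw_reserved_bits & DisableMergeHeadAndBody) != 0; }
        constexpr bool IsTailMergeDisabled() const { return (sw_reserved_bits & DisableMergeHeadTail) != 0; }
    };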
@@ -91,6 +96,8 @@ namespace ams::kern::arch::arm64 {
             } else {
                 out_entry->block_size = L1BlockSize;
             }
+            out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits();
+
             /* Set the output context. */
             out_context->l2_entry = nullptr;
             out_context->l3_entry = nullptr;
@@ -98,8 +105,9 @@ namespace ams::kern::arch::arm64 {
         } else if (l1_entry->IsTable()) {
             return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr);
         } else {
-            out_entry->phys_addr  = Null<KPhysicalAddress>;
-            out_entry->block_size = L1BlockSize;
+            out_entry->phys_addr        = Null<KPhysicalAddress>;
+            out_entry->block_size       = L1BlockSize;
+            out_entry->sw_reserved_bits = 0;
             out_context->l2_entry = nullptr;
             out_context->l3_entry = nullptr;
             return false;
@@ -108,8 +116,9 @@ namespace ams::kern::arch::arm64 {
 
     bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const {
         /* Setup invalid defaults. */
-        out_entry->phys_addr  = Null<KPhysicalAddress>;
-        out_entry->block_size = L1BlockSize;
+        out_entry->phys_addr        = Null<KPhysicalAddress>;
+        out_entry->block_size       = L1BlockSize;
+        out_entry->sw_reserved_bits = 0;
         out_context->l1_entry = this->table + this->num_entries;
         out_context->l2_entry = nullptr;
         out_context->l3_entry = nullptr;
@@ -208,8 +217,9 @@ namespace ams::kern::arch::arm64 {
                 valid = this->ExtractL1Entry(out_entry, context, context->l1_entry, Null<KProcessAddress>);
             } else {
                 /* Invalid, end traversal. */
-                out_entry->phys_addr  = Null<KPhysicalAddress>;
-                out_entry->block_size = L1BlockSize;
+                out_entry->phys_addr        = Null<KPhysicalAddress>;
+                out_entry->block_size       = L1BlockSize;
+                out_entry->sw_reserved_bits = 0;
                 context->l1_entry = this->table + this->num_entries;
                 context->l2_entry = nullptr;
                 context->l3_entry = nullptr;
diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index 2a642dfba..f1ddcb68b 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -1069,7 +1069,7 @@ namespace ams::kern {
             MESOSPHERE_ABORT_UNLESS(map_end_address != map_address);
 
             /* Determine if we should disable head merge. */
-            const bool disable_head_merge = info.GetAddress() >= GetInteger(start_address);
+            const bool disable_head_merge = info.GetAddress() >= GetInteger(start_address) /* TODO */;
             const KPageProperties map_properties = { info.GetPermission(), false, false, disable_head_merge ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
 
             /* While we have pages to map, map them. */
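For reference, the head/body/tail decision that ChangePermissions makes per traversed entry (see the kern_k_page_table.cpp hunks above) distills to a small pure function. Everything below is a stand-in written to illustrate my reading of that logic: Decide and the abbreviated Attr enumerators correspond to, but are not, the kernel's DisableMergeAttribute_* values.

    #include <cstdint>

    enum Attr : uint32_t {
        DisableHead        = 1u << 0,
        DisableHeadAndBody = 1u << 1,
        EnableHeadAndBody  = 1u << 2,
        DisableTail        = 1u << 3,
        EnableTail         = 1u << 4,
        EnableAndMergeAll  = 1u << 5, /* models EnableAndMergeHeadBodyTail */
    };

    struct MergeFlags { bool head, head_body, tail; };

    /* stored: flags already present on the entry; is_start/is_end: position of
     * this entry within the range whose permissions are being changed. */
    constexpr MergeFlags Decide(uint32_t attr, MergeFlags stored, bool is_start, bool is_end) {
        MergeFlags out{};
        out.head = stored.head || ((attr & DisableHead) != 0 && is_start);
        if (is_start && (attr & DisableHeadAndBody)) {
            out.head_body = true;
        } else if (is_start && (attr & EnableHeadAndBody)) {
            out.head_body = false;
        } else {
            out.head_body = (attr & EnableAndMergeAll) == 0 && stored.head_body;
        }
        if (is_end && (attr & DisableTail)) {
            out.tail = true;
        } else if (is_end && (attr & EnableTail)) {
            out.tail = false;
        } else {
            out.tail = (attr & EnableAndMergeAll) == 0 && stored.tail;
        }
        return out;
    }

    /* The head of the range gets its head-merge disabled on request... */
    static_assert(Decide(DisableHead, MergeFlags{}, true, false).head);
    /* ...and the enable-and-merge attribute clears stored body/tail flags elsewhere. */
    static_assert(!Decide(EnableAndMergeAll, MergeFlags{false, true, true}, false, false).head_body);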