Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-12-22 12:21:18 +00:00)

commit 9610f42dc0 (parent 02e837d82e)

    kern: continue page table refactor, implement separate/unmap

7 changed files with 265 additions and 322 deletions

@@ -233,8 +233,11 @@ namespace ams::kern::arch::arm64 {
 
             bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);
+            void MergePages(TraversalContext *context, PageLinkedList *page_list);
+            void MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list);
 
-            ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
-            Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
+            Result SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
+            Result SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll);
 
             Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll);

@@ -20,18 +20,22 @@
 
 namespace ams::kern::arch::arm64 {
 
+    constexpr size_t BlocksPerContiguousBlock = 0x10;
+    constexpr size_t BlocksPerTable           = PageSize / sizeof(u64);
+
     constexpr size_t L1BlockSize           = 1_GB;
-    constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize;
+    constexpr size_t L1ContiguousBlockSize = BlocksPerContiguousBlock * L1BlockSize;
     constexpr size_t L2BlockSize           = 2_MB;
-    constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
+    constexpr size_t L2ContiguousBlockSize = BlocksPerContiguousBlock * L2BlockSize;
     constexpr size_t L3BlockSize           = PageSize;
-    constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;
+    constexpr size_t L3ContiguousBlockSize = BlocksPerContiguousBlock * L3BlockSize;
 
     class PageTableEntry {
         public:
             struct InvalidTag{};
             struct TableTag{};
             struct BlockTag{};
+            struct SeparateContiguousTag{};
 
             enum Permission : u64 {
                 Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),

@@ -122,6 +126,25 @@ namespace ams::kern::arch::arm64 {
             {
                 /* ... */
             }
 
+            /* Construct a table. */
+            constexpr explicit ALWAYS_INLINE PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn, size_t num_blocks)
+                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | (num_blocks << 2) | 0x3)
+            {
+                /* ... */
+            }
+
+            /* Construct a block. */
+            constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig, bool page)
+                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | (page ? ExtensionFlag_TestTableMask : ExtensionFlag_Valid))
+            {
+                /* ... */
+            }
+
+            constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, SeparateContiguousTag)
+                : PageTableEntry(attr, GetInteger(phys_addr))
+            {
+                /* ... */
+            }
         protected:
             constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
                 return (m_attributes >> offset) & ((1ul << count) - 1);

@@ -194,6 +217,12 @@
             constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
             constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast<u64>(MappingFlag_Mapped == (1 << 0))); this->SetBit(0, m); return *this; }
 
+            constexpr ALWAYS_INLINE size_t GetTableNumEntries() const { return this->GetBits(2, 10); }
+            constexpr ALWAYS_INLINE decltype(auto) SetTableNumEntries(size_t num) { this->SetBits(2, 10, num); }
+
+            constexpr ALWAYS_INLINE decltype(auto) AddTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() + num); }
+            constexpr ALWAYS_INLINE decltype(auto) RemoveTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() - num); }
+
             constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
                 constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
                 return m_attributes & BaseMask;

@@ -204,6 +233,38 @@
                 return (m_attributes & BaseMaskForMerge) == attr;
             }
 
+            static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguousMask(size_t idx) {
+                constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+
+                if (idx == 0) {
+                    return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < BlocksPerContiguousBlock - 1) {
+                    return BaseMask;
+                } else {
+                    return BaseMask | ExtensionFlag_DisableMergeTail;
+                }
+            }
+
+            constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguous(size_t idx) const {
+                return m_attributes & GetEntryTemplateForSeparateContiguousMask(idx);
+            }
+
+            static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateMask(size_t idx) {
+                constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+
+                if (idx == 0) {
+                    return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < BlocksPerContiguousBlock) {
+                    return BaseMask | ExtensionFlag_DisableMergeHeadAndBody;
+                } else if (idx < BlocksPerTable - 1) {
+                    return BaseMask;
+                } else {
+                    return BaseMask | ExtensionFlag_DisableMergeTail;
+                }
+            }
+
+            constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparate(size_t idx) const {
+                return m_attributes & GetEntryTemplateForSeparateMask(idx);
+            }
+
             constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafe() const {
                 return m_attributes;
             }

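The GetEntryTemplateForSeparateContiguousMask/GetEntryTemplateForSeparateMask helpers added above decide which of the parent's software disable-merge bits each sub-entry inherits when a large mapping is split: the first sub-entry keeps the head bits, entries still inside the first contiguous run keep only the head-and-body bit, interior entries keep none, and the last entry keeps the tail bit. A minimal standalone sketch of that index-based selection for the contiguous case (flag names and values here are illustrative, not the kernel's actual bit assignments):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for the software disable-merge bits (values are arbitrary).
enum Flag : uint64_t {
    Flag_DisableMergeHead        = 1ull << 0,
    Flag_DisableMergeHeadAndBody = 1ull << 1,
    Flag_DisableMergeTail        = 1ull << 2,
};

constexpr size_t BlocksPerContiguousBlock = 0x10;

// Which disable-merge bits does the idx-th of 16 entries inherit when a
// contiguous block is broken apart? Mirrors the first/middle/last split above.
constexpr uint64_t InheritedFlagsForSeparateContiguous(size_t idx) {
    if (idx == 0) {
        return Flag_DisableMergeHead | Flag_DisableMergeHeadAndBody;
    } else if (idx < BlocksPerContiguousBlock - 1) {
        return 0;
    } else {
        return Flag_DisableMergeTail;
    }
}

int main() {
    for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
        std::printf("entry %2zu inherits 0x%llx\n", i, (unsigned long long)InheritedFlagsForSeparateContiguous(i));
    }
}
```
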
@@ -45,7 +45,7 @@
         };
 
         struct TraversalContext {
-            const PageTableEntry *level_entries[EntryLevel_Count];
+            PageTableEntry *level_entries[EntryLevel_Count];
             EntryLevel level;
             bool is_contiguous;
         };

@@ -125,6 +125,10 @@
             ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) const {
                 return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
             }
 
+            static constexpr size_t GetBlockSize(EntryLevel level, bool contiguous = false) {
+                return 1 << (PageBits + LevelBits * level + 4 * contiguous);
+            }
         public:
             constexpr explicit KPageTableImpl(util::ConstantInitializeTag) : m_table(), m_is_kernel(), m_num_entries() { /* ... */ }

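The new GetBlockSize helper packs the whole 4 KiB-granule geometry into one expression: each level maps 9 more address bits than the level below it, and the contiguous hint groups 16 entries, adding 4 more. A standalone sketch that checks the resulting sizes, assuming PageBits = 12 and LevelBits = 9 (the values a 4 KiB granule implies; they are not shown in this diff):

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t PageBits  = 12;  // 4 KiB granule
constexpr size_t LevelBits = 9;   // 512 entries per table level

enum EntryLevel { EntryLevel_L3 = 0, EntryLevel_L2 = 1, EntryLevel_L1 = 2 };

// Same shape as the kernel helper, but computed in uint64_t so the
// 16 GiB contiguous-L1 case is well defined on any host.
constexpr uint64_t GetBlockSize(EntryLevel level, bool contiguous = false) {
    return uint64_t(1) << (PageBits + LevelBits * level + 4 * contiguous);
}

static_assert(GetBlockSize(EntryLevel_L3)       ==      0x1000, "L3 block is 4 KiB");
static_assert(GetBlockSize(EntryLevel_L3, true) ==     0x10000, "contiguous L3 run is 64 KiB");
static_assert(GetBlockSize(EntryLevel_L2)       ==    0x200000, "L2 block is 2 MiB");
static_assert(GetBlockSize(EntryLevel_L2, true) ==   0x2000000, "contiguous L2 run is 32 MiB");
static_assert(GetBlockSize(EntryLevel_L1)       ==  0x40000000, "L1 block is 1 GiB");
static_assert(GetBlockSize(EntryLevel_L1, true) == 0x400000000, "contiguous L1 run is 16 GiB");

int main() { return 0; }
```
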
@@ -141,6 +145,9 @@
             bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;
 
             bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;
 
+            static bool MergePages(KVirtualAddress *out, TraversalContext *context);
+            void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const;
+
     };
 
 }

@@ -20,7 +20,7 @@ endif
 DEFINES  := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE
 SETTINGS := $(ATMOSPHERE_SETTINGS) $(ATMOSPHERE_OPTIMIZATION_FLAG) -mgeneral-regs-only -ffixed-x18 -Wextra -Werror -fno-non-call-exceptions
 CFLAGS   := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE)
-CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit -flto
+CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit
 ASFLAGS  := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE)
 
 SOURCES += $(foreach v,$(call ALL_SOURCE_DIRS,../libvapours/source),$(if $(findstring ../libvapours/source/sdmmc,$v),,$v))

@@ -280,18 +280,7 @@
         if (operation == OperationType_Unmap) {
             R_RETURN(this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll));
         } else if (operation == OperationType_Separate) {
-            const size_t size = num_pages * PageSize;
-            R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
-            ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
-
-            if (num_pages > 1) {
-                const auto end_page  = virt_addr + size;
-                const auto last_page = end_page - PageSize;
-
-                R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
-            }
-
-            R_SUCCEED();
+            R_RETURN(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));
         } else {
             auto entry_template = this->GetEntryTemplate(properties);

@@ -519,16 +508,7 @@
 
         /* If we're not forcing an unmap, separate pages immediately. */
         if (!force) {
-            const size_t size = num_pages * PageSize;
-            R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
-            ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
-
-            if (num_pages > 1) {
-                const auto end_page  = virt_addr + size;
-                const auto last_page = end_page - PageSize;
-
-                R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
-            }
+            R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));
         }
 
         /* Cache initial addresses for use on cleanup. */

@@ -558,10 +538,7 @@
             /* Handle the case where the block is bigger than it should be. */
             if (next_entry.block_size > remaining_pages * PageSize) {
                 MESOSPHERE_ABORT_UNLESS(force);
-                MESOSPHERE_R_ABORT_UNLESS(this->SeparatePages(virt_addr, remaining_pages * PageSize, page_list, reuse_ll));
-                const bool new_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
-                MESOSPHERE_ASSERT(new_valid);
-                MESOSPHERE_UNUSED(new_valid);
+                MESOSPHERE_R_ABORT_UNLESS(this->SeparatePagesImpl(std::addressof(next_entry), std::addressof(context), virt_addr, remaining_pages * PageSize, page_list, reuse_ll));
             }
 
             /* Check that our state is coherent. */

@@ -569,87 +546,38 @@
             MESOSPHERE_ASSERT(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
 
             /* Unmap the block. */
-            L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
-            switch (next_entry.block_size) {
-                case L1BlockSize:
-                    {
-                        /* Clear the entry. */
-                        *l1_entry = InvalidL1PageTableEntry;
-                    }
-                    break;
-                case L2ContiguousBlockSize:
-                case L2BlockSize:
-                    {
-                        /* Get the number of L2 blocks. */
-                        const size_t num_l2_blocks = next_entry.block_size / L2BlockSize;
-
-                        /* Get the L2 entry. */
-                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
-                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
-                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
-
-                        /* Clear the entry. */
-                        for (size_t i = 0; i < num_l2_blocks; i++) {
-                            *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry;
-                        }
-                        PteDataMemoryBarrier();
-
-                        /* Close references to the L2 table. */
-                        if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
-                            if (this->GetPageTableManager().Close(l2_virt, num_l2_blocks)) {
-                                *l1_entry = InvalidL1PageTableEntry;
-                                this->NoteUpdated();
-                                this->FreePageTable(page_list, l2_virt);
-                                pages_to_close.CloseAndReset();
-                            }
-                        }
-                    }
-                    break;
-                case L3ContiguousBlockSize:
-                case L3BlockSize:
-                    {
-                        /* Get the number of L3 blocks. */
-                        const size_t num_l3_blocks = next_entry.block_size / L3BlockSize;
-
-                        /* Get the L2 entry. */
-                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
-                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
-                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
-                        L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr);
-
-                        /* Get the L3 entry. */
-                        KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
-                        MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys));
-                        const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys);
-
-                        /* Clear the entry. */
-                        for (size_t i = 0; i < num_l3_blocks; i++) {
-                            *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry;
-                        }
-                        PteDataMemoryBarrier();
-
-                        /* Close references to the L3 table. */
-                        if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
-                            if (this->GetPageTableManager().Close(l3_virt, num_l3_blocks)) {
-                                *l2_entry = InvalidL2PageTableEntry;
-                                this->NoteUpdated();
-
-                                /* Close reference to the L2 table. */
-                                if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
-                                    if (this->GetPageTableManager().Close(l2_virt, 1)) {
-                                        *l1_entry = InvalidL1PageTableEntry;
-                                        this->NoteUpdated();
-                                        this->FreePageTable(page_list, l2_virt);
-                                    }
-                                }
-
-                                this->FreePageTable(page_list, l3_virt);
-                                pages_to_close.CloseAndReset();
-                            }
-                        }
-                    }
-                    break;
-                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
-            }
+            bool freeing_table = false;
+            while (true) {
+                /* Clear the entries. */
+                const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1;
+                auto *pte = reinterpret_cast<PageTableEntry *>(context.is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)) : reinterpret_cast<uintptr_t>(context.level_entries[context.level]));
+                for (size_t i = 0; i < num_to_clear; ++i) {
+                    pte[i] = InvalidPageTableEntry;
+                }
+
+                /* Remove the entries from the previous table. */
+                if (context.level != KPageTableImpl::EntryLevel_L1) {
+                    context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear);
+                }
+
+                /* If we cleared a table, we need to note that we updated and free the table. */
+                if (freeing_table) {
+                    this->NoteUpdated();
+                    this->FreePageTable(page_list, KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize)));
+                }
+
+                /* Advance; we're no longer contiguous. */
+                context.is_contiguous = false;
+                context.level_entries[context.level] = pte + num_to_clear - 1;
+
+                /* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
+                if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
+                    break;
+                }
+
+                /* Advance; we will not be working with blocks any more. */
+                context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
+                freeing_table = true;
+            }
 
             /* Close the blocks. */

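The rewritten unmap loop above leans on the per-table entry counters introduced in this commit (GetTableNumEntries/RemoveTableEntries): when clearing PTEs empties a table, the walk moves up one level, clears the parent entry, frees the table, and may repeat. A toy model of that cascade using plain counters instead of real page table entries (not the kernel code):

```cpp
#include <cstddef>
#include <cstdio>

// Toy model: num_entries tracks how many live entries remain in the table at
// each level, like the table-entry counters this commit adds to table PTEs.
struct ToyTable {
    size_t num_entries;
};

// Clear `count` entries at `level`; if that empties the table, clear its entry
// in the parent table too, "free" it, and continue one level up.
void ClearAndFree(ToyTable (&tables)[3], int level, size_t count) {
    bool freeing_table = false;
    while (true) {
        tables[level].num_entries -= count;
        if (freeing_table) {
            std::printf("freed the now-empty level-%d table\n", level - 1);
        }
        if (level == 2 /* top level */ || tables[level].num_entries != 0) {
            break;
        }
        ++level;              // move to the parent table
        count = 1;            // its single entry pointing at the empty table goes away
        freeing_table = true;
    }
}

int main() {
    ToyTable tables[3] = { {16}, {1}, {1} };  // leaf table has 16 live PTEs left
    ClearAndFree(tables, 0, 16);              // unmapping those 16 pages cascades upward
}
```
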
@@ -663,8 +591,19 @@
             }
 
             /* Advance. */
-            virt_addr       += next_entry.block_size;
-            remaining_pages -= next_entry.block_size / PageSize;
+            size_t freed_size = next_entry.block_size;
+            if (freeing_table) {
+                /* We advanced more than by the block, so we need to calculate the actual advanced size. */
+                const KProcessAddress new_virt_addr = util::AlignUp(GetInteger(virt_addr), impl.GetBlockSize(context.level, context.is_contiguous));
+                MESOSPHERE_ABORT_UNLESS(new_virt_addr >= virt_addr + next_entry.block_size);
+
+                freed_size = std::min<size_t>(new_virt_addr - virt_addr, remaining_pages * PageSize);
+            }
+
+            /* We can just advance by the block size. */
+            virt_addr       += freed_size;
+            remaining_pages -= freed_size / PageSize;
+
             next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
         }

@@ -1032,141 +971,116 @@
             return merged;
         }
 
-        Result KPageTable::SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
-            MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-
-            auto &impl = this->GetImpl();
-
-            /* First, try to separate an L1 block into contiguous L2 blocks. */
-            L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
-            if (l1_entry->IsBlock()) {
-                /* If our block size is too big, don't bother. */
-                R_SUCCEED_IF(block_size >= L1BlockSize);
-
-                /* Get the addresses we're working with. */
-                const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize);
-                const KPhysicalAddress block_phys_addr = l1_entry->GetBlock();
-
-                /* Allocate a new page for the L2 table. */
-                const KVirtualAddress l2_table = this->AllocatePageTable(page_list, reuse_ll);
-                R_UNLESS(l2_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
-                const KPhysicalAddress l2_phys = GetPageTablePhysicalAddress(l2_table);
-
-                /* Set the entries in the L2 table. */
-                for (size_t i = 0; i < L1BlockSize / L2BlockSize; i++) {
-                    const u64 entry_template = l1_entry->GetEntryTemplateForL2Block(i);
-                    *(impl.GetL2EntryFromTable(l2_table, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, true);
-                }
-
-                /* Open references to the L2 table. */
-                this->GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize);
-
-                /* Replace the L1 entry with one to the new table. */
-                PteDataMemoryBarrier();
-                *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
-                this->NoteUpdated();
-            }
-
-            /* If we don't have an l1 table, we're done. */
-            MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable() || l1_entry->IsEmpty());
-            R_SUCCEED_IF(!l1_entry->IsTable());
-
-            /* We want to separate L2 contiguous blocks into L2 blocks, so check that our size permits that. */
-            R_SUCCEED_IF(block_size >= L2ContiguousBlockSize);
-
-            L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr);
-            if (l2_entry->IsBlock()) {
-                /* If we're contiguous, try to separate. */
-                if (l2_entry->IsContiguous()) {
-                    const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
-                    const KPhysicalAddress block_phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize);
-
-                    /* Mark the entries as non-contiguous. */
-                    for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
-                        L2PageTableEntry *target = impl.GetL2Entry(l1_entry, block_virt_addr + L2BlockSize * i);
-                        const u64 entry_template = target->GetEntryTemplateForL2Block(i);
-                        *target = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false);
-                    }
-                    this->NoteUpdated();
-                }
-
-                /* We want to separate L2 blocks into L3 contiguous blocks, so check that our size permits that. */
-                R_SUCCEED_IF(block_size >= L2BlockSize);
-
-                /* Get the addresses we're working with. */
-                const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize);
-                const KPhysicalAddress block_phys_addr = l2_entry->GetBlock();
-
-                /* Allocate a new page for the L3 table. */
-                const KVirtualAddress l3_table = this->AllocatePageTable(page_list, reuse_ll);
-                R_UNLESS(l3_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
-                const KPhysicalAddress l3_phys = GetPageTablePhysicalAddress(l3_table);
-
-                /* Set the entries in the L3 table. */
-                for (size_t i = 0; i < L2BlockSize / L3BlockSize; i++) {
-                    const u64 entry_template = l2_entry->GetEntryTemplateForL3Block(i);
-                    *(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, true);
-                }
-
-                /* Open references to the L3 table. */
-                this->GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize);
-
-                /* Replace the L2 entry with one to the new table. */
-                PteDataMemoryBarrier();
-                *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
-                this->NoteUpdated();
-            }
-
-            /* If we don't have an L3 table, we're done. */
-            MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable() || l2_entry->IsEmpty());
-            R_SUCCEED_IF(!l2_entry->IsTable());
-
-            /* We want to separate L3 contiguous blocks into L2 blocks, so check that our size permits that. */
-            R_SUCCEED_IF(block_size >= L3ContiguousBlockSize);
-
-            /* If we're contiguous, try to separate. */
-            L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr);
-            if (l3_entry->IsBlock() && l3_entry->IsContiguous()) {
-                const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize);
-                const KPhysicalAddress block_phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize);
-
-                /* Mark the entries as non-contiguous. */
-                for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
-                    L3PageTableEntry *target = impl.GetL3Entry(l2_entry, block_virt_addr + L3BlockSize * i);
-                    const u64 entry_template = target->GetEntryTemplateForL3Block(i);
-                    *target = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false);
-                }
-                this->NoteUpdated();
-            }
-
-            /* We're done! */
-            R_SUCCEED();
-        }
-
-        Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
-            MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-
-            /* If we fail while separating, re-merge. */
-            ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
-
-            /* Try to separate pages. */
-            R_RETURN(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
-        }
+        void KPageTable::MergePages(TraversalContext *context, PageLinkedList *page_list) {
+            MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+            auto &impl = this->GetImpl();
+
+            /* Iteratively merge, until we can't. */
+            while (true) {
+                /* Try to merge. */
+                KVirtualAddress freed_table = Null<KVirtualAddress>;
+                if (!impl.MergePages(std::addressof(freed_table), context)) {
+                    break;
+                }
+
+                /* Note that we updated. */
+                this->NoteUpdated();
+
+                /* Free the page. */
+                ClearPageTable(freed_table);
+                this->FreePageTable(page_list, freed_table);
+            }
+        }
+
+        void KPageTable::MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list) {
+            MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+            auto &impl = this->GetImpl();
+
+            /* Begin traversal. */
+            TraversalContext context;
+            TraversalEntry entry;
+            MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr));
+
+            /* Merge start of the range. */
+            this->MergePages(std::addressof(context), page_list);
+
+            /* If we have more than one page, do the same for the end of the range. */
+            if (num_pages > 1) {
+                /* Begin traversal for end of range. */
+                const size_t size = num_pages * PageSize;
+                const auto end_page  = virt_addr + size;
+                const auto last_page = end_page - PageSize;
+                MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), last_page));
+
+                /* Merge. */
+                this->MergePages(std::addressof(context), page_list);
+            }
+        }
+
+        Result KPageTable::SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
+            MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+            auto &impl = this->GetImpl();
+
+            /* If at any point we fail, we want to merge. */
+            ON_RESULT_FAILURE { this->MergePages(context, page_list); };
+
+            /* Iterate, separating until our block size is small enough. */
+            while (entry->block_size > block_size) {
+                /* If necessary, allocate a table. */
+                KVirtualAddress table = Null<KVirtualAddress>;
+                if (!context->is_contiguous) {
+                    table = this->AllocatePageTable(page_list, reuse_ll);
+                    R_UNLESS(table != Null<KVirtualAddress>, svc::ResultOutOfResource());
+                }
+
+                /* Separate. */
+                impl.SeparatePages(entry, context, virt_addr, nullptr);
+                this->NoteUpdated();
+            }
+
+            R_SUCCEED();
+        }
+
+        Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll) {
+            MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+            auto &impl = this->GetImpl();
+
+            /* Begin traversal. */
+            TraversalContext start_context;
+            TraversalEntry entry;
+            MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(start_context), virt_addr));
+
+            /* Separate pages at the start of the range. */
+            const size_t size = num_pages * PageSize;
+            R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(start_context), virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
+
+            /* If necessary, separate pages at the end of the range. */
+            if (num_pages > 1) {
+                const auto end_page  = virt_addr + size;
+                const auto last_page = end_page - PageSize;
+
+                /* Begin traversal. */
+                TraversalContext end_context;
+                MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(end_context), last_page));
+
+                ON_RESULT_FAILURE { this->MergePages(std::addressof(start_context), page_list); };
+
+                R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(end_context), last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
+            }
+
+            R_SUCCEED();
+        }
 
         Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll) {
             MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
 
             /* Separate pages before we change permissions. */
-            const size_t size = num_pages * PageSize;
-            R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
-            if (num_pages > 1) {
-                const auto end_page  = virt_addr + size;
-                const auto last_page = end_page - PageSize;
-
-                ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
-
-                R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
-            }
+            R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));
 
             /* ===================================================== */

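The new KPageTable::SeparatePages only splits mappings at the two ends of a range, each down to the largest granularity compatible with that end's alignment and the range size. A standalone sketch of how those two bounds fall out; GetAlignment here is assumed to mean the largest power of two dividing the address, which is how the helper is being used above (illustrative values, not kernel code):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t PageSize = 0x1000;

// Largest power of two dividing addr (assumed behavior of util::GetAlignment).
constexpr uint64_t GetAlignment(uint64_t addr) {
    return addr & ~(addr - 1);
}

int main() {
    const uint64_t virt_addr = 0x80200000;     // 2 MiB aligned start
    const size_t   num_pages = 0x400;          // 4 MiB range
    const size_t   size      = num_pages * PageSize;

    // Target block size for separating at the first page of the range...
    const uint64_t head = std::min<uint64_t>(GetAlignment(virt_addr), size);
    // ...and at the last page of the range.
    const uint64_t end_page  = virt_addr + size;
    const uint64_t last_page = end_page - PageSize;
    const uint64_t tail      = std::min<uint64_t>(GetAlignment(end_page), size);

    std::printf("separate head at %#llx down to %#llx-byte blocks\n",
                (unsigned long long)virt_addr, (unsigned long long)head);
    std::printf("separate tail at %#llx down to %#llx-byte blocks\n",
                (unsigned long long)last_page, (unsigned long long)tail);
}
```
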
@@ -1376,10 +1290,7 @@
             }
 
             /* We've succeeded, now perform what coalescing we can. */
-            this->MergePages(virt_addr, page_list);
-            if (num_pages > 1) {
-                this->MergePages(virt_addr + (num_pages - 1) * PageSize, page_list);
-            }
+            this->MergePages(virt_addr, num_pages, page_list);
 
             R_SUCCEED();
         }

@@ -33,94 +33,6 @@ namespace ams::kern::arch::arm64 {
         return m_table;
     }
 
-    // bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const {
-    //     /* Set the L3 entry. */
-    //     out_context->l3_entry = l3_entry;
-    //
-    //     if (l3_entry->IsBlock()) {
-    //         /* Set the output entry. */
-    //         out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1));
-    //         if (l3_entry->IsContiguous()) {
-    //             out_entry->block_size = L3ContiguousBlockSize;
-    //         } else {
-    //             out_entry->block_size = L3BlockSize;
-    //         }
-    //         out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
-    //         out_entry->attr = 0;
-    //
-    //         return true;
-    //     } else {
-    //         out_entry->phys_addr = Null<KPhysicalAddress>;
-    //         out_entry->block_size = L3BlockSize;
-    //         out_entry->sw_reserved_bits = 0;
-    //         out_entry->attr = 0;
-    //         return false;
-    //     }
-    // }
-    //
-    // bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const {
-    //     /* Set the L2 entry. */
-    //     out_context->l2_entry = l2_entry;
-    //
-    //     if (l2_entry->IsBlock()) {
-    //         /* Set the output entry. */
-    //         out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1));
-    //         if (l2_entry->IsContiguous()) {
-    //             out_entry->block_size = L2ContiguousBlockSize;
-    //         } else {
-    //             out_entry->block_size = L2BlockSize;
-    //         }
-    //         out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
-    //         out_entry->attr = 0;
-    //
-    //         /* Set the output context. */
-    //         out_context->l3_entry = nullptr;
-    //         return true;
-    //     } else if (l2_entry->IsTable()) {
-    //         return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr);
-    //     } else {
-    //         out_entry->phys_addr = Null<KPhysicalAddress>;
-    //         out_entry->block_size = L2BlockSize;
-    //         out_entry->sw_reserved_bits = 0;
-    //         out_entry->attr = 0;
-    //
-    //         out_context->l3_entry = nullptr;
-    //         return false;
-    //     }
-    // }
-    //
-    // bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const {
-    //     /* Set the L1 entry. */
-    //     out_context->level_entries[EntryLevel_L1] = l1_entry;
-    //
-    //     if (l1_entry->IsBlock()) {
-    //         /* Set the output entry. */
-    //         out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1));
-    //         if (l1_entry->IsContiguous()) {
-    //             out_entry->block_size = L1ContiguousBlockSize;
-    //         } else {
-    //             out_entry->block_size = L1BlockSize;
-    //         }
-    //         out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits();
-    //
-    //         /* Set the output context. */
-    //         out_context->l2_entry = nullptr;
-    //         out_context->l3_entry = nullptr;
-    //         return true;
-    //     } else if (l1_entry->IsTable()) {
-    //         return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr);
-    //     } else {
-    //         out_entry->phys_addr = Null<KPhysicalAddress>;
-    //         out_entry->block_size = L1BlockSize;
-    //         out_entry->sw_reserved_bits = 0;
-    //         out_entry->attr = 0;
-    //
-    //         out_context->l2_entry = nullptr;
-    //         out_context->l3_entry = nullptr;
-    //         return false;
-    //     }
-    // }
-
     bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const {
         /* Setup invalid defaults. */
         *out_entry = {};

@@ -176,9 +88,8 @@
 
     bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const {
         /* Advance entry. */
-
         auto *cur_pte = context->level_entries[context->level];
-        auto *next_pte = reinterpret_cast<PageTableEntry *>(context->is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(cur_pte), 0x10 * sizeof(PageTableEntry)) + 0x10 * sizeof(PageTableEntry) : reinterpret_cast<uintptr_t>(cur_pte) + sizeof(PageTableEntry));
+        auto *next_pte = reinterpret_cast<PageTableEntry *>(context->is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(cur_pte), BlocksPerContiguousBlock * sizeof(PageTableEntry)) + BlocksPerContiguousBlock * sizeof(PageTableEntry) : reinterpret_cast<uintptr_t>(cur_pte) + sizeof(PageTableEntry));
 
         /* Set the pte. */
         context->level_entries[context->level] = next_pte;

@@ -255,6 +166,53 @@
         return is_block;
     }
 
+    bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context) {
+        /* TODO */
+        MESOSPHERE_UNUSED(out, context);
+        MESOSPHERE_PANIC("page tables");
+    }
+
+    void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const {
+        /* We want to downgrade the pages by one step. */
+        if (context->is_contiguous) {
+            /* We want to downgrade a contiguous mapping to a non-contiguous mapping. */
+            pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
+
+            auto * const first = pte;
+            const KPhysicalAddress block = this->GetBlock(first, context->level);
+            for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
+                pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * context->level)), PageTableEntry(first->GetEntryTemplateForSeparateContiguous(i)), PageTableEntry::SeparateContiguousTag{});
+            }
+
+            context->is_contiguous = false;
+
+            context->level_entries[context->level] = pte + (this->GetLevelIndex(address, context->level) & (BlocksPerContiguousBlock - 1));
+        } else {
+            /* We want to downgrade a block into a table. */
+            auto * const first = context->level_entries[context->level];
+            const KPhysicalAddress block = this->GetBlock(first, context->level);
+            for (size_t i = 0; i < BlocksPerTable; ++i) {
+                pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * (context->level - 1))), PageTableEntry(first->GetEntryTemplateForSeparate(i)), PageTableEntry::SoftwareReservedBit_None, true, context->level == EntryLevel_L3);
+            }
+
+            context->is_contiguous = true;
+            context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
+
+            /* Wait for pending stores to complete. */
+            cpu::DataSynchronizationBarrierInnerShareableStore();
+
+            /* Update the block entry to be a table entry. */
+            *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(first)), m_is_kernel, true, BlocksPerTable);
+
+            context->level_entries[context->level] = pte + this->GetLevelIndex(address, context->level);
+        }
+
+        entry->sw_reserved_bits = 0;
+        entry->attr = 0;
+        entry->phys_addr = this->GetBlock(context->level_entries[context->level], context->level) + this->GetOffset(address, context->level);
+        entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);
+    }
+
     void KPageTableImpl::Dump(uintptr_t start, size_t size) const {
         /* If zero size, there's nothing to dump. */
         if (size == 0) {

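KPageTableImpl::SeparatePages above performs exactly one downgrade step: a contiguous run becomes 16 independent entries rewritten in place, or a block becomes a newly filled table of next-level entries. A self-contained toy model of those two steps over plain arrays (the constants and the entry layout are illustrative, not the kernel's encoding):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr size_t BlocksPerContiguousBlock = 0x10;   // 16 entries share the contiguous hint
constexpr size_t BlocksPerTable           = 0x200;  // 512 entries per 4 KiB table

struct ToyEntry {
    uint64_t phys;        // physical address the entry maps
    bool     contiguous;  // models the ARM "contiguous" hint bit
};

// Step 1: split one contiguous run into 16 independent entries, in place.
void SeparateContiguous(ToyEntry *run, uint64_t block_phys, uint64_t block_size) {
    const uint64_t sub_size = block_size / BlocksPerContiguousBlock;
    for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
        run[i] = ToyEntry{ block_phys + i * sub_size, false };
    }
}

// Step 2: split one block entry into a freshly filled table of next-level entries.
std::vector<ToyEntry> SeparateBlock(uint64_t block_phys, uint64_t block_size) {
    const uint64_t sub_size = block_size / BlocksPerTable;
    std::vector<ToyEntry> table(BlocksPerTable);
    for (size_t i = 0; i < BlocksPerTable; ++i) {
        table[i] = ToyEntry{ block_phys + i * sub_size, true };  // new entries start out contiguous
    }
    return table;
}

int main() {
    // Downgrade a 32 MiB contiguous L2 run, then one 2 MiB L2 block into an L3 table.
    ToyEntry l2_run[BlocksPerContiguousBlock] = {};
    SeparateContiguous(l2_run, 0x80000000, 0x2000000);
    auto l3_table = SeparateBlock(l2_run[0].phys, 0x200000);
    std::printf("first L3 entry maps %#llx, last maps %#llx\n",
                (unsigned long long)l3_table.front().phys,
                (unsigned long long)l3_table.back().phys);
}
```
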
@@ -1438,7 +1438,10 @@ namespace ams::kern {
         this->SetState(ThreadState_Waiting);
 
         /* Set our wait queue. */
+        #pragma GCC diagnostic push
+        #pragma GCC diagnostic ignored "-Wdangling-pointer"
         m_wait_queue = queue;
+        #pragma GCC diagnostic pop
     }
 
     void KThread::NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result) {

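The KThread hunk only brackets one assignment with GCC diagnostic pragmas so that a -Wdangling-pointer false positive does not break -Werror builds. In isolation the pattern looks like this (illustrative types, not the kernel's):

```cpp
// Illustrative only: suppress a single diagnostic for one statement under -Werror.
struct Queue;

struct Thread {
    Queue *m_wait_queue = nullptr;

    void BeginWait(Queue *queue) {
        #pragma GCC diagnostic push
        #pragma GCC diagnostic ignored "-Wdangling-pointer"
        m_wait_queue = queue;   // GCC 12+ may warn here when `queue` points at a caller's local
        #pragma GCC diagnostic pop
    }
};

int main() { return 0; }
```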