kern: KPageTable: remove MapFirst operation, replace with MapFirstGroup

Michael Scire 2023-10-11 09:32:23 -07:00 committed by SciresM
parent 7b523cfc8d
commit 2a4d68f916
5 changed files with 53 additions and 37 deletions

libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp

@@ -208,8 +208,8 @@ namespace ams::kern::arch::arm64 {
                 }
             }

-            Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
-            Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
+            Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
+            Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
             bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);

libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp

@@ -158,8 +158,16 @@ namespace ams::kern {
        private:
            const KPageGroup *m_pg;
        public:
-            explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : m_pg(gp) { if (m_pg) { m_pg->Open(); } }
-            explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ... */ }
+            explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp, bool not_first = true) : m_pg(gp) {
+                if (m_pg) {
+                    if (not_first) {
+                        m_pg->Open();
+                    } else {
+                        m_pg->OpenFirst();
+                    }
+                }
+            }
+            explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp, bool not_first = true) : KScopedPageGroup(std::addressof(gp), not_first) { /* ... */ }
            ALWAYS_INLINE ~KScopedPageGroup() { if (m_pg) { m_pg->Close(); } }

            ALWAYS_INLINE void CancelClose() {
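
The constructor change above is the scoped-reference idiom extended with first-reference semantics: when the guard is told this is the first time the pages are being opened, it calls OpenFirst() instead of Open(). Here is a minimal, self-contained sketch of the same pattern; PageGroup and ScopedPageGroup below are simplified stand-ins, not the kernel's KPageGroup and KScopedPageGroup.

/* Sketch of the scoped page-group reference pattern (stand-in types). */
#include <cstdio>

struct PageGroup {
    int ref_count = 0;

    void Open()      { ++ref_count; } /* take an additional reference */
    void OpenFirst() { ++ref_count; } /* take the very first reference; kept
                                       * separate so the callee may assume the
                                       * count transitions from zero */
    void Close()     { --ref_count; }
};

class ScopedPageGroup {
    private:
        PageGroup *m_pg;
    public:
        explicit ScopedPageGroup(PageGroup *gp, bool not_first = true) : m_pg(gp) {
            if (m_pg) {
                if (not_first) {
                    m_pg->Open();
                } else {
                    m_pg->OpenFirst();
                }
            }
        }

        ~ScopedPageGroup() {
            if (m_pg) {
                m_pg->Close();
            }
        }

        /* Called on success: keep the reference instead of dropping it. */
        void CancelClose() { m_pg = nullptr; }
};

int main() {
    PageGroup pg;
    {
        ScopedPageGroup spg(&pg, /* not_first = */ false); /* first open */
        spg.CancelClose();                                 /* mapping succeeded */
    }
    std::printf("references held after scope: %d\n", pg.ref_count); /* prints 1 */
    return 0;
}

The point of routing the choice through the guard is that the error path stays uniform: if the mapping fails before CancelClose(), the destructor drops whichever kind of reference was taken.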

libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp

@@ -88,8 +88,8 @@ namespace ams::kern {
            enum OperationType {
                OperationType_Map                         = 0,
-                OperationType_MapFirst                    = 1,
                OperationType_MapGroup                    = 2,
+                OperationType_MapFirstGroup               = 1,
                OperationType_Unmap                       = 3,
                OperationType_ChangePermissions           = 4,
                OperationType_ChangePermissionsAndRefresh = 5,

libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp

@@ -351,7 +351,7 @@ namespace ams::kern::arch::arm64 {
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
        MESOSPHERE_ASSERT(this->ContainsPages(virt_addr, num_pages));

-        if (operation == OperationType_Map || operation == OperationType_MapFirst) {
+        if (operation == OperationType_Map) {
            MESOSPHERE_ABORT_UNLESS(is_pa_valid);
            MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
        } else {
@@ -378,8 +378,7 @@
            switch (operation) {
                case OperationType_Map:
-                case OperationType_MapFirst:
-                    R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirst, page_list, reuse_ll));
+                    R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
                case OperationType_ChangePermissions:
                    R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, false, page_list, reuse_ll));
                case OperationType_ChangePermissionsAndRefresh:
@@ -402,7 +401,8 @@
            auto entry_template = this->GetEntryTemplate(properties);

            switch (operation) {
                case OperationType_MapGroup:
-                    R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
+                case OperationType_MapFirstGroup:
+                    R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirstGroup, page_list, reuse_ll));
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }
        }
@@ -759,7 +759,7 @@
            R_SUCCEED();
        }

-        Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll) {
+        Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
            MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

            /* Cache initial addresses for use on cleanup. */
@@ -830,21 +830,17 @@
            /* Open references to the pages, if we should. */
            if (IsHeapPhysicalAddress(orig_phys_addr)) {
-                if (not_first) {
-                    Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
-                } else {
-                    Kernel::GetMemoryManager().OpenFirst(orig_phys_addr, num_pages);
-                }
+                Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
            }

            R_SUCCEED();
        }

-        Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
+        Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll) {
            MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

            /* We want to maintain a new reference to every page in the group. */
-            KScopedPageGroup spg(pg);
+            KScopedPageGroup spg(pg, not_first);

            /* Cache initial address for use on cleanup. */
            const KProcessAddress orig_virt_addr = virt_addr;

libraries/libmesosphere/source/kern_k_page_table_base.cpp

@@ -4435,32 +4435,44 @@ namespace ams::kern {
                /* If it's unmapped, we need to map it. */
                if (info.GetState() == KMemoryState_Free) {
                    /* Determine the range to map. */
-                    const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_None };
+                    const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, cur_address == this->GetAliasRegionStart() ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
                    size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;

                    /* While we have pages to map, map them. */
-                    while (map_pages > 0) {
-                        /* Check if we're at the end of the physical block. */
-                        if (pg_pages == 0) {
-                            /* Ensure there are more pages to map. */
-                            MESOSPHERE_ASSERT(pg_it != pg.end());
-
-                            /* Advance our physical block. */
-                            ++pg_it;
-                            pg_phys_addr = pg_it->GetAddress();
-                            pg_pages = pg_it->GetNumPages();
-                        }
-
-                        /* Map whatever we can. */
-                        const size_t cur_pages = std::min(pg_pages, map_pages);
-                        R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_pages, pg_phys_addr, true, map_properties, OperationType_MapFirst, false));
-
-                        /* Advance. */
-                        cur_address += cur_pages * PageSize;
-                        map_pages -= cur_pages;
-
-                        pg_phys_addr += cur_pages * PageSize;
-                        pg_pages -= cur_pages;
-                    }
+                    {
+                        /* Create a page group for the current mapping range. */
+                        KPageGroup cur_pg(m_block_info_manager);
+                        {
+                            ON_RESULT_FAILURE {
+                                cur_pg.OpenFirst();
+                                cur_pg.Close();
+                            };
+
+                            size_t remain_pages = map_pages;
+                            while (remain_pages > 0) {
+                                /* Check if we're at the end of the physical block. */
+                                if (pg_pages == 0) {
+                                    /* Ensure there are more pages to map. */
+                                    MESOSPHERE_ASSERT(pg_it != pg.end());
+
+                                    /* Advance our physical block. */
+                                    ++pg_it;
+                                    pg_phys_addr = pg_it->GetAddress();
+                                    pg_pages = pg_it->GetNumPages();
+                                }

+                                /* Add whatever we can to the current block. */
+                                const size_t cur_pages = std::min(pg_pages, remain_pages);
+                                R_TRY(cur_pg.AddBlock(pg_phys_addr + ((pg_pages - cur_pages) * PageSize), cur_pages));
+
+                                /* Advance. */
+                                remain_pages -= cur_pages;
+                                pg_pages -= cur_pages;
+                            }
+                        }
+
+                        /* Map the pages. */
+                        R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, cur_pg, map_properties, OperationType_MapFirstGroup, false));
+
+                        /* Advance. */
+                        cur_address += map_pages * PageSize;
+                    }
                }
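
The rework above replaces a map-per-physical-block loop (one OperationType_MapFirst call per block) with a gather-then-map structure: the pieces of the physical page group that cover the current free range are first collected into cur_pg, and the whole range is then mapped with a single OperationType_MapFirstGroup operation, so the first references are opened once for the entire range. The chunking itself can be sketched in isolation. Everything below is a simplified stand-in (plain structs and a vector instead of KPageGroup), and it consumes each physical block front-to-back rather than reproducing the kernel's offset arithmetic:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr std::size_t PageSize = 0x1000;

struct Block { std::size_t phys_addr; std::size_t num_pages; };

int main() {
    /* Source page group: variable-sized physical blocks. */
    const std::vector<Block> pg = { { 0x80000000, 3 }, { 0x80100000, 5 } };
    std::size_t pg_index     = 0;
    std::size_t pg_phys_addr = pg[0].phys_addr;
    std::size_t pg_pages     = pg[0].num_pages;

    /* Destination group for one mapping range (stand-in for KPageGroup). */
    std::vector<Block> cur_pg;

    std::size_t remain_pages = 6; /* pages needed for the current free range */
    while (remain_pages > 0) {
        /* At the end of the current physical block? Advance to the next. */
        if (pg_pages == 0) {
            ++pg_index;
            pg_phys_addr = pg[pg_index].phys_addr;
            pg_pages     = pg[pg_index].num_pages;
        }

        /* Add whatever the current block can contribute. */
        const std::size_t cur_pages = std::min(pg_pages, remain_pages);
        cur_pg.push_back({ pg_phys_addr, cur_pages });

        /* Advance. */
        remain_pages -= cur_pages;
        pg_pages     -= cur_pages;
        pg_phys_addr += cur_pages * PageSize;
    }

    /* One map operation for the whole group (vs. one per block before). */
    for (const auto &b : cur_pg) {
        std::printf("map block phys=%#zx pages=%zu\n", b.phys_addr, b.num_pages);
    }
    return 0;
}

Note the failure handling in the diff: if building or mapping the group fails, the ON_RESULT_FAILURE block pairs OpenFirst with Close on cur_pg, which reads as taking and immediately dropping the first references so the not-yet-mapped pages are returned to the allocator.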