kern: allocate all TTBR0 pages during init, use procidx as asid

Michael Scire 2024-10-09 14:04:15 -07:00
parent c6b2692168
commit c8e73003f3
9 changed files with 72 additions and 110 deletions

View file

@@ -93,9 +93,13 @@ namespace ams::kern::arch::arm64 {
                MESOSPHERE_ASSERT(alignment < L1BlockSize);
                return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
            }
+        public:
+            /* TODO: How should this size be determined. Does the KProcess slab count need to go in a header as a define? */
+            static constexpr size_t NumTtbr0Entries = 81;
+        private:
+            static constinit inline const volatile u64 s_ttbr0_entries[NumTtbr0Entries] = {};
        private:
            KPageTableManager *m_manager;
-            u64 m_ttbr;
            u8 m_asid;
        protected:
            Result OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll);
@@ -168,17 +172,28 @@
                return entry;
            }
        public:
-            constexpr explicit KPageTable(util::ConstantInitializeTag) : KPageTableBase(util::ConstantInitialize), m_manager(), m_ttbr(), m_asid() { /* ... */ }
+            constexpr explicit KPageTable(util::ConstantInitializeTag) : KPageTableBase(util::ConstantInitialize), m_manager(), m_asid() { /* ... */ }
            explicit KPageTable() { /* ... */ }

            static NOINLINE void Initialize(s32 core_id);

-            ALWAYS_INLINE void Activate(u32 proc_id) {
-                cpu::SwitchProcess(m_ttbr, proc_id);
+            static const volatile u64 &GetTtbr0Entry(size_t index) { return s_ttbr0_entries[index]; }
+
+            static ALWAYS_INLINE u64 GetKernelTtbr0() {
+                return s_ttbr0_entries[0];
+            }
+
+            static ALWAYS_INLINE void ActivateKernel() {
+                /* Activate, using asid 0 and process id = 0xFFFFFFFF */
+                cpu::SwitchProcess(GetKernelTtbr0(), 0xFFFFFFFF);
+            }
+
+            static ALWAYS_INLINE void ActivateProcess(size_t proc_idx, u32 proc_id) {
+                cpu::SwitchProcess(s_ttbr0_entries[proc_idx + 1], proc_id);
            }

            NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
-            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
+            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index);
            Result Finalize();
        private:
            Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
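Each slot of s_ttbr0_entries holds a ready-to-load TTBR0_EL1 value: the ASID in bits 63:48 and the physical address of the L1 table in the low bits, matching the (asid << 48) | table encoding and the & 0xFFFFFFFFFFFE mask used elsewhere in this diff. Entry 0 belongs to the kernel (ASID 0); the process with slab index i uses entry i + 1. A minimal standalone sketch of the packing (plain integer helpers for illustration, not kernel code):

    #include <cstdint>

    using u64 = std::uint64_t;

    /* Pack an ASID and an L1 table physical address into a TTBR0_EL1-style value, */
    /* mirroring the (asid << 48) | table encoding used by this commit.            */
    constexpr u64 EncodeTtbr0(u64 l1_table_phys, u64 asid) {
        return (asid << 48) | l1_table_phys;
    }

    /* Recover the table address, mirroring the & 0xFFFFFFFFFFFE mask in           */
    /* KPageTable::InitializeForProcess; this drops the ASID bits and bit 0.       */
    constexpr u64 GetTtbr0TableAddress(u64 ttbr0) {
        return ttbr0 & UINT64_C(0xFFFFFFFFFFFE);
    }

    static_assert(GetTtbr0TableAddress(EncodeTtbr0(0x80060000, 5)) == 0x80060000);
    static_assert((EncodeTtbr0(0x80060000, 5) >> 48) == 5);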

View file

@@ -23,13 +23,13 @@ namespace ams::kern::arch::arm64 {
        private:
            KPageTable m_page_table;
        public:
-            void Activate(u64 id) {
+            void Activate(size_t process_index, u64 id) {
                /* Activate the page table with the specified contextidr. */
-                m_page_table.Activate(id);
+                m_page_table.ActivateProcess(process_index, id);
            }

-            Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
-                R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit));
+            Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) {
+                R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit, process_index));
            }

            void Finalize() { m_page_table.Finalize(); }

View file

@@ -29,8 +29,7 @@ namespace ams::kern::arch::arm64 {
            NOINLINE void Initialize(s32 core_id);

            void Activate() {
-                /* Activate, using process id = 0xFFFFFFFF */
-                m_page_table.Activate(0xFFFFFFFF);
+                m_page_table.ActivateKernel();
            }

            void ActivateForInit() {

View file

@@ -374,7 +374,7 @@ namespace ams::kern {

            /* Update the current page table. */
            if (next_process) {
-                next_process->GetPageTable().Activate(next_process->GetProcessId());
+                next_process->GetPageTable().Activate(next_process->GetSlabIndex(), next_process->GetProcessId());
            } else {
                Kernel::GetKernelPageTable().Activate();
            }
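On a context switch, the scheduler now hands the process slab index down through KProcessPageTable::Activate to KPageTable::ActivateProcess, which loads s_ttbr0_entries[slab_index + 1]. Because live processes have distinct slab indices, each runs under a distinct ASID, so TLB entries belonging to other processes can never be hit and no TLB invalidation is needed on the switch. A generic AArch64 sketch of what such a switch involves at the register level (illustrative only, not mesosphere's actual cpu::SwitchProcess):

    #include <cstdint>

    /* Write a pre-encoded TTBR0 value (ASID | L1 table PA) and a process id into */
    /* TTBR0_EL1 / CONTEXTIDR_EL1, then synchronize. Hypothetical helper shown    */
    /* only to illustrate the architectural mechanism.                            */
    inline void SwitchProcessSketch(std::uint64_t ttbr0, std::uint32_t proc_id) {
        __asm__ __volatile__("msr contextidr_el1, %[ctx]" :: [ctx] "r"(static_cast<std::uint64_t>(proc_id)) : "memory");
        __asm__ __volatile__("msr ttbr0_el1, %[ttbr]"     :: [ttbr] "r"(ttbr0) : "memory");
        __asm__ __volatile__("isb" ::: "memory");
    }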

View file

@@ -85,77 +85,6 @@ namespace ams::kern::arch::arm64 {
            return (static_cast<u64>(asid) << 48) | (static_cast<u64>(GetInteger(table)));
        }

-        class KPageTableAsidManager {
-            private:
-                using WordType = u32;
-
-                static constexpr u8 ReservedAsids[] = { 0 };
-                static constexpr size_t NumReservedAsids = util::size(ReservedAsids);
-                static constexpr size_t BitsPerWord = BITSIZEOF(WordType);
-                static constexpr size_t AsidCount = 0x100;
-                static constexpr size_t NumWords = AsidCount / BitsPerWord;
-                static constexpr WordType FullWord = ~WordType(0u);
-            private:
-                WordType m_state[NumWords];
-                KLightLock m_lock;
-                u8 m_hint;
-            private:
-                constexpr bool TestImpl(u8 asid) const {
-                    return m_state[asid / BitsPerWord] & (1u << (asid % BitsPerWord));
-                }
-
-                constexpr void ReserveImpl(u8 asid) {
-                    MESOSPHERE_ASSERT(!this->TestImpl(asid));
-                    m_state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord));
-                }
-
-                constexpr void ReleaseImpl(u8 asid) {
-                    MESOSPHERE_ASSERT(this->TestImpl(asid));
-                    m_state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord));
-                }
-
-                constexpr u8 FindAvailable() const {
-                    for (size_t i = 0; i < util::size(m_state); i++) {
-                        if (m_state[i] == FullWord) {
-                            continue;
-                        }
-                        const WordType clear_bit = (m_state[i] + 1) ^ (m_state[i]);
-                        return BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit);
-                    }
-
-                    if (m_state[util::size(m_state)-1] == FullWord) {
-                        MESOSPHERE_PANIC("Unable to reserve ASID");
-                    }
-
-                    __builtin_unreachable();
-                }
-
-                static constexpr ALWAYS_INLINE WordType ClearLeadingZero(WordType value) {
-                    return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType));
-                }
-            public:
-                constexpr KPageTableAsidManager() : m_state(), m_lock(), m_hint() {
-                    for (size_t i = 0; i < NumReservedAsids; i++) {
-                        this->ReserveImpl(ReservedAsids[i]);
-                    }
-                }
-
-                u8 Reserve() {
-                    KScopedLightLock lk(m_lock);
-
-                    if (this->TestImpl(m_hint)) {
-                        m_hint = this->FindAvailable();
-                    }
-
-                    this->ReserveImpl(m_hint);
-                    return m_hint++;
-                }
-
-                void Release(u8 asid) {
-                    KScopedLightLock lk(m_lock);
-                    this->ReleaseImpl(asid);
-                }
-        };
-
-        KPageTableAsidManager g_asid_manager;
-
    }

    ALWAYS_INLINE void KPageTable::NoteUpdated() const {
@@ -184,6 +113,7 @@ namespace ams::kern::arch::arm64 {
        this->OnKernelTableSinglePageUpdated(virt_addr);
    }

    void KPageTable::Initialize(s32 core_id) {
        /* Nothing actually needed here. */
        MESOSPHERE_UNUSED(core_id);
@@ -194,38 +124,29 @@ namespace ams::kern::arch::arm64 {
        m_asid = 0;
        m_manager = Kernel::GetSystemSystemResource().GetPageTableManagerPointer();

-        /* Allocate a page for ttbr. */
-        /* NOTE: It is a postcondition of page table manager allocation that the page is all-zero. */
-        const u64 asid_tag = (static_cast<u64>(m_asid) << 48ul);
-        const KVirtualAddress page = m_manager->Allocate();
-        MESOSPHERE_ASSERT(page != Null<KVirtualAddress>);
-        m_ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag;
-
        /* Initialize the base page table. */
        MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));

        R_SUCCEED();
    }

-    Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
-        /* Get an ASID */
-        m_asid = g_asid_manager.Reserve();
-        ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };
+    Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) {
+        /* Determine our ASID */
+        m_asid = process_index + 1;
+        MESOSPHERE_ABORT_UNLESS(0 < m_asid && m_asid < util::size(s_ttbr0_entries));

        /* Set our manager. */
        m_manager = system_resource->GetPageTableManagerPointer();

-        /* Allocate a new table, and set our ttbr value. */
-        const KVirtualAddress new_table = m_manager->Allocate();
-        R_UNLESS(new_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
-        m_ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), m_asid);
-        ON_RESULT_FAILURE_2 { m_manager->Free(new_table); };
+        /* Get the virtual address of our L1 table. */
+        const KPhysicalAddress ttbr0_phys = KPhysicalAddress(s_ttbr0_entries[m_asid] & UINT64_C(0xFFFFFFFFFFFE));
+        const KVirtualAddress ttbr0_virt = KMemoryLayout::GetLinearVirtualAddress(ttbr0_phys);

        /* Initialize our base table. */
        const size_t as_width = GetAddressSpaceWidth(flags);
        const KProcessAddress as_start = 0;
        const KProcessAddress as_end = (1ul << as_width);
-        R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit));
+        R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(ttbr0_virt), as_start, as_end, code_address, code_size, system_resource, resource_limit));

        /* Note that we've updated the table (since we created it). */
        this->NoteUpdated();
@@ -329,20 +250,16 @@ namespace ams::kern::arch::arm64 {
                }
            }

-            /* Free the L1 table. */
+            /* Clear the L1 table. */
            {
                const KVirtualAddress l1_table = reinterpret_cast<uintptr_t>(impl.Finalize());
                ClearPageTable(l1_table);
-                this->GetPageTableManager().Free(l1_table);
            }

            /* Perform inherited finalization. */
            KPageTableBase::Finalize();
        }

-        /* Release our asid. */
-        g_asid_manager.Release(m_asid);
-
        R_SUCCEED();
    }
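Because each live process has a unique slab index and the bound check above admits at most NumTtbr0Entries - 1 = 80 of them (indices 0..79), asid = slab_index + 1 is unique by construction and never collides with the kernel's ASID 0; that is what lets this commit drop the bitmap-based KPageTableAsidManager and its lock entirely. The L1 table page is likewise no longer returned to the page table manager on finalize, since it is permanently owned by the process's s_ttbr0_entries slot and only needs to be cleared for reuse. A small standalone sketch of the index-to-ASID rule (constants mirror the header, for illustration only):

    #include <cstddef>
    #include <cassert>

    /* Mirrors the header's NumTtbr0Entries: slot 0 is the kernel, slots 1..80 are processes. */
    constexpr std::size_t NumTtbr0Entries = 81;

    /* Derive an ASID from a process slab index the way InitializeForProcess now does. */
    constexpr std::size_t AsidFromProcessIndex(std::size_t process_index) {
        const std::size_t asid = process_index + 1;
        /* Same invariant as the MESOSPHERE_ABORT_UNLESS in this commit. */
        assert(0 < asid && asid < NumTtbr0Entries);
        return asid;
    }

    static_assert(AsidFromProcessIndex(0) == 1);                      /* First process never shares ASID 0 with the kernel. */
    static_assert(AsidFromProcessIndex(NumTtbr0Entries - 2) == 80);   /* Last valid slab index maps to the last slot.       */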

View file

@@ -31,7 +31,6 @@ namespace ams::kern::board::nintendo::nx {
        /* Struct representing registers saved on wake/sleep. */
        class SavedSystemRegisters {
            private:
-                u64 ttbr0_el1;
                u64 elr_el1;
                u64 sp_el0;
                u64 spsr_el1;
@@ -90,7 +89,6 @@ namespace ams::kern::board::nintendo::nx {

    void SavedSystemRegisters::Save() {
        /* Save system registers. */
-        this->ttbr0_el1 = cpu::GetTtbr0El1();
        this->tpidr_el0 = cpu::GetTpidrEl0();
        this->elr_el1 = cpu::GetElrEl1();
        this->sp_el0 = cpu::GetSpEl0();
@@ -405,7 +403,7 @@ namespace ams::kern::board::nintendo::nx {
        cpu::EnsureInstructionConsistency();

        /* Restore system registers. */
-        cpu::SetTtbr0El1 (this->ttbr0_el1);
+        cpu::SetTtbr0El1 (KPageTable::GetKernelTtbr0());
        cpu::SetTpidrEl0 (this->tpidr_el0);
        cpu::SetElrEl1 (this->elr_el1);
        cpu::SetSpEl0 (this->sp_el0);

View file

@@ -299,7 +299,7 @@ namespace ams::kern {
        /* Setup page table. */
        {
            const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
-            R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
+            R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetSlabIndex()));
        }

        ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
@@ -378,7 +378,7 @@ namespace ams::kern {
        /* Setup page table. */
        {
            const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
-            R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit));
+            R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit, this->GetSlabIndex()));
        }

        ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };

View file

@@ -125,6 +125,8 @@ SECTIONS
    .gnu.version_r : { *(.gnu.version_r) } :rodata
    .note.gnu.build-id : { *(.note.gnu.build-id) } :rodata

+    __rodata_end = .;
+
    /* =========== DATA section =========== */
    . = ALIGN(0x1000);
    __data_start = . ;

View file

@@ -15,6 +15,9 @@
 */
#include <mesosphere.hpp>

+extern "C" void __rodata_start();
+extern "C" void __rodata_end();
+
extern "C" void __bin_start__();
extern "C" void __bin_end__();
@@ -220,6 +223,31 @@ namespace ams::kern::init {
        };
        static_assert(kern::arch::arm64::init::IsInitialPageAllocator<KInitialPageAllocatorForFinalizeIdentityMapping>);

+        void SetupAllTtbr0Entries(KInitialPageTable &init_pt, KInitialPageAllocator &allocator) {
+            /* Validate that the ttbr0 array is in rodata. */
+            const uintptr_t rodata_start = reinterpret_cast<uintptr_t>(__rodata_start);
+            const uintptr_t rodata_end = reinterpret_cast<uintptr_t>(__rodata_end);
+            MESOSPHERE_INIT_ABORT_UNLESS(rodata_start < rodata_end);
+            MESOSPHERE_INIT_ABORT_UNLESS(rodata_start <= reinterpret_cast<uintptr_t>(std::addressof(KPageTable::GetTtbr0Entry(0))));
+            MESOSPHERE_INIT_ABORT_UNLESS(reinterpret_cast<uintptr_t>(std::addressof(KPageTable::GetTtbr0Entry(KPageTable::NumTtbr0Entries))) < rodata_end);
+
+            /* Allocate pages for all ttbr0 entries. */
+            for (size_t i = 0; i < KPageTable::NumTtbr0Entries; ++i) {
+                /* Allocate a page. */
+                KPhysicalAddress page = allocator.Allocate(PageSize);
+                MESOSPHERE_INIT_ABORT_UNLESS(page != Null<KPhysicalAddress>);
+
+                /* Check that the page is allowed to be a ttbr0 entry. */
+                MESOSPHERE_INIT_ABORT_UNLESS((GetInteger(page) & UINT64_C(0xFFFF000000000001)) == 0);
+
+                /* Get the physical address of the ttbr0 entry. */
+                const auto ttbr0_phys_ptr = init_pt.GetPhysicalAddress(KVirtualAddress(std::addressof(KPageTable::GetTtbr0Entry(i))));
+
+                /* Set the entry to the newly allocated page. */
+                *reinterpret_cast<volatile u64 *>(GetInteger(ttbr0_phys_ptr)) = (static_cast<u64>(i) << 48) | GetInteger(page);
+            }
+        }
+
        void FinalizeIdentityMapping(KInitialPageTable &init_pt, KInitialPageAllocator &allocator, u64 phys_to_virt_offset) {
            /* Create an allocator for identity mapping finalization. */
            KInitialPageAllocatorForFinalizeIdentityMapping finalize_allocator(allocator, phys_to_virt_offset);
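SetupAllTtbr0Entries runs while the identity mapping is still live (it is called just before FinalizeIdentityMapping, see the hunk below), so it can write each entry through the physical address returned by init_pt.GetPhysicalAddress even though the array itself sits in .rodata; the new __rodata_start/__rodata_end symbols and the linker-script marker exist to validate that placement, presumably so the entries end up mapped read-only and tamper-proof once init completes. The (page & 0xFFFF000000000001) check guarantees the allocated table page has nothing in the ASID field (bits 63:48) and bit 0 clear, so OR-ing in (i << 48) yields a well-formed entry whose index doubles as its ASID. A small sketch of that invariant (illustrative helpers, not kernel code):

    #include <cstdint>

    using u64 = std::uint64_t;

    /* Mirrors the init-time check: the table page must not intrude on the ASID field */
    /* (bits 63:48) or bit 0, so OR-ing in (index << 48) cannot corrupt the address.  */
    constexpr bool IsValidTtbr0TablePage(u64 page) {
        return (page & UINT64_C(0xFFFF000000000001)) == 0;
    }

    constexpr u64 MakeTtbr0Entry(u64 index, u64 page) {
        return (index << 48) | page;
    }

    static_assert(IsValidTtbr0TablePage(0x80061000));
    static_assert(MakeTtbr0Entry(3, 0x80061000) == UINT64_C(0x0003000080061000));
    static_assert(!IsValidTtbr0TablePage(0x80061001));   /* bit 0 set => rejected */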
@@ -591,6 +619,9 @@ namespace ams::kern::init {
        /* Create page table object for use during remaining initialization. */
        KInitialPageTable init_pt;

+        /* Setup all ttbr0 pages. */
+        SetupAllTtbr0Entries(init_pt, g_initial_page_allocator);
+
        /* Unmap the identity mapping. */
        FinalizeIdentityMapping(init_pt, g_initial_page_allocator, g_phase2_linear_region_phys_to_virt_diff);