From a1e137cc1ca4a995c20e9446b04f58cf176de71b Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Wed, 7 Apr 2021 11:25:49 -0700
Subject: [PATCH] kern: update Initialize0 for new changes

---
 .../mesosphere/kern_k_memory_region_type.hpp   |   9 +-
 .../source/arch/arm64/kern_k_page_table.cpp    |   8 +-
 .../source/kern_k_page_table_base.cpp          |   4 +-
 .../include/vapours/util/util_alignment.hpp    |   5 +
 .../source/arch/arm64/init/kern_init_core.cpp  | 181 +++++++++++++-----
 5 files changed, 147 insertions(+), 60 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp
index 5c77ed363..e80e246ad 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp
@@ -21,7 +21,8 @@ namespace ams::kern {
     enum KMemoryRegionType : u32 {};
 
     enum KMemoryRegionAttr : typename std::underlying_type<KMemoryRegionType>::type {
-        KMemoryRegionAttr_CarveoutProtected = 0x04000000,
+        KMemoryRegionAttr_CarveoutProtected = 0x02000000,
+        KMemoryRegionAttr_Uncached          = 0x04000000,
         KMemoryRegionAttr_DidKernelMap      = 0x08000000,
         KMemoryRegionAttr_ShouldKernelMap   = 0x10000000,
         KMemoryRegionAttr_UserReadOnly      = 0x20000000,
@@ -216,6 +217,10 @@
     static_assert(KMemoryRegionType_VirtualDramKernelPtHeap     .GetValue() == 0x2A);
     static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
 
+    /* UNUSED: .DeriveSparse(2, 2, 0); */
+    constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug = KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
+    static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
+
     constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt    = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
     constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement  = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
     constexpr inline const auto KMemoryRegionType_VirtualDramUserPool        = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
@@ -292,6 +297,8 @@
             return KMemoryRegionType_VirtualDramKernelTraceBuffer;
         } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
             return KMemoryRegionType_VirtualDramKernelPtHeap;
+        } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
+            return KMemoryRegionType_VirtualDramUnknownDebug;
         } else {
             return KMemoryRegionType_Dram;
         }
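The new fallback added to the virtual-linear-mapping dispatch above uses the standard flag idiom: a region's type value carries its attribute bits, so OR-ing an attribute into type_id and comparing against the original value tests whether that attribute is already set. A minimal standalone sketch of the idiom (the constant mirrors the enum above; the helper name is illustrative, not part of the kernel):

    #include <cstdint>

    constexpr std::uint32_t ShouldKernelMap = 0x10000000;

    /* True iff every bit of attr is already set in type_id. */
    constexpr bool HasAllAttributes(std::uint32_t type_id, std::uint32_t attr) {
        return (type_id | attr) == type_id;
    }

    static_assert( HasAllAttributes(0x10000042, ShouldKernelMap));
    static_assert(!HasAllAttributes(0x00000042, ShouldKernelMap));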
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index 53539e925..6df32849b 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -556,13 +556,13 @@ namespace ams::kern::arch::arm64 {
             /* If we're not forcing an unmap, separate pages immediately. */
             if (!force) {
                 const size_t size = num_pages * PageSize;
-                R_TRY(this->SeparatePages(virt_addr, std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size), page_list, reuse_ll));
+                R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
                 if (num_pages > 1) {
                     const auto end_page  = virt_addr + size;
                     const auto last_page = end_page - PageSize;
 
                     auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
-                    R_TRY(this->SeparatePages(last_page, std::min(GetInteger(end_page) & -GetInteger(end_page), size), page_list, reuse_ll));
+                    R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
                     merge_guard.Cancel();
                 }
             }
@@ -1194,13 +1194,13 @@ namespace ams::kern::arch::arm64 {
 
             /* Separate pages before we change permissions. */
             const size_t size = num_pages * PageSize;
-            R_TRY(this->SeparatePages(virt_addr, std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size), page_list, reuse_ll));
+            R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
             if (num_pages > 1) {
                 const auto end_page  = virt_addr + size;
                 const auto last_page = end_page - PageSize;
 
                 auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
-                R_TRY(this->SeparatePages(last_page, std::min(GetInteger(end_page) & -GetInteger(end_page), size), page_list, reuse_ll));
+                R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
                 merge_guard.Cancel();
             }

diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index 166cce173..3f2aff676 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -1810,7 +1810,7 @@ namespace ams::kern {
 
         /* Select an address to map at. */
         KProcessAddress addr = Null<KProcessAddress>;
-        const size_t phys_alignment = std::min(std::min(GetInteger(phys_addr) & -GetInteger(phys_addr), size & -size), MaxPhysicalMapAlignment);
+        const size_t phys_alignment = std::min(std::min(util::GetAlignment(GetInteger(phys_addr)), util::GetAlignment(size)), MaxPhysicalMapAlignment);
         for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
             const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
             if (alignment > phys_alignment) {
@@ -1892,7 +1892,7 @@ namespace ams::kern {
 
         /* Select an address to map at. */
        KProcessAddress addr = Null<KProcessAddress>;
-        const size_t phys_alignment = std::min(std::min(GetInteger(phys_addr) & -GetInteger(phys_addr), size & -size), MaxPhysicalMapAlignment);
+        const size_t phys_alignment = std::min(std::min(util::GetAlignment(GetInteger(phys_addr)), util::GetAlignment(size)), MaxPhysicalMapAlignment);
         for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
             const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
             if (alignment > phys_alignment) {
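These hunks replace the open-coded `x & -x` with the new util::GetAlignment helper; the semantics are unchanged. The quantity passed as SeparatePages' second argument is the coarsest granularity that can legally start at the given address: it is bounded both by the address's natural alignment and by the total size of the range. A sketch with hypothetical values (SeparationSize is a made-up name for that quantity, not a kernel function):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    /* Same trick as util::GetAlignment: isolate the lowest set bit. */
    constexpr std::uintptr_t GetAlignment(std::uintptr_t value) {
        return value & -value;
    }

    /* Largest mapping granularity usable at virt_addr for a run of size bytes. */
    constexpr std::size_t SeparationSize(std::uintptr_t virt_addr, std::size_t size) {
        return std::min<std::size_t>(GetAlignment(virt_addr), size);
    }

    /* 0x80600000 is 2 MiB aligned (lowest set bit 0x200000), so a 4 MiB */
    /* range starting there separates at 2 MiB granularity.              */
    static_assert(SeparationSize(0x80600000, 0x400000) == 0x200000);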
diff --git a/libraries/libvapours/include/vapours/util/util_alignment.hpp b/libraries/libvapours/include/vapours/util/util_alignment.hpp
index 3fa951504..79e0ffd05 100644
--- a/libraries/libvapours/include/vapours/util/util_alignment.hpp
+++ b/libraries/libvapours/include/vapours/util/util_alignment.hpp
@@ -43,6 +43,11 @@ namespace ams::util {
         return (value & invmask) == 0;
     }
 
+    template<typename T> requires std::unsigned_integral<T>
+    constexpr ALWAYS_INLINE T GetAlignment(T value) {
+        return value & -value;
+    }
+
     template<> constexpr ALWAYS_INLINE void *AlignUp<void *>(void *value, size_t alignment) {
         return reinterpret_cast<void *>(AlignUp(reinterpret_cast<uintptr_t>(value), alignment));
     }
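GetAlignment computes a value's natural alignment: in two's complement, -v keeps the lowest set bit of v and flips every bit above it, so v & -v isolates that bit, which is the largest power of two dividing v. A few spot checks on a simplified copy of the helper (without the std::unsigned_integral constraint or ALWAYS_INLINE the real header uses):

    #include <cstdint>

    /* v & -v isolates the lowest set bit of v. */
    constexpr std::uint64_t GetAlignment(std::uint64_t v) {
        return v & -v;
    }

    static_assert(GetAlignment(0x58)       == 0x08);       /* 0b1011000 -> 0b1000 */
    static_assert(GetAlignment(0x00300000) == 0x00100000);
    static_assert(GetAlignment(0x1000)     == 0x1000);     /* powers of two are fixed points */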
diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
index 6ca985e31..a6c213344 100644
--- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
+++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp
@@ -46,6 +46,8 @@ namespace ams::kern::init {
     constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
     constexpr PageTableEntry KernelMmioAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_Device_nGnRE, PageTableEntry::Shareable_OuterShareable, PageTableEntry::MappingFlag_Mapped);
 
+    constexpr PageTableEntry KernelRwDataUncachedAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemoryNotCacheable, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
+
     void MapStackForCore(KInitialPageTable &page_table, KMemoryRegionType type, u32 core_id) {
         constexpr size_t StackSize  = PageSize;
         constexpr size_t StackAlign = PageSize;
@@ -77,8 +79,8 @@
         }
     }
 
-    void SetupInitialArguments(KInitialPageTable &ttbr1_table, KInitialPageAllocator &allocator) {
-        AMS_UNUSED(ttbr1_table, allocator);
+    void SetupInitialArguments(KInitialPageTable &init_pt, KInitialPageAllocator &allocator) {
+        AMS_UNUSED(init_pt, allocator);
 
         /* Get parameters for initial arguments. */
         const u64 ttbr0 = cpu::GetTtbr0El1();
@@ -98,7 +100,7 @@
             /* if (cpu::GetPhysicalAddressWritable(std::addressof(phys_addr), KVirtualAddress(init_args), true)) { */
             /*     g_init_arguments_phys_addr[i] = phys_addr;                                                      */
             /* }                                                                                                   */
-            g_init_arguments_phys_addr[i] = ttbr1_table.GetPhysicalAddress(KVirtualAddress(init_args));
+            g_init_arguments_phys_addr[i] = init_pt.GetPhysicalAddress(KVirtualAddress(init_args));
 
             /* Set the arguments. */
             init_args->ttbr0 = ttbr0;
@@ -122,7 +124,7 @@
     }
 
     void InitializeCore(uintptr_t misc_unk_debug_phys_addr, uintptr_t initial_page_allocator_state) {
-        /* Ensure our first argument is page aligned (as we will map it if it is non-zero). */
+        /* Ensure our first argument is page aligned. */
         MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(misc_unk_debug_phys_addr, PageSize));
 
         /* Restore the page allocator state setup by kernel loader. */
@@ -132,7 +134,7 @@
         MESOSPHERE_INIT_ABORT_UNLESS((cpu::TranslationControlRegisterAccessor().GetT1Size() / arch::arm64::L1BlockSize) == arch::arm64::MaxPageTableEntries);
 
         /* Create page table object for use during initialization. */
-        KInitialPageTable ttbr1_table;
+        KInitialPageTable init_pt;
 
         /* Initialize the slab allocator counts. */
         InitializeSlabResourceCounts();
@@ -180,7 +182,14 @@
             MESOSPHERE_INIT_ABORT_UNLESS(region.GetEndAddress() != 0);
 
             /* Account for the region. */
-            misc_region_needed_size += PageSize + (util::AlignUp(region.GetLastAddress(), PageSize) - util::AlignDown(region.GetAddress(), PageSize));
+            const auto aligned_start = util::AlignDown(region.GetAddress(), PageSize);
+            const auto aligned_end   = util::AlignUp(region.GetLastAddress(), PageSize);
+            const size_t cur_region_size = aligned_end - aligned_start;
+            misc_region_needed_size += cur_region_size;
+
+            /* Account for alignment requirements. */
+            const size_t min_align = std::min(util::GetAlignment(cur_region_size), util::GetAlignment(aligned_start));
+            misc_region_needed_size += min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize;
         }
     }
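The sizing hunk above now reserves, per auto-mapped region, its page-aligned size plus a guard: a full ASLR-aligned slot when both the region's aligned start and its size are at least as aligned as KernelAslrAlignment (so the mapping can be placed at that granularity), and a single page otherwise. A sketch under assumed constants (KernelAslrAlignment is assumed here to be 2 MiB; GuardFor is a hypothetical helper):

    #include <algorithm>
    #include <cstddef>

    constexpr std::size_t PageSize            = 0x1000;
    constexpr std::size_t KernelAslrAlignment = 0x200000; /* assumed: 2 MiB */

    constexpr std::size_t GetAlignment(std::size_t v) { return v & -v; }

    /* Guard space reserved on top of a region's aligned size. */
    constexpr std::size_t GuardFor(std::size_t aligned_start, std::size_t region_size) {
        const std::size_t min_align = std::min(GetAlignment(region_size), GetAlignment(aligned_start));
        return min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize;
    }

    static_assert(GuardFor(0x60000000, 0x400000) == KernelAslrAlignment);
    static_assert(GuardFor(0x60001000, 0x400000) == PageSize); /* start only page-aligned */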
@@ -215,7 +224,7 @@
         MESOSPHERE_INIT_ABORT_UNLESS(slab_region_size <= resource_region_size);
 
         /* Setup the slab region. */
-        const KPhysicalAddress code_start_phys_addr = ttbr1_table.GetPhysicalAddressOfRandomizedRange(code_start_virt_addr, code_region_size);
+        const KPhysicalAddress code_start_phys_addr = init_pt.GetPhysicalAddressOfRandomizedRange(code_start_virt_addr, code_region_size);
         const KPhysicalAddress code_end_phys_addr   = code_start_phys_addr + code_region_size;
         const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
         const KPhysicalAddress slab_end_phys_addr   = slab_start_phys_addr + slab_region_size;
@@ -230,52 +239,116 @@
         const KVirtualAddress temp_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
         MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp));
 
-        /* Setup the Misc Unknown Debug region, if it's not zero. */
-        if (misc_unk_debug_phys_addr) {
-            constexpr size_t MiscUnknownDebugRegionAlign = PageSize;
-            const size_t misc_unk_debug_size = GetMiscUnknownDebugRegionSize();
-            const KVirtualAddress misc_unk_debug_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(misc_unk_debug_size, MiscUnknownDebugRegionAlign, KMemoryRegionType_KernelMisc, PageSize);
-            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(misc_unk_debug_virt_addr), misc_unk_debug_size, KMemoryRegionType_KernelMiscUnknownDebug));
-            ttbr1_table.Map(misc_unk_debug_virt_addr, misc_unk_debug_size, misc_unk_debug_phys_addr, KernelRoDataAttribute, g_initial_page_allocator);
-        }
+        /* Automatically map in devices that have auto-map attributes, from largest region to smallest region. */
+        {
+            /* We want to map the regions from largest to smallest. */
+            KMemoryRegion *largest;
+            do {
+                /* Begin with no knowledge of the largest region. */
+                largest = nullptr;
 
-        /* Automatically map in devices that have auto-map attributes. */
-        for (auto &region : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
-            /* We only care about kernel regions. */
-            if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
-                continue;
-            }
+                for (auto &region : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
+                    /* We only care about kernel regions. */
+                    if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
+                        continue;
+                    }
 
-            /* Check whether we should map the region. */
-            if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
-                continue;
-            }
+                    /* Check whether we should map the region. */
+                    if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                        continue;
+                    }
 
-            /* If this region has already been mapped, no need to consider it. */
-            if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
-                continue;
-            }
+                    /* If this region has already been mapped, no need to consider it. */
+                    if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
+                        continue;
+                    }
 
-            /* Check that the region is valid. */
-            MESOSPHERE_INIT_ABORT_UNLESS(region.GetEndAddress() != 0);
+                    /* Check that the region is valid. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(region.GetEndAddress() != 0);
 
-            /* Set the attribute to note we've mapped this region. */
-            region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
+                    /* Update the largest region. */
+                    if (largest == nullptr || largest->GetSize() < region.GetSize()) {
+                        largest = std::addressof(region);
+                    }
+                }
 
-            /* Create a virtual pair region and insert it into the tree. */
-            const KPhysicalAddress map_phys_addr = util::AlignDown(region.GetAddress(), PageSize);
-            const size_t map_size = util::AlignUp(region.GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
-            const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
-            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
-            region.SetPairAddress(GetInteger(map_virt_addr) + region.GetAddress() - GetInteger(map_phys_addr));
+                /* If we found a region, map it. */
+                if (largest != nullptr) {
+                    /* Set the attribute to note we've mapped this region. */
+                    largest->SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
 
-            /* Map the page in to our page table. */
-            ttbr1_table.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator);
+                    /* Create a virtual pair region and insert it into the tree. */
+                    const KPhysicalAddress map_phys_addr = util::AlignDown(largest->GetAddress(), PageSize);
+                    const size_t map_size  = util::AlignUp(largest->GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
+                    const size_t min_align = std::min(util::GetAlignment(map_size), util::GetAlignment(GetInteger(map_phys_addr)));
+                    const size_t map_align = min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize;
+                    const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, map_align, KMemoryRegionType_KernelMisc, map_align);
+                    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
+                    largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr));
 
+                    /* Map the page in to our page table. */
+                    init_pt.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator);
+                }
+            } while (largest != nullptr);
+        }
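Virtual slots for these mappings are drawn at random from the KernelMisc region, so placing the largest regions first minimizes the chance that fragmentation leaves no slot big enough for a later, larger region. The loop is a repeated selection scan rather than a sort, since the region tree is intrusive and small. Its shape, with hypothetical stand-in types for illustration:

    #include <cstddef>
    #include <vector>

    struct Region { std::size_t size; bool mapped; }; /* stand-in for KMemoryRegion */

    void MapLargestFirst(std::vector<Region> &regions) {
        Region *largest;
        do {
            /* Scan for the largest region not yet handled. */
            largest = nullptr;
            for (auto &r : regions) {
                if (r.mapped) {
                    continue;
                }
                if (largest == nullptr || largest->size < r.size) {
                    largest = &r;
                }
            }

            /* Map it, marking it so the next scan skips it. */
            if (largest != nullptr) {
                largest->mapped = true;
            }
        } while (largest != nullptr);
    }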
 
         /* Setup the basic DRAM regions. */
         SetupDramPhysicalMemoryRegions();
 
+        /* Automatically map in reserved physical memory that has auto-map attributes. */
+        {
+            /* We want to map the regions from largest to smallest. */
+            KMemoryRegion *largest;
+            do {
+                /* Begin with no knowledge of the largest region. */
+                largest = nullptr;
+
+                for (auto &region : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
+                    /* We only care about reserved memory. */
+                    if (!region.IsDerivedFrom(KMemoryRegionType_DramReservedBase)) {
+                        continue;
+                    }
+
+                    /* Check whether we should map the region. */
+                    if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                        continue;
+                    }
+
+                    /* If this region has already been mapped, no need to consider it. */
+                    if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
+                        continue;
+                    }
+
+                    /* Check that the region is valid. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(region.GetEndAddress() != 0);
+
+                    /* Update the largest region. */
+                    if (largest == nullptr || largest->GetSize() < region.GetSize()) {
+                        largest = std::addressof(region);
+                    }
+                }
+
+                /* If we found a region, map it. */
+                if (largest != nullptr) {
+                    /* Set the attribute to note we've mapped this region. */
+                    largest->SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
+
+                    /* Create a virtual pair region and insert it into the tree. */
+                    const KPhysicalAddress map_phys_addr = util::AlignDown(largest->GetAddress(), PageSize);
+                    const size_t map_size  = util::AlignUp(largest->GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
+                    const size_t min_align = std::min(util::GetAlignment(map_size), util::GetAlignment(GetInteger(map_phys_addr)));
+                    const size_t map_align = min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize;
+                    const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, map_align, KMemoryRegionType_KernelMisc, map_align);
+                    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscUnknownDebug));
+                    largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr));
+
+                    /* Map the page in to our page table. */
+                    const auto attribute = largest->HasTypeAttribute(KMemoryRegionAttr_Uncached) ? KernelRwDataUncachedAttribute : KernelRwDataAttribute;
+                    init_pt.Map(map_virt_addr, map_size, map_phys_addr, attribute, g_initial_page_allocator);
+                }
+            } while (largest != nullptr);
+        }
+
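The reserved-memory path differs from the device path in two ways: the virtual pair region is typed KernelMiscUnknownDebug rather than KernelMiscMappedDevice, and the mapping uses normal memory attributes, uncached when the region carries the new KMemoryRegionAttr_Uncached bit. A sketch of that attribute dispatch (stand-in types; the real code selects between the PageTableEntry constants defined at the top of kern_init_core.cpp):

    #include <cstdint>

    enum class MapAttribute { RwData, RwDataUncached }; /* stand-ins */

    constexpr std::uint32_t Attr_Uncached = 0x04000000;

    constexpr MapAttribute SelectAttribute(std::uint32_t region_attr) {
        return (region_attr & Attr_Uncached) != 0 ? MapAttribute::RwDataUncached
                                                  : MapAttribute::RwData;
    }

    static_assert(SelectAttribute(0x04000000) == MapAttribute::RwDataUncached);
    static_assert(SelectAttribute(0x00000000) == MapAttribute::RwData);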
         /* Insert a physical region for the kernel code region. */
         MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(code_start_phys_addr), code_region_size, KMemoryRegionType_DramKernelCode));
 
@@ -283,11 +356,11 @@
         MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
 
         /* Map the slab region. */
-        ttbr1_table.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
+        init_pt.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
 
         /* Physically randomize the slab region. */
         /* NOTE: Nintendo does this only on 10.0.0+ */
-        ttbr1_table.PhysicallyRandomize(slab_region_start, slab_region_size, false);
+        init_pt.PhysicallyRandomize(slab_region_start, slab_region_size, false);
 
         /* Determine size available for kernel page table heaps, requiring > 8 MB. */
         const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
@@ -338,14 +411,16 @@
                 cur_size += region.GetSize();
             } else {
                 const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff;
-                ttbr1_table.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
+                init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
                 cur_phys_addr = region.GetAddress();
                 cur_size      = region.GetSize();
             }
 
             const uintptr_t region_virt_addr = region.GetAddress() + linear_region_phys_to_virt_diff;
+            if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                region.SetPairAddress(region_virt_addr);
+            }
             MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(region_virt_addr, region.GetSize(), GetTypeForVirtualLinearMapping(region.GetType())));
-            region.SetPairAddress(region_virt_addr);
 
             KMemoryRegion *virt_region = KMemoryLayout::GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
             MESOSPHERE_INIT_ABORT_UNLESS(virt_region != nullptr);
@@ -355,7 +430,7 @@
 
         /* Map the last block, which we may have skipped. */
         if (cur_size != 0) {
             const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff;
-            ttbr1_table.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
+            init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
         }
     }
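The linear mapping loop above walks the physical region tree in address order and coalesces physically contiguous regions, so each init_pt.Map call covers the longest possible run and the page table can use large blocks; the trailing Map handles the run still open when iteration ends. The shape of that merging, with a hypothetical Extent type (the real loop maps each run directly instead of collecting them):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Extent { std::uintptr_t addr; std::size_t size; }; /* hypothetical */

    std::vector<Extent> CoalesceRuns(const std::vector<Extent> &sorted_regions) {
        std::vector<Extent> runs;
        std::uintptr_t cur_addr = 0;
        std::size_t    cur_size = 0;
        for (const auto &r : sorted_regions) {
            if (cur_size > 0 && r.addr == cur_addr + cur_size) {
                /* Physically contiguous: extend the current run. */
                cur_size += r.size;
            } else {
                /* Gap: emit the finished run and start a new one. */
                if (cur_size > 0) {
                    runs.push_back({cur_addr, cur_size});
                }
                cur_addr = r.addr;
                cur_size = r.size;
            }
        }
        /* Emit the last run, which the loop may have left open. */
        if (cur_size > 0) {
            runs.push_back({cur_addr, cur_size});
        }
        return runs;
    }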
@@ -363,18 +438,18 @@
         std::memset(GetVoidPointer(slab_region_start), 0, slab_region_size);
 
         /* NOTE: Unknown function is called here which is ifdef'd out on retail kernel. */
-        /* The unknown function is immediately before the function which gets the unknown debug region size, inside this translation unit. */
-        /* It's likely that this is some kind of initializer for the unknown debug region. */
+        /* The unknown function is immediately before the function which gets an unknown debug region size, inside this translation unit. */
+        /* It's likely that this is some kind of initializer for this unknown debug region. */
 
         /* Create regions for and map all core-specific stacks. */
         for (size_t i = 0; i < cpu::NumCores; i++) {
-            MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscMainStack, i);
-            MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscIdleStack, i);
-            MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscExceptionStack, i);
+            MapStackForCore(init_pt, KMemoryRegionType_KernelMiscMainStack, i);
+            MapStackForCore(init_pt, KMemoryRegionType_KernelMiscIdleStack, i);
+            MapStackForCore(init_pt, KMemoryRegionType_KernelMiscExceptionStack, i);
         }
 
         /* Setup the initial arguments. */
-        SetupInitialArguments(ttbr1_table, g_initial_page_allocator);
+        SetupInitialArguments(init_pt, g_initial_page_allocator);
 
         /* Finalize the page allocator, we're done allocating at this point. */
         KInitialPageAllocator::State final_init_page_table_state;
@@ -400,7 +475,7 @@
         KMemoryLayout::InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, linear_region_start);
 
         /* Turn on all other cores. */
-        TurnOnAllCores(GetInteger(ttbr1_table.GetPhysicalAddress(reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore))));
+        TurnOnAllCores(GetInteger(init_pt.GetPhysicalAddress(reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore))));
     }
 
     KPhysicalAddress GetInitArgumentsAddress(s32 core_id) {