Mirror of https://github.com/Atmosphere-NX/Atmosphere
kern: update Initialize0 for new changes
commit a1e137cc1c (parent 504472af4e)
5 changed files with 147 additions and 60 deletions
@@ -21,7 +21,8 @@ namespace ams::kern {
     enum KMemoryRegionType : u32 {};
 
     enum KMemoryRegionAttr : typename std::underlying_type<KMemoryRegionType>::type {
-        KMemoryRegionAttr_CarveoutProtected = 0x04000000,
+        KMemoryRegionAttr_CarveoutProtected = 0x02000000,
+        KMemoryRegionAttr_Uncached          = 0x04000000,
         KMemoryRegionAttr_DidKernelMap      = 0x08000000,
         KMemoryRegionAttr_ShouldKernelMap   = 0x10000000,
         KMemoryRegionAttr_UserReadOnly      = 0x20000000,
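Note: these attributes are single distinct bits ORed into a region's type id, so introducing KMemoryRegionAttr_Uncached at 0x04000000 forces KMemoryRegionAttr_CarveoutProtected down to the previously free 0x02000000. A quick standalone check of the post-change layout (plain C++; values copied from the hunk above, names shortened for the sketch):

#include <cstdint>

constexpr uint32_t CarveoutProtected = 0x02000000;
constexpr uint32_t Uncached          = 0x04000000;
constexpr uint32_t DidKernelMap      = 0x08000000;
constexpr uint32_t ShouldKernelMap   = 0x10000000;
constexpr uint32_t UserReadOnly      = 0x20000000;

/* Each flag must occupy its own bit. */
static_assert((CarveoutProtected & Uncached) == 0);
static_assert((CarveoutProtected | Uncached | DidKernelMap | ShouldKernelMap | UserReadOnly) == 0x3E000000);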
@@ -216,6 +217,10 @@ namespace ams::kern {
     static_assert(KMemoryRegionType_VirtualDramKernelPtHeap     .GetValue() == 0x2A);
     static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
 
+    /* UNUSED: .DeriveSparse(2, 2, 0); */
+    constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug = KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
+    static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
+
     constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt   = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
     constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
     constexpr inline const auto KMemoryRegionType_VirtualDramUserPool       = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
@@ -292,6 +297,8 @@ namespace ams::kern {
                 return KMemoryRegionType_VirtualDramKernelTraceBuffer;
             } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
                 return KMemoryRegionType_VirtualDramKernelPtHeap;
+            } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
+                return KMemoryRegionType_VirtualDramUnknownDebug;
             } else {
                 return KMemoryRegionType_Dram;
             }
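Note: `(type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id` is an is-flag-set test; ORing a bit in changes nothing exactly when the bit was already set. With the new branch, any DRAM region carrying ShouldKernelMap that is not pt-heap or trace-buffer now maps to the new VirtualDramUnknownDebug type. A minimal illustration (flag value from the first hunk; the 0x4A type id is illustrative only):

#include <cstdint>

constexpr uint32_t ShouldKernelMap = 0x10000000;

constexpr bool HasShouldKernelMap(uint32_t type_id) {
    return (type_id | ShouldKernelMap) == type_id;
}

static_assert( HasShouldKernelMap(0x4A | ShouldKernelMap));
static_assert(!HasShouldKernelMap(0x4A));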
@@ -556,13 +556,13 @@ namespace ams::kern::arch::arm64 {
         /* If we're not forcing an unmap, separate pages immediately. */
         if (!force) {
             const size_t size = num_pages * PageSize;
-            R_TRY(this->SeparatePages(virt_addr, std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size), page_list, reuse_ll));
+            R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
             if (num_pages > 1) {
                 const auto end_page  = virt_addr + size;
                 const auto last_page = end_page - PageSize;
 
                 auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
-                R_TRY(this->SeparatePages(last_page, std::min(GetInteger(end_page) & -GetInteger(end_page), size), page_list, reuse_ll));
+                R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
                 merge_guard.Cancel();
             }
         }
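Note on the min() argument: SeparatePages is told how coarse a block may remain at the split point. The address's own power-of-two alignment (its lowest set bit) bounds the largest block that can start there, and the total mapping size bounds it from above; the old code spelled the alignment out inline as `addr & -addr`, which this commit names util::GetAlignment (added in the ams::util hunk below). A self-contained sketch with illustrative addresses:

#include <algorithm>
#include <cstdint>

constexpr uint64_t PageSize = 0x1000;

constexpr uint64_t SeparationSize(uint64_t virt_addr, uint64_t num_pages) {
    const uint64_t size = num_pages * PageSize;
    return std::min(virt_addr & -virt_addr, size);   /* == std::min(util::GetAlignment(virt_addr), size) */
}

static_assert(SeparationSize(0x7FF800200000, 0x200) == 0x200000); /* 2 MB-aligned address, 2 MB range */
static_assert(SeparationSize(0x7FF800200000, 0x10)  == 0x10000);  /* capped by the 64 KB range size */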
@@ -1194,13 +1194,13 @@ namespace ams::kern::arch::arm64 {
 
         /* Separate pages before we change permissions. */
         const size_t size = num_pages * PageSize;
-        R_TRY(this->SeparatePages(virt_addr, std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size), page_list, reuse_ll));
+        R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
         if (num_pages > 1) {
             const auto end_page  = virt_addr + size;
             const auto last_page = end_page - PageSize;
 
             auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
-            R_TRY(this->SeparatePages(last_page, std::min(GetInteger(end_page) & -GetInteger(end_page), size), page_list, reuse_ll));
+            R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
             merge_guard.Cancel();
         }
 
@@ -1810,7 +1810,7 @@ namespace ams::kern {
 
             /* Select an address to map at. */
             KProcessAddress addr = Null<KProcessAddress>;
-            const size_t phys_alignment = std::min(std::min(GetInteger(phys_addr) & -GetInteger(phys_addr), size & -size), MaxPhysicalMapAlignment);
+            const size_t phys_alignment = std::min(std::min(util::GetAlignment(GetInteger(phys_addr)), util::GetAlignment(size)), MaxPhysicalMapAlignment);
             for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
                 const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
                 if (alignment > phys_alignment) {
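Note: phys_alignment is the largest power of two dividing both the physical address and the size, capped at MaxPhysicalMapAlignment; the loop then walks block types from largest to smallest, skipping any block coarser than that. A sketch of the walk, using the standard ARM64 4 KB-granule block sizes as assumed stand-ins for KPageTable::GetBlockSize (the kernel's real table may differ):

#include <cstddef>

/* Assumed sizes: 4 KB page, 64 KB contiguous pages, 2 MB L2 block, 1 GB L1 block. */
constexpr size_t BlockSizes[] = { 0x1000, 0x10000, 0x200000, 0x40000000 };

constexpr size_t LargestUsableBlock(size_t phys_alignment) {
    for (int i = 3; i >= 0; --i) {
        if (BlockSizes[i] <= phys_alignment) {   /* mirrors the `alignment > phys_alignment` skip */
            return BlockSizes[i];
        }
    }
    return 0;
}

static_assert(LargestUsableBlock(0x200000) == 0x200000);
static_assert(LargestUsableBlock(0x8000)   == 0x1000);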
@@ -1892,7 +1892,7 @@ namespace ams::kern {
 
             /* Select an address to map at. */
             KProcessAddress addr = Null<KProcessAddress>;
-            const size_t phys_alignment = std::min(std::min(GetInteger(phys_addr) & -GetInteger(phys_addr), size & -size), MaxPhysicalMapAlignment);
+            const size_t phys_alignment = std::min(std::min(util::GetAlignment(GetInteger(phys_addr)), util::GetAlignment(size)), MaxPhysicalMapAlignment);
             for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
                 const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
                 if (alignment > phys_alignment) {
@@ -43,6 +43,11 @@ namespace ams::util {
         return (value & invmask) == 0;
     }
 
+    template<typename T> requires std::unsigned_integral<T>
+    constexpr ALWAYS_INLINE T GetAlignment(T value) {
+        return value & -value;
+    }
+
     template<>
     constexpr ALWAYS_INLINE void *AlignUp<void *>(void *value, size_t alignment) {
         return reinterpret_cast<void *>(AlignUp(reinterpret_cast<uintptr_t>(value), alignment));
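Note: for an unsigned value, `value & -value` isolates the lowest set bit, which is exactly the largest power of two dividing the value, i.e. the natural alignment of an address or size (0 maps to 0, and callers min() the result against a size anyway). Standalone check of the helper as added above (ALWAYS_INLINE dropped for the sketch):

#include <concepts>
#include <cstdint>

template<typename T> requires std::unsigned_integral<T>
constexpr T GetAlignment(T value) {
    return value & -value;   /* lowest set bit == largest power-of-two divisor */
}

static_assert(GetAlignment<uint64_t>(0x1000)     == 0x1000);     /* page aligned */
static_assert(GetAlignment<uint64_t>(0x3000)     == 0x1000);     /* only 4 KB aligned */
static_assert(GetAlignment<uint64_t>(0x40000000) == 0x40000000); /* 1 GB aligned */
static_assert(GetAlignment<uint64_t>(0)          == 0);          /* degenerate case */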
@@ -46,6 +46,8 @@ namespace ams::kern::init {
     constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
     constexpr PageTableEntry KernelMmioAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_Device_nGnRE, PageTableEntry::Shareable_OuterShareable, PageTableEntry::MappingFlag_Mapped);
 
+    constexpr PageTableEntry KernelRwDataUncachedAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemoryNotCacheable, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
+
     void MapStackForCore(KInitialPageTable &page_table, KMemoryRegionType type, u32 core_id) {
         constexpr size_t StackSize  = PageSize;
         constexpr size_t StackAlign = PageSize;
@@ -77,8 +79,8 @@ namespace ams::kern::init {
         }
     }
 
-    void SetupInitialArguments(KInitialPageTable &ttbr1_table, KInitialPageAllocator &allocator) {
-        AMS_UNUSED(ttbr1_table, allocator);
+    void SetupInitialArguments(KInitialPageTable &init_pt, KInitialPageAllocator &allocator) {
+        AMS_UNUSED(init_pt, allocator);
 
         /* Get parameters for initial arguments. */
         const u64 ttbr0 = cpu::GetTtbr0El1();
@@ -98,7 +100,7 @@ namespace ams::kern::init {
            /* if (cpu::GetPhysicalAddressWritable(std::addressof(phys_addr), KVirtualAddress(init_args), true)) { */
            /*     g_init_arguments_phys_addr[i] = phys_addr; */
            /* } */
-            g_init_arguments_phys_addr[i] = ttbr1_table.GetPhysicalAddress(KVirtualAddress(init_args));
+            g_init_arguments_phys_addr[i] = init_pt.GetPhysicalAddress(KVirtualAddress(init_args));
 
            /* Set the arguments. */
            init_args->ttbr0 = ttbr0;
@@ -122,7 +124,7 @@ namespace ams::kern::init {
    }
 
    void InitializeCore(uintptr_t misc_unk_debug_phys_addr, uintptr_t initial_page_allocator_state) {
-        /* Ensure our first argument is page aligned (as we will map it if it is non-zero). */
+        /* Ensure our first argument is page aligned. */
        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(misc_unk_debug_phys_addr, PageSize));
 
        /* Restore the page allocator state setup by kernel loader. */
@@ -132,7 +134,7 @@ namespace ams::kern::init {
        MESOSPHERE_INIT_ABORT_UNLESS((cpu::TranslationControlRegisterAccessor().GetT1Size() / arch::arm64::L1BlockSize) == arch::arm64::MaxPageTableEntries);
 
        /* Create page table object for use during initialization. */
-        KInitialPageTable ttbr1_table;
+        KInitialPageTable init_pt;
 
        /* Initialize the slab allocator counts. */
        InitializeSlabResourceCounts();
@@ -180,7 +182,14 @@ namespace ams::kern::init {
                MESOSPHERE_INIT_ABORT_UNLESS(region.GetEndAddress() != 0);
 
                /* Account for the region. */
-                misc_region_needed_size += PageSize + (util::AlignUp(region.GetLastAddress(), PageSize) - util::AlignDown(region.GetAddress(), PageSize));
+                const auto aligned_start     = util::AlignDown(region.GetAddress(), PageSize);
+                const auto aligned_end       = util::AlignUp(region.GetLastAddress(), PageSize);
+                const size_t cur_region_size = aligned_end - aligned_start;
+                misc_region_needed_size += cur_region_size;
+
+                /* Account for alignment requirements. */
+                const size_t min_align = std::min<size_t>(util::GetAlignment(cur_region_size), util::GetAlignment(aligned_start));
+                misc_region_needed_size += min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize;
            }
        }
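Note: the old accounting reserved a flat extra page per region; the new code reserves a full ASLR alignment of slack whenever the region's size and start are both aligned enough to be placed at KernelAslrAlignment, matching the guarded allocations performed later. Worked example under assumed constants (4 KB page, 2 MB KernelAslrAlignment; both are assumptions, not read from this diff):

#include <algorithm>
#include <cstddef>
#include <cstdint>

constexpr size_t PageSize            = 0x1000;    /* assumed */
constexpr size_t KernelAslrAlignment = 0x200000;  /* assumed */

constexpr size_t AccountRegion(uintptr_t start, uintptr_t last) {
    const uintptr_t aligned_start   = start & ~(PageSize - 1);                   /* util::AlignDown */
    const uintptr_t aligned_end     = (last + (PageSize - 1)) & ~(PageSize - 1); /* util::AlignUp   */
    const size_t    cur_region_size = aligned_end - aligned_start;
    const size_t    min_align       = std::min<size_t>(cur_region_size & -cur_region_size,
                                                       aligned_start & -aligned_start);
    return cur_region_size + (min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize);
}

static_assert(AccountRegion(0x80200000, 0x803FFFFF) == 0x200000 + 0x200000); /* aligned 2 MB region: full guard */
static_assert(AccountRegion(0x80201000, 0x80203FFF) == 0x3000 + 0x1000);     /* odd region: one page, as before */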
@@ -215,7 +224,7 @@ namespace ams::kern::init {
        MESOSPHERE_INIT_ABORT_UNLESS(slab_region_size <= resource_region_size);
 
        /* Setup the slab region. */
-        const KPhysicalAddress code_start_phys_addr = ttbr1_table.GetPhysicalAddressOfRandomizedRange(code_start_virt_addr, code_region_size);
+        const KPhysicalAddress code_start_phys_addr = init_pt.GetPhysicalAddressOfRandomizedRange(code_start_virt_addr, code_region_size);
        const KPhysicalAddress code_end_phys_addr   = code_start_phys_addr + code_region_size;
        const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
        const KPhysicalAddress slab_end_phys_addr   = slab_start_phys_addr + slab_region_size;
@@ -230,52 +239,116 @@ namespace ams::kern::init {
        const KVirtualAddress temp_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp));
 
-        /* Setup the Misc Unknown Debug region, if it's not zero. */
-        if (misc_unk_debug_phys_addr) {
-            constexpr size_t MiscUnknownDebugRegionAlign = PageSize;
-            const size_t misc_unk_debug_size = GetMiscUnknownDebugRegionSize();
-            const KVirtualAddress misc_unk_debug_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(misc_unk_debug_size, MiscUnknownDebugRegionAlign, KMemoryRegionType_KernelMisc, PageSize);
-            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(misc_unk_debug_virt_addr), misc_unk_debug_size, KMemoryRegionType_KernelMiscUnknownDebug));
-            ttbr1_table.Map(misc_unk_debug_virt_addr, misc_unk_debug_size, misc_unk_debug_phys_addr, KernelRoDataAttribute, g_initial_page_allocator);
-        }
-
-        /* Automatically map in devices that have auto-map attributes. */
-        for (auto &region : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
-            /* We only care about kernel regions. */
-            if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
-                continue;
-            }
-
-            /* Check whether we should map the region. */
-            if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
-                continue;
-            }
-
-            /* If this region has already been mapped, no need to consider it. */
-            if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
-                continue;
-            }
-
-            /* Check that the region is valid. */
-            MESOSPHERE_INIT_ABORT_UNLESS(region.GetEndAddress() != 0);
-
-            /* Set the attribute to note we've mapped this region. */
-            region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
-
-            /* Create a virtual pair region and insert it into the tree. */
-            const KPhysicalAddress map_phys_addr = util::AlignDown(region.GetAddress(), PageSize);
-            const size_t map_size = util::AlignUp(region.GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
-            const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
-            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
-            region.SetPairAddress(GetInteger(map_virt_addr) + region.GetAddress() - GetInteger(map_phys_addr));
-
-            /* Map the page in to our page table. */
-            ttbr1_table.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator);
+        /* Automatically map in devices that have auto-map attributes, from largest region to smallest region. */
+        {
+            /* We want to map the regions from largest to smallest. */
+            KMemoryRegion *largest;
+            do {
+                /* Begin with no knowledge of the largest region. */
+                largest = nullptr;
+
+                for (auto &region : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
+                    /* We only care about kernel regions. */
+                    if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
+                        continue;
+                    }
+
+                    /* Check whether we should map the region. */
+                    if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                        continue;
+                    }
+
+                    /* If this region has already been mapped, no need to consider it. */
+                    if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
+                        continue;
+                    }
+
+                    /* Check that the region is valid. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(region.GetEndAddress() != 0);
+
+                    /* Update the largest region. */
+                    if (largest == nullptr || largest->GetSize() < region.GetSize()) {
+                        largest = std::addressof(region);
+                    }
+                }
+
+                /* If we found a region, map it. */
+                if (largest != nullptr) {
+                    /* Set the attribute to note we've mapped this region. */
+                    largest->SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
+
+                    /* Create a virtual pair region and insert it into the tree. */
+                    const KPhysicalAddress map_phys_addr = util::AlignDown(largest->GetAddress(), PageSize);
+                    const size_t map_size  = util::AlignUp(largest->GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
+                    const size_t min_align = std::min<size_t>(util::GetAlignment(map_size), util::GetAlignment(GetInteger(map_phys_addr)));
+                    const size_t map_align = min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize;
+                    const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, map_align, KMemoryRegionType_KernelMisc, map_align);
+                    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
+                    largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr));
+
+                    /* Map the page in to our page table. */
+                    init_pt.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator);
+                }
+            } while (largest != nullptr);
        }
 
        /* Setup the basic DRAM regions. */
        SetupDramPhysicalMemoryRegions();
 
+        /* Automatically map in reserved physical memory that has auto-map attributes. */
+        {
+            /* We want to map the regions from largest to smallest. */
+            KMemoryRegion *largest;
+            do {
+                /* Begin with no knowledge of the largest region. */
+                largest = nullptr;
+
+                for (auto &region : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
+                    /* We only care about reserved memory. */
+                    if (!region.IsDerivedFrom(KMemoryRegionType_DramReservedBase)) {
+                        continue;
+                    }
+
+                    /* Check whether we should map the region. */
+                    if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                        continue;
+                    }
+
+                    /* If this region has already been mapped, no need to consider it. */
+                    if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
+                        continue;
+                    }
+
+                    /* Check that the region is valid. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(region.GetEndAddress() != 0);
+
+                    /* Update the largest region. */
+                    if (largest == nullptr || largest->GetSize() < region.GetSize()) {
+                        largest = std::addressof(region);
+                    }
+                }
+
+                /* If we found a region, map it. */
+                if (largest != nullptr) {
+                    /* Set the attribute to note we've mapped this region. */
+                    largest->SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
+
+                    /* Create a virtual pair region and insert it into the tree. */
+                    const KPhysicalAddress map_phys_addr = util::AlignDown(largest->GetAddress(), PageSize);
+                    const size_t map_size  = util::AlignUp(largest->GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
+                    const size_t min_align = std::min<size_t>(util::GetAlignment(map_size), util::GetAlignment(GetInteger(map_phys_addr)));
+                    const size_t map_align = min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize;
+                    const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, map_align, KMemoryRegionType_KernelMisc, map_align);
+                    MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscUnknownDebug));
+                    largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr));
+
+                    /* Map the page in to our page table. */
+                    const auto attribute = largest->HasTypeAttribute(KMemoryRegionAttr_Uncached) ? KernelRwDataUncachedAttribute : KernelRwDataAttribute;
+                    init_pt.Map(map_virt_addr, map_size, map_phys_addr, attribute, g_initial_page_allocator);
+                }
+            } while (largest != nullptr);
+        }
+
        /* Insert a physical region for the kernel code region. */
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(code_start_phys_addr), code_region_size, KMemoryRegionType_DramKernelCode));
 
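Note: both new blocks use the same allocation-free selection: each do/while pass scans the whole physical region tree for the largest candidate that still lacks DidKernelMap, maps it, marks it, and stops when a pass finds nothing. That is quadratic in the region count, but the count is small and there is no heap to sort with this early in init; mapping the large regions first also means the ASLR-friendly map_align is chosen while the virtual misc region is least fragmented (a plausible rationale, not stated in the commit). The reserved-memory pass additionally selects KernelRwDataUncachedAttribute when the region carries the new Uncached attribute. A minimal stand-alone sketch of the selection pattern (Region is a hypothetical stand-in for KMemoryRegion):

#include <cstddef>
#include <memory>
#include <vector>

struct Region {
    size_t size;
    bool should_map;
    bool did_map;
};

void MapLargestFirst(std::vector<Region> &regions) {
    Region *largest;
    do {
        /* Begin with no knowledge of the largest region. */
        largest = nullptr;
        for (auto &region : regions) {
            if (!region.should_map || region.did_map) {
                continue;
            }
            if (largest == nullptr || largest->size < region.size) {
                largest = std::addressof(region);
            }
        }
        /* If we found a region, "map" it and mark it handled. */
        if (largest != nullptr) {
            largest->did_map = true;
        }
    } while (largest != nullptr);
}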
@@ -283,11 +356,11 @@ namespace ams::kern::init {
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
 
        /* Map the slab region. */
-        ttbr1_table.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
+        init_pt.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
 
        /* Physically randomize the slab region. */
        /* NOTE: Nintendo does this only on 10.0.0+ */
-        ttbr1_table.PhysicallyRandomize(slab_region_start, slab_region_size, false);
+        init_pt.PhysicallyRandomize(slab_region_start, slab_region_size, false);
 
        /* Determine size available for kernel page table heaps, requiring > 8 MB. */
        const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
@@ -338,14 +411,16 @@ namespace ams::kern::init {
                    cur_size += region.GetSize();
                } else {
                    const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff;
-                    ttbr1_table.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
+                    init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
                    cur_phys_addr = region.GetAddress();
                    cur_size      = region.GetSize();
                }
 
                const uintptr_t region_virt_addr = region.GetAddress() + linear_region_phys_to_virt_diff;
+                if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                    region.SetPairAddress(region_virt_addr);
+                }
                MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(region_virt_addr, region.GetSize(), GetTypeForVirtualLinearMapping(region.GetType())));
-                region.SetPairAddress(region_virt_addr);
 
                KMemoryRegion *virt_region = KMemoryLayout::GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
                MESOSPHERE_INIT_ABORT_UNLESS(virt_region != nullptr);
@@ -355,7 +430,7 @@ namespace ams::kern::init {
            /* Map the last block, which we may have skipped. */
            if (cur_size != 0) {
                const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff;
-                ttbr1_table.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
+                init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
            }
        }
 
@@ -363,18 +438,18 @@ namespace ams::kern::init {
        std::memset(GetVoidPointer(slab_region_start), 0, slab_region_size);
 
        /* NOTE: Unknown function is called here which is ifdef'd out on retail kernel. */
-        /* The unknown function is immediately before the function which gets the unknown debug region size, inside this translation unit. */
-        /* It's likely that this is some kind of initializer for the unknown debug region. */
+        /* The unknown function is immediately before the function which gets an unknown debug region size, inside this translation unit. */
+        /* It's likely that this is some kind of initializer for this unknown debug region. */
 
        /* Create regions for and map all core-specific stacks. */
        for (size_t i = 0; i < cpu::NumCores; i++) {
-            MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscMainStack, i);
-            MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscIdleStack, i);
-            MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscExceptionStack, i);
+            MapStackForCore(init_pt, KMemoryRegionType_KernelMiscMainStack, i);
+            MapStackForCore(init_pt, KMemoryRegionType_KernelMiscIdleStack, i);
+            MapStackForCore(init_pt, KMemoryRegionType_KernelMiscExceptionStack, i);
        }
 
        /* Setup the initial arguments. */
-        SetupInitialArguments(ttbr1_table, g_initial_page_allocator);
+        SetupInitialArguments(init_pt, g_initial_page_allocator);
 
        /* Finalize the page allocator, we're done allocating at this point. */
        KInitialPageAllocator::State final_init_page_table_state;
@@ -400,7 +475,7 @@ namespace ams::kern::init {
        KMemoryLayout::InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, linear_region_start);
 
        /* Turn on all other cores. */
-        TurnOnAllCores(GetInteger(ttbr1_table.GetPhysicalAddress(reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore))));
+        TurnOnAllCores(GetInteger(init_pt.GetPhysicalAddress(reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore))));
    }
 
    KPhysicalAddress GetInitArgumentsAddress(s32 core_id) {