mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-12-22 12:21:18 +00:00)

commit a33576e674 (parent 1cf3b24c2d)
kern: update Initialize0 for new arguments/randomization semantics

13 changed files with 160 additions and 115 deletions
@@ -293,7 +293,7 @@ namespace ams::kern::arch::arm64::init {
                 }

                 /* Swap the mappings. */
-                const u64 attr_preserve_mask = (negative_block_size_for_mask | 0xFFFF000000000000ul) ^ ((1ul << 48) - 1);
+                const u64 attr_preserve_mask = (block_size - 1) | 0xFFFF000000000000ul;
                 const size_t shift_for_contig = contig ? 4 : 0;
                 size_t advanced_size = 0;
                 const u64 src_attr_val = src_saved.GetRawAttributesUnsafeForSwap() & attr_preserve_mask;
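Review note: the two forms of attr_preserve_mask are equivalent whenever negative_block_size_for_mask is ~(block_size - 1); the new expression just states directly that the mask keeps the low attribute bits below the block size plus the software-reserved top 16 bits. A quick constexpr check (a sketch; the block_size value is assumed for illustration):

    #include <cstdint>

    constexpr std::uint64_t block_size = 0x1000; /* illustrative 4KB block */
    constexpr std::uint64_t negative_block_size_for_mask = ~(block_size - 1); /* == -block_size */

    constexpr std::uint64_t old_mask = (negative_block_size_for_mask | 0xFFFF000000000000ul) ^ ((1ul << 48) - 1);
    constexpr std::uint64_t new_mask = (block_size - 1) | 0xFFFF000000000000ul;

    static_assert(old_mask == new_mask);              /* both evaluate to 0xFFFF000000000FFF here */
    static_assert(new_mask == 0xFFFF000000000FFFul);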
@@ -726,8 +726,8 @@ namespace ams::kern::arch::arm64::init {
                m_state.end_address = address;
            }

-           ALWAYS_INLINE void InitializeFromState(uintptr_t state_val) {
-               m_state = *reinterpret_cast<State *>(state_val);
+           ALWAYS_INLINE void InitializeFromState(const State *state) {
+               m_state = *state;
            }

            ALWAYS_INLINE void GetFinalState(State *out) {
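Review note: InitializeFromState previously smuggled the pointer through a uintptr_t and a reinterpret_cast; taking const State * makes the kernelldr-to-kernel handoff type-safe with no behavioral change. A minimal sketch of the pattern (the State fields shown are assumed, for illustration only):

    #include <cstdint>

    struct State {
        uintptr_t start_address; /* assumed fields */
        uintptr_t end_address;
    };

    class KInitialPageAllocator {
        private:
            State m_state{};
        public:
            /* New style: caller passes a typed pointer; no cast needed. */
            void InitializeFromState(const State *state) { m_state = *state; }
            void GetFinalState(State *out) { *out = m_state; }
    };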
@@ -16,6 +16,12 @@
 #pragma once
 #include <mesosphere/kern_common.hpp>

+namespace ams::kern {
+
+    struct InitialProcessBinaryLayout;
+
+}
+
 namespace ams::kern::board::nintendo::nx {

     class KSystemControl {
@@ -25,7 +31,7 @@ namespace ams::kern::board::nintendo::nx {
                /* Initialization. */
                static size_t GetIntendedMemorySize();
                static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
-               static KPhysicalAddress GetInitialProcessBinaryPhysicalAddress();
+               static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
                static bool ShouldIncreaseThreadResourceLimit();
                static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
                static size_t GetApplicationPoolSize();
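Review note: the header only forms a pointer to the layout, so the forward declaration of ams::kern::InitialProcessBinaryLayout added above suffices; the full definition (declared alongside InitialProcessBinaryHeader, see the next hunk) is only needed by code that dereferences it. Sketch of why an incomplete type works here:

    namespace ams::kern { struct InitialProcessBinaryLayout; } /* incomplete type */

    namespace ams::kern::board::nintendo::nx {

        class KSystemControl {
            public:
                /* OK: declaring a pointer to an incomplete type needs no definition. */
                static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
        };

    }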
@@ -29,14 +29,19 @@ namespace ams::kern {
        u32 reserved;
    };

-   NOINLINE size_t CopyInitialProcessBinaryToKernelMemory();
-   NOINLINE void CreateAndRunInitialProcesses();
+   struct InitialProcessBinaryLayout {
+       uintptr_t address;
+       uintptr_t _08;
+   };
+
+   KPhysicalAddress GetInitialProcessBinaryPhysicalAddress();
+   void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr);

    u64 GetInitialProcessIdMin();
    u64 GetInitialProcessIdMax();
-   KVirtualAddress GetInitialProcessBinaryAddress();
    size_t GetInitialProcessesSecureMemorySize();

-   void LoadInitialProcessBinaryHeaderDeprecated(KPhysicalAddress pool_end);
+   NOINLINE size_t CopyInitialProcessBinaryToKernelMemory();
+   NOINLINE void CreateAndRunInitialProcesses();

 }
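Review note: the `_08` member names its byte offset; per the board implementation below it is currently always written as zero, so it reads as a reserved second word of the layout. A quick layout check (a sketch, assuming an LP64 target):

    #include <cstddef>
    #include <cstdint>

    struct InitialProcessBinaryLayout {
        uintptr_t address;
        uintptr_t _08;
    };

    static_assert(offsetof(InitialProcessBinaryLayout, _08) == 0x08);
    static_assert(sizeof(InitialProcessBinaryLayout) == 0x10);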
@@ -237,12 +237,6 @@ namespace ams::kern {
        public:
            NOINLINE void InsertDirectly(uintptr_t address, uintptr_t last_address, u32 attr = 0, u32 type_id = 0);
            NOINLINE bool Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
-
-           NOINLINE KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
-
-           ALWAYS_INLINE KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, size_t guard_size) {
-               return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
-           }
        public:
            /* Iterator accessors. */
            iterator begin() {
@@ -346,8 +346,11 @@ namespace ams::kern::board::nintendo::nx {
            }
        }

-       KPhysicalAddress KSystemControl::Init::GetInitialProcessBinaryPhysicalAddress() {
-           return GetKernelPhysicalBaseAddress(DramPhysicalAddress) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax;
+       void KSystemControl::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
+           *out = {
+               .address = GetInteger(GetKernelPhysicalBaseAddress(DramPhysicalAddress)) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax,
+               ._08     = 0,
+           };
        }

        bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
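Review note: the layout's address field is the same computation the old GetInitialProcessBinaryPhysicalAddress returned: top of intended DRAM, minus the trace buffer, minus the maximum INI1 size. Worked through with illustrative values (every constant below is an assumption for the example, not the kernel's actual configuration):

    #include <cstdint>

    constexpr uintptr_t DramPhysicalAddress         = 0x80000000;  /* Tegra X1 DRAM base */
    constexpr size_t    IntendedMemorySize          = 4ul << 30;   /* assume a 4GB unit */
    constexpr size_t    KTraceBufferSize            = 0;           /* assume tracing disabled */
    constexpr size_t    InitialProcessBinarySizeMax = 12ul << 20;  /* assume a 12MB cap */

    constexpr uintptr_t ini_address =
        DramPhysicalAddress + IntendedMemorySize - KTraceBufferSize - InitialProcessBinarySizeMax;
    static_assert(ini_address == 0x17F400000); /* 12MB below the top of DRAM, with these values */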
@@ -25,18 +25,18 @@ namespace ams::kern {
        s32 priority;
    };

+   constinit KPhysicalAddress g_initial_process_binary_phys_addr = Null<KPhysicalAddress>;
    constinit KVirtualAddress g_initial_process_binary_address = Null<KVirtualAddress>;
    constinit InitialProcessBinaryHeader g_initial_process_binary_header = {};
    constinit size_t g_initial_process_secure_memory_size = 0;
    constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
    constinit u64 g_initial_process_id_max = std::numeric_limits<u64>::min();

-   void LoadInitialProcessBinaryHeader(KVirtualAddress virt_addr = Null<KVirtualAddress>) {
+   void LoadInitialProcessBinaryHeader() {
        if (g_initial_process_binary_header.magic != InitialProcessBinaryMagic) {
-           /* Get the virtual address, if it's not overridden. */
-           if (virt_addr == Null<KVirtualAddress>) {
-               virt_addr = GetInitialProcessBinaryAddress();
-           }
+           /* Get the virtual address. */
+           MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr != Null<KPhysicalAddress>);
+           const KVirtualAddress virt_addr = KMemoryLayout::GetLinearVirtualAddress(g_initial_process_binary_phys_addr);

            /* Copy and validate the header. */
            g_initial_process_binary_header = *GetPointer<InitialProcessBinaryHeader>(virt_addr);
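Review note: LoadInitialProcessBinaryHeader no longer accepts an override address; it derives the virtual address from the stored physical one through the kernel's linear mapping, which translates by the constant offset established during init (see the linear_region_phys_to_virt_diff setup later in this commit). Conceptually (a sketch; g_phys_to_virt_diff is a hypothetical stand-in for that stored offset):

    #include <cstdint>

    extern uintptr_t g_phys_to_virt_diff; /* hypothetical: linear_region_start - aligned_linear_phys_start */

    /* The linear map is a single constant translation. */
    uintptr_t GetLinearVirtualAddress(uintptr_t phys_addr) {
        return phys_addr + g_phys_to_virt_diff;
    }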
@@ -273,10 +273,18 @@ namespace ams::kern {
        }
    }

-   ALWAYS_INLINE KVirtualAddress GetInitialProcessBinaryAddress(KVirtualAddress pool_end) {
-       return pool_end - InitialProcessBinarySizeMax;
+   void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr) {
+       MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr == Null<KPhysicalAddress>);
+
+       g_initial_process_binary_phys_addr = phys_addr;
+   }
+
+   KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
+       MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr != Null<KPhysicalAddress>);
+
+       return g_initial_process_binary_phys_addr;
    }

    u64 GetInitialProcessIdMin() {
@@ -287,15 +295,6 @@ namespace ams::kern {
        return g_initial_process_id_max;
    }

-   KVirtualAddress GetInitialProcessBinaryAddress() {
-       /* Get, validate the pool region. */
-       const auto *pool_region = KMemoryLayout::GetVirtualMemoryRegionTree().FindLastDerived(KMemoryRegionType_VirtualDramUserPool);
-       MESOSPHERE_INIT_ABORT_UNLESS(pool_region != nullptr);
-       MESOSPHERE_INIT_ABORT_UNLESS(pool_region->GetEndAddress() != 0);
-       MESOSPHERE_ABORT_UNLESS(pool_region->GetSize() >= InitialProcessBinarySizeMax);
-
-       return GetInitialProcessBinaryAddress(pool_region->GetEndAddress());
-   }
-
    size_t GetInitialProcessesSecureMemorySize() {
        LoadInitialProcessBinaryHeader();
@@ -321,10 +320,6 @@ namespace ams::kern {
        }
    }

-   void LoadInitialProcessBinaryHeaderDeprecated(KPhysicalAddress pool_end) {
-       LoadInitialProcessBinaryHeader(GetInitialProcessBinaryAddress(KMemoryLayout::GetLinearVirtualAddress(pool_end)));
-   }
-
    void CreateAndRunInitialProcesses() {
        /* Allocate space for the processes. */
        InitialProcessInfo *infos = static_cast<InitialProcessInfo *>(__builtin_alloca(sizeof(InitialProcessInfo) * g_initial_process_binary_header.num_processes));
@@ -190,12 +190,6 @@ namespace ams::kern {
            static_assert(KMemoryManager::Pool_Unsafe == KMemoryManager::Pool_Application);
            static_assert(KMemoryManager::Pool_Secure == KMemoryManager::Pool_System);

-           /* NOTE: Beginning with 12.0.0 (and always, in mesosphere), the initial process binary is at the end of the pool region. */
-           /* However, this is problematic for < 5.0.0, because we require the initial process binary to be parsed in order */
-           /* to determine the pool sizes. Hence, we will force an initial binary load with the known pool end directly, so */
-           /* that we retain compatibility with lower firmware versions. */
-           LoadInitialProcessBinaryHeaderDeprecated(pool_end);
-
            /* Get Secure pool size. */
            const size_t secure_pool_size = [] ALWAYS_INLINE_LAMBDA (auto target_firmware) -> size_t {
                constexpr size_t LegacySecureKernelSize = 8_MB; /* KPageBuffer pages, other small kernel allocations. */
@@ -111,43 +111,6 @@ namespace ams::kern {
        return true;
    }

-   KVirtualAddress KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
-       /* We want to find the total extents of the type id. */
-       const auto extents = this->GetDerivedRegionExtents(type_id);
-
-       /* Ensure that our alignment is correct. */
-       MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(extents.GetAddress(), alignment));
-
-       const uintptr_t first_address = extents.GetAddress();
-       const uintptr_t last_address  = extents.GetLastAddress();
-
-       const uintptr_t first_index = first_address / alignment;
-       const uintptr_t last_index  = last_address / alignment;
-
-       while (true) {
-           const uintptr_t candidate = KSystemControl::Init::GenerateRandomRange(first_index, last_index) * alignment;
-
-           /* Ensure that the candidate doesn't overflow with the size. */
-           if (!(candidate < candidate + size)) {
-               continue;
-           }
-
-           const uintptr_t candidate_last = candidate + size - 1;
-
-           /* Ensure that the candidate fits within the region. */
-           if (candidate_last > last_address) {
-               continue;
-           }
-
-           /* Locate the candidate region, and ensure it fits and has the correct type id. */
-           if (const auto &candidate_region = *this->Find(candidate); !(candidate_last <= candidate_region.GetLastAddress() && candidate_region.GetType() == type_id)) {
-               continue;
-           }
-
-           return candidate;
-       }
-   }
-
    void KMemoryLayout::InitializeLinearMemoryRegionTrees() {
        /* Initialize linear trees. */
        for (auto &region : GetPhysicalMemoryRegionTree()) {
@@ -107,7 +107,7 @@ namespace ams::kern {

        /* Free each region to its corresponding heap. */
        size_t reserved_sizes[MaxManagerCount] = {};
-       const KPhysicalAddress ini_start = KMemoryLayout::GetLinearPhysicalAddress(GetInitialProcessBinaryAddress());
+       const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
        const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
        const KPhysicalAddress ini_last = ini_end - 1;
        for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
@@ -48,16 +48,6 @@ namespace ams::kern::init {

        constexpr PageTableEntry KernelRwDataUncachedAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemoryNotCacheable, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);

-       void MapStackForCore(KInitialPageTable &page_table, KMemoryRegionType type, u32 core_id) {
-           constexpr size_t StackSize  = PageSize;
-           constexpr size_t StackAlign = PageSize;
-           const KVirtualAddress  stack_start_virt = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(StackSize, StackAlign, KMemoryRegionType_KernelMisc, PageSize);
-           const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate(PageSize);
-           MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id));
-
-           page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator);
-       }
-
        void StoreDataCache(const void *addr, size_t size) {
            uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), cpu::DataCacheLineSize);
            uintptr_t end   = reinterpret_cast<uintptr_t>(addr) + size;
@@ -121,14 +111,78 @@ namespace ams::kern::init {
            StoreDataCache(g_init_arguments, sizeof(g_init_arguments));
        }

+       KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, KInitialPageTable &pt, KMemoryRegionTree &tree, u32 type_id, size_t guard_size) {
+           /* Check that the size is valid. */
+           MESOSPHERE_INIT_ABORT_UNLESS(size > 0);
+
+           /* We want to find the total extents of the type id. */
+           const auto extents = tree.GetDerivedRegionExtents(type_id);
+
+           /* Ensure that our alignment is correct. */
+           MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(extents.GetAddress(), alignment));
+
+           const uintptr_t first_address = extents.GetAddress();
+           const uintptr_t last_address  = extents.GetLastAddress();
+
+           const uintptr_t first_index = first_address / alignment;
+           const uintptr_t last_index  = last_address / alignment;
+
+           while (true) {
+               const uintptr_t candidate_start = KSystemControl::Init::GenerateRandomRange(first_index, last_index) * alignment;
+               const uintptr_t candidate_end   = candidate_start + size + guard_size;
+
+               /* Ensure that the candidate doesn't overflow with the size/guard. */
+               if (!(candidate_start < candidate_end) || !(candidate_start >= guard_size)) {
+                   continue;
+               }
+
+               const uintptr_t candidate_last = candidate_end - 1;
+
+               /* Ensure that the candidate fits within the region. */
+               if (candidate_last > last_address) {
+                   continue;
+               }
+
+               /* Ensure that the candidate range is free. */
+               if (!pt.IsFree(candidate_start, size)) {
+                   continue;
+               }
+
+               /* Locate the candidate's guard start, and ensure the whole range fits/has the correct type id. */
+               if (const auto &candidate_region = *tree.Find(candidate_start - guard_size); !(candidate_last <= candidate_region.GetLastAddress() && candidate_region.GetType() == type_id)) {
+                   continue;
+               }
+
+               return candidate_start;
+           }
+       }
+
+       KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, KInitialPageTable &pt, KMemoryRegionTree &tree, u32 type_id) {
+           return GetRandomAlignedRegionWithGuard(size, alignment, pt, tree, type_id, 0);
+       }
+
+       void MapStackForCore(KInitialPageTable &page_table, KMemoryRegionType type, u32 core_id) {
+           constexpr size_t StackSize  = PageSize;
+           constexpr size_t StackAlign = PageSize;
+           const KVirtualAddress  stack_start_virt = GetRandomAlignedRegionWithGuard(StackSize, StackAlign, page_table, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_KernelMisc, PageSize);
+           const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate(PageSize);
+           MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id));

+           page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator);
+       }
+
    }

-   void InitializeCore(uintptr_t misc_unk_debug_phys_addr, uintptr_t initial_page_allocator_state) {
+   void InitializeCore(uintptr_t misc_unk_debug_phys_addr, void **initial_state) {
        /* Ensure our first argument is page aligned. */
        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(misc_unk_debug_phys_addr, PageSize));

+       /* Decode the initial state. */
+       const auto initial_page_allocator_state  = *static_cast<KInitialPageAllocator::State *>(initial_state[0]);
+       const auto initial_process_binary_layout = *static_cast<InitialProcessBinaryLayout *>(initial_state[1]);
+
        /* Restore the page allocator state setup by kernel loader. */
-       g_initial_page_allocator.InitializeFromState(initial_page_allocator_state);
+       g_initial_page_allocator.InitializeFromState(std::addressof(initial_page_allocator_state));

        /* Ensure that the T1SZ is correct (and what we expect). */
        MESOSPHERE_INIT_ABORT_UNLESS((cpu::TranslationControlRegisterAccessor().GetT1Size() / arch::arm64::L1BlockSize) == arch::arm64::MaxPageTableEntries);
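Review note: compared to the removed KMemoryRegionTree::GetRandomAlignedRegion, the rewritten helper (a) rejects size == 0, (b) rejects candidates whose pages are already in use via pt.IsFree, and (c) folds guard handling in directly instead of the old `size + 2 * guard_size` wrapper: it requires guard_size bytes of the same region type both before candidate_start and after the allocation. The span arithmetic, with assumed example numbers:

    #include <cstdint>

    constexpr std::size_t    size            = 0x4000;   /* assumed */
    constexpr std::size_t    guard_size      = 0x1000;   /* assumed */
    constexpr std::uintptr_t candidate_start = 0x100000; /* assumed: GenerateRandomRange(...) * alignment */

    constexpr std::uintptr_t guard_first    = candidate_start - guard_size;            /* leading guard  */
    constexpr std::uintptr_t candidate_last = candidate_start + size + guard_size - 1; /* trailing guard */

    /* Find(guard_first)..candidate_last must sit in one region of the right type, */
    /* while only [candidate_start, candidate_start + size) must be free in the page table. */
    static_assert(guard_first == 0x0FF000 && candidate_last == 0x104FFF);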
@@ -207,13 +261,13 @@ namespace ams::kern::init {
        MESOSPHERE_INIT_ABORT_UNLESS(misc_region_size > 0);

        /* Setup the misc region. */
-       const KVirtualAddress misc_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
+       const KVirtualAddress misc_region_start = GetRandomAlignedRegion(misc_region_size, MiscRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_Kernel);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(misc_region_start), misc_region_size, KMemoryRegionType_KernelMisc));

        /* Setup the stack region. */
        constexpr size_t StackRegionSize  = 14_MB;
        constexpr size_t StackRegionAlign = KernelAslrAlignment;
-       const KVirtualAddress stack_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
+       const KVirtualAddress stack_region_start = GetRandomAlignedRegion(StackRegionSize, StackRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_Kernel);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack));

        /* Determine the size of the resource region. */
@@ -230,13 +284,13 @@ namespace ams::kern::init {
        const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
        constexpr size_t SlabRegionAlign = KernelAslrAlignment;
        const size_t slab_region_needed_size = util::AlignUp(GetInteger(code_end_phys_addr) + slab_region_size, SlabRegionAlign) - util::AlignDown(GetInteger(code_end_phys_addr), SlabRegionAlign);
-       const KVirtualAddress slab_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + (GetInteger(code_end_phys_addr) % SlabRegionAlign);
+       const KVirtualAddress slab_region_start = GetRandomAlignedRegion(slab_region_needed_size, SlabRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_Kernel) + (GetInteger(code_end_phys_addr) % SlabRegionAlign);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(slab_region_start), slab_region_size, KMemoryRegionType_KernelSlab));

        /* Setup the temp region. */
        constexpr size_t TempRegionSize  = 128_MB;
        constexpr size_t TempRegionAlign = KernelAslrAlignment;
-       const KVirtualAddress temp_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
+       const KVirtualAddress temp_region_start = GetRandomAlignedRegion(TempRegionSize, TempRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_Kernel);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp));

        /* Automatically map in devices that have auto-map attributes, from largest region to smallest region. */
@@ -282,7 +336,7 @@ namespace ams::kern::init {
        const size_t map_size  = util::AlignUp(largest->GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
        const size_t min_align = std::min<size_t>(util::GetAlignment(map_size), util::GetAlignment(GetInteger(map_phys_addr)));
        const size_t map_align = min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize;
-       const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, map_align, KMemoryRegionType_KernelMisc, map_align);
+       const KVirtualAddress map_virt_addr = GetRandomAlignedRegionWithGuard(map_size, map_align, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_KernelMisc, PageSize);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
        largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr));
@@ -338,7 +392,7 @@ namespace ams::kern::init {
        const size_t map_size  = util::AlignUp(largest->GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
        const size_t min_align = std::min<size_t>(util::GetAlignment(map_size), util::GetAlignment(GetInteger(map_phys_addr)));
        const size_t map_align = min_align >= KernelAslrAlignment ? KernelAslrAlignment : PageSize;
-       const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(map_size, map_align, KMemoryRegionType_KernelMisc, map_align);
+       const KVirtualAddress map_virt_addr = GetRandomAlignedRegionWithGuard(map_size, map_align, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_KernelMisc, PageSize);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscUnknownDebug));
        largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr));
@@ -388,7 +442,7 @@ namespace ams::kern::init {
        constexpr size_t LinearRegionAlign = 1_GB;
        const KPhysicalAddress aligned_linear_phys_start = util::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
        const size_t linear_region_size = util::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) - GetInteger(aligned_linear_phys_start);
-       const KVirtualAddress linear_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
+       const KVirtualAddress linear_region_start = GetRandomAlignedRegionWithGuard(linear_region_size, LinearRegionAlign, init_pt, KMemoryLayout::GetVirtualMemoryRegionTree(), KMemoryRegionType_None, LinearRegionAlign);

        const uintptr_t linear_region_phys_to_virt_diff = GetInteger(linear_region_start) - GetInteger(aligned_linear_phys_start);
@@ -471,9 +525,29 @@ namespace ams::kern::init {
        /* Set the linear memory offsets, to enable conversion between physical and virtual addresses. */
        KMemoryLayout::InitializeLinearMemoryAddresses(aligned_linear_phys_start, linear_region_start);

+       /* Set the initial process binary physical address. */
+       /* NOTE: Nintendo does this after pool partition setup, but it's a requirement that we do it before */
+       /* to retain compatibility with < 5.0.0. */
+       const KPhysicalAddress ini_address = initial_process_binary_layout.address;
+       MESOSPHERE_INIT_ABORT_UNLESS(ini_address != Null<KPhysicalAddress>);
+       SetInitialProcessBinaryPhysicalAddress(ini_address);
+
        /* Setup all other memory regions needed to arrange the pool partitions. */
        SetupPoolPartitionMemoryRegions();

+       /* Validate the initial process binary address. */
+       {
+           const KMemoryRegion *ini_region = KMemoryLayout::Find(ini_address);
+
+           /* Check that the region is non-kernel dram. */
+           MESOSPHERE_INIT_ABORT_UNLESS(ini_region->IsDerivedFrom(KMemoryRegionType_DramUserPool));
+
+           /* Check that the region contains the ini. */
+           MESOSPHERE_INIT_ABORT_UNLESS(ini_region->GetAddress() <= GetInteger(ini_address));
+           MESOSPHERE_INIT_ABORT_UNLESS(GetInteger(ini_address) + InitialProcessBinarySizeMax <= ini_region->GetEndAddress());
+           MESOSPHERE_INIT_ABORT_UNLESS(ini_region->GetEndAddress() != 0);
+       }
+
        /* Cache all linear regions in their own trees for faster access, later. */
        KMemoryLayout::InitializeLinearMemoryRegionTrees();
@@ -124,10 +124,10 @@ core0_el1:

    /* At this point kernelldr has been invoked, and we are relocated at a random virtual address. */
    /* Next thing to do is to set up our memory management and slabheaps -- all the other core initialization. */
-   /* Call ams::kern::init::InitializeCore(uintptr_t, uintptr_t) */
-   mov x1, x0  /* Kernelldr returns a KInitialPageAllocator state for the kernel to re-use. */
+   /* Call ams::kern::init::InitializeCore(uintptr_t, void **) */
+   mov x1, x0  /* Kernelldr returns a state object for the kernel to re-use. */
    mov x0, xzr /* Official kernel always passes zero, when this is non-zero the address is mapped. */
-   bl _ZN3ams4kern4init14InitializeCoreEmm
+   bl _ZN3ams4kern4init14InitializeCoreEmPPv

    /* Get the init arguments for core 0. */
    mov x0, xzr
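Review note: the new branch target is just the Itanium-mangled form of the changed signature. In `_ZN3ams4kern4init14InitializeCoreEmPPv`, `m` encodes unsigned long (uintptr_t here) and `PPv` encodes pointer-to-pointer-to-void, so the symbol demangles to ams::kern::init::InitializeCore(unsigned long, void**), matching the new C++ declaration; the old `Emm` suffix was the (unsigned long, unsigned long) version. Running either symbol through `c++filt` confirms the demangling.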
@@ -110,10 +110,10 @@ _main:
    str x0, [sp, #0x20]

-   /* Call ams::kern::init::loader::GetFinalPageAllocatorState() */
-   bl _ZN3ams4kern4init6loader26GetFinalPageAllocatorStateEv
+   /* Call ams::kern::init::loader::GetFinalState() */
+   bl _ZN3ams4kern4init6loader13GetFinalStateEv

-   /* X0 is now the saved state for the page allocator. */
+   /* X0 is now the saved state. */
    /* We will return this to the kernel. */

    /* Return to the newly-relocated kernel. */
@@ -42,9 +42,12 @@ namespace ams::kern::init::loader {
    static_assert(InitialPageTableRegionSizeMax < KernelPageTableHeapSize + KernelInitialPageHeapSize);

    /* Global Allocator. */
-   KInitialPageAllocator g_initial_page_allocator;
+   constinit KInitialPageAllocator g_initial_page_allocator;

-   KInitialPageAllocator::State g_final_page_allocator_state;
+   constinit KInitialPageAllocator::State g_final_page_allocator_state;
+   constinit InitialProcessBinaryLayout g_initial_process_binary_layout;
+
+   constinit void *g_final_state[2];

    void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout) {
        KPhysicalAddress correct_base = KSystemControl::Init::GetKernelPhysicalBaseAddress(base_address);
@@ -159,19 +162,21 @@ namespace ams::kern::init::loader {

        /* Determine the size of the resource region. */
        const size_t resource_region_size = KMemoryLayout::GetResourceRegionSizeForInit();
+       const uintptr_t resource_end_address = base_address + resource_offset + resource_region_size;

        /* Setup the INI1 header in memory for the kernel. */
-       const uintptr_t resource_end_address = base_address + resource_offset + resource_region_size;
-       const uintptr_t ini_load_address = GetInteger(KSystemControl::Init::GetInitialProcessBinaryPhysicalAddress());
-       if (ini_base_address != ini_load_address) {
+       KSystemControl::Init::GetInitialProcessBinaryLayout(std::addressof(g_initial_process_binary_layout));
+       MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_layout.address != 0);
+
+       if (ini_base_address != g_initial_process_binary_layout.address) {
            /* The INI is not at the correct address, so we need to relocate it. */
            const InitialProcessBinaryHeader *ini_header = reinterpret_cast<const InitialProcessBinaryHeader *>(ini_base_address);
            if (ini_header->magic == InitialProcessBinaryMagic && ini_header->size <= InitialProcessBinarySizeMax) {
                /* INI is valid, relocate it. */
-               std::memmove(reinterpret_cast<void *>(ini_load_address), ini_header, ini_header->size);
+               std::memmove(reinterpret_cast<void *>(g_initial_process_binary_layout.address), ini_header, ini_header->size);
            } else {
                /* INI is invalid. Make the destination header invalid. */
-               std::memset(reinterpret_cast<void *>(ini_load_address), 0, sizeof(InitialProcessBinaryHeader));
+               std::memset(reinterpret_cast<void *>(g_initial_process_binary_layout.address), 0, sizeof(InitialProcessBinaryHeader));
            }
        }

@@ -225,9 +230,15 @@ namespace ams::kern::init::loader {
        return g_initial_page_allocator.Allocate(PageSize) + PageSize;
    }

-   uintptr_t GetFinalPageAllocatorState() {
+   void **GetFinalState() {
+       /* Get final page allocator state. */
        g_initial_page_allocator.GetFinalState(std::addressof(g_final_page_allocator_state));
-       return reinterpret_cast<uintptr_t>(std::addressof(g_final_page_allocator_state));
+
+       /* Setup final kernel loader state. */
+       g_final_state[0] = std::addressof(g_final_page_allocator_state);
+       g_final_state[1] = std::addressof(g_initial_process_binary_layout);
+
+       return g_final_state;
    }

 }
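Review note: the slot order is the contract between kernelldr and the kernel: index 0 is the page allocator state, index 1 the initial process binary layout, exactly as InitializeCore decodes them. Both sides of the handoff, condensed from this diff (a sketch, not the full functions):

    /* kernel_ldr side: returns pointers into kernelldr's own (still-mapped) globals. */
    void **GetFinalState() {
        g_initial_page_allocator.GetFinalState(std::addressof(g_final_page_allocator_state));
        g_final_state[0] = std::addressof(g_final_page_allocator_state);
        g_final_state[1] = std::addressof(g_initial_process_binary_layout);
        return g_final_state;
    }

    /* kernel side: copies both objects by value before that memory is reused. */
    void InitializeCore(uintptr_t misc_unk_debug_phys_addr, void **initial_state) {
        const auto initial_page_allocator_state  = *static_cast<KInitialPageAllocator::State *>(initial_state[0]);
        const auto initial_process_binary_layout = *static_cast<InitialProcessBinaryLayout *>(initial_state[1]);
        /* ... */
    }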