kern: refactor init (kill identity map, merge cpu on logic)

Michael Scire 2023-02-21 10:38:48 -07:00 committed by SciresM
parent 8db22967bf
commit 035cebef9d
20 changed files with 431 additions and 387 deletions

View file

@ -29,8 +29,6 @@ namespace ams::kern::init {
u64 sp; u64 sp;
u64 entrypoint; u64 entrypoint;
u64 argument; u64 argument;
u64 setup_function;
u64 exception_stack;
}; };
static_assert(alignof(KInitArguments) == util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)); static_assert(alignof(KInitArguments) == util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE));
static_assert(sizeof(KInitArguments) == std::max(INIT_ARGUMENTS_SIZE, util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE))); static_assert(sizeof(KInitArguments) == std::max(INIT_ARGUMENTS_SIZE, util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)));
@ -45,7 +43,5 @@ namespace ams::kern::init {
static_assert(AMS_OFFSETOF(KInitArguments, sp) == INIT_ARGUMENTS_SP); static_assert(AMS_OFFSETOF(KInitArguments, sp) == INIT_ARGUMENTS_SP);
static_assert(AMS_OFFSETOF(KInitArguments, entrypoint) == INIT_ARGUMENTS_ENTRYPOINT); static_assert(AMS_OFFSETOF(KInitArguments, entrypoint) == INIT_ARGUMENTS_ENTRYPOINT);
static_assert(AMS_OFFSETOF(KInitArguments, argument) == INIT_ARGUMENTS_ARGUMENT); static_assert(AMS_OFFSETOF(KInitArguments, argument) == INIT_ARGUMENTS_ARGUMENT);
static_assert(AMS_OFFSETOF(KInitArguments, setup_function) == INIT_ARGUMENTS_SETUP_FUNCTION);
static_assert(AMS_OFFSETOF(KInitArguments, exception_stack) == INIT_ARGUMENTS_EXCEPTION_STACK);
} }
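With setup_function and exception_stack removed (the per-core setup moves into StartOtherCore/InvokeMain later in this commit, and InvokeMain now derives the exception stack from KMemoryLayout, stashing it in cntv_cval_el0), the struct presumably reduces to the ten u64 fields implied by the INIT_ARGUMENTS_* offsets below; a sketch of the resulting layout:

    struct KInitArguments {
        u64 ttbr0;       /* 0x00 */
        u64 ttbr1;       /* 0x08 */
        u64 tcr;         /* 0x10 */
        u64 mair;        /* 0x18 */
        u64 cpuactlr;    /* 0x20 */
        u64 cpuectlr;    /* 0x28 */
        u64 sctlr;       /* 0x30 */
        u64 sp;          /* 0x38 */
        u64 entrypoint;  /* 0x40 */
        u64 argument;    /* 0x48 */
    };                   /* INIT_ARGUMENTS_SIZE == 0x50 */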

View file

@ -23,17 +23,6 @@
namespace ams::kern::arch::arm64::init { namespace ams::kern::arch::arm64::init {
inline void ClearPhysicalMemory(KPhysicalAddress address, size_t size) {
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, sizeof(u64)));
/* This Physical Address -> void * conversion is valid, because this is init page table code. */
/* The MMU is necessarily not yet turned on, if we are creating an initial page table. */
volatile u64 *ptr = reinterpret_cast<volatile u64 *>(GetInteger(address));
for (size_t i = 0; i < size / sizeof(u64); ++i) {
ptr[i] = 0;
}
}
/* NOTE: Nintendo uses virtual functions, rather than a concept + template. */ /* NOTE: Nintendo uses virtual functions, rather than a concept + template. */
template<typename T> template<typename T>
concept IsInitialPageAllocator = requires (T &t, KPhysicalAddress phys_addr, size_t size) { concept IsInitialPageAllocator = requires (T &t, KPhysicalAddress phys_addr, size_t size) {
@ -41,25 +30,23 @@ namespace ams::kern::arch::arm64::init {
{ t.Free(phys_addr, size) } -> std::same_as<void>; { t.Free(phys_addr, size) } -> std::same_as<void>;
}; };
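Judging from the Free requirement shown here and the Allocate call sites below, a conforming allocator only needs Allocate(size) yielding a KPhysicalAddress and Free(phys_addr, size); a minimal, non-reclaiming sketch (type name illustrative):

    /* Sketch only: a trivial bump allocator that would satisfy IsInitialPageAllocator. */
    class ExampleBumpAllocator {
        private:
            KPhysicalAddress m_next;
        public:
            constexpr explicit ExampleBumpAllocator(KPhysicalAddress start) : m_next(start) { /* ... */ }

            KPhysicalAddress Allocate(size_t size) {
                const KPhysicalAddress allocated = m_next;
                m_next = m_next + size;
                return allocated;
            }

            void Free(KPhysicalAddress phys_addr, size_t size) {
                /* A bump allocator does not reclaim; real allocators keep a free list. */
                AMS_UNUSED(phys_addr, size);
            }
    };
    static_assert(IsInitialPageAllocator<ExampleBumpAllocator>);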
template<IsInitialPageAllocator _PageAllocator> class KInitialPageTable {
class KInitialPageTableTemplate {
public:
using PageAllocator = _PageAllocator;
private: private:
KPhysicalAddress m_l1_tables[2]; KPhysicalAddress m_l1_tables[2];
u32 m_num_entries[2]; u32 m_num_entries[2];
public: public:
KInitialPageTableTemplate(KVirtualAddress start_address, KVirtualAddress end_address, PageAllocator &allocator) { template<IsInitialPageAllocator PageAllocator>
KInitialPageTable(KVirtualAddress start_address, KVirtualAddress end_address, PageAllocator &allocator) {
/* Set tables. */ /* Set tables. */
m_l1_tables[0] = AllocateNewPageTable(allocator); m_l1_tables[0] = AllocateNewPageTable(allocator, 0);
m_l1_tables[1] = AllocateNewPageTable(allocator); m_l1_tables[1] = AllocateNewPageTable(allocator, 0);
/* Set counts. */ /* Set counts. */
m_num_entries[0] = MaxPageTableEntries; m_num_entries[0] = MaxPageTableEntries;
m_num_entries[1] = ((end_address / L1BlockSize) & (MaxPageTableEntries - 1)) - ((start_address / L1BlockSize) & (MaxPageTableEntries - 1)) + 1; m_num_entries[1] = ((end_address / L1BlockSize) & (MaxPageTableEntries - 1)) - ((start_address / L1BlockSize) & (MaxPageTableEntries - 1)) + 1;
} }
KInitialPageTableTemplate() { KInitialPageTable() {
/* Set tables. */ /* Set tables. */
m_l1_tables[0] = util::AlignDown(cpu::GetTtbr0El1(), PageSize); m_l1_tables[0] = util::AlignDown(cpu::GetTtbr0El1(), PageSize);
m_l1_tables[1] = util::AlignDown(cpu::GetTtbr1El1(), PageSize); m_l1_tables[1] = util::AlignDown(cpu::GetTtbr1El1(), PageSize);
@ -82,30 +69,35 @@ namespace ams::kern::arch::arm64::init {
return GetInteger(m_l1_tables[1]); return GetInteger(m_l1_tables[1]);
} }
private: private:
constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KVirtualAddress address) const { constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KVirtualAddress address, u64 phys_to_virt_offset = 0) const {
const size_t index = (GetInteger(address) >> (BITSIZEOF(address) - 1)) & 1; const size_t index = (GetInteger(address) >> (BITSIZEOF(address) - 1)) & 1;
L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[index])); L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[index]) + phys_to_virt_offset);
return l1_table + ((GetInteger(address) / L1BlockSize) & (m_num_entries[index] - 1)); return l1_table + ((GetInteger(address) / L1BlockSize) & (m_num_entries[index] - 1));
} }
static constexpr ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KVirtualAddress address) { static constexpr ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KVirtualAddress address, u64 phys_to_virt_offset = 0) {
L2PageTableEntry *l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(entry->GetTable())); L2PageTableEntry *l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(entry->GetTable()) + phys_to_virt_offset);
return l2_table + ((GetInteger(address) / L2BlockSize) & (MaxPageTableEntries - 1)); return l2_table + ((GetInteger(address) / L2BlockSize) & (MaxPageTableEntries - 1));
} }
static constexpr ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KVirtualAddress address) { static constexpr ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KVirtualAddress address, u64 phys_to_virt_offset = 0) {
L3PageTableEntry *l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(entry->GetTable())); L3PageTableEntry *l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(entry->GetTable()) + phys_to_virt_offset);
return l3_table + ((GetInteger(address) / L3BlockSize) & (MaxPageTableEntries - 1)); return l3_table + ((GetInteger(address) / L3BlockSize) & (MaxPageTableEntries - 1));
} }
static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(PageAllocator &allocator) { template<IsInitialPageAllocator PageAllocator>
static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(PageAllocator &allocator, u64 phys_to_virt_offset) {
auto address = allocator.Allocate(PageSize); auto address = allocator.Allocate(PageSize);
ClearNewPageTable(address); ClearNewPageTable(address, phys_to_virt_offset);
return address; return address;
} }
static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address) { static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address, u64 phys_to_virt_offset) {
ClearPhysicalMemory(address, PageSize); /* Convert to a dereferenceable address, and clear. */
volatile u64 *ptr = reinterpret_cast<volatile u64 *>(GetInteger(address) + phys_to_virt_offset);
for (size_t i = 0; i < PageSize / sizeof(u64); ++i) {
ptr[i] = 0;
}
} }
public: public:
static consteval size_t GetMaximumOverheadSize(size_t size) { static consteval size_t GetMaximumOverheadSize(size_t size) {
@ -327,7 +319,8 @@ namespace ams::kern::arch::arm64::init {
} }
} }
public: public:
void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, PageAllocator &allocator) { template<IsInitialPageAllocator PageAllocator>
void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, PageAllocator &allocator, u64 phys_to_virt_offset) {
/* Ensure that addresses and sizes are page aligned. */ /* Ensure that addresses and sizes are page aligned. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize)); MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
@ -335,7 +328,7 @@ namespace ams::kern::arch::arm64::init {
/* Iteratively map pages until the requested region is mapped. */ /* Iteratively map pages until the requested region is mapped. */
while (size > 0) { while (size > 0) {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr); L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr, phys_to_virt_offset);
/* Can we make an L1 block? */ /* Can we make an L1 block? */
if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) { if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
@ -349,12 +342,12 @@ namespace ams::kern::arch::arm64::init {
/* If we don't already have an L2 table, we need to make a new one. */ /* If we don't already have an L2 table, we need to make a new one. */
if (!l1_entry->IsTable()) { if (!l1_entry->IsTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator); KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
cpu::DataSynchronizationBarrierInnerShareable(); cpu::DataSynchronizationBarrierInnerShareable();
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever()); *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
} }
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr); L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr, phys_to_virt_offset);
/* Can we make a contiguous L2 block? */ /* Can we make a contiguous L2 block? */
if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) { if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) {
@ -380,12 +373,12 @@ namespace ams::kern::arch::arm64::init {
/* If we don't already have an L3 table, we need to make a new one. */ /* If we don't already have an L3 table, we need to make a new one. */
if (!l2_entry->IsTable()) { if (!l2_entry->IsTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator); KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
cpu::DataSynchronizationBarrierInnerShareable(); cpu::DataSynchronizationBarrierInnerShareable();
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever()); *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
} }
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr); L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr, phys_to_virt_offset);
/* Can we make a contiguous L3 block? */ /* Can we make a contiguous L3 block? */
if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) { if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) {
@ -410,6 +403,98 @@ namespace ams::kern::arch::arm64::init {
cpu::DataSynchronizationBarrierInnerShareable(); cpu::DataSynchronizationBarrierInnerShareable();
} }
void UnmapTtbr0Entries(u64 phys_to_virt_offset) {
/* Ensure data consistency before we unmap. */
cpu::DataSynchronizationBarrierInnerShareable();
/* Define helper, as we only want to clear non-nGnRE pages. */
constexpr auto ShouldUnmap = [](const PageTableEntry *entry) ALWAYS_INLINE_LAMBDA -> bool {
return entry->GetPageAttribute() != PageTableEntry::PageAttribute_Device_nGnRE;
};
/* Iterate all L1 entries. */
L1PageTableEntry * const l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[0]) + phys_to_virt_offset);
for (size_t l1_index = 0; l1_index < m_num_entries[0]; l1_index++) {
/* Get L1 entry. */
L1PageTableEntry * const l1_entry = l1_table + l1_index;
if (l1_entry->IsBlock()) {
/* Unmap the L1 entry, if we should. */
if (ShouldUnmap(l1_entry)) {
*static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
}
} else if (l1_entry->IsTable()) {
/* Get the L2 table. */
L2PageTableEntry * const l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(l1_entry->GetTable()) + phys_to_virt_offset);
/* Unmap all L2 entries, as relevant. */
size_t remaining_l2_entries = 0;
for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) {
/* Get L2 entry. */
L2PageTableEntry * const l2_entry = l2_table + l2_index;
if (l2_entry->IsBlock()) {
const size_t num_to_clear = (l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize) / L2BlockSize;
if (ShouldUnmap(l2_entry)) {
for (size_t i = 0; i < num_to_clear; ++i) {
static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
}
} else {
remaining_l2_entries += num_to_clear;
}
l2_index = l2_index + num_to_clear - 1;
} else if (l2_entry->IsTable()) {
/* Get the L3 table. */
L3PageTableEntry * const l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(l2_entry->GetTable()) + phys_to_virt_offset);
/* Unmap all L3 entries, as relevant. */
size_t remaining_l3_entries = 0;
for (size_t l3_index = 0; l3_index < MaxPageTableEntries; ++l3_index) {
/* Get L3 entry. */
if (L3PageTableEntry * const l3_entry = l3_table + l3_index; l3_entry->IsBlock()) {
const size_t num_to_clear = (l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize) / L3BlockSize;
if (ShouldUnmap(l3_entry)) {
for (size_t i = 0; i < num_to_clear; ++i) {
static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
}
} else {
remaining_l3_entries += num_to_clear;
}
l3_index = l3_index + num_to_clear - 1;
}
}
/* If we unmapped all L3 entries, clear the L2 entry. */
if (remaining_l3_entries == 0) {
*static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
/* Invalidate the entire tlb. */
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
} else {
remaining_l2_entries++;
}
}
}
/* If we unmapped all L2 entries, clear the L1 entry. */
if (remaining_l2_entries == 0) {
*static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
/* Invalidate the entire tlb. */
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
}
}
}
/* Invalidate the entire tlb. */
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
}
KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const { KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
/* Get the L1 entry. */ /* Get the L1 entry. */
const L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr); const L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
@ -861,6 +946,4 @@ namespace ams::kern::arch::arm64::init {
}; };
static_assert(IsInitialPageAllocator<KInitialPageAllocator>); static_assert(IsInitialPageAllocator<KInitialPageAllocator>);
using KInitialPageTable = KInitialPageTableTemplate<KInitialPageAllocator>;
} }
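With the allocator type moved off the class and onto the individual member functions, a single KInitialPageTable type now works with any conforming allocator, and the phys_to_virt_offset parameter lets the same table-walking code run before the MMU is on (offset 0, tables dereferenced physically) or afterwards through the linear mapping; a brief usage sketch with illustrative variable names:

    /* Pre-MMU, as in InitializeCorePhase1: physical table pointers are directly dereferenceable. */
    init_pt.Map(virt_addr, size, phys_addr, KernelRwDataAttribute, g_initial_page_allocator, 0);

    /* Post-MMU: adopt the live TTBR0/TTBR1 tables and reach them through the linear map. */
    KInitialPageTable current_pt;
    current_pt.UnmapTtbr0Entries(linear_phys_to_virt_diff);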

View file

@ -246,7 +246,7 @@
#define THREAD_LOCAL_REGION_SIZE 0x200 #define THREAD_LOCAL_REGION_SIZE 0x200
/* ams::kern::init::KInitArguments, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp */ /* ams::kern::init::KInitArguments, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp */
#define INIT_ARGUMENTS_SIZE 0x60 #define INIT_ARGUMENTS_SIZE 0x50
#define INIT_ARGUMENTS_TTBR0 0x00 #define INIT_ARGUMENTS_TTBR0 0x00
#define INIT_ARGUMENTS_TTBR1 0x08 #define INIT_ARGUMENTS_TTBR1 0x08
#define INIT_ARGUMENTS_TCR 0x10 #define INIT_ARGUMENTS_TCR 0x10
@ -257,8 +257,6 @@
#define INIT_ARGUMENTS_SP 0x38 #define INIT_ARGUMENTS_SP 0x38
#define INIT_ARGUMENTS_ENTRYPOINT 0x40 #define INIT_ARGUMENTS_ENTRYPOINT 0x40
#define INIT_ARGUMENTS_ARGUMENT 0x48 #define INIT_ARGUMENTS_ARGUMENT 0x48
#define INIT_ARGUMENTS_SETUP_FUNCTION 0x50
#define INIT_ARGUMENTS_EXCEPTION_STACK 0x58
/* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */ /* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */
/* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */ /* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */

View file

@ -189,6 +189,14 @@ namespace ams::kern::arch::arm64::cpu {
return (par >> (BITSIZEOF(par) - BITSIZEOF(u8))) == 0xFF; return (par >> (BITSIZEOF(par) - BITSIZEOF(u8))) == 0xFF;
} }
ALWAYS_INLINE void StoreDataCacheForInitArguments(const void *addr, size_t size) {
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
for (size_t stored = 0; stored < size; stored += cpu::DataCacheLineSize) {
__asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(start + stored) : "memory");
}
DataSynchronizationBarrier();
}
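This cleans the arguments block by virtual address to the point of coherency, presumably so that a freshly powered-on core, which starts with its MMU and caches disabled, reads the values the booting core just wrote; the call from TurnOnCpu later in this commit is simply:

    /* Ensure cache is correct for the initial arguments (as in KSystemControlBase::Init::TurnOnCpu). */
    cpu::StoreDataCacheForInitArguments(args, sizeof(*args));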
/* Synchronization helpers. */ /* Synchronization helpers. */
NOINLINE void SynchronizeAllCores(); NOINLINE void SynchronizeAllCores();
void SynchronizeCores(u64 core_mask); void SynchronizeCores(u64 core_mask);

View file

@ -161,9 +161,12 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; } constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; } constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
constexpr ALWAYS_INLINE bool IsGlobal() const { return this->GetBits(11, 1) == 0; } constexpr ALWAYS_INLINE bool IsGlobal() const { return this->GetBits(11, 1) == 0; }
constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->GetBits(10, 1)); } constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->SelectBits(10, 1)); }
constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->GetBits(8, 2)); } constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->SelectBits(8, 2)); }
constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->GetBits(2, 3)); } constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->SelectBits(2, 3)); }
constexpr ALWAYS_INLINE int GetAccessFlagInteger() const { return static_cast<int>(this->GetBits(10, 1)); }
constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
constexpr ALWAYS_INLINE int GetPageAttributeInteger() const { return static_cast<int>(this->GetBits(2, 3)); }
constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; } constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; } constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; } constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
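The enum-typed getters now use SelectBits while the new *Integer getters keep GetBits; the split makes sense if GetBits shifts the field down to bit 0 whereas SelectBits masks it in place — an assumption, since neither helper appears in this diff. A sketch of that assumed distinction (member name illustrative):

    constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
        return (m_attributes >> offset) & ((1ul << count) - 1);     /* field shifted down to bit 0 */
    }
    constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
        return m_attributes & (((1ul << count) - 1) << offset);     /* field left in place */
    }

If the PageAttribute/Shareable enumerators encode their fields in place (e.g. Device_nGnRE as a value within bits [4:2]), the enum-typed getters can be compared directly against them — which is what the new UnmapTtbr0Entries does — while the *Integer variants keep the shifted-down value for the logging code updated below.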

View file

@ -25,12 +25,15 @@ namespace ams::kern::board::nintendo::nx {
static constexpr size_t SecureAppletMemorySize = 4_MB; static constexpr size_t SecureAppletMemorySize = 4_MB;
public: public:
class Init : public KSystemControlBase::Init { class Init : public KSystemControlBase::Init {
private:
friend class KSystemControlBase::Init;
private:
static void CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
public: public:
/* Initialization. */ /* Initialization. */
static size_t GetRealMemorySize(); static size_t GetRealMemorySize();
static size_t GetIntendedMemorySize(); static size_t GetIntendedMemorySize();
static bool ShouldIncreaseThreadResourceLimit(); static bool ShouldIncreaseThreadResourceLimit();
static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
static size_t GetApplicationPoolSize(); static size_t GetApplicationPoolSize();
static size_t GetAppletPoolSize(); static size_t GetAppletPoolSize();
static size_t GetMinimumNonSecureSystemPoolSize(); static size_t GetMinimumNonSecureSystemPoolSize();

View file

@ -26,6 +26,6 @@ namespace ams::kern::init {
static_assert(util::IsPowerOfTwo(alignof(KInitArguments)) && util::IsPowerOfTwo(sizeof(KInitArguments))); static_assert(util::IsPowerOfTwo(alignof(KInitArguments)) && util::IsPowerOfTwo(sizeof(KInitArguments)));
KPhysicalAddress GetInitArgumentsAddress(s32 core_id); KInitArguments *GetInitArguments(s32 core_id);
} }

View file

@ -183,10 +183,10 @@ namespace ams::kern {
return std::make_tuple(total_size, kernel_size); return std::make_tuple(total_size, kernel_size);
} }
static void InitializeLinearMemoryAddresses(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start) { static void InitializeLinearMemoryAddresses(u64 phys_to_virt_diff) {
/* Set static differences. */ /* Set static differences. */
s_linear_phys_to_virt_diff = GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start); s_linear_phys_to_virt_diff = phys_to_virt_diff;
s_linear_virt_to_phys_diff = GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start); s_linear_virt_to_phys_diff = -phys_to_virt_diff;
} }
static void InitializeLinearMemoryRegionTrees(); static void InitializeLinearMemoryRegionTrees();
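Since the two statics are exact negatives of each other, conversion in either direction is a single add; a sketch of the implied helpers (assuming these names, which are not part of this hunk):

    static KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) {
        return GetInteger(address) + s_linear_phys_to_virt_diff;
    }
    static KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress address) {
        return GetInteger(address) + s_linear_virt_to_phys_diff;
    }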

View file

@ -21,6 +21,12 @@ namespace ams::kern {
struct InitialProcessBinaryLayout; struct InitialProcessBinaryLayout;
namespace init {
struct KInitArguments;
}
} }
namespace ams::kern { namespace ams::kern {
@ -40,6 +46,8 @@ namespace ams::kern {
static constinit inline KSpinLock s_random_lock; static constinit inline KSpinLock s_random_lock;
public: public:
class Init { class Init {
private:
static void CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
public: public:
/* Initialization. */ /* Initialization. */
static size_t GetRealMemorySize(); static size_t GetRealMemorySize();
@ -47,7 +55,7 @@ namespace ams::kern {
static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address); static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out); static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
static bool ShouldIncreaseThreadResourceLimit(); static bool ShouldIncreaseThreadResourceLimit();
static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg); static void TurnOnCpu(u64 core_id, const ams::kern::init::KInitArguments *args);
static size_t GetApplicationPoolSize(); static size_t GetApplicationPoolSize();
static size_t GetAppletPoolSize(); static size_t GetAppletPoolSize();
static size_t GetMinimumNonSecureSystemPoolSize(); static size_t GetMinimumNonSecureSystemPoolSize();

View file

@ -353,12 +353,12 @@ namespace ams::kern::arch::arm64 {
l1_entry->IsPrivilegedExecuteNever(), l1_entry->IsPrivilegedExecuteNever(),
l1_entry->IsContiguous(), l1_entry->IsContiguous(),
!l1_entry->IsGlobal(), !l1_entry->IsGlobal(),
static_cast<int>(l1_entry->GetAccessFlag()), static_cast<int>(l1_entry->GetAccessFlagInteger()),
static_cast<unsigned int>(l1_entry->GetShareable()), static_cast<unsigned int>(l1_entry->GetShareableInteger()),
l1_entry->IsReadOnly(), l1_entry->IsReadOnly(),
l1_entry->IsUserAccessible(), l1_entry->IsUserAccessible(),
l1_entry->IsNonSecure(), l1_entry->IsNonSecure(),
static_cast<int>(l1_entry->GetPageAttribute()), static_cast<int>(l1_entry->GetPageAttributeInteger()),
l1_entry->IsHeadMergeDisabled(), l1_entry->IsHeadMergeDisabled(),
l1_entry->IsHeadAndBodyMergeDisabled(), l1_entry->IsHeadAndBodyMergeDisabled(),
l1_entry->IsTailMergeDisabled()); l1_entry->IsTailMergeDisabled());
@ -398,12 +398,12 @@ namespace ams::kern::arch::arm64 {
l2_entry->IsPrivilegedExecuteNever(), l2_entry->IsPrivilegedExecuteNever(),
l2_entry->IsContiguous(), l2_entry->IsContiguous(),
!l2_entry->IsGlobal(), !l2_entry->IsGlobal(),
static_cast<int>(l2_entry->GetAccessFlag()), static_cast<int>(l2_entry->GetAccessFlagInteger()),
static_cast<unsigned int>(l2_entry->GetShareable()), static_cast<unsigned int>(l2_entry->GetShareableInteger()),
l2_entry->IsReadOnly(), l2_entry->IsReadOnly(),
l2_entry->IsUserAccessible(), l2_entry->IsUserAccessible(),
l2_entry->IsNonSecure(), l2_entry->IsNonSecure(),
static_cast<int>(l2_entry->GetPageAttribute()), static_cast<int>(l2_entry->GetPageAttributeInteger()),
l2_entry->IsHeadMergeDisabled(), l2_entry->IsHeadMergeDisabled(),
l2_entry->IsHeadAndBodyMergeDisabled(), l2_entry->IsHeadAndBodyMergeDisabled(),
l2_entry->IsTailMergeDisabled()); l2_entry->IsTailMergeDisabled());
@ -443,12 +443,12 @@ namespace ams::kern::arch::arm64 {
l3_entry->IsPrivilegedExecuteNever(), l3_entry->IsPrivilegedExecuteNever(),
l3_entry->IsContiguous(), l3_entry->IsContiguous(),
!l3_entry->IsGlobal(), !l3_entry->IsGlobal(),
static_cast<int>(l3_entry->GetAccessFlag()), static_cast<int>(l3_entry->GetAccessFlagInteger()),
static_cast<unsigned int>(l3_entry->GetShareable()), static_cast<unsigned int>(l3_entry->GetShareableInteger()),
l3_entry->IsReadOnly(), l3_entry->IsReadOnly(),
l3_entry->IsUserAccessible(), l3_entry->IsUserAccessible(),
l3_entry->IsNonSecure(), l3_entry->IsNonSecure(),
static_cast<int>(l3_entry->GetPageAttribute()), static_cast<int>(l3_entry->GetPageAttributeInteger()),
l3_entry->IsHeadMergeDisabled(), l3_entry->IsHeadMergeDisabled(),
l3_entry->IsHeadAndBodyMergeDisabled(), l3_entry->IsHeadAndBodyMergeDisabled(),
l3_entry->IsTailMergeDisabled()); l3_entry->IsTailMergeDisabled());

View file

@ -18,6 +18,12 @@
#include "kern_secure_monitor.hpp" #include "kern_secure_monitor.hpp"
#include "kern_lps_driver.hpp" #include "kern_lps_driver.hpp"
namespace ams::kern::init {
void StartOtherCore(const ams::kern::init::KInitArguments *init_args);
}
namespace ams::kern::board::nintendo::nx { namespace ams::kern::board::nintendo::nx {
namespace { namespace {
@ -67,15 +73,10 @@ namespace ams::kern::board::nintendo::nx {
constinit KLightLock g_request_lock; constinit KLightLock g_request_lock;
constinit KLightLock g_cv_lock; constinit KLightLock g_cv_lock;
constinit KLightConditionVariable g_cv{util::ConstantInitialize}; constinit KLightConditionVariable g_cv{util::ConstantInitialize};
constinit KPhysicalAddress g_sleep_buffer_phys_addrs[cpu::NumCores];
alignas(1_KB) constinit u64 g_sleep_buffers[cpu::NumCores][1_KB / sizeof(u64)]; alignas(1_KB) constinit u64 g_sleep_buffers[cpu::NumCores][1_KB / sizeof(u64)];
constinit ams::kern::init::KInitArguments g_sleep_init_arguments[cpu::NumCores];
constinit SavedSystemRegisters g_sleep_system_registers[cpu::NumCores] = {}; constinit SavedSystemRegisters g_sleep_system_registers[cpu::NumCores] = {};
void PowerOnCpu(int core_id, KPhysicalAddress entry_phys_addr, u64 context_id) {
/* Request the secure monitor power on the core. */
::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, true>(cpu::MultiprocessorAffinityRegisterAccessor().GetCpuOnArgument() | core_id, GetInteger(entry_phys_addr), context_id);
}
void WaitOtherCpuPowerOff() { void WaitOtherCpuPowerOff() {
constexpr u64 PmcPhysicalAddress = 0x7000E400; constexpr u64 PmcPhysicalAddress = 0x7000E400;
constexpr u32 PWRGATE_STATUS_CE123_MASK = ((1u << 3) - 1) << 9; constexpr u32 PWRGATE_STATUS_CE123_MASK = ((1u << 3) - 1) << 9;
@ -473,18 +474,20 @@ namespace ams::kern::board::nintendo::nx {
} }
} }
void KSleepManager::ProcessRequests(uintptr_t buffer) { void KSleepManager::ProcessRequests(uintptr_t sleep_buffer) {
const auto target_fw = GetTargetFirmware(); const auto target_fw = GetTargetFirmware();
const s32 core_id = GetCurrentCoreId(); const s32 core_id = GetCurrentCoreId();
KPhysicalAddress resume_entry_phys_addr = Null<KPhysicalAddress>;
ams::kern::init::KInitArguments * const init_args = g_sleep_init_arguments + core_id;
KPhysicalAddress start_core_phys_addr = Null<KPhysicalAddress>;
KPhysicalAddress init_args_phys_addr = Null<KPhysicalAddress>;
/* Get the physical addresses we'll need. */ /* Get the physical addresses we'll need. */
{ {
MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(g_sleep_buffer_phys_addrs[core_id]), KProcessAddress(buffer))); MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(start_core_phys_addr), KProcessAddress(&::ams::kern::init::StartOtherCore)));
MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(resume_entry_phys_addr), KProcessAddress(&::ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry))); MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(init_args_phys_addr), KProcessAddress(init_args)));
} }
const KPhysicalAddress sleep_buffer_phys_addr = g_sleep_buffer_phys_addrs[core_id];
const u64 target_core_mask = (1ul << core_id); const u64 target_core_mask = (1ul << core_id);
const bool use_legacy_lps_driver = target_fw < TargetFirmware_2_0_0; const bool use_legacy_lps_driver = target_fw < TargetFirmware_2_0_0;
@ -547,15 +550,29 @@ namespace ams::kern::board::nintendo::nx {
/* Save the interrupt manager's state. */ /* Save the interrupt manager's state. */
Kernel::GetInterruptManager().Save(core_id); Kernel::GetInterruptManager().Save(core_id);
/* Setup the initial arguments. */
{
init_args->ttbr0 = cpu::GetTtbr0El1();
init_args->ttbr1 = cpu::GetTtbr1El1();
init_args->tcr = cpu::GetTcrEl1();
init_args->mair = cpu::GetMairEl1();
init_args->cpuactlr = cpu::GetCpuActlrEl1();
init_args->cpuectlr = cpu::GetCpuEctlrEl1();
init_args->sctlr = cpu::GetSctlrEl1();
init_args->sp = 0;
init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry);
init_args->argument = sleep_buffer;
}
/* Ensure that all cores get to this point before continuing. */ /* Ensure that all cores get to this point before continuing. */
cpu::SynchronizeAllCores(); cpu::SynchronizeAllCores();
/* Log that the core is going to sleep. */ /* Log that the core is going to sleep. */
MESOSPHERE_LOG("Core[%d]: Going to sleep, buffer = %010lx\n", core_id, GetInteger(sleep_buffer_phys_addr)); MESOSPHERE_LOG("Core[%d]: Going to sleep, buffer = %010lx\n", core_id, sleep_buffer);
/* If we're on a core other than zero, we can just invoke the sleep handler. */ /* If we're on a core other than zero, we can just invoke the sleep handler. */
if (core_id != 0) { if (core_id != 0) {
CpuSleepHandler(GetInteger(sleep_buffer_phys_addr), GetInteger(resume_entry_phys_addr)); CpuSleepHandler(sleep_buffer, GetInteger(start_core_phys_addr), GetInteger(init_args_phys_addr));
} else { } else {
/* Wait for all other cores to be powered off. */ /* Wait for all other cores to be powered off. */
WaitOtherCpuPowerOff(); WaitOtherCpuPowerOff();
@ -574,9 +591,9 @@ namespace ams::kern::board::nintendo::nx {
/* Invoke the sleep handler. */ /* Invoke the sleep handler. */
if (!use_legacy_lps_driver) { if (!use_legacy_lps_driver) {
/* When not using the legacy driver, invoke directly. */ /* When not using the legacy driver, invoke directly. */
CpuSleepHandler(GetInteger(sleep_buffer_phys_addr), GetInteger(resume_entry_phys_addr)); CpuSleepHandler(sleep_buffer, GetInteger(start_core_phys_addr), GetInteger(init_args_phys_addr));
} else { } else {
lps::InvokeCpuSleepHandler(GetInteger(sleep_buffer_phys_addr), GetInteger(resume_entry_phys_addr)); lps::InvokeCpuSleepHandler(sleep_buffer, GetInteger(start_core_phys_addr), GetInteger(init_args_phys_addr));
} }
/* Restore the debug log state. */ /* Restore the debug log state. */
@ -586,8 +603,10 @@ namespace ams::kern::board::nintendo::nx {
MESOSPHERE_LOG("Exiting SC7\n"); MESOSPHERE_LOG("Exiting SC7\n");
/* Wake up the other cores. */ /* Wake up the other cores. */
cpu::MultiprocessorAffinityRegisterAccessor mpidr;
const auto arg = mpidr.GetCpuOnArgument();
for (s32 i = 1; i < static_cast<s32>(cpu::NumCores); ++i) { for (s32 i = 1; i < static_cast<s32>(cpu::NumCores); ++i) {
PowerOnCpu(i, resume_entry_phys_addr, GetInteger(g_sleep_buffer_phys_addrs[i])); KSystemControl::Init::TurnOnCpu(arg | i, g_sleep_init_arguments + i);
} }
} }

View file

@ -22,14 +22,12 @@ namespace ams::kern::board::nintendo::nx {
private: private:
static void ResumeEntry(uintptr_t arg); static void ResumeEntry(uintptr_t arg);
static void InvalidateDataCacheForResumeEntry(uintptr_t level);
static void ProcessRequests(uintptr_t buffer); static void ProcessRequests(uintptr_t buffer);
public: public:
static void Initialize(); static void Initialize();
static void SleepSystem(); static void SleepSystem();
public: public:
static void CpuSleepHandler(uintptr_t arg, uintptr_t entry); static void CpuSleepHandler(uintptr_t arg, uintptr_t entry, uintptr_t entry_args);
}; };

View file

@ -22,14 +22,14 @@
mov reg, #(((val) >> 0x00) & 0xFFFF); \ mov reg, #(((val) >> 0x00) & 0xFFFF); \
movk reg, #(((val) >> 0x10) & 0xFFFF), lsl#16 movk reg, #(((val) >> 0x10) & 0xFFFF), lsl#16
/* ams::kern::board::nintendo::nx::KSleepManager::CpuSleepHandler(uintptr_t arg, uintptr_t entry) */ /* ams::kern::board::nintendo::nx::KSleepManager::CpuSleepHandler(uintptr_t arg, uintptr_t entry, uintptr_t entry_arg) */
.section .sleep._ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm, "ax", %progbits .section .sleep._ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmmm, "ax", %progbits
.global _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm .global _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmmm
.type _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm, %function .type _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmmm, %function
_ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm: _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmmm:
/* Save arguments. */ /* Save arguments. */
mov x16, x0 mov x16, x1
mov x17, x1 mov x17, x2
/* Enable access to FPU registers. */ /* Enable access to FPU registers. */
mrs x1, cpacr_el1 mrs x1, cpacr_el1
@ -74,28 +74,8 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm:
stp q28, q29, [x0], #0x20 stp q28, q29, [x0], #0x20
stp q30, q31, [x0], #0x20 stp q30, q31, [x0], #0x20
/* Save cpuactlr/cpuectlr. */ /* Save tpidr/cntv_cval_el0. */
mrs x1, cpuectlr_el1 mrs x1, tpidr_el1
mrs x2, cpuactlr_el1
stp x1, x2, [x0], #0x10
/* Save ttbr0/ttbr1. */
mrs x1, ttbr0_el1
mrs x2, ttbr1_el1
stp x1, x2, [x0], #0x10
/* Save tcr/mair. */
mrs x1, tcr_el1
mrs x2, mair_el1
stp x1, x2, [x0], #0x10
/* Save sctlr/tpidr. */
mrs x1, sctlr_el1
mrs x2, tpidr_el1
stp x1, x2, [x0], #0x10
/* Save the virtual resumption entrypoint and cntv_cval_el0. */
adr x1, 77f
mrs x2, cntv_cval_el0 mrs x2, cntv_cval_el0
stp x1, x2, [x0], #0x10 stp x1, x2, [x0], #0x10
@ -114,8 +94,8 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm:
1: /* Suspend. */ 1: /* Suspend. */
LOAD_IMMEDIATE_32(x0, 0xC4000001) LOAD_IMMEDIATE_32(x0, 0xC4000001)
LOAD_IMMEDIATE_32(x1, 0x0201001B) LOAD_IMMEDIATE_32(x1, 0x0201001B)
mov x2, x17 mov x2, x16
mov x3, x16 mov x3, x17
smc #1 smc #1
0: b 0b 0: b 0b
@ -124,65 +104,6 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm:
.global _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm .global _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm
.type _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm, %function .type _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm, %function
_ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm: _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm:
/* Mask interrupts. */
msr daifset, #0xF
/* Save the argument. */
mov x21, x0
/* Check that we're at the correct exception level. */
mrs x0, currentel
/* Check if we're EL1. */
cmp x0, #0x4
b.eq 3f
/* Check if we're EL2. */
cmp x0, #0x8
b.eq 2f
1: /* We're running at EL3. */
b 1b
2: /* We're running at EL2. */
b 2b
3: /* We're running at EL1. */
/* Invalidate the L1 cache. */
mov x0, #0
bl _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm
/* Get the current core id. */
mrs x0, mpidr_el1
and x0, x0, #0xFF
/* If we're on core0, we want to invalidate the L2 cache. */
cbnz x0, 4f
mov x0, #1
bl _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm
4: /* Invalidate the L1 cache. */
mov x0, #0
bl _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm
/* Invalidate the instruction cache. */
ic ialluis
dsb sy
isb
/* Invalidate the entire tlb. */
tlbi vmalle1is
dsb sy
isb
/* Switch to sp 1. */
msr spsel, #1
/* Prepare to restore the saved context. */
mov x0, x21
/* Enable access to FPU registers. */ /* Enable access to FPU registers. */
mrs x1, cpacr_el1 mrs x1, cpacr_el1
orr x1, x1, #0x100000 orr x1, x1, #0x100000
@ -226,121 +147,12 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm:
ldp q28, q29, [x0], #0x20 ldp q28, q29, [x0], #0x20
ldp q30, q31, [x0], #0x20 ldp q30, q31, [x0], #0x20
/* Restore cpuactlr/cpuectlr. */ /* Restore tpidr/cntv_cval_el0. */
ldp x1, x2, [x0], #0x10 ldp x1, x2, [x0], #0x10
mrs x3, cpuectlr_el1 msr tpidr_el1, x1
cmp x1, x3 msr cntv_cval_el0, x2
5: b.ne 5b
mrs x3, cpuactlr_el1
cmp x2, x3
6: b.ne 6b
/* Restore ttbr0/ttbr1. */
ldp x1, x2, [x0], #0x10
msr ttbr0_el1, x1
msr ttbr1_el1, x2
/* Restore tcr/mair. */
ldp x1, x2, [x0], #0x10
msr tcr_el1, x1
msr mair_el1, x2
/* Get sctlr, tpidr, the entrypoint, and cntv_cval_el0. */
ldp x1, x2, [x0], #0x10
ldp x3, x4, [x0], #0x10
/* Set the global context back into x18/tpidr. */
msr tpidr_el1, x2
msr cntv_cval_el0, x4
dsb sy
isb
/* Restore sctlr with the wxn bit cleared. */
bic x2, x1, #0x80000
msr sctlr_el1, x2
dsb sy
isb
/* Jump to the entrypoint. */
br x3
77: /* Virtual resumption entrypoint. */
/* Restore sctlr. */
msr sctlr_el1, x1
dsb sy dsb sy
isb isb
ret /* Return. */
/* ams::kern::board::nintendo::nx::KSleepManager::InvalidateDataCacheForResumeEntry(uintptr_t level) */
.section .sleep._ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm, "ax", %progbits
.global _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm
.type _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm, %function
_ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm:
/* cpu::DataSynchronizationBarrier(); */
dsb sy
/* const u64 level_sel_value = level << 1; */
lsl x8, x0, #1
/* cpu::SetCsselrEl1(level_sel_value); */
msr csselr_el1, x8
/* cpu::InstructionMemoryBarrier(); */
isb
/* CacheSizeIdAccessor ccsidr_el1; */
mrs x13, ccsidr_el1
/* const int num_ways = ccsidr_el1.GetAssociativity(); */
ubfx w10, w13, #3, #0xA
/* const int line_size = ccsidr_el1.GetLineSize(); */
and w11, w13, #7
/* const int num_sets = ccsidr_el1.GetNumberOfSets(); */
ubfx w13, w13, #0xD, #0xF
/* int way = 0; */
mov w9, wzr
/* const u64 set_shift = static_cast<u64>(line_size + 4); */
add w11, w11, #4
/* const u64 way_shift = static_cast<u64>(__builtin_clz(num_ways)); */
clz w12, w10
0: /* do { */
/* int set = 0; */
mov w14, wzr
/* const u64 way_value = (static_cast<u64>(way) << way_shift); */
lsl w15, w9, w12
1: /* do { */
/* const u64 isw_value = (static_cast<u64>(set) << set_shift) | way_value | level_sel_value; */
lsl w16, w14, w11
orr w16, w16, w15
sxtw x16, w16
orr x16, x16, x8
/* __asm__ __volatile__("dc isw, %0" :: "r"(isw_value) : "memory"); */
dc isw, x16
/* while (set <= num_sets); */
cmp w13, w14
add w14, w14, #1
b.ne 1b
/* while (way <= num_ways); */
cmp w9, w10
add w9, w9, #1
b.ne 0b
/* cpu::EnsureInstructionConsistency(); */
dsb sy
isb
ret ret

View file

@ -382,7 +382,7 @@ namespace ams::kern::board::nintendo::nx {
return static_cast<u8>((value >> 32) & 0xFF); return static_cast<u8>((value >> 32) & 0xFF);
} }
void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { void KSystemControl::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, false>(core_id, entrypoint, arg)) == 0); MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, false>(core_id, entrypoint, arg)) == 0);
} }

View file

@ -395,7 +395,7 @@ namespace ams::kern::board::nintendo::nx::lps {
R_SUCCEED(); R_SUCCEED();
} }
void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry) { void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry, uintptr_t entry_arg) {
/* Verify that we're allowed to perform suspension. */ /* Verify that we're allowed to perform suspension. */
MESOSPHERE_ABORT_UNLESS(g_lps_init_done); MESOSPHERE_ABORT_UNLESS(g_lps_init_done);
MESOSPHERE_ABORT_UNLESS(GetCurrentCoreId() == 0); MESOSPHERE_ABORT_UNLESS(GetCurrentCoreId() == 0);
@ -416,7 +416,7 @@ namespace ams::kern::board::nintendo::nx::lps {
Read(g_pmc_address + APBDEV_PMC_SCRATCH0); Read(g_pmc_address + APBDEV_PMC_SCRATCH0);
/* Invoke the sleep handler. */ /* Invoke the sleep handler. */
KSleepManager::CpuSleepHandler(arg, entry); KSleepManager::CpuSleepHandler(arg, entry, entry_arg);
/* Disable deep power down. */ /* Disable deep power down. */
Write(g_pmc_address + APBDEV_PMC_DPD_ENABLE, 0); Write(g_pmc_address + APBDEV_PMC_DPD_ENABLE, 0);

View file

@ -22,7 +22,7 @@ namespace ams::kern::board::nintendo::nx {
void Initialize(); void Initialize();
Result EnableSuspend(bool enable); Result EnableSuspend(bool enable);
void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry); void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry, uintptr_t entry_arg);
void ResumeBpmpFirmware(); void ResumeBpmpFirmware();
} }

View file

@ -20,6 +20,13 @@
namespace ams::kern { namespace ams::kern {
namespace init {
/* TODO: Is this function name architecture specific? */
void StartOtherCore(const ams::kern::init::KInitArguments *init_args);
}
/* Initialization. */ /* Initialization. */
size_t KSystemControlBase::Init::GetRealMemorySize() { size_t KSystemControlBase::Init::GetRealMemorySize() {
return ams::kern::MainMemorySize; return ams::kern::MainMemorySize;
@ -68,7 +75,7 @@ namespace ams::kern {
return 0; return 0;
} }
void KSystemControlBase::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { void KSystemControlBase::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
#if defined(ATMOSPHERE_ARCH_ARM64) #if defined(ATMOSPHERE_ARCH_ARM64)
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0, false>(core_id, entrypoint, arg)) == 0); MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0, false>(core_id, entrypoint, arg)) == 0);
#else #else
@ -76,6 +83,22 @@ namespace ams::kern {
#endif #endif
} }
void KSystemControlBase::Init::TurnOnCpu(u64 core_id, const ams::kern::init::KInitArguments *args) {
/* Get entrypoint. */
KPhysicalAddress entrypoint = Null<KPhysicalAddress>;
while (!cpu::GetPhysicalAddressReadable(std::addressof(entrypoint), reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore), true)) { /* ... */ }
/* Get arguments. */
KPhysicalAddress args_addr = Null<KPhysicalAddress>;
while (!cpu::GetPhysicalAddressReadable(std::addressof(args_addr), reinterpret_cast<uintptr_t>(args), true)) { /* ... */ }
/* Ensure cache is correct for the initial arguments. */
cpu::StoreDataCacheForInitArguments(args, sizeof(*args));
/* Turn on the cpu. */
KSystemControl::Init::CpuOnImpl(core_id, GetInteger(entrypoint), GetInteger(args_addr));
}
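Both the cold-boot path (TurnOnAllCores) and the sleep-resume path in KSleepManager now funnel through this one helper, with each core's register state carried in a KInitArguments block instead of being re-derived in assembly; the call pattern from those sites is:

    cpu::MultiprocessorAffinityRegisterAccessor mpidr;
    const auto arg = mpidr.GetCpuOnArgument();
    for (s32 i = 1; i < static_cast<s32>(cpu::NumCores); ++i) {
        /* g_init_arguments at boot, g_sleep_init_arguments on resume. */
        KSystemControl::Init::TurnOnCpu(arg | i, g_init_arguments + i);
    }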
/* Randomness for Initialization. */ /* Randomness for Initialization. */
void KSystemControlBase::Init::GenerateRandom(u64 *dst, size_t count) { void KSystemControlBase::Init::GenerateRandom(u64 *dst, size_t count) {
if (AMS_UNLIKELY(!s_initialized_random_generator)) { if (AMS_UNLIKELY(!s_initialized_random_generator)) {

View file

@ -29,48 +29,63 @@ namespace ams::kern::init {
/* Prototypes for functions declared in ASM that we need to reference. */ /* Prototypes for functions declared in ASM that we need to reference. */
void StartOtherCore(const ams::kern::init::KInitArguments *init_args); void StartOtherCore(const ams::kern::init::KInitArguments *init_args);
void IdentityMappedFunctionAreaBegin();
void IdentityMappedFunctionAreaEnd();
size_t GetMiscUnknownDebugRegionSize(); size_t GetMiscUnknownDebugRegionSize();
void InitializeDebugRegisters();
void InitializeExceptionVectors();
namespace { namespace {
/* Global Allocator. */ /* Global Allocator. */
constinit KInitialPageAllocator g_initial_page_allocator; constinit KInitialPageAllocator g_initial_page_allocator;
/* Global initial arguments array. */
constinit KPhysicalAddress g_init_arguments_phys_addr[cpu::NumCores];
constinit KInitArguments g_init_arguments[cpu::NumCores]; constinit KInitArguments g_init_arguments[cpu::NumCores];
/* Globals for passing data between InitializeCorePhase1 and InitializeCorePhase2. */
constinit InitialProcessBinaryLayout g_phase2_initial_process_binary_layout{};
constinit KPhysicalAddress g_phase2_resource_end_phys_addr = Null<KPhysicalAddress>;
constinit u64 g_phase2_linear_region_phys_to_virt_diff = 0;
/* Page table attributes. */ /* Page table attributes. */
constexpr PageTableEntry KernelTextAttribute(PageTableEntry::Permission_KernelRX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
constexpr PageTableEntry KernelMmioAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_Device_nGnRE, PageTableEntry::Shareable_OuterShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry KernelMmioAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_Device_nGnRE, PageTableEntry::Shareable_OuterShareable, PageTableEntry::MappingFlag_Mapped);
constexpr PageTableEntry KernelRwDataUncachedAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemoryNotCacheable, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry KernelRwDataUncachedAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemoryNotCacheable, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
void StoreDataCache(const void *addr, size_t size) { void TurnOnAllCores() {
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), cpu::DataCacheLineSize);
for (size_t stored = 0; stored < size; stored += cpu::DataCacheLineSize) {
__asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(start + stored) : "memory");
}
cpu::DataSynchronizationBarrier();
}
void TurnOnAllCores(uintptr_t start_other_core_phys) {
cpu::MultiprocessorAffinityRegisterAccessor mpidr; cpu::MultiprocessorAffinityRegisterAccessor mpidr;
const auto arg = mpidr.GetCpuOnArgument(); const auto arg = mpidr.GetCpuOnArgument();
const auto current_core = mpidr.GetAff0(); const auto current_core = mpidr.GetAff0();
for (s32 i = 0; i < static_cast<s32>(cpu::NumCores); i++) { for (s32 i = 0; i < static_cast<s32>(cpu::NumCores); i++) {
if (static_cast<s32>(current_core) != i) { if (static_cast<s32>(current_core) != i) {
KSystemControl::Init::CpuOn(arg | i, start_other_core_phys, GetInteger(g_init_arguments_phys_addr[i])); KSystemControl::Init::TurnOnCpu(arg | i, g_init_arguments + i);
} }
} }
} }
void SetupInitialArguments(KInitialPageTable &init_pt, KInitialPageAllocator &allocator) { void InvokeMain(u64 core_id) {
AMS_UNUSED(init_pt, allocator); /* Clear cpacr_el1. */
cpu::SetCpacrEl1(0);
cpu::InstructionMemoryBarrier();
/* Initialize registers. */
InitializeDebugRegisters();
InitializeExceptionVectors();
/* Set exception stack. */
cpu::SetCntvCvalEl0(GetInteger(KMemoryLayout::GetExceptionStackTopAddress(static_cast<s32>(core_id))) - sizeof(KThread::StackParameters));
/* Call main. */
HorizonKernelMain(static_cast<s32>(core_id));
}
void SetupInitialArguments() {
/* Get parameters for initial arguments. */ /* Get parameters for initial arguments. */
const u64 ttbr0 = cpu::GetTtbr0El1(); const u64 ttbr0 = cpu::GetTtbr0El1();
const u64 ttbr1 = cpu::GetTtbr1El1(); const u64 ttbr1 = cpu::GetTtbr1El1();
@ -84,13 +99,6 @@ namespace ams::kern::init {
/* Get the arguments. */ /* Get the arguments. */
KInitArguments *init_args = g_init_arguments + i; KInitArguments *init_args = g_init_arguments + i;
/* Translate to a physical address. */
/* KPhysicalAddress phys_addr = Null<KPhysicalAddress>; */
/* if (cpu::GetPhysicalAddressWritable(std::addressof(phys_addr), KVirtualAddress(init_args), true)) { */
/* g_init_arguments_phys_addr[i] = phys_addr; */
/* } */
g_init_arguments_phys_addr[i] = init_pt.GetPhysicalAddress(KVirtualAddress(init_args));
/* Set the arguments. */ /* Set the arguments. */
init_args->ttbr0 = ttbr0; init_args->ttbr0 = ttbr0;
init_args->ttbr1 = ttbr1; init_args->ttbr1 = ttbr1;
@ -100,14 +108,9 @@ namespace ams::kern::init {
init_args->cpuectlr = cpuectlr; init_args->cpuectlr = cpuectlr;
init_args->sctlr = sctlr; init_args->sctlr = sctlr;
init_args->sp = GetInteger(KMemoryLayout::GetMainStackTopAddress(i)) - sizeof(KThread::StackParameters); init_args->sp = GetInteger(KMemoryLayout::GetMainStackTopAddress(i)) - sizeof(KThread::StackParameters);
init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::HorizonKernelMain); init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::init::InvokeMain);
init_args->argument = static_cast<u64>(i); init_args->argument = static_cast<u64>(i);
init_args->setup_function = reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore);
init_args->exception_stack = GetInteger(KMemoryLayout::GetExceptionStackTopAddress(i)) - sizeof(KThread::StackParameters);
} }
/* Ensure the arguments are written to memory. */
StoreDataCache(g_init_arguments, sizeof(g_init_arguments));
} }
KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, KInitialPageTable &pt, KMemoryRegionTree &tree, u32 type_id, size_t guard_size) { KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, KInitialPageTable &pt, KMemoryRegionTree &tree, u32 type_id, size_t guard_size) {
@ -167,18 +170,83 @@ namespace ams::kern::init {
const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate(PageSize); const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate(PageSize);
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id)); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id));
page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator); page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator, 0);
}
class KInitialPageAllocatorForFinalizeIdentityMapping final {
private:
struct FreeListEntry {
FreeListEntry *next;
};
private:
FreeListEntry *m_free_list_head;
u64 m_phys_to_virt_offset;
public:
template<kern::arch::arm64::init::IsInitialPageAllocator PageAllocator>
KInitialPageAllocatorForFinalizeIdentityMapping(PageAllocator &allocator, u64 phys_to_virt) : m_free_list_head(nullptr), m_phys_to_virt_offset(phys_to_virt) {
/* Allocate and free two pages. */
for (size_t i = 0; i < 2; ++i) {
KPhysicalAddress page = allocator.Allocate(PageSize);
MESOSPHERE_INIT_ABORT_UNLESS(page != Null<KPhysicalAddress>);
/* Free the pages. */
this->Free(page, PageSize);
}
}
public:
KPhysicalAddress Allocate(size_t size) {
/* Check that the size is correct. */
MESOSPHERE_INIT_ABORT_UNLESS(size == PageSize);
/* Check that we have a free page. */
FreeListEntry *head = m_free_list_head;
MESOSPHERE_INIT_ABORT_UNLESS(head != nullptr);
/* Update the free list. */
m_free_list_head = head->next;
/* Return the page. */
return KPhysicalAddress(reinterpret_cast<uintptr_t>(head) - m_phys_to_virt_offset);
}
void Free(KPhysicalAddress phys_addr, size_t size) {
/* Check that the size is correct. */
MESOSPHERE_INIT_ABORT_UNLESS(size == PageSize);
/* Convert to a free list entry. */
FreeListEntry *fl = reinterpret_cast<FreeListEntry *>(GetInteger(phys_addr) + m_phys_to_virt_offset);
/* Insert into free list. */
fl->next = m_free_list_head;
m_free_list_head = fl;
}
};
static_assert(kern::arch::arm64::init::IsInitialPageAllocator<KInitialPageAllocatorForFinalizeIdentityMapping>);
void FinalizeIdentityMapping(KInitialPageTable &init_pt, KInitialPageAllocator &allocator, u64 phys_to_virt_offset) {
/* Create an allocator for identity mapping finalization. */
KInitialPageAllocatorForFinalizeIdentityMapping finalize_allocator(allocator, phys_to_virt_offset);
/* Get the physical address of crt0. */
const KPhysicalAddress start_phys_addr = init_pt.GetPhysicalAddress(reinterpret_cast<uintptr_t>(::ams::kern::init::IdentityMappedFunctionAreaBegin));
/* Unmap the entire identity mapping. */
init_pt.UnmapTtbr0Entries(phys_to_virt_offset);
/* Re-map only the identity-mapped function area. */
const size_t size = util::AlignUp<size_t>(reinterpret_cast<uintptr_t>(::ams::kern::init::IdentityMappedFunctionAreaEnd) - reinterpret_cast<uintptr_t>(::ams::kern::init::IdentityMappedFunctionAreaBegin), PageSize);
init_pt.Map(KVirtualAddress(GetInteger(start_phys_addr)), size, start_phys_addr, KernelTextAttribute, finalize_allocator, phys_to_virt_offset);
} }
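The exact call site is outside this excerpt; presumably it is reached from the later init phase once the kernel executes from its virtual mapping, using the g_phase2_* globals saved above, roughly:

    /* Assumption: called after the switch to virtual addressing. */
    KInitialPageTable init_pt;  /* adopts the live ttbr0_el1/ttbr1_el1 tables */
    FinalizeIdentityMapping(init_pt, g_initial_page_allocator, g_phase2_linear_region_phys_to_virt_diff);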
} }
void InitializeCore(uintptr_t misc_unk_debug_phys_addr, void **initial_state) { void InitializeCorePhase1(uintptr_t misc_unk_debug_phys_addr, void **initial_state) {
/* Ensure our first argument is page aligned. */ /* Ensure our first argument is page aligned. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(misc_unk_debug_phys_addr, PageSize)); MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(misc_unk_debug_phys_addr, PageSize));
/* Decode the initial state. */ /* Decode the initial state. */
const auto initial_page_allocator_state = *static_cast<KInitialPageAllocator::State *>(initial_state[0]); const auto initial_page_allocator_state = *static_cast<KInitialPageAllocator::State *>(initial_state[0]);
const auto initial_process_binary_layout = *static_cast<InitialProcessBinaryLayout *>(initial_state[1]); g_phase2_initial_process_binary_layout = *static_cast<InitialProcessBinaryLayout *>(initial_state[1]);
/* Restore the page allocator state setup by kernel loader. */ /* Restore the page allocator state setup by kernel loader. */
g_initial_page_allocator.InitializeFromState(std::addressof(initial_page_allocator_state)); g_initial_page_allocator.InitializeFromState(std::addressof(initial_page_allocator_state));
@ -343,7 +411,7 @@ namespace ams::kern::init {
largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr)); largest->SetPairAddress(GetInteger(map_virt_addr) + largest->GetAddress() - GetInteger(map_phys_addr));
/* Map the page in to our page table. */ /* Map the page in to our page table. */
init_pt.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator); init_pt.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator, 0);
} }
} while (largest != nullptr); } while (largest != nullptr);
} }
@ -400,7 +468,7 @@ namespace ams::kern::init {
/* Map the page in to our page table. */ /* Map the page in to our page table. */
const auto attribute = largest->HasTypeAttribute(KMemoryRegionAttr_Uncached) ? KernelRwDataUncachedAttribute : KernelRwDataAttribute; const auto attribute = largest->HasTypeAttribute(KMemoryRegionAttr_Uncached) ? KernelRwDataUncachedAttribute : KernelRwDataAttribute;
init_pt.Map(map_virt_addr, map_size, map_phys_addr, attribute, g_initial_page_allocator); init_pt.Map(map_virt_addr, map_size, map_phys_addr, attribute, g_initial_page_allocator, 0);
} }
} while (largest != nullptr); } while (largest != nullptr);
} }
@ -412,7 +480,7 @@ namespace ams::kern::init {
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab)); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
/* Map the slab region. */ /* Map the slab region. */
init_pt.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator); init_pt.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator, 0);
/* Physically randomize the slab region. */ /* Physically randomize the slab region. */
/* NOTE: Nintendo does this only on 10.0.0+ */ /* NOTE: Nintendo does this only on 10.0.0+ */
@ -426,6 +494,8 @@ namespace ams::kern::init {
/* Determine size available for kernel page table heaps. */ /* Determine size available for kernel page table heaps. */
const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size; const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
g_phase2_resource_end_phys_addr = resource_end_phys_addr;
const size_t page_table_heap_size = GetInteger(resource_end_phys_addr) - GetInteger(secure_applet_end_phys_addr); const size_t page_table_heap_size = GetInteger(resource_end_phys_addr) - GetInteger(secure_applet_end_phys_addr);
/* Insert a physical region for the kernel page table heap region */ /* Insert a physical region for the kernel page table heap region */
@ -472,7 +542,7 @@ namespace ams::kern::init {
cur_size += region.GetSize(); cur_size += region.GetSize();
} else { } else {
const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff; const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff;
init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator); init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator, 0);
cur_phys_addr = region.GetAddress(); cur_phys_addr = region.GetAddress();
cur_size = region.GetSize(); cur_size = region.GetSize();
} }
@ -491,7 +561,7 @@ namespace ams::kern::init {
/* Map the last block, which we may have skipped. */ /* Map the last block, which we may have skipped. */
if (cur_size != 0) { if (cur_size != 0) {
const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff; const uintptr_t cur_virt_addr = cur_phys_addr + linear_region_phys_to_virt_diff;
init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator); init_pt.Map(cur_virt_addr, cur_size, cur_phys_addr, KernelRwDataAttribute, g_initial_page_allocator, 0);
} }
} }
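/* A worked example of the coalescing above, assuming two physically contiguous DRAM regions:            */
/*     A = [0x80000000, +32 MiB), B = [0x82000000, +32 MiB)                                               */
/* B begins exactly where A ends, so cur_size grows to 64 MiB and a single Map() call covers both at     */
/*     virt = 0x80000000 + linear_region_phys_to_virt_diff                                                */
/* A region that does not start where the previous run ends flushes the run with Map() and starts a new  */
/* run at its own address.                                                                                */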
@ -510,23 +580,34 @@ namespace ams::kern::init {
} }
/* Setup the initial arguments. */ /* Setup the initial arguments. */
SetupInitialArguments(init_pt, g_initial_page_allocator); SetupInitialArguments();
/* Set linear difference for Phase2. */
g_phase2_linear_region_phys_to_virt_diff = linear_region_phys_to_virt_diff;
}
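/* Phase 1 runs on the early boot stack, while Phase 2 below runs on the stack taken from the core 0     */
/* init arguments (see the crt0 changes below); anything Phase 2 still needs is therefore parked in      */
/* globals. A sketch of that handoff state (the constinit form is an assumption; only the names and      */
/* assignments appear above):                                                                             */
/*                                                                                                        */
/*     constinit InitialProcessBinaryLayout g_phase2_initial_process_binary_layout = {};                  */
/*     constinit KPhysicalAddress           g_phase2_resource_end_phys_addr = Null<KPhysicalAddress>;     */
/*     constinit u64                        g_phase2_linear_region_phys_to_virt_diff = 0;                 */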
void InitializeCorePhase2() {
/* Create page table object for use during remaining initialization. */
KInitialPageTable init_pt;
/* Unmap the identity mapping. */
FinalizeIdentityMapping(init_pt, g_initial_page_allocator, g_phase2_linear_region_phys_to_virt_diff);
/* Finalize the page allocator, we're done allocating at this point. */ /* Finalize the page allocator, we're done allocating at this point. */
KInitialPageAllocator::State final_init_page_table_state; KInitialPageAllocator::State final_init_page_table_state;
g_initial_page_allocator.GetFinalState(std::addressof(final_init_page_table_state)); g_initial_page_allocator.GetFinalState(std::addressof(final_init_page_table_state));
const KPhysicalAddress final_init_page_table_end_address = final_init_page_table_state.end_address; const KPhysicalAddress final_init_page_table_end_address = final_init_page_table_state.end_address;
const size_t init_page_table_region_size = GetInteger(final_init_page_table_end_address) - GetInteger(resource_end_phys_addr); const size_t init_page_table_region_size = GetInteger(final_init_page_table_end_address) - GetInteger(g_phase2_resource_end_phys_addr);
/* Insert regions for the initial page table region. */ /* Insert regions for the initial page table region. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(resource_end_phys_addr), init_page_table_region_size, KMemoryRegionType_DramKernelInitPt)); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(g_phase2_resource_end_phys_addr), init_page_table_region_size, KMemoryRegionType_DramKernelInitPt));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(resource_end_phys_addr) + linear_region_phys_to_virt_diff, init_page_table_region_size, KMemoryRegionType_VirtualDramKernelInitPt)); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(g_phase2_resource_end_phys_addr) + g_phase2_linear_region_phys_to_virt_diff, init_page_table_region_size, KMemoryRegionType_VirtualDramKernelInitPt));
/* Insert a physical region for the kernel trace buffer */ /* Insert a physical region for the kernel trace buffer */
if constexpr (IsKTraceEnabled) { if constexpr (IsKTraceEnabled) {
const KPhysicalAddress ktrace_buffer_phys_addr = GetInteger(resource_end_phys_addr) + init_page_table_region_size; const KPhysicalAddress ktrace_buffer_phys_addr = GetInteger(g_phase2_resource_end_phys_addr) + init_page_table_region_size;
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(ktrace_buffer_phys_addr), KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer)); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(ktrace_buffer_phys_addr), KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(ktrace_buffer_phys_addr) + linear_region_phys_to_virt_diff, KTraceBufferSize, GetTypeForVirtualLinearMapping(KMemoryRegionType_KernelTraceBuffer))); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(ktrace_buffer_phys_addr) + g_phase2_linear_region_phys_to_virt_diff, KTraceBufferSize, GetTypeForVirtualLinearMapping(KMemoryRegionType_KernelTraceBuffer)));
} }
/* All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to some pool partition. Tag them. */ /* All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to some pool partition. Tag them. */
@ -538,12 +619,12 @@ namespace ams::kern::init {
} }
/* Set the linear memory offsets, to enable conversion between physical and virtual addresses. */ /* Set the linear memory offsets, to enable conversion between physical and virtual addresses. */
KMemoryLayout::InitializeLinearMemoryAddresses(aligned_linear_phys_start, linear_region_start); KMemoryLayout::InitializeLinearMemoryAddresses(g_phase2_linear_region_phys_to_virt_diff);
/* Set the initial process binary physical address. */ /* Set the initial process binary physical address. */
/* NOTE: Nintendo does this after pool partition setup, but it's a requirement that we do it before */ /* NOTE: Nintendo does this after pool partition setup, but it's a requirement that we do it before */
/* to retain compatibility with < 5.0.0. */ /* to retain compatibility with < 5.0.0. */
const KPhysicalAddress ini_address = initial_process_binary_layout.address; const KPhysicalAddress ini_address = g_phase2_initial_process_binary_layout.address;
MESOSPHERE_INIT_ABORT_UNLESS(ini_address != Null<KPhysicalAddress>); MESOSPHERE_INIT_ABORT_UNLESS(ini_address != Null<KPhysicalAddress>);
SetInitialProcessBinaryPhysicalAddress(ini_address); SetInitialProcessBinaryPhysicalAddress(ini_address);
@ -567,11 +648,11 @@ namespace ams::kern::init {
KMemoryLayout::InitializeLinearMemoryRegionTrees(); KMemoryLayout::InitializeLinearMemoryRegionTrees();
/* Turn on all other cores. */ /* Turn on all other cores. */
TurnOnAllCores(GetInteger(init_pt.GetPhysicalAddress(reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore)))); TurnOnAllCores();
} }
KPhysicalAddress GetInitArgumentsAddress(s32 core_id) { KInitArguments *GetInitArguments(s32 core_id) {
return g_init_arguments_phys_addr[core_id]; return g_init_arguments + core_id;
} }
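/* Assembly callers index the returned pointer with the INIT_ARGUMENTS_* offsets instead of going        */
/* through a physical address, e.g. (mirroring the core 0 path in the crt0 changes below):               */
/*                                                                                                        */
/*     bl  _ZN3ams4kern4init16GetInitArgumentsEi                                                          */
/*     ldr x2, [x0, #(INIT_ARGUMENTS_SP)]                                                                 */
/*     mov sp, x2                                                                                         */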
void InitializeDebugRegisters() { void InitializeDebugRegisters() {

View file

@ -33,7 +33,18 @@
adr reg, label; \ adr reg, label; \
ldr reg, [reg] ldr reg, [reg]
.section .crt0.text.start, "ax", %progbits .section .crt0.text.start, "ax", %progbits
/* ams::kern::init::IdentityMappedFunctionAreaBegin() */
.global _ZN3ams4kern4init31IdentityMappedFunctionAreaBeginEv
.type _ZN3ams4kern4init31IdentityMappedFunctionAreaBeginEv, %function
_ZN3ams4kern4init31IdentityMappedFunctionAreaBeginEv:
/* NOTE: This is not a real function, and only exists as a label for safety. */
/* ================ Functions after this line remain identity-mapped after initialization finishes. ================ */
.global _start .global _start
_start: _start:
b _ZN3ams4kern4init10StartCore0Emm b _ZN3ams4kern4init10StartCore0Emm
@ -145,13 +156,31 @@ _ZN3ams4kern4init10StartCore0Emm:
/* Call ams::kern::init::InitializeCore(uintptr_t, void **) */ /* Call ams::kern::init::InitializeCore(uintptr_t, void **) */
mov x1, x0 /* Kernelldr returns a state object for the kernel to re-use. */ mov x1, x0 /* Kernelldr returns a state object for the kernel to re-use. */
mov x0, x21 /* Use the address we determined earlier. */ mov x0, x21 /* Use the address we determined earlier. */
bl _ZN3ams4kern4init14InitializeCoreEmPPv bl _ZN3ams4kern4init20InitializeCorePhase1EmPPv
/* Get the init arguments for core 0. */ /* Get the init arguments for core 0. */
mov x0, xzr mov x0, xzr
bl _ZN3ams4kern4init23GetInitArgumentsAddressEi bl _ZN3ams4kern4init16GetInitArgumentsEi
bl _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE /* Setup the stack pointer. */
ldr x2, [x0, #(INIT_ARGUMENTS_SP)]
mov sp, x2
/* Perform further initialization with the stack pointer set up, as required. */
/* This will include e.g. unmapping the identity mapping. */
bl _ZN3ams4kern4init20InitializeCorePhase2Ev
/* Get the init arguments for core 0. */
mov x0, xzr
bl _ZN3ams4kern4init16GetInitArgumentsEi
/* Invoke the entrypoint. */
ldr x1, [x0, #(INIT_ARGUMENTS_ENTRYPOINT)]
ldr x0, [x0, #(INIT_ARGUMENTS_ARGUMENT)]
blr x1
0: /* If we return here, something has gone wrong, so wait forever. */
b 0b
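/* Summary of the new core 0 flow sketched above: InitializeCorePhase1 runs with the identity mapping */
/* still live, sp is then loaded from the core 0 KInitArguments, InitializeCorePhase2 shrinks the     */
/* identity map down to the identity-mapped function area, and control finally branches to the        */
/* virtual entrypoint with its argument. */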
/* ams::kern::init::StartOtherCore(const ams::kern::init::KInitArguments *) */ /* ams::kern::init::StartOtherCore(const ams::kern::init::KInitArguments *) */
.section .crt0.text._ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE, "ax", %progbits .section .crt0.text._ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE, "ax", %progbits
@ -221,52 +250,26 @@ _ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE:
dsb sy dsb sy
isb isb
/* Load remaining needed fields from the init args. */
ldr x3, [x20, #(INIT_ARGUMENTS_SCTLR)]
ldr x2, [x20, #(INIT_ARGUMENTS_SP)]
ldr x1, [x20, #(INIT_ARGUMENTS_ENTRYPOINT)]
ldr x0, [x20, #(INIT_ARGUMENTS_ARGUMENT)]
/* Set sctlr_el1 and ensure instruction consistency. */ /* Set sctlr_el1 and ensure instruction consistency. */
ldr x1, [x20, #(INIT_ARGUMENTS_SCTLR)] msr sctlr_el1, x3
msr sctlr_el1, x1
dsb sy dsb sy
isb isb
/* Jump to the virtual address equivalent to ams::kern::init::InvokeEntrypoint */ /* Set the stack pointer. */
ldr x1, [x20, #(INIT_ARGUMENTS_SETUP_FUNCTION)] mov sp, x2
adr x2, _ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE
sub x1, x1, x2
adr x2, _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE
add x1, x1, x2
mov x0, x20
br x1
/* ams::kern::init::InvokeEntrypoint(const ams::kern::init::KInitArguments *) */ /* Invoke the entrypoint. */
.section .crt0.text._ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE, "ax", %progbits blr x1
.global _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE
.type _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE, %function
_ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE:
/* Preserve the KInitArguments pointer in a register. */
mov x20, x0
/* Clear CPACR_EL1. This will prevent classes of traps (SVE, etc). */ 0: /* If we return here, something has gone wrong, so wait forever. */
msr cpacr_el1, xzr b 0b
isb
/* Setup the stack pointer. */
ldr x1, [x20, #(INIT_ARGUMENTS_SP)]
mov sp, x1
/* Ensure that system debug registers are setup. */
bl _ZN3ams4kern4init24InitializeDebugRegistersEv
/* Ensure that the exception vectors are setup. */
bl _ZN3ams4kern4init26InitializeExceptionVectorsEv
/* Setup the exception stack in cntv_cval_el0. */
ldr x1, [x20, #(INIT_ARGUMENTS_EXCEPTION_STACK)]
msr cntv_cval_el0, x1
/* Jump to the entrypoint. */
ldr x1, [x20, #(INIT_ARGUMENTS_ENTRYPOINT)]
ldr x0, [x20, #(INIT_ARGUMENTS_ARGUMENT)]
br x1
/* TODO: Can we remove this while retaining QEMU support? */ /* TODO: Can we remove this while retaining QEMU support? */
#ifndef ATMOSPHERE_BOARD_NINTENDO_NX #ifndef ATMOSPHERE_BOARD_NINTENDO_NX
@ -559,3 +562,12 @@ _ZN3ams4kern4arch5arm643cpu36FlushEntireDataCacheImplWithoutStackEv:
b 0b b 0b
3: 3:
ret ret
/* ================ Functions before this line remain identity-mapped after initialization finishes. ================ */
/* ams::kern::init::IdentityMappedFunctionAreaEnd() */
.global _ZN3ams4kern4init29IdentityMappedFunctionAreaEndEv
.type _ZN3ams4kern4init29IdentityMappedFunctionAreaEndEv, %function
_ZN3ams4kern4init29IdentityMappedFunctionAreaEndEv:
/* NOTE: This is not a real function, and only exists as a label for safety. */

View file

@ -62,20 +62,20 @@ namespace ams::kern::init::loader {
} }
} }
void SetupInitialIdentityMapping(KInitialPageTable &init_pt, uintptr_t base_address, uintptr_t kernel_size, uintptr_t page_table_region, size_t page_table_region_size, KInitialPageTable::PageAllocator &allocator) { void SetupInitialIdentityMapping(KInitialPageTable &init_pt, uintptr_t base_address, uintptr_t kernel_size, uintptr_t page_table_region, size_t page_table_region_size, KInitialPageAllocator &allocator) {
/* Map in an RWX identity mapping for the kernel. */ /* Map in an RWX identity mapping for the kernel. */
constexpr PageTableEntry KernelRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry KernelRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
init_pt.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator); init_pt.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator, 0);
/* Map in an RWX identity mapping for ourselves. */ /* Map in an RWX identity mapping for ourselves. */
constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
const uintptr_t kernel_ldr_base = util::AlignDown(reinterpret_cast<uintptr_t>(__start__), PageSize); const uintptr_t kernel_ldr_base = util::AlignDown(reinterpret_cast<uintptr_t>(__start__), PageSize);
const uintptr_t kernel_ldr_size = util::AlignUp(reinterpret_cast<uintptr_t>(__end__), PageSize) - kernel_ldr_base; const uintptr_t kernel_ldr_size = util::AlignUp(reinterpret_cast<uintptr_t>(__end__), PageSize) - kernel_ldr_base;
init_pt.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelRWXIdentityAttribute, allocator); init_pt.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelRWXIdentityAttribute, allocator, 0);
/* Map in the page table region as RW- for ourselves. */ /* Map in the page table region as RW- for ourselves. */
constexpr PageTableEntry PageTableRegionRWAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry PageTableRegionRWAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
init_pt.Map(page_table_region, page_table_region_size, page_table_region, KernelRWXIdentityAttribute, allocator); init_pt.Map(page_table_region, page_table_region_size, page_table_region, KernelRWXIdentityAttribute, allocator, 0);
/* Place the L1 table addresses in the relevant system registers. */ /* Place the L1 table addresses in the relevant system registers. */
cpu::SetTtbr0El1(init_pt.GetTtbr0L1TableAddress()); cpu::SetTtbr0El1(init_pt.GetTtbr0L1TableAddress());
@ -194,14 +194,14 @@ namespace ams::kern::init::loader {
/* Map kernel .text as R-X. */ /* Map kernel .text as R-X. */
constexpr PageTableEntry KernelTextAttribute(PageTableEntry::Permission_KernelRX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry KernelTextAttribute(PageTableEntry::Permission_KernelRX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
init_pt.Map(virtual_base_address + rx_offset, rx_end_offset - rx_offset, base_address + rx_offset, KernelTextAttribute, g_initial_page_allocator); init_pt.Map(virtual_base_address + rx_offset, rx_end_offset - rx_offset, base_address + rx_offset, KernelTextAttribute, g_initial_page_allocator, 0);
/* Map kernel .rodata and .rwdata as RW-. */ /* Map kernel .rodata and .rwdata as RW-. */
/* Note that we will later reprotect .rodata as R-- */ /* Note that we will later reprotect .rodata as R-- */
constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped); constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
init_pt.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator); init_pt.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator, 0);
init_pt.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator); init_pt.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator, 0);
/* Physically randomize the kernel region. */ /* Physically randomize the kernel region. */
/* NOTE: Nintendo does this only on 10.0.0+ */ /* NOTE: Nintendo does this only on 10.0.0+ */