mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-12-22 20:31:14 +00:00)

commit e7dee2a9fc (parent b2e522c0a0)

kern: Implement most of memory init (all cores hit main, but still more to do)

22 changed files with 1246 additions and 81 deletions
@@ -76,7 +76,7 @@ TARGET := $(notdir $(CURDIR))
 BUILD := build
 DATA := data
 INCLUDES := include
-SOURCES ?= source $(foreach d,$(filter-out source/arch source/board source,$(wildcard source)),$(call DIR_WILDCARD,$d) $d)
+SOURCES ?= source $(foreach d,$(filter-out source/arch source/board source,$(wildcard source/*)),$(if $(wildcard $d/.),$(call DIR_WILDCARD,$d) $d,))

 ifneq ($(strip $(wildcard source/$(ATMOSPHERE_ARCH_DIR)/.*)),)
 SOURCES += source/$(ATMOSPHERE_ARCH_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_ARCH_DIR))
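A note on the SOURCES fix above: `$(wildcard source)` matches only the literal path `source`, so the old `foreach` iterated over nothing and `DIR_WILDCARD` was never applied to any subdirectory. The new form enumerates children with `$(wildcard source/*)` and keeps an entry only when `$(wildcard $d/.)` is non-empty, i.e. only when `$d` is actually a directory, so stray files in `source/` can no longer leak into SOURCES.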
@@ -27,16 +27,20 @@
 /* Core pre-initialization includes. */
 #include "mesosphere/kern_select_cpu.hpp"
+#include "mesosphere/kern_select_k_system_control.hpp"

 /* Initialization headers. */
 #include "mesosphere/init/kern_init_elf.hpp"
 #include "mesosphere/init/kern_init_layout.hpp"
 #include "mesosphere/init/kern_init_page_table_select.hpp"
 #include "mesosphere/init/kern_init_arguments_select.hpp"
+#include "mesosphere/kern_k_memory_layout.hpp"

 /* Core functionality. */
 #include "mesosphere/kern_select_interrupts.hpp"
-#include "mesosphere/kern_select_k_system_control.hpp"

 /* Supervisor Calls. */
 #include "mesosphere/kern_svc.hpp"

+/* Main functionality. */
+#include "mesosphere/kern_main.hpp"
@@ -17,7 +17,7 @@
 #include <vapours.hpp>
 #include <mesosphere/kern_panic.hpp>
 #include <mesosphere/kern_k_typed_address.hpp>
-#include "../kern_cpu.hpp"
+#include <mesosphere/kern_select_cpu.hpp>

 namespace ams::kern::init {

@@ -190,10 +190,14 @@ namespace ams::kern::init {
                 virtual KPhysicalAddress Allocate() { return Null<KPhysicalAddress>; }
                 virtual void Free(KPhysicalAddress phys_addr) { /* Nothing to do here. */ (void)(phys_addr); }
             };

+            struct NoClear{};
+
         private:
             KPhysicalAddress l1_table;
         public:
-            constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : l1_table(l1) {
+            constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ }

+            constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) {
                 ClearNewPageTable(this->l1_table);
             }

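The two constructors above are the tag-dispatch idiom: NoClear is an empty struct whose only purpose is to select the overload that adopts an already-initialized L1 table without re-zeroing it, while the ordinary constructor delegates and then clears. A minimal self-contained sketch of the pattern (names here are illustrative, not from the diff):

    #include <cstdint>

    struct NoClear{};  /* empty tag type: exists only to pick a constructor overload */

    class Table {
        private:
            uintptr_t l1;
            static void Clear(uintptr_t table) { /* zero PageSize bytes at table */ (void)table; }
        public:
            /* Adopt an already-initialized table, untouched. */
            constexpr Table(uintptr_t l1_addr, NoClear) : l1(l1_addr) { /* ... */ }

            /* Default path: delegate to the tag overload, then clear the fresh table. */
            Table(uintptr_t l1_addr) : Table(l1_addr, NoClear{}) { Clear(this->l1); }
    };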
@@ -224,9 +228,9 @@ namespace ams::kern::init {
         public:
             void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, IPageAllocator &allocator) {
                 /* Ensure that addresses and sizes are page aligned. */
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));

                 /* Iteratively map pages until the requested region is mapped. */
                 while (size > 0) {
@@ -309,10 +313,37 @@ namespace ams::kern::init {
                 }
             }

+            KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
+                /* Get the L1 entry. */
+                const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+
+                if (l1_entry->IsBlock()) {
+                    return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
+                }
+
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+
+                /* Get the L2 entry. */
+                const L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+
+                if (l2_entry->IsBlock()) {
+                    return l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1));
+                }
+
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+
+                /* Get the L3 entry. */
+                const L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
+
+                return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1));
+            }
+
             bool IsFree(KVirtualAddress virt_addr, size_t size) {
                 /* Ensure that addresses and sizes are page aligned. */
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
-                MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));

                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 while (virt_addr < end_virt_addr) {
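The new GetPhysicalAddress is a software walk of the same three-level structure the MMU walks in hardware: a block entry at any level terminates the walk, and the virtual-address bits below that block's size become the offset into the block. A worked example of the offset arithmetic, assuming the usual 4KB-granule block sizes (L1 = 1GB, L2 = 2MB, L3 = 4KB; the concrete constants are defined elsewhere, not in this diff):

    #include <cstdint>

    constexpr uint64_t L2BlockSize = 2ull << 20;            /* 2MB, assumed */
    constexpr uint64_t block_phys  = 0x80400000ull;         /* hypothetical l2_entry->GetBlock() */
    constexpr uint64_t virt        = 0xFFFFFF8000112345ull; /* hypothetical lookup address */

    /* Mirrors: l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1)). */
    constexpr uint64_t phys = block_phys + (virt & (L2BlockSize - 1));
    static_assert(phys == 0x80512345ull);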
@@ -360,8 +391,8 @@ namespace ams::kern::init {
             cpu::DataSynchronizationBarrierInnerShareable();

             /* Ensure that addresses and sizes are page aligned. */
-            MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
-            MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
+            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
+            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));

             /* Iteratively reprotect pages until the requested region is reprotected. */
             while (size > 0) {
@@ -371,9 +402,9 @@ namespace ams::kern::init {
                 if (l1_entry->IsBlock()) {
                     /* Ensure that we are allowed to have an L1 block here. */
                     const KPhysicalAddress block = l1_entry->GetBlock();
-                    MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
-                    MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L1BlockSize));
-                    MESOSPHERE_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L1BlockSize));
+                    MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false));

                     /* Invalidate the existing L1 block. */
                     *static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
@@ -389,7 +420,7 @@ namespace ams::kern::init {
                 }

                 /* Not a block, so we must be a table. */
-                MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());

                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
                 if (l2_entry->IsBlock()) {
@@ -397,14 +428,14 @@ namespace ams::kern::init {

                     if (l2_entry->IsContiguous()) {
                         /* Ensure that we are allowed to have a contiguous L2 block here. */
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize));

                         /* Invalidate the existing contiguous L2 block. */
                         for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
                             /* Ensure that the entry is valid. */
-                            MESOSPHERE_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true));
+                            MESOSPHERE_INIT_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true));
                             static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
                         }
                         cpu::DataSynchronizationBarrierInnerShareable();
@@ -419,10 +450,10 @@ namespace ams::kern::init {
                         size -= L2ContiguousBlockSize;
                     } else {
                         /* Ensure that we are allowed to have an L2 block here. */
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
-                        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2BlockSize));
-                        MESOSPHERE_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false));

                         /* Invalidate the existing L2 block. */
                         *static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
@@ -440,23 +471,23 @@ namespace ams::kern::init {
                 }

                 /* Not a block, so we must be a table. */
-                MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable());
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());

                 /* We must have a mapped l3 entry to reprotect. */
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
-                MESOSPHERE_ABORT_UNLESS(l3_entry->IsBlock());
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
                 const KPhysicalAddress block = l3_entry->GetBlock();

                 if (l3_entry->IsContiguous()) {
                     /* Ensure that we are allowed to have a contiguous L3 block here. */
-                    MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
-                    MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
-                    MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize));

                     /* Invalidate the existing contiguous L3 block. */
                     for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
                         /* Ensure that the entry is valid. */
-                        MESOSPHERE_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true));
+                        MESOSPHERE_INIT_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true));
                         static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
                     }
                     cpu::DataSynchronizationBarrierInnerShareable();
@@ -471,10 +502,10 @@ namespace ams::kern::init {
                     size -= L3ContiguousBlockSize;
                 } else {
                     /* Ensure that we are allowed to have an L3 block here. */
-                    MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
-                    MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
-                    MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3BlockSize));
-                    MESOSPHERE_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3BlockSize));
+                    MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false));

                     /* Invalidate the existing L3 block. */
                     *static_cast<PageTableEntry *>(l3_entry) = InvalidPageTableEntry;
@@ -505,14 +536,18 @@ namespace ams::kern::init {
                 this->next_address = address;
             }

-            ALWAYS_INLINE uintptr_t GetFinalState() {
+            ALWAYS_INLINE uintptr_t GetFinalNextAddress() {
                 const uintptr_t final_address = this->next_address;
                 this->next_address = Null<uintptr_t>;
                 return final_address;
             }

+            ALWAYS_INLINE uintptr_t GetFinalState() {
+                return this->GetFinalNextAddress();
+            }
         public:
             virtual KPhysicalAddress Allocate() override {
-                MESOSPHERE_ABORT_UNLESS(this->next_address != Null<uintptr_t>);
+                MESOSPHERE_INIT_ABORT_UNLESS(this->next_address != Null<uintptr_t>);
                 const uintptr_t allocated = this->next_address;
                 this->next_address += PageSize;
                 std::memset(reinterpret_cast<void *>(allocated), 0, PageSize);
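KInitialPageAllocator is a plain bump allocator over a linear run of physical pages, and the rename makes the accessor's job explicit: GetFinalNextAddress hands the cursor to the caller and poisons the allocator state, so any later Allocate() trips the new init abort rather than silently reusing pages. A reduced sketch of that hand-off (addresses hypothetical):

    #include <cstdint>

    constexpr uintptr_t Null_ = 0;        /* stands in for Null<uintptr_t> */
    uintptr_t next_address = 0x80060000;  /* hypothetical free-page cursor */

    uintptr_t GetFinalNextAddress() {
        const uintptr_t final_address = next_address;
        next_address = Null_;             /* poison: any further allocation must now abort */
        return final_address;
    }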
@@ -59,6 +59,9 @@ namespace ams::kern::arm64::cpu {
         EnsureInstructionConsistency();
     }

+    /* Synchronization helpers. */
+    NOINLINE void SynchronizeAllCores();
+
     /* Cache management helpers. */
     void FlushEntireDataCacheShared();
     void FlushEntireDataCacheLocal();
@@ -37,8 +37,8 @@ namespace ams::kern::arm64::cpu {
     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr0El1, ttbr0_el1)
     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr1El1, ttbr1_el1)

-    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)
     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1)
+    MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)

     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SctlrEl1, sctlr_el1)
@@ -48,19 +48,88 @@ namespace ams::kern::arm64::cpu {
     MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CsselrEl1, csselr_el1)

     /* Base class for register accessors. */
-    class GenericRegisterAccessor {
+    class GenericRegisterAccessorBase {
+        NON_COPYABLE(GenericRegisterAccessorBase);
+        NON_MOVEABLE(GenericRegisterAccessorBase);
         private:
             u64 value;
         public:
-            ALWAYS_INLINE GenericRegisterAccessor(u64 v) : value(v) { /* ... */ }
+            constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ }
         protected:
+            constexpr ALWAYS_INLINE u64 GetValue() const {
+                return this->value;
+            }
+
             constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
                 return (this->value >> offset) & ((1ul << count) - 1);
             }
     };

-    /* Special code for main id register. */
-    class MainIdRegisterAccessor : public GenericRegisterAccessor {
+    template<typename Derived>
+    class GenericRegisterAccessor : public GenericRegisterAccessorBase {
+        public:
+            constexpr ALWAYS_INLINE GenericRegisterAccessor(u64 v) : GenericRegisterAccessorBase(v) { /* ... */ }
+        protected:
+            ALWAYS_INLINE void Store() const {
+                static_cast<const Derived *>(this)->Store();
+            }
+    };
+
+    #define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(name) class name##RegisterAccessor : public GenericRegisterAccessor<name##RegisterAccessor>
+
+    #define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(accessor, reg_name) \
+        ALWAYS_INLINE accessor##RegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(reg_name)) { /* ... */ } \
+        constexpr ALWAYS_INLINE accessor##RegisterAccessor(u64 v) : GenericRegisterAccessor(v) { /* ... */ } \
+        \
+        ALWAYS_INLINE void Store() { const u64 v = this->GetValue(); MESOSPHERE_CPU_SET_SYSREG(reg_name, v); }
+
+    /* Accessors. */
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MemoryAccessIndirection) {
+        public:
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MemoryAccessIndirection, mair_el1)
+    };
+
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(TranslationControl) {
+        public:
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(TranslationControl, tcr_el1)
+
+            constexpr ALWAYS_INLINE size_t GetT1Size() const {
+                const size_t shift_value = this->GetBits(16, 6);
+                return size_t(1) << (size_t(64) - shift_value);
+            }
+    };
+
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MultiprocessorAffinity) {
+        public:
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MultiprocessorAffinity, mpidr_el1)
+
+            constexpr ALWAYS_INLINE u64 GetAff0() const {
+                return this->GetBits(0, 8);
+            }
+
+            constexpr ALWAYS_INLINE u64 GetAff1() const {
+                return this->GetBits(8, 8);
+            }
+
+            constexpr ALWAYS_INLINE u64 GetAff2() const {
+                return this->GetBits(16, 8);
+            }
+
+            constexpr ALWAYS_INLINE u64 GetAff3() const {
+                return this->GetBits(32, 8);
+            }
+
+            constexpr ALWAYS_INLINE u64 GetCpuOnArgument() const {
+                constexpr u64 Mask = 0x000000FF00FFFF00ul;
+                return this->GetValue() & Mask;
+            }
+    };
+
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ThreadId) {
+        public:
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ThreadId, tpidr_el1)
+    };
+
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MainId) {
         public:
             enum class Implementer {
                 ArmLimited = 0x41,
@@ -70,7 +139,7 @@ namespace ams::kern::arm64::cpu {
                 CortexA57 = 0xD07,
             };
         public:
-            ALWAYS_INLINE MainIdRegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(midr_el1)) { /* ... */ }
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MainId, midr_el1)
         public:
             constexpr ALWAYS_INLINE Implementer GetImplementer() const {
                 return static_cast<Implementer>(this->GetBits(24, 8));
@@ -94,9 +163,9 @@ namespace ams::kern::arm64::cpu {
     };

     /* Accessors for cache registers. */
-    class CacheLineIdAccessor : public GenericRegisterAccessor {
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheLineId) {
         public:
-            ALWAYS_INLINE CacheLineIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(clidr_el1)) { /* ... */ }
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheLineId, clidr_el1)
         public:
            constexpr ALWAYS_INLINE int GetLevelsOfCoherency() const {
                return static_cast<int>(this->GetBits(24, 3));
@@ -109,9 +178,9 @@ namespace ams::kern::arm64::cpu {
            /* TODO: Other bitfield accessors? */
     };

-    class CacheSizeIdAccessor : public GenericRegisterAccessor {
+    MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheSizeId) {
         public:
-            ALWAYS_INLINE CacheSizeIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(ccsidr_el1)) { /* ... */ }
+            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheSizeId, ccsidr_el1)
         public:
            constexpr ALWAYS_INLINE int GetNumberOfSets() const {
                return static_cast<int>(this->GetBits(13, 15));
@@ -128,6 +197,8 @@ namespace ams::kern::arm64::cpu {
            /* TODO: Other bitfield accessors? */
     };

+    #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS
+    #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS
     #undef MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS
     #undef MESOSPHERE_CPU_GET_SYSREG
     #undef MESOSPHERE_CPU_SET_SYSREG
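The accessor rework above is CRTP plus token pasting: GenericRegisterAccessorBase owns the raw u64 and the bitfield helpers, the GenericRegisterAccessor<Derived> template forwards Store() to the concrete class with no virtual dispatch, and the two macros stamp out one <Name>RegisterAccessor per system register with a read-the-register constructor, a value constructor, and a write-back Store(). A compilable reduction of the shape (sysreg access stubbed out, names illustrative):

    #include <cstddef>
    #include <cstdint>

    class Base {
        private:
            uint64_t value;
        public:
            constexpr Base(uint64_t v) : value(v) { /* ... */ }
        protected:
            constexpr uint64_t GetValue() const { return this->value; }
            constexpr uint64_t GetBits(size_t offset, size_t count) const {
                return (this->value >> offset) & ((1ull << count) - 1);
            }
    };

    template<typename Derived>
    class Accessor : public Base {
        public:
            constexpr Accessor(uint64_t v) : Base(v) { /* ... */ }
        protected:
            /* Static dispatch: resolves to the derived Store() at compile time. */
            void Store() const { static_cast<const Derived *>(this)->Store(); }
    };

    #define SYSREG_ACCESSOR_CLASS(name) class name##RegisterAccessor : public Accessor<name##RegisterAccessor>

    SYSREG_ACCESSOR_CLASS(Demo) {
        public:
            constexpr DemoRegisterAccessor(uint64_t v) : Accessor(v) { /* ... */ }
            void Store() const { /* a real accessor would emit: msr <demo_reg>, GetValue() */ }
    };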
@@ -23,8 +23,10 @@ namespace ams::kern {
     class Init {
         public:
             /* Initialization. */
+            static size_t GetIntendedMemorySize();
             static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
             static bool ShouldIncreaseThreadResourceLimit();
+            static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);

             /* Randomness. */
             static void GenerateRandomBytes(void *dst, size_t size);
@@ -25,5 +25,7 @@
 namespace ams::kern::init {

     KPhysicalAddress GetInitArgumentsAddress(s32 core_id);
+    void SetInitArguments(s32 core_id, KPhysicalAddress address, uintptr_t arg);
+    void StoreInitArguments();

 }
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <vapours.hpp>
+#include <mesosphere/init/kern_init_page_table_select.hpp>
+
+namespace ams::kern {
+
+    constexpr size_t KernelAslrAlignment = 2_MB;
+    constexpr size_t KernelVirtualAddressSpaceWidth = size_t(1ul) << 39ul;
+    constexpr size_t KernelPhysicalAddressSpaceWidth = size_t(1ul) << 48ul;
+
+    constexpr size_t KernelVirtualAddressSpaceBase = 0ul - KernelVirtualAddressSpaceWidth;
+    constexpr size_t KernelVirtualAddressSpaceEnd = KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
+    constexpr size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ul;
+    constexpr size_t KernelVirtualAddressSpaceSize = KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
+
+    constexpr size_t KernelPhysicalAddressSpaceBase = 0ul;
+    constexpr size_t KernelPhysicalAddressSpaceEnd = KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth;
+    constexpr size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ul;
+    constexpr size_t KernelPhysicalAddressSpaceSize = KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase;
+
+    enum KMemoryRegionType : u32 {
+        KMemoryRegionAttr_CarveoutProtected = 0x04000000,
+        KMemoryRegionAttr_DidKernelMap      = 0x08000000,
+        KMemoryRegionAttr_ShouldKernelMap   = 0x10000000,
+        KMemoryRegionAttr_UserReadOnly      = 0x20000000,
+        KMemoryRegionAttr_NoUserMap         = 0x40000000,
+        KMemoryRegionAttr_LinearMapped      = 0x80000000,
+
+        KMemoryRegionType_None      = 0,
+        KMemoryRegionType_Kernel    = 1,
+        KMemoryRegionType_Dram      = 2,
+        KMemoryRegionType_CoreLocal = 4,
+
+        KMemoryRegionType_VirtualKernelPtHeap      = 0x2A,
+        KMemoryRegionType_VirtualKernelTraceBuffer = 0x4A,
+        KMemoryRegionType_VirtualKernelInitPt      = 0x19A,
+
+        KMemoryRegionType_Uart                 = 0x1D,
+        KMemoryRegionType_InterruptDistributor = 0x4D,
+        KMemoryRegionType_InterruptController  = 0x2D,
+
+        KMemoryRegionType_MemoryController          = 0x55,
+        KMemoryRegionType_MemoryController0         = 0x95,
+        KMemoryRegionType_MemoryController1         = 0x65,
+        KMemoryRegionType_PowerManagementController = 0x1A5,
+
+        KMemoryRegionType_KernelAutoMap = KMemoryRegionType_Kernel | KMemoryRegionAttr_ShouldKernelMap,
+
+        KMemoryRegionType_KernelTemp = 0x31,
+
+        KMemoryRegionType_KernelCode  = 0x19,
+        KMemoryRegionType_KernelStack = 0x29,
+        KMemoryRegionType_KernelMisc  = 0x49,
+        KMemoryRegionType_KernelSlab  = 0x89,
+
+        KMemoryRegionType_KernelMiscMainStack      = 0xB49,
+        KMemoryRegionType_KernelMiscMappedDevice   = 0xD49,
+        KMemoryRegionType_KernelMiscIdleStack      = 0x1349,
+        KMemoryRegionType_KernelMiscUnknownDebug   = 0x1549,
+        KMemoryRegionType_KernelMiscExceptionStack = 0x2349,
+
+        KMemoryRegionType_DramLinearMapped = KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped,
+
+        KMemoryRegionType_DramReservedEarly = 0x16 | KMemoryRegionAttr_NoUserMap,
+        KMemoryRegionType_DramPoolPartition = 0x26 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped,
+
+        KMemoryRegionType_DramKernel       = 0xE   | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected,
+        KMemoryRegionType_DramKernelCode   = 0xCE  | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected,
+        KMemoryRegionType_DramKernelSlab   = 0x14E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected,
+        KMemoryRegionType_DramKernelPtHeap = 0x24E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped,
+        KMemoryRegionType_DramKernelInitPt = 0x44E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped,
+
+        /* These regions aren't normally mapped in retail kernel. */
+        KMemoryRegionType_KernelTraceBuffer = 0xA6 | KMemoryRegionAttr_UserReadOnly | KMemoryRegionAttr_LinearMapped,
+        KMemoryRegionType_OnMemoryBootImage = 0x156,
+        KMemoryRegionType_DTB               = 0x256,
+    };
+
+    constexpr ALWAYS_INLINE KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
+        if (type_id == (type_id | KMemoryRegionType_KernelTraceBuffer)) {
+            return KMemoryRegionType_VirtualKernelTraceBuffer;
+        } else if (type_id == (type_id | KMemoryRegionType_DramKernelPtHeap)) {
+            return KMemoryRegionType_VirtualKernelPtHeap;
+        } else {
+            return KMemoryRegionType_Dram;
+        }
+    }
+
+    class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
+        NON_COPYABLE(KMemoryBlock);
+        NON_MOVEABLE(KMemoryBlock);
+        private:
+            uintptr_t address;
+            uintptr_t pair_address;
+            size_t block_size;
+            u32 attributes;
+            u32 type_id;
+        public:
+            static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) {
+                if (lhs.address < rhs.address) {
+                    return -1;
+                } else if (lhs.address == rhs.address) {
+                    return 0;
+                } else {
+                    return 1;
+                }
+            }
+        public:
+            constexpr ALWAYS_INLINE KMemoryBlock() : address(0), pair_address(0), block_size(0), attributes(0), type_id(0) { /* ... */ }
+            constexpr ALWAYS_INLINE KMemoryBlock(uintptr_t a, size_t bl, uintptr_t p, u32 r, u32 t) :
+                address(a), pair_address(p), block_size(bl), attributes(r), type_id(t)
+            {
+                /* ... */
+            }
+            constexpr ALWAYS_INLINE KMemoryBlock(uintptr_t a, size_t bl, u32 r, u32 t) : KMemoryBlock(a, bl, std::numeric_limits<uintptr_t>::max(), r, t) { /* ... */ }
+
+            constexpr ALWAYS_INLINE uintptr_t GetAddress() const {
+                return this->address;
+            }
+
+            constexpr ALWAYS_INLINE uintptr_t GetPairAddress() const {
+                return this->pair_address;
+            }
+
+            constexpr ALWAYS_INLINE size_t GetSize() const {
+                return this->block_size;
+            }
+
+            constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const {
+                return this->GetAddress() + this->GetSize();
+            }
+
+            constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const {
+                return this->GetEndAddress() - 1;
+            }
+
+            constexpr ALWAYS_INLINE u32 GetAttributes() const {
+                return this->attributes;
+            }
+
+            constexpr ALWAYS_INLINE u32 GetType() const {
+                return this->type_id;
+            }
+
+            constexpr ALWAYS_INLINE void SetType(u32 type) {
+                MESOSPHERE_INIT_ABORT_UNLESS(this->CanDerive(type));
+                this->type_id = type;
+            }
+
+            constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const {
+                return this->GetAddress() <= address && address < this->GetLastAddress();
+            }
+
+            constexpr ALWAYS_INLINE bool IsDerivedFrom(u32 type) const {
+                return (this->GetType() | type) == this->GetType();
+            }
+
+            constexpr ALWAYS_INLINE bool HasTypeAttribute(KMemoryRegionType attr) const {
+                return (this->GetType() | attr) == this->GetType();
+            }
+
+            constexpr ALWAYS_INLINE bool CanDerive(u32 type) const {
+                return (this->GetType() | type) == type;
+            }
+
+            constexpr ALWAYS_INLINE void SetPairAddress(uintptr_t a) {
+                this->pair_address = a;
+            }
+
+            constexpr ALWAYS_INLINE void SetTypeAttribute(KMemoryRegionType attr) {
+                this->type_id |= attr;
+            }
+    };
+    static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
+
+    class KMemoryBlockTree {
+        public:
+            struct DerivedRegionExtents {
+                const KMemoryBlock *first_block;
+                const KMemoryBlock *last_block;
+            };
+        private:
+            using TreeType = util::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
+            using value_type      = TreeType::value_type;
+            using size_type       = TreeType::size_type;
+            using difference_type = TreeType::difference_type;
+            using pointer         = TreeType::pointer;
+            using const_pointer   = TreeType::const_pointer;
+            using reference       = TreeType::reference;
+            using const_reference = TreeType::const_reference;
+            using iterator        = TreeType::iterator;
+            using const_iterator  = TreeType::const_iterator;
+        private:
+            TreeType tree;
+        public:
+            constexpr ALWAYS_INLINE KMemoryBlockTree() : tree() { /* ... */ }
+        public:
+            iterator FindContainingBlock(uintptr_t address) {
+                for (auto it = this->begin(); it != this->end(); it++) {
+                    if (it->Contains(address)) {
+                        return it;
+                    }
+                }
+                MESOSPHERE_INIT_ABORT();
+            }
+
+            iterator FindFirstBlockByTypeAttr(u32 type_id, u32 attr = 0) {
+                for (auto it = this->begin(); it != this->end(); it++) {
+                    if (it->GetType() == type_id && it->GetAttributes() == attr) {
+                        return it;
+                    }
+                }
+                MESOSPHERE_INIT_ABORT();
+            }
+
+            DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) {
+                DerivedRegionExtents extents = { .first_block = nullptr, .last_block = nullptr };
+
+                for (auto it = this->cbegin(); it != this->cend(); it++) {
+                    if (it->IsDerivedFrom(type_id)) {
+                        if (extents.first_block == nullptr) {
+                            extents.first_block = std::addressof(*it);
+                        }
+                        extents.last_block = std::addressof(*it);
+                    }
+                }
+
+                MESOSPHERE_INIT_ABORT_UNLESS(extents.first_block != nullptr);
+                MESOSPHERE_INIT_ABORT_UNLESS(extents.last_block != nullptr);
+
+                return extents;
+            }
+        public:
+            NOINLINE bool Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
+            NOINLINE KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id);
+
+            ALWAYS_INLINE KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, size_t guard_size) {
+                return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
+            }
+        public:
+            /* Iterator accessors. */
+            iterator begin() {
+                return this->tree.begin();
+            }
+
+            const_iterator begin() const {
+                return this->tree.begin();
+            }
+
+            iterator end() {
+                return this->tree.end();
+            }
+
+            const_iterator end() const {
+                return this->tree.end();
+            }
+
+            const_iterator cbegin() const {
+                return this->begin();
+            }
+
+            const_iterator cend() const {
+                return this->end();
+            }
+
+            iterator iterator_to(reference ref) {
+                return this->tree.iterator_to(ref);
+            }
+
+            const_iterator iterator_to(const_reference ref) const {
+                return this->tree.iterator_to(ref);
+            }
+
+            /* Content management. */
+            bool empty() const {
+                return this->tree.empty();
+            }
+
+            reference back() {
+                return this->tree.back();
+            }
+
+            const_reference back() const {
+                return this->tree.back();
+            }
+
+            reference front() {
+                return this->tree.front();
+            }
+
+            const_reference front() const {
+                return this->tree.front();
+            }
+
+            /* GCC over-eagerly inlines this operation. */
+            NOINLINE iterator insert(reference ref) {
+                return this->tree.insert(ref);
+            }
+
+            NOINLINE iterator erase(iterator it) {
+                return this->tree.erase(it);
+            }
+
+            iterator find(const_reference ref) const {
+                return this->tree.find(ref);
+            }
+
+            iterator nfind(const_reference ref) const {
+                return this->tree.nfind(ref);
+            }
+    };
+
+    class KMemoryBlockAllocator {
+        NON_COPYABLE(KMemoryBlockAllocator);
+        NON_MOVEABLE(KMemoryBlockAllocator);
+        public:
+            static constexpr size_t MaxMemoryBlocks = 1000;
+            friend class KMemoryLayout;
+        private:
+            KMemoryBlock block_heap[MaxMemoryBlocks];
+            size_t num_blocks;
+        private:
+            constexpr ALWAYS_INLINE KMemoryBlockAllocator() : block_heap(), num_blocks() { /* ... */ }
+        public:
+            ALWAYS_INLINE KMemoryBlock *Allocate() {
+                /* Ensure we stay within the bounds of our heap. */
+                MESOSPHERE_INIT_ABORT_UNLESS(this->num_blocks < MaxMemoryBlocks);
+
+                return &this->block_heap[this->num_blocks++];
+            }
+
+            template<typename... Args>
+            ALWAYS_INLINE KMemoryBlock *Create(Args&&... args) {
+                KMemoryBlock *block = this->Allocate();
+                new (block) KMemoryBlock(std::forward<Args>(args)...);
+                return block;
+            }
+    };
+
+    class KMemoryLayout {
+        private:
+            static /* constinit */ inline uintptr_t s_linear_phys_to_virt_diff;
+            static /* constinit */ inline uintptr_t s_linear_virt_to_phys_diff;
+            static /* constinit */ inline KMemoryBlockAllocator s_block_allocator;
+            static /* constinit */ inline KMemoryBlockTree s_virtual_tree;
+            static /* constinit */ inline KMemoryBlockTree s_physical_tree;
+            static /* constinit */ inline KMemoryBlockTree s_virtual_linear_tree;
+            static /* constinit */ inline KMemoryBlockTree s_physical_linear_tree;
+        public:
+            static ALWAYS_INLINE KMemoryBlockAllocator &GetMemoryBlockAllocator()        { return s_block_allocator; }
+            static ALWAYS_INLINE KMemoryBlockTree &GetVirtualMemoryBlockTree()           { return s_virtual_tree; }
+            static ALWAYS_INLINE KMemoryBlockTree &GetPhysicalMemoryBlockTree()          { return s_physical_tree; }
+            static ALWAYS_INLINE KMemoryBlockTree &GetVirtualLinearMemoryBlockTree()     { return s_virtual_linear_tree; }
+            static ALWAYS_INLINE KMemoryBlockTree &GetPhysicalLinearMemoryBlockTree()    { return s_physical_linear_tree; }
+
+            static NOINLINE KVirtualAddress GetMainStackTopAddress(s32 core_id) {
+                return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelMiscMainStack, static_cast<u32>(core_id))->GetEndAddress();
+            }
+
+            static void InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start);
+    };
+
+    namespace init {
+
+        /* These should be generic, regardless of board. */
+        void SetupCoreLocalRegionMemoryBlocks(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator);
+        void SetupPoolPartitionMemoryBlocks();
+
+        /* These may be implemented in a board-specific manner. */
+        void SetupDevicePhysicalMemoryBlocks();
+        void SetupDramPhysicalMemoryBlocks();
+
+    }
+
+}
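The region type values above form a lattice under bitwise inclusion rather than a flat enum: a type T is derived from a base B exactly when every bit of B is set in T, which is what IsDerivedFrom's `(GetType() | type) == GetType()` computes, and CanDerive is the same test in the other direction. For example, KMemoryRegionType_DramKernelPtHeap (0x24E plus attribute bits) contains the Dram bit (2), so a pt-heap block answers to queries for generic DRAM. A compilable check of the relation (attribute bits omitted):

    #include <cstdint>

    constexpr bool IsDerivedFrom(uint32_t type, uint32_t base) {
        return (type | base) == type;   /* all of base's bits are already set in type */
    }

    constexpr uint32_t Dram             = 2;
    constexpr uint32_t DramKernelPtHeap = 0x24E;

    static_assert(IsDerivedFrom(DramKernelPtHeap, Dram));
    static_assert(!IsDerivedFrom(Dram, DramKernelPtHeap));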
libraries/libmesosphere/include/mesosphere/kern_main.hpp (new file, 23 lines)
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ * (Standard GPLv2 license header, identical to the other new files.)
+ */
+#pragma once
+#include <vapours.hpp>
+
+namespace ams::kern {
+
+    NORETURN void HorizonKernelMain(s32 core_id);
+
+}
@@ -44,6 +44,7 @@ namespace ams::kern {
 #define MESOSPHERE_R_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(R_SUCCEEDED(expr), "Result assertion failed: %s", #expr)

 #define MESOSPHERE_ABORT() MESOSPHERE_PANIC("Abort()");
+#define MESOSPHERE_INIT_ABORT() do { /* ... */ } while (true)

 #define MESOSPHERE_ABORT_UNLESS(expr) \
     ({ \
@@ -51,3 +52,10 @@ namespace ams::kern {
         MESOSPHERE_PANIC("Abort(): %s", #expr); \
     } \
 })
+
+#define MESOSPHERE_INIT_ABORT_UNLESS(expr) \
+    ({ \
+        if (AMS_UNLIKELY(!(expr))) { \
+            MESOSPHERE_INIT_ABORT(); \
+        } \
+    })
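The split between the two abort families is deliberate: MESOSPHERE_ABORT panics through kernel machinery that does not exist yet during early init, so MESOSPHERE_INIT_ABORT simply spins the core forever, which is the only safe observable failure before exception vectors and output are up. That constraint is why this commit mass-rewrites MESOSPHERE_ABORT_UNLESS to MESOSPHERE_INIT_ABORT_UNLESS throughout the init page-table code. A reduced model of the macro shape, assuming a GCC/Clang target (the `({ ... })` statement-expression form is a compiler extension):

    /* Illustrative names; AMS_UNLIKELY wraps __builtin_expect in the real codebase. */
    #define INIT_ABORT()            do { /* nothing to report to yet: spin forever */ } while (true)
    #define INIT_ABORT_UNLESS(expr) ({ if (__builtin_expect(!(expr), 0)) { INIT_ABORT(); } })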
@@ -19,6 +19,8 @@ namespace ams::kern::arm64::cpu {

     namespace {

+        std::atomic<s32> g_all_core_sync_count;
+
         void FlushEntireDataCacheImpl(int level) {
             /* Used in multiple locations. */
             const u64 level_sel_value = static_cast<u64>(level << 1);
@@ -28,7 +30,7 @@ namespace ams::kern::arm64::cpu {
             cpu::InstructionMemoryBarrier();

             /* Get cache size id info. */
-            CacheSizeIdAccessor ccsidr_el1;
+            CacheSizeIdRegisterAccessor ccsidr_el1;
             const int num_sets = ccsidr_el1.GetNumberOfSets();
             const int num_ways = ccsidr_el1.GetAssociativity();
             const int line_size = ccsidr_el1.GetLineSize();
@@ -49,7 +51,7 @@ namespace ams::kern::arm64::cpu {
     }

     void FlushEntireDataCacheShared() {
-        CacheLineIdAccessor clidr_el1;
+        CacheLineIdRegisterAccessor clidr_el1;
         const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();
         const int levels_of_unification = clidr_el1.GetLevelsOfUnification();

@@ -59,11 +61,28 @@ namespace ams::kern::arm64::cpu {
         }
     }

     void FlushEntireDataCacheLocal() {
-        CacheLineIdAccessor clidr_el1;
+        CacheLineIdRegisterAccessor clidr_el1;
         const int levels_of_unification = clidr_el1.GetLevelsOfUnification();

         for (int level = levels_of_unification - 1; level >= 0; level--) {
             FlushEntireDataCacheImpl(level);
         }
     }

+    NOINLINE void SynchronizeAllCores() {
+        /* Wait until the count can be read. */
+        while (!(g_all_core_sync_count < static_cast<s32>(cpu::NumCores))) { /* ... */ }
+
+        const s32 per_core_idx = g_all_core_sync_count.fetch_add(1);
+
+        /* Loop until it's our turn. This will act on each core in order. */
+        while (g_all_core_sync_count != per_core_idx + static_cast<s32>(cpu::NumCores)) { /* ... */ }
+
+        if (g_all_core_sync_count != 2 * static_cast<s32>(cpu::NumCores) - 1) {
+            g_all_core_sync_count++;
+        } else {
+            g_all_core_sync_count = 0;
+        }
+    }
+
 }
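SynchronizeAllCores is a reusable ticket barrier built on a single atomic counter: arrivals take tickets 0..NumCores-1, the counter climbing from NumCores to 2*NumCores-1 releases the waiters one at a time in arrival order, and the last core resets it to zero to re-arm the barrier (the initial wait keeps a fast core from taking a ticket while a previous barrier is still draining). A standalone model you can run, with std::thread standing in for CPU cores:

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    constexpr int NumCores = 4;
    std::atomic<int> g_sync_count{0};

    void Synchronize() {
        while (!(g_sync_count.load() < NumCores)) { /* wait for any previous barrier to drain */ }

        const int ticket = g_sync_count.fetch_add(1);          /* arrival order: 0..NumCores-1 */

        while (g_sync_count.load() != ticket + NumCores) { }   /* released one at a time, in order */

        if (g_sync_count.load() != 2 * NumCores - 1) {
            g_sync_count++;                                    /* release the next ticket holder */
        } else {
            g_sync_count = 0;                                  /* last core re-arms the barrier */
        }
    }

    int main() {
        std::vector<std::thread> threads;
        for (int i = 0; i < NumCores; i++) {
            threads.emplace_back([] { Synchronize(); std::puts("past barrier"); });
        }
        for (auto &t : threads) { t.join(); }
    }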
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ * (Standard GPLv2 license header, identical to the other new files.)
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        constexpr uintptr_t DramPhysicalAddress = 0x80000000;
+        constexpr size_t ReservedEarlyDramSize  = 0x60000;
+
+    }
+
+    namespace init {
+
+        void SetupDevicePhysicalMemoryBlocks() {
+            /* TODO: Give these constexpr defines somewhere? */
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x70006000, 0x40, KMemoryRegionType_Uart | KMemoryRegionAttr_ShouldKernelMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7000E400, 0xC00, KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50041000, 0x1000, KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50042000, 0x1000, KMemoryRegionType_InterruptController | KMemoryRegionAttr_ShouldKernelMap | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
+        }
+
+        void SetupDramPhysicalMemoryBlocks() {
+            const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
+            const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
+
+            /* Insert blocks into the tree. */
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(physical_memory_base_address), ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
+        }
+
+    }
+
+}
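Each Insert call above registers one Tegra X1 MMIO window in the physical tree, and the type value encodes policy as well as identity: ShouldKernelMap marks ranges init must later map into kernel space (UART, GIC distributor and controller), while NoUserMap keeps a range out of any user mapping. A one-line illustration of the composition, using values from this diff:

    #include <cstdint>

    /* 0x1D | 0x10000000 == 0x1000001D: the UART device type plus "kernel must map this". */
    constexpr uint32_t uart_type = 0x1D | 0x10000000;
    static_assert(uart_type == 0x1000001D);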
@@ -24,7 +24,7 @@ namespace ams::kern {
         /* TODO: Move this into a header for the MC in general. */
         constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
         u32 config_value;
-        MESOSPHERE_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
+        MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
         return static_cast<size_t>(config_value & 0x3FFF) << 20;
     }
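The decode above reads the memory controller's configuration register: the low 14 bits hold the DRAM size in megabytes, and shifting left by 20 converts MiB to bytes. A worked example, assuming a 64-bit size_t (the register value here is hypothetical):

    #include <cstddef>
    #include <cstdint>

    constexpr std::uint32_t config_value = 0x1000;  /* hypothetical read: 4096 MiB */
    constexpr std::size_t   dram_size    = static_cast<std::size_t>(config_value & 0x3FFF) << 20;
    static_assert(dram_size == 4ull << 30);         /* 4 GiB */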
@@ -40,7 +40,10 @@ namespace ams::kern {
            return value;
        }

-        ALWAYS_INLINE size_t GetIntendedMemorySizeForInit() {
+    }
+
+    /* Initialization. */
+    size_t KSystemControl::Init::GetIntendedMemorySize() {
         switch (GetKernelConfigurationForInit().Get<smc::KernelConfiguration::MemorySize>()) {
             case smc::MemorySize_4GB:
             default: /* All invalid modes should go to 4GB. */
@@ -52,12 +55,9 @@ namespace ams::kern {
         }
     }

-    }
-
-    /* Initialization. */
     KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) {
         const size_t real_dram_size = GetRealMemorySizeForInit();
-        const size_t intended_dram_size = GetIntendedMemorySizeForInit();
+        const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
         if (intended_dram_size * 2 < real_dram_size) {
             return base_address;
         } else {
@@ -69,9 +69,13 @@ namespace ams::kern {
         return GetKernelConfigurationForInit().Get<smc::KernelConfiguration::IncreaseThreadResourceLimit>();
     }

+    void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
+        smc::init::CpuOn(core_id, entrypoint, arg);
+    }
+
     /* Randomness for Initialization. */
     void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) {
-        MESOSPHERE_ABORT_UNLESS(size <= 0x38);
+        MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
         smc::init::GenerateRandomBytes(dst, size);
     }
@@ -103,6 +103,11 @@ namespace ams::kern::smc {
    /* SMC functionality needed for init. */
    namespace init {

        void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
            SecureMonitorArguments args = { FunctionId_CpuOn, core_id, entrypoint, arg };
            CallPrivilegedSecureMonitorFunctionForInit(args);
        }

        void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
            SecureMonitorArguments args = { FunctionId_GetConfig, static_cast<u32>(config_item) };
            CallPrivilegedSecureMonitorFunctionForInit(args);
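Note: CpuOn packs the PSCI-style CPU_ON request into the common SecureMonitorArguments marshalling and issues it through the init-time SMC path. A hypothetical call site (core id and addresses invented for illustration):

    /* Hypothetical: wake core 1 at a physical entrypoint, passing the physical
     * address of its KInitArguments as the opaque argument. */
    smc::init::CpuOn(1, /* entrypoint */ 0x80060000, /* arg */ 0x80100000);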
@@ -79,6 +79,7 @@ namespace ams::kern::smc {

    namespace init {

        void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
        void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
        void GenerateRandomBytes(void *dst, size_t size);
        bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
223 libraries/libmesosphere/source/kern_k_memory_layout.cpp Normal file
@@ -0,0 +1,223 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern {

    bool KMemoryBlockTree::Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
        /* Locate the memory block that contains the address. */
        auto it = this->FindContainingBlock(address);

        /* We require that the old attr is correct. */
        if (it->GetAttributes() != old_attr) {
            return false;
        }

        /* We further require that the block can be split from the old block. */
        const uintptr_t inserted_block_end  = address + size;
        const uintptr_t inserted_block_last = inserted_block_end - 1;
        if (it->GetLastAddress() < inserted_block_last) {
            return false;
        }

        /* Further, we require that the type id is a valid transformation. */
        if (!it->CanDerive(type_id)) {
            return false;
        }

        /* Cache information from the block before we remove it. */
        KMemoryBlock *cur_block     = std::addressof(*it);
        const uintptr_t old_address = it->GetAddress();
        const size_t    old_size    = it->GetSize();
        const uintptr_t old_end     = old_address + old_size;
        const uintptr_t old_last    = old_end - 1;
        const uintptr_t old_pair    = it->GetPairAddress();
        const u32       old_type    = it->GetType();

        /* Erase the existing block from the tree. */
        this->erase(it);

        /* If we need to insert a block before the region, do so. */
        if (old_address != address) {
            new (cur_block) KMemoryBlock(old_address, address - old_address, old_pair, old_attr, old_type);
            this->insert(*cur_block);
            cur_block = KMemoryLayout::GetMemoryBlockAllocator().Allocate();
        }

        /* Insert a new block. */
        const uintptr_t new_pair = (old_pair != std::numeric_limits<uintptr_t>::max()) ? old_pair + (address - old_address) : old_pair;
        new (cur_block) KMemoryBlock(address, size, new_pair, new_attr, type_id);
        this->insert(*cur_block);

        /* If we need to insert a block after the region, do so. */
        if (old_last != inserted_block_last) {
            const uintptr_t after_pair = (old_pair != std::numeric_limits<uintptr_t>::max()) ? old_pair + (inserted_block_end - old_address) : old_pair;
            this->insert(*KMemoryLayout::GetMemoryBlockAllocator().Create(inserted_block_end, old_end - inserted_block_end, after_pair, old_attr, old_type));
        }

        return true;
    }
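Note: Insert carves a sub-range out of the block containing it, splitting the parent into up to three blocks: an untouched left remainder, the new typed block, and an untouched right remainder. A hypothetical call (addresses and the three-argument form are illustrative):

    /* Hypothetical: carving [0x4000, 0x6000) out of a parent [0x0, 0x10000)
     * of type None yields three blocks:
     *   [0x0,    0x4000)  None
     *   [0x4000, 0x6000)  KMemoryRegionType_KernelStack
     *   [0x6000, 0x10000) None */
    tree.Insert(0x4000, 0x2000, KMemoryRegionType_KernelStack);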
    KVirtualAddress KMemoryBlockTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
        /* We want to find the total extents of the type id. */
        const auto extents = this->GetDerivedRegionExtents(type_id);

        /* Ensure that our alignment is correct. */
        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(extents.first_block->GetAddress(), alignment));

        const uintptr_t first_address = extents.first_block->GetAddress();
        const uintptr_t last_address  = extents.last_block->GetLastAddress();

        while (true) {
            const uintptr_t candidate = util::AlignDown(KSystemControl::Init::GenerateRandomRange(first_address, last_address), alignment);

            /* Ensure that the candidate doesn't overflow with the size. */
            if (!(candidate < candidate + size)) {
                continue;
            }

            const uintptr_t candidate_last = candidate + size - 1;

            /* Ensure that the candidate fits within the region. */
            if (candidate_last > last_address) {
                continue;
            }

            /* Locate the candidate block, and ensure it fits. */
            const KMemoryBlock *candidate_block = std::addressof(*this->FindContainingBlock(candidate));
            if (candidate_last > candidate_block->GetLastAddress()) {
                continue;
            }

            /* Ensure that the block has the correct type id. */
            if (candidate_block->GetType() != type_id)
                continue;

            return candidate;
        }
    }
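Note: GetRandomAlignedRegion is rejection sampling: it draws aligned random candidates across the full extents of the requested type and retries until one lands entirely inside a single block of that type. A hypothetical use (size and alignment invented for illustration):

    /* Hypothetical: choose a randomized, 1 MB-aligned 1 MB window inside the
     * kernel region, e.g. for ASLR-style placement. */
    const KVirtualAddress where = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(0x100000, 0x100000, KMemoryRegionType_Kernel);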
    void KMemoryLayout::InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start) {
        /* Set static differences. */
        s_linear_phys_to_virt_diff = GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start);
        s_linear_virt_to_phys_diff = GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start);

        /* Initialize linear trees. */
        for (auto &block : GetPhysicalMemoryBlockTree()) {
            if (!block.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
                continue;
            }
            GetPhysicalLinearMemoryBlockTree().insert(*GetMemoryBlockAllocator().Create(block.GetAddress(), block.GetSize(), block.GetAttributes(), block.GetType()));
        }

        for (auto &block : GetVirtualMemoryBlockTree()) {
            if (!block.IsDerivedFrom(KMemoryRegionType_Dram)) {
                continue;
            }
            GetVirtualLinearMemoryBlockTree().insert(*GetMemoryBlockAllocator().Create(block.GetAddress(), block.GetSize(), block.GetAttributes(), block.GetType()));
        }
    }
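Note: the two static differences are exact inverses under unsigned wrap-around, so each direction of the linear phys/virt translation is a single add. Illustrative, with made-up base addresses:

    constexpr uintptr_t phys = 0x80000000;
    constexpr uintptr_t virt = 0xFFFFFF8000000000;
    constexpr uintptr_t p2v  = virt - phys; /* like s_linear_phys_to_virt_diff */
    constexpr uintptr_t v2p  = phys - virt; /* like s_linear_virt_to_phys_diff; wraps */
    static_assert(phys + p2v == virt);
    static_assert(virt + v2p == phys);      /* unsigned wrap-around makes both exact */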
    namespace init {

        namespace {

            constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);

            constexpr size_t CoreLocalRegionAlign          = PageSize;
            constexpr size_t CoreLocalRegionSize           = PageSize * (1 + cpu::NumCores);
            constexpr size_t CoreLocalRegionSizeWithGuards = CoreLocalRegionSize + 2 * PageSize;
            constexpr size_t CoreLocalRegionBoundsAlign    = 1_GB;
            /* TODO: static_assert(CoreLocalRegionSize == sizeof(KCoreLocalRegion)); */

            KVirtualAddress GetCoreLocalRegionVirtualAddress() {
                while (true) {
                    const uintptr_t candidate_start = GetInteger(KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(CoreLocalRegionSizeWithGuards, CoreLocalRegionAlign, KMemoryRegionType_None));
                    const uintptr_t candidate_end   = candidate_start + CoreLocalRegionSizeWithGuards;
                    const uintptr_t candidate_last  = candidate_end - 1;

                    const KMemoryBlock *containing_block = std::addressof(*KMemoryLayout::GetVirtualMemoryBlockTree().FindContainingBlock(candidate_start));

                    if (candidate_last > containing_block->GetLastAddress()) {
                        continue;
                    }

                    if (containing_block->GetType() != KMemoryRegionType_None) {
                        continue;
                    }

                    if (util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign) != util::AlignDown(candidate_last, CoreLocalRegionBoundsAlign)) {
                        continue;
                    }

                    if (containing_block->GetAddress() > util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign)) {
                        continue;
                    }

                    if (util::AlignUp(candidate_last, CoreLocalRegionBoundsAlign) - 1 > containing_block->GetLastAddress()) {
                        continue;
                    }

                    return candidate_start + PageSize;
                }
            }

        }

        void SetupCoreLocalRegionMemoryBlocks(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator) {
            const KVirtualAddress core_local_virt_start = GetCoreLocalRegionVirtualAddress();
            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(core_local_virt_start), CoreLocalRegionSize, KMemoryRegionType_CoreLocal));

            /* Allocate a page for each core. */
            KPhysicalAddress core_local_region_start_phys[cpu::NumCores] = {};
            for (size_t i = 0; i < cpu::NumCores; i++) {
                core_local_region_start_phys[i] = page_allocator.Allocate();
            }

            /* Allocate an l1 page table for each core. */
            KPhysicalAddress core_l1_ttbr1_phys[cpu::NumCores] = {};
            core_l1_ttbr1_phys[0] = util::AlignDown(cpu::GetTtbr1El1(), PageSize);
            for (size_t i = 1; i < cpu::NumCores; i++) {
                core_l1_ttbr1_phys[i] = page_allocator.Allocate();
                std::memcpy(reinterpret_cast<void *>(GetInteger(core_l1_ttbr1_phys[i])), reinterpret_cast<void *>(GetInteger(core_l1_ttbr1_phys[0])), PageSize);
            }

            /* Use the l1 page table for each core to map the core local region for each core. */
            for (size_t i = 0; i < cpu::NumCores; i++) {
                KInitialPageTable temp_pt(core_l1_ttbr1_phys[i], KInitialPageTable::NoClear{});
                temp_pt.Map(core_local_virt_start, PageSize, core_l1_ttbr1_phys[i], KernelRwDataAttribute, page_allocator);
                for (size_t j = 0; j < cpu::NumCores; j++) {
                    temp_pt.Map(core_local_virt_start + (j + 1) * PageSize, PageSize, core_l1_ttbr1_phys[j], KernelRwDataAttribute, page_allocator);
                }

                /* Setup the InitArguments. */
                SetInitArguments(static_cast<s32>(i), core_local_region_start_phys[i], GetInteger(core_l1_ttbr1_phys[i]));
            }

            /* Ensure the InitArguments are flushed to cache. */
            StoreInitArguments();
        }

        void SetupPoolPartitionMemoryBlocks() {
            /* TODO */
        }

    }

}
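Note: the per-core mapping above gives every core the same virtual window of (1 + NumCores) pages. The shape, as mapped by the loops (a sketch derived from the calls shown, not additional code from the commit):

    /* Shape of the core-local window:
     *   core_local_virt_start + 0*PageSize     : per-core slot; each core's L1
     *                                            table maps a different physical
     *                                            page here
     *   core_local_virt_start + (j+1)*PageSize : shared slots; every core's table
     *                                            maps core j's page here
     * The same virtual address thus resolves to "mine" on any core, while the
     * indexed slots let one core reach another's; this is why one L1 table is
     * cloned per core rather than shared. */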
25 libraries/libmesosphere/source/kern_main.cpp Normal file
@@ -0,0 +1,25 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern {

    NORETURN void HorizonKernelMain(s32 core_id) {
        cpu::SynchronizeAllCores();
        while (true) { /* ... */ }
    }

}
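Note: SynchronizeAllCores holds every core at a rendezvous before the (still-stubbed) main sequence continues. A minimal sense-reversing barrier sketch of the idea, not the kernel's actual implementation:

    #include <atomic>

    /* Illustrative only: all callers block until the last arrival flips the sense.
     * local_sense starts false for every core; remaining starts at num_cores. */
    void BarrierWait(std::atomic<int> &remaining, std::atomic<bool> &sense, int num_cores, bool &local_sense) {
        local_sense = !local_sense;                                /* the phase this core waits for */
        if (remaining.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            remaining.store(num_cores, std::memory_order_relaxed); /* last arrival re-arms */
            sense.store(local_sense, std::memory_order_release);   /* ...and releases everyone */
        } else {
            while (sense.load(std::memory_order_acquire) != local_sense) { /* spin */ }
        }
    }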
@@ -31,13 +31,13 @@
#include <algorithm>
#include <iterator>
#include <limits>
#include <atomic>
#include <random>

/* Stratosphere wants stdlib headers, others do not. */
#ifdef ATMOSPHERE_IS_STRATOSPHERE

/* C++ headers. */
#include <atomic>
#include <utility>
#include <optional>
#include <memory>
@@ -133,7 +133,7 @@ namespace ams::util {
        }

        /* Define accessors using RB_* functions. */
        void InitializeImpl() {
        constexpr ALWAYS_INLINE void InitializeImpl() {
            RB_INIT(&this->root);
        }

@@ -166,7 +166,7 @@ namespace ams::util {
        }

    public:
        IntrusiveRedBlackTree() {
        constexpr ALWAYS_INLINE IntrusiveRedBlackTree() : root() {
            this->InitializeImpl();
        }

@@ -187,6 +187,14 @@ namespace ams::util {
            return const_iterator(Traits::GetParent(static_cast<IntrusiveRedBlackTreeNode *>(nullptr)));
        }

        const_iterator cbegin() const {
            return this->begin();
        }

        const_iterator cend() const {
            return this->end();
        }

        iterator iterator_to(reference ref) {
            return iterator(&ref);
        }

@@ -201,19 +209,19 @@ namespace ams::util {
        }

        reference back() {
            return Traits::GetParent(this->GetMaxImpl());
            return *Traits::GetParent(this->GetMaxImpl());
        }

        const_reference back() const {
            return Traits::GetParent(this->GetMaxImpl());
            return *Traits::GetParent(this->GetMaxImpl());
        }

        reference front() {
            return Traits::GetParent(this->GetMinImpl());
            return *Traits::GetParent(this->GetMinImpl());
        }

        const_reference front() const {
            return Traits::GetParent(this->GetMinImpl());
            return *Traits::GetParent(this->GetMinImpl());
        }

        iterator insert(reference ref) {

@@ -244,7 +252,7 @@ namespace ams::util {
    class IntrusiveRedBlackTreeMemberTraits<Member, Derived> {
    public:
        template<class Comparator>
        using ListType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraits, Comparator>;
        using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraits, Comparator>;
    private:
        template<class, class, class>
        friend class IntrusiveRedBlackTree;

@@ -276,7 +284,7 @@ namespace ams::util {
    class IntrusiveRedBlackTreeBaseTraits {
    public:
        template<class Comparator>
        using ListType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeBaseTraits, Comparator>;
        using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeBaseTraits, Comparator>;
    private:
        template<class, class, class>
        friend class IntrusiveRedBlackTree;
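Note: with the alias corrected from ListType to TreeType, a tree over a node type is declared through the traits. A hypothetical sketch; only the TreeType alias is from the diff, while the node-base and comparator shapes are assumptions about the surrounding library:

    /* Hypothetical usage sketch. */
    struct Region : public util::IntrusiveRedBlackTreeBaseNode<Region> {
        uintptr_t address;

        /* Assumed comparator shape: negative/zero/positive, like strcmp. */
        struct Comparator {
            static constexpr int Compare(const Region &lhs, const Region &rhs) {
                return (lhs.address < rhs.address) ? -1 : (lhs.address > rhs.address) ? 1 : 0;
            }
        };
    };

    using RegionTree = util::IntrusiveRedBlackTreeBaseTraits<Region>::TreeType<Region::Comparator>;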
@@ -15,24 +15,297 @@
 */
#include <mesosphere.hpp>

extern "C" void _start();
extern "C" void __end__();

namespace ams::kern::init {

    /* Prototypes for functions declared in ASM that we need to reference. */
    void StartOtherCore(const ams::kern::init::KInitArguments *init_args);

    namespace {

        constexpr size_t KernelResourceRegionSize = 0x1728000;
        constexpr size_t ExtraKernelResourceSize  = 0x68000;
        static_assert(ExtraKernelResourceSize + KernelResourceRegionSize == 0x1790000);

        /* Global Allocator. */
        KInitialPageAllocator g_initial_page_allocator;

        /* Global initial arguments array. */
        KInitArguments g_init_arguments[cpu::NumCores];
        KPhysicalAddress g_init_arguments_phys_addr[cpu::NumCores];

        /* Page table attributes. */
        constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
        constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
        constexpr PageTableEntry KernelMmioAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_Device_nGnRE, PageTableEntry::Shareable_OuterShareable);

        void MapStackForCore(KInitialPageTable &page_table, KMemoryRegionType type, u32 core_id) {
            constexpr size_t StackSize  = PageSize;
            constexpr size_t StackAlign = PageSize;
            const KVirtualAddress  stack_start_virt = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegionWithGuard(StackSize, StackAlign, KMemoryRegionType_KernelMisc, PageSize);
            const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate();
            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id));

            page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator);
        }

        void StoreDataCache(const void *addr, size_t size) {
            uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), cpu::DataCacheLineSize);
            uintptr_t end   = reinterpret_cast<uintptr_t>(addr) + size;
            for (uintptr_t cur = start; cur < end; cur += cpu::DataCacheLineSize) {
                __asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(cur) : "memory");
            }
            cpu::DataSynchronizationBarrier();
        }
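Note: StoreDataCache cleans (rather than invalidates) every line covering the buffer: `dc cvac` is clean-by-VA to the point of coherency, so the freshly written init arguments reach memory before secondary cores, which come up with caches disabled, read them; the trailing DSB orders the cleans before the SMC that wakes those cores. For contrast, a hypothetical invalidate variant would merely swap the op:

    /* Hypothetical counterpart, not in this commit: discard instead of write back. */
    __asm__ __volatile__("dc ivac, %[cur]" :: [cur]"r"(cur) : "memory");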
        void TurnOnAllCores(uintptr_t start_other_core_phys) {
            cpu::MultiprocessorAffinityRegisterAccessor mpidr;
            const auto arg          = mpidr.GetCpuOnArgument();
            const auto current_core = mpidr.GetAff0();

            for (s32 i = 0; i < static_cast<s32>(cpu::NumCores); i++) {
                if (static_cast<s32>(current_core) != i) {
                    KSystemControl::Init::CpuOn(arg | i, start_other_core_phys, GetInteger(g_init_arguments_phys_addr[i]));
                }
            }
        }

    }
    void InitializeCore(uintptr_t arg0, uintptr_t initial_page_allocator_state) {
    void InitializeCore(uintptr_t misc_unk_debug_phys_addr, uintptr_t initial_page_allocator_state) {
        /* Ensure our first argument is page aligned (as we will map it if it is non-zero). */
        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(misc_unk_debug_phys_addr, PageSize));

        /* Clear TPIDR_EL1 to zero. */
        cpu::ThreadIdRegisterAccessor(0).Store();

        /* Restore the page allocator state setup by kernel loader. */
        g_initial_page_allocator.Initialize(initial_page_allocator_state);

        /* Ensure that the T1SZ is correct (and what we expect). */
        MESOSPHERE_INIT_ABORT_UNLESS((cpu::TranslationControlRegisterAccessor().GetT1Size() / L1BlockSize) == MaxPageTableEntries);

        /* Create page table object for use during initialization. */
        KInitialPageTable ttbr1_table(util::AlignDown(cpu::GetTtbr1El1(), PageSize), KInitialPageTable::NoClear{});

        /* Initialize the slab allocator counts. */
        /* TODO */

        /* Insert the root block for the virtual memory tree, from which all other blocks will derive. */
        KMemoryLayout::GetVirtualMemoryBlockTree().insert(*KMemoryLayout::GetMemoryBlockAllocator().Create(KernelVirtualAddressSpaceBase, KernelVirtualAddressSpaceSize, 0, 0));

        /* Insert the root block for the physical memory tree, from which all other blocks will derive. */
        KMemoryLayout::GetPhysicalMemoryBlockTree().insert(*KMemoryLayout::GetMemoryBlockAllocator().Create(KernelPhysicalAddressSpaceBase, KernelPhysicalAddressSpaceSize, 0, 0));

        /* Save start and end for ease of use. */
        const uintptr_t code_start_virt_addr = reinterpret_cast<uintptr_t>(_start);
        const uintptr_t code_end_virt_addr   = reinterpret_cast<uintptr_t>(__end__);

        /* Setup the containing kernel region. */
        constexpr size_t KernelRegionSize  = 1_GB;
        constexpr size_t KernelRegionAlign = 1_GB;
        const KVirtualAddress kernel_region_start = util::AlignDown(code_start_virt_addr, KernelRegionAlign);
        size_t kernel_region_size = KernelRegionSize;
        if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
            kernel_region_size = KernelVirtualAddressSpaceEnd - GetInteger(kernel_region_start);
        }
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(kernel_region_start), kernel_region_size, KMemoryRegionType_Kernel));

        /* Setup the code region. */
        constexpr size_t CodeRegionAlign = PageSize;
        const KVirtualAddress code_region_start = util::AlignDown(code_start_virt_addr, CodeRegionAlign);
        const KVirtualAddress code_region_end   = util::AlignUp(code_end_virt_addr, CodeRegionAlign);
        const size_t code_region_size = GetInteger(code_region_end) - GetInteger(code_region_start);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(code_region_start), code_region_size, KMemoryRegionType_KernelCode));

        /* Setup the misc region. */
        constexpr size_t MiscRegionSize  = 32_MB;
        constexpr size_t MiscRegionAlign = KernelAslrAlignment;
        const KVirtualAddress misc_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(MiscRegionSize, MiscRegionAlign, KMemoryRegionType_Kernel);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(misc_region_start), MiscRegionSize, KMemoryRegionType_KernelMisc));

        /* Setup the stack region. */
        constexpr size_t StackRegionSize  = 14_MB;
        constexpr size_t StackRegionAlign = KernelAslrAlignment;
        const KVirtualAddress stack_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack));
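Note: the containing kernel region is clamped when the 1 GB window starting at the aligned code base would run past the end of the kernel address space. A worked example with made-up addresses:

    /* Illustrative: if KernelVirtualAddressSpaceEnd were 0xFFFFFFFFC0000000 and
     * code sat at 0xFFFFFFFF90000000, AlignDown to 1 GB gives 0xFFFFFFFF80000000,
     * and 0xFFFFFFFF80000000 + 1_GB - 1 <= Last holds, so the full 1_GB is kept.
     * Had the code sat inside the final gigabyte, the size would be clamped to
     * KernelVirtualAddressSpaceEnd - kernel_region_start instead. */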
        /* Decide if Kernel should have enlarged resource region (slab region + page table heap region). */
        const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
        const size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);

        /* Determine the size of the slab region. */
        const size_t slab_region_size = 0x647000; /* TODO: Calculate this on the fly. */
        MESOSPHERE_INIT_ABORT_UNLESS(slab_region_size <= resource_region_size);

        /* Setup the slab region. */
        const KPhysicalAddress code_start_phys_addr = ttbr1_table.GetPhysicalAddress(code_start_virt_addr);
        const KPhysicalAddress code_end_phys_addr   = code_start_phys_addr + (code_end_virt_addr - code_start_virt_addr);
        const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
        const KPhysicalAddress slab_end_phys_addr   = slab_start_phys_addr + slab_region_size;
        constexpr size_t SlabRegionAlign = KernelAslrAlignment;
        const size_t slab_region_needed_size = util::AlignUp(GetInteger(code_end_phys_addr) + slab_region_size, SlabRegionAlign) - util::AlignDown(GetInteger(code_end_phys_addr), SlabRegionAlign);
        const KVirtualAddress slab_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + (GetInteger(code_end_phys_addr) % SlabRegionAlign);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(slab_region_start), slab_region_size, KMemoryRegionType_KernelSlab));

        /* Set the slab region's pair block. */
        KMemoryLayout::GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelSlab)->SetPairAddress(GetInteger(slab_start_phys_addr));

        /* Setup the temp region. */
        constexpr size_t TempRegionSize  = 128_MB;
        constexpr size_t TempRegionAlign = KernelAslrAlignment;
        const KVirtualAddress temp_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp));
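Note: the slab region's virtual start is chosen so that its offset within the alignment granule matches the physical offset of code_end_phys_addr; needed_size over-reserves by up to one granule so the offset shift always fits. A worked example with small made-up numbers (SlabRegionAlign shrunk to 0x1000 for readability):

    /* Illustrative:
     *   code_end_phys_addr = 0x84320640, slab_region_size = 0x2000
     *   needed = AlignUp(0x84320640 + 0x2000, 0x1000) - AlignDown(0x84320640, 0x1000)
     *          = 0x84323000 - 0x84320000 = 0x3000
     *   start  = (random 0x1000-aligned base) + (0x84320640 % 0x1000)
     *          = base + 0x640
     * so the virtual and physical addresses share their low bits below the
     * alignment, letting block-level mappings stay aligned. */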
        /* Setup the Misc Unknown Debug region, if it's not zero. */
        if (misc_unk_debug_phys_addr) {
            constexpr size_t MiscUnknownDebugRegionSize  = PageSize;
            constexpr size_t MiscUnknownDebugRegionAlign = PageSize;
            const KVirtualAddress misc_unk_debug_virt_addr = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegionWithGuard(MiscUnknownDebugRegionSize, MiscUnknownDebugRegionAlign, KMemoryRegionType_KernelMisc, PageSize);
            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(misc_unk_debug_virt_addr), MiscUnknownDebugRegionSize, KMemoryRegionType_KernelMiscUnknownDebug));
            ttbr1_table.Map(misc_unk_debug_virt_addr, MiscUnknownDebugRegionSize, misc_unk_debug_phys_addr, KernelRoDataAttribute, g_initial_page_allocator);
        }

    KPhysicalAddress GetInitArgumentsAddress(s32 core) {
        /* Setup board-specific device physical blocks. */
        return KPhysicalAddress(std::addressof(g_init_arguments[core]));
        SetupDevicePhysicalMemoryBlocks();
        /* Automatically map in devices that have auto-map attributes. */
        for (auto &block : KMemoryLayout::GetPhysicalMemoryBlockTree()) {
            /* We only care about automatically-mapped blocks. */
            if (!block.IsDerivedFrom(KMemoryRegionType_KernelAutoMap)) {
                continue;
            }

            /* If this block has already been mapped, no need to consider it. */
            if (block.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
                continue;
            }

            /* Set the attribute to note we've mapped this block. */
            block.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);

            /* Create a virtual pair block and insert it into the tree. */
            const KPhysicalAddress map_phys_addr = util::AlignDown(block.GetAddress(), PageSize);
            const size_t map_size = util::AlignUp(block.GetEndAddress(), PageSize) - GetInteger(map_phys_addr);
            const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegionWithGuard(map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice));
            block.SetPairAddress(GetInteger(map_virt_addr) + block.GetAddress() - GetInteger(map_phys_addr));

            /* Map the page in to our page table. */
            ttbr1_table.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator);
        }
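Note: the pair address records where the (possibly unaligned) device block begins inside its page-aligned mapping. A worked example with made-up MMIO addresses:

    /* Illustrative: a device block at phys 0x7000E400, size 0x200.
     *   map_phys_addr = 0x7000E000                  (aligned down to the page)
     *   map_size      = AlignUp(0x7000E600, 0x1000) - 0x7000E000 = 0x1000
     *   pair          = map_virt_addr + (0x7000E400 - 0x7000E000)
     *                 = map_virt_addr + 0x400, the virtual address of the
     *                   device registers themselves. */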
        /* Setup the basic DRAM blocks. */
        SetupDramPhysicalMemoryBlocks();

        /* Insert a physical block for the kernel code region. */
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(code_start_phys_addr), (code_end_virt_addr - code_start_virt_addr), KMemoryRegionType_DramKernelCode));
        KMemoryLayout::GetPhysicalMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_DramKernelCode)->SetPairAddress(code_start_virt_addr);

        /* Insert a physical block for the kernel slab region. */
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
        KMemoryLayout::GetPhysicalMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_DramKernelSlab)->SetPairAddress(GetInteger(slab_region_start));

        /* Map and clear the slab region. */
        ttbr1_table.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
        std::memset(GetVoidPointer(slab_region_start), 0, slab_region_size);

        /* Determine size available for kernel page table heaps, requiring > 8 MB. */
        const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
        const size_t page_table_heap_size = GetInteger(resource_end_phys_addr) - GetInteger(slab_end_phys_addr);
        MESOSPHERE_INIT_ABORT_UNLESS(page_table_heap_size / 4_MB > 2);

        /* Insert a physical block for the kernel page table heap region */
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(slab_end_phys_addr), page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));

        /* All DRAM blocks that we haven't tagged by this point will be mapped under the linear mapping. Tag them. */
        for (auto &block : KMemoryLayout::GetPhysicalMemoryBlockTree()) {
            if (block.GetType() == KMemoryRegionType_Dram) {
                block.SetTypeAttribute(KMemoryRegionAttr_LinearMapped);
            }
        }
        /* Setup the linear mapping region. */
        constexpr size_t LinearRegionAlign = 1_GB;
        const auto linear_extents = KMemoryLayout::GetPhysicalMemoryBlockTree().GetDerivedRegionExtents(KMemoryRegionAttr_LinearMapped);
        const KPhysicalAddress aligned_linear_phys_start = util::AlignDown(linear_extents.first_block->GetAddress(), LinearRegionAlign);
        const size_t linear_region_size = util::AlignUp(linear_extents.last_block->GetEndAddress(), LinearRegionAlign) - GetInteger(aligned_linear_phys_start);
        const KVirtualAddress linear_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegionWithGuard(linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);

        const uintptr_t linear_region_phys_to_virt_diff = GetInteger(linear_region_start) - GetInteger(aligned_linear_phys_start);

        /* Map and create blocks for all the linearly-mapped data. */
        for (auto &block : KMemoryLayout::GetPhysicalMemoryBlockTree()) {
            if (!block.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
                continue;
            }

            const uintptr_t block_virt_addr = block.GetAddress() + linear_region_phys_to_virt_diff;
            ttbr1_table.Map(block_virt_addr, block.GetSize(), block.GetAddress(), KernelRwDataAttribute, g_initial_page_allocator);

            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(block_virt_addr, block.GetSize(), GetTypeForVirtualLinearMapping(block.GetType())));
            KMemoryLayout::GetVirtualMemoryBlockTree().FindContainingBlock(block_virt_addr)->SetPairAddress(block.GetAddress());
        }

        /* Create blocks for and map all core-specific stacks. */
        for (size_t i = 0; i < cpu::NumCores; i++) {
            MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscMainStack, i);
            MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscIdleStack, i);
            MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscExceptionStack, i);
        }
        /* Setup the KCoreLocalRegion blocks. */
        SetupCoreLocalRegionMemoryBlocks(ttbr1_table, g_initial_page_allocator);

        /* Finalize the page allocator, we're done allocating at this point. */
        const KPhysicalAddress final_init_page_table_end_address = g_initial_page_allocator.GetFinalNextAddress();
        const size_t init_page_table_region_size = GetInteger(final_init_page_table_end_address) - GetInteger(resource_end_phys_addr);

        /* Insert blocks for the initial page table region. */
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(resource_end_phys_addr), init_page_table_region_size, KMemoryRegionType_DramKernelInitPt));
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(resource_end_phys_addr) + linear_region_phys_to_virt_diff, init_page_table_region_size, KMemoryRegionType_VirtualKernelInitPt));

        /* All linear-mapped DRAM blocks that we haven't tagged by this point will be allocated to some pool partition. Tag them. */
        for (auto &block : KMemoryLayout::GetPhysicalMemoryBlockTree()) {
            if (block.GetType() == KMemoryRegionType_DramLinearMapped) {
                block.SetType(KMemoryRegionType_DramPoolPartition);
            }
        }

        /* Setup all other memory blocks needed to arrange the pool partitions. */
        SetupPoolPartitionMemoryBlocks();

        /* Cache all linear blocks in their own trees for faster access, later. */
        KMemoryLayout::InitializeLinearMemoryBlockTrees(aligned_linear_phys_start, linear_region_start);

        /* Turn on all other cores. */
        TurnOnAllCores(GetInteger(ttbr1_table.GetPhysicalAddress(reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore))));
    }

    KPhysicalAddress GetInitArgumentsAddress(s32 core_id) {
        return g_init_arguments_phys_addr[core_id];
    }

    void SetInitArguments(s32 core_id, KPhysicalAddress address, uintptr_t arg) {
        KInitArguments *init_args = reinterpret_cast<KInitArguments *>(GetInteger(address));
        init_args->ttbr0          = cpu::GetTtbr0El1();
        init_args->ttbr1          = arg;
        init_args->tcr            = cpu::GetTcrEl1();
        init_args->mair           = cpu::GetMairEl1();
        init_args->cpuactlr       = cpu::GetCpuActlrEl1();
        init_args->cpuectlr       = cpu::GetCpuEctlrEl1();
        init_args->sctlr          = cpu::GetSctlrEl1();
        init_args->sp             = GetInteger(KMemoryLayout::GetMainStackTopAddress(core_id));
        init_args->entrypoint     = reinterpret_cast<uintptr_t>(::ams::kern::HorizonKernelMain);
        init_args->argument       = static_cast<u64>(core_id);
        init_args->setup_function = reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore);
        g_init_arguments_phys_addr[core_id] = address;
    }

    void StoreInitArguments() {
        StoreDataCache(g_init_arguments_phys_addr, sizeof(g_init_arguments_phys_addr));
    }

    void InitializeDebugRegisters() {
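Note: each secondary core wakes in StartOtherCore with a physical pointer to the KInitArguments filled in above; conceptually it restores the captured system registers, installs the prepared stack pointer, and tail-calls the entrypoint with the stored argument. A rough pseudocode of the consumer side (the real consumer is the assembly stub, which is not part of this diff):

    /* Illustrative pseudocode only; register-writing helpers are assumptions. */
    void StartOtherCoreSketch(const KInitArguments *args) {
        /* Program ttbr0/ttbr1/tcr/mair/cpuactlr/cpuectlr from args,
         * then enable the MMU by writing args->sctlr.                */
        /* sp = args->sp;                                              */
        /* ((void (*)(u64))args->entrypoint)(args->argument);          */
    }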
@@ -40,9 +40,10 @@ _start:

    /* Stack is now set up. */
    /* Apply relocations and call init array for KernelLdr. */
    sub sp, sp, #0x20
    sub sp, sp, #0x30
    stp x0, x1, [sp, #0x00]
    stp x2, x30, [sp, #0x10]
    stp xzr, xzr, [sp, #0x20]
    adr x0, _start
    adr x1, __external_references
    ldr x1, [x1, #0x18] /* .dynamic. */

@@ -75,6 +76,11 @@ _start:
    bl _ZN3ams4kern4init6loader4MainEmPNS1_12KernelLayoutEm
    str x0, [sp, #0x00]

    /* Get ams::kern::init::loader::AllocateKernelInitStack(). */
    bl _ZN3ams4kern4init6loader23AllocateKernelInitStackEv
    str x0, [sp, #0x20]

    /* Call ams::kern::init::loader::GetFinalPageAllocatorState() */
    bl _ZN3ams4kern4init6loader26GetFinalPageAllocatorStateEv

@@ -85,6 +91,8 @@ _start:
    ldr x1, [sp, #0x18] /* Return address to Kernel */
    ldr x2, [sp, #0x00] /* Relocated kernel base address diff. */
    add x1, x2, x1
    ldr x2, [sp, #0x20]
    mov sp, x2
    br x1
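Note: after the widening from 0x20 to 0x30 bytes, the loader's scratch frame holds, by offset (derived from the stp/str instructions above; the 0x08 and 0x18 slots come from the pair stores):

    /* sp+0x00 : x0 on entry; later overwritten with the relocated kernel base diff
     * sp+0x08 : x1 on entry
     * sp+0x10 : x2 on entry
     * sp+0x18 : x30 on entry (return address into the kernel)
     * sp+0x20 : top of the freshly allocated kernel init stack
     * sp+0x28 : zero padding (keeps sp 16-byte aligned) */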
@@ -97,8 +97,8 @@ namespace ams::kern::init::loader {
        /* TODO: Define these bits properly elsewhere, document exactly what each bit set is doing. */
        constexpr u64 MairValue = 0x0000000044FF0400ul;
        constexpr u64 TcrValue  = 0x00000011B5193519ul;
        cpu::SetMairEl1(MairValue);
        cpu::MemoryAccessIndirectionRegisterAccessor(MairValue).Store();
        cpu::SetTcrEl1(TcrValue);
        cpu::TranslationControlRegisterAccessor(TcrValue).Store();

        /* Perform cpu-specific setup. */
        {

@@ -308,6 +308,10 @@ namespace ams::kern::init::loader {
        return GetInteger(virtual_base_address) - base_address;
    }

    KPhysicalAddress AllocateKernelInitStack() {
        return g_initial_page_allocator.Allocate() + PageSize;
    }

    uintptr_t GetFinalPageAllocatorState() {
        return g_initial_page_allocator.GetFinalState();
    }
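Note: AllocateKernelInitStack returns the address one page above the allocation because AArch64 stacks grow downward; the value is installed directly as the kernel's initial sp, so the first push lands at the top of the allocated page. Illustrative:

    /* Illustrative: if Allocate() returns phys 0x80063000, the init sp is
     * 0x80064000, and the first 16-byte push writes to 0x80063FF0, which is
     * still inside the allocated page. */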