mirror of https://github.com/Atmosphere-NX/Atmosphere
synced 2024-12-22 20:31:14 +00:00
kern: load initial process binary from user pool, rather than from pt heap
This commit is contained in:
parent a1e137cc1c
commit 0f8b7be2d2

13 changed files with 350 additions and 184 deletions
@@ -25,6 +25,7 @@ namespace ams::kern::board::nintendo::nx {
                /* Initialization. */
                static size_t GetIntendedMemorySize();
                static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
+               static KPhysicalAddress GetInitialProcessBinaryPhysicalAddress();
                static bool ShouldIncreaseThreadResourceLimit();
                static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
                static size_t GetApplicationPoolSize();

@@ -27,7 +27,7 @@ namespace ams::kern::init {
        u32 rw_end_offset;
        u32 bss_offset;
        u32 bss_end_offset;
-       u32 ini_load_offset;
+       u32 resource_offset;
        u32 dynamic_offset;
        u32 init_array_offset;
        u32 init_array_end_offset;

@@ -29,11 +29,12 @@ namespace ams::kern {
        u32 reserved;
    };

-   NOINLINE void CopyInitialProcessBinaryToKernelMemory();
+   NOINLINE size_t CopyInitialProcessBinaryToKernelMemory();
    NOINLINE void CreateAndRunInitialProcesses();

    u64 GetInitialProcessIdMin();
    u64 GetInitialProcessIdMax();

+   KVirtualAddress GetInitialProcessBinaryAddress();
    size_t GetInitialProcessesSecureMemorySize();

}
@@ -91,46 +91,49 @@ namespace ams::kern {

    class KInitialProcessReader {
        private:
-           KInitialProcessHeader *m_kip_header;
+           KInitialProcessHeader m_kip_header;
        public:
            constexpr KInitialProcessReader() : m_kip_header() { /* ... */ }

-           constexpr const u32 *GetCapabilities() const { return m_kip_header->GetCapabilities(); }
-           constexpr size_t GetNumCapabilities() const { return m_kip_header->GetNumCapabilities(); }
+           constexpr const u32 *GetCapabilities() const { return m_kip_header.GetCapabilities(); }
+           constexpr size_t GetNumCapabilities() const { return m_kip_header.GetNumCapabilities(); }

            constexpr size_t GetBinarySize() const {
-               return sizeof(*m_kip_header) + m_kip_header->GetRxCompressedSize() + m_kip_header->GetRoCompressedSize() + m_kip_header->GetRwCompressedSize();
+               return m_kip_header.GetRxCompressedSize() + m_kip_header.GetRoCompressedSize() + m_kip_header.GetRwCompressedSize();
            }

            constexpr size_t GetSize() const {
-               if (const size_t bss_size = m_kip_header->GetBssSize(); bss_size != 0) {
-                   return m_kip_header->GetBssAddress() + m_kip_header->GetBssSize();
+               if (const size_t bss_size = m_kip_header.GetBssSize(); bss_size != 0) {
+                   return util::AlignUp(m_kip_header.GetBssAddress() + m_kip_header.GetBssSize(), PageSize);
                } else {
-                   return m_kip_header->GetRwAddress() + m_kip_header->GetRwSize();
+                   return util::AlignUp(m_kip_header.GetRwAddress() + m_kip_header.GetRwSize(), PageSize);
                }
            }

-           constexpr u8 GetPriority() const { return m_kip_header->GetPriority(); }
-           constexpr u8 GetIdealCoreId() const { return m_kip_header->GetIdealCoreId(); }
-           constexpr u32 GetAffinityMask() const { return m_kip_header->GetAffinityMask(); }
-           constexpr u32 GetStackSize() const { return m_kip_header->GetStackSize(); }
+           constexpr u8 GetPriority() const { return m_kip_header.GetPriority(); }
+           constexpr u8 GetIdealCoreId() const { return m_kip_header.GetIdealCoreId(); }
+           constexpr u32 GetAffinityMask() const { return m_kip_header.GetAffinityMask(); }
+           constexpr u32 GetStackSize() const { return m_kip_header.GetStackSize(); }

-           constexpr bool Is64Bit() const { return m_kip_header->Is64Bit(); }
-           constexpr bool Is64BitAddressSpace() const { return m_kip_header->Is64BitAddressSpace(); }
-           constexpr bool UsesSecureMemory() const { return m_kip_header->UsesSecureMemory(); }
-           constexpr bool IsImmortal() const { return m_kip_header->IsImmortal(); }
+           constexpr bool Is64Bit() const { return m_kip_header.Is64Bit(); }
+           constexpr bool Is64BitAddressSpace() const { return m_kip_header.Is64BitAddressSpace(); }
+           constexpr bool UsesSecureMemory() const { return m_kip_header.UsesSecureMemory(); }
+           constexpr bool IsImmortal() const { return m_kip_header.IsImmortal(); }

-           bool Attach(u8 *bin) {
-               if (KInitialProcessHeader *header = reinterpret_cast<KInitialProcessHeader *>(bin); header->IsValid()) {
-                   m_kip_header = header;
-                   return true;
+           KVirtualAddress Attach(KVirtualAddress bin) {
+               /* Copy the header. */
+               m_kip_header = *GetPointer<const KInitialProcessHeader>(bin);
+
+               /* Check that it's valid. */
+               if (m_kip_header.IsValid()) {
+                   return bin + sizeof(KInitialProcessHeader);
                } else {
-                   return false;
+                   return Null<KVirtualAddress>;
                }
            }

            Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const;
-           Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const;
+           Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params, KProcessAddress src) const;
            Result SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const;
    };

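As the hunk above shows, Attach no longer stores a pointer into the KIP image; it copies the header by value and returns the virtual address just past it, or Null on a bad header. Below is a minimal standalone sketch of that copy-then-validate pattern using only standard C++ types; the Header/Reader names and the placeholder magic value are mine for illustration, not the kernel's API.

    #include <cstdint>
    #include <cstring>
    #include <optional>

    struct Header {
        uint32_t magic;
        /* 0xDEADBEEF is a placeholder, not the real KIP magic constant. */
        bool IsValid() const { return magic == 0xDEADBEEF; }
    };

    class Reader {
        private:
            Header m_header{};
        public:
            /* Copy the header out of the image; return the address just past it, or nothing on failure. */
            std::optional<const uint8_t *> Attach(const uint8_t *bin) {
                std::memcpy(&m_header, bin, sizeof(m_header));
                if (m_header.IsValid()) {
                    return bin + sizeof(m_header);
                } else {
                    return std::nullopt;
                }
            }
    };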
@@ -75,7 +75,7 @@ namespace ams::kern {
                KVirtualAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
                void Free(KVirtualAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }

-               void UpdateUsedHeapSize() { m_heap.UpdateUsedSize(); }
+               void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); }

                void InitializeOptimizedMemory() { std::memset(GetVoidPointer(m_management_region), 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); }

@@ -168,6 +168,10 @@ namespace ams::kern {
                return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
            }

+           const Impl &GetManager(KVirtualAddress address) const {
+               return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
+           }
+
            constexpr Impl *GetFirstManager(Pool pool, Direction dir) {
                return dir == Direction_FromBack ? m_pool_managers_tail[pool] : m_pool_managers_head[pool];
            }

@@ -197,6 +201,10 @@ namespace ams::kern {
            NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
            NOINLINE Result AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);

+           Pool GetPool(KVirtualAddress address) const {
+               return this->GetManager(address).GetPool();
+           }
+
            void Open(KVirtualAddress address, size_t num_pages) {
                /* Repeatedly open references until we've done so for all pages. */
                while (num_pages) {
@@ -125,7 +125,7 @@ namespace ams::kern {
        private:
            KVirtualAddress m_heap_address;
            size_t m_heap_size;
-           size_t m_used_size;
+           size_t m_initial_used_size;
            size_t m_num_blocks;
            Block m_blocks[NumMemoryBlockPageShifts];
        private:

@@ -134,7 +134,7 @@ namespace ams::kern {

            void FreeBlock(KVirtualAddress block, s32 index);
        public:
-           KPageHeap() : m_heap_address(), m_heap_size(), m_used_size(), m_num_blocks(), m_blocks() { /* ... */ }
+           KPageHeap() : m_heap_address(), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks() { /* ... */ }

            constexpr KVirtualAddress GetAddress() const { return m_heap_address; }
            constexpr size_t GetSize() const { return m_heap_size; }

@@ -149,8 +149,13 @@ namespace ams::kern {
            size_t GetFreeSize() const { return this->GetNumFreePages() * PageSize; }
            void DumpFreeList() const;

-           void UpdateUsedSize() {
-               m_used_size = m_heap_size - (this->GetNumFreePages() * PageSize);
+           void SetInitialUsedSize(size_t reserved_size) {
+               /* Check that the reserved size is valid. */
+               const size_t free_size = this->GetNumFreePages() * PageSize;
+               MESOSPHERE_ABORT_UNLESS(m_heap_size >= free_size + reserved_size);
+
+               /* Set the initial used size. */
+               m_initial_used_size = m_heap_size - free_size - reserved_size;
            }

            KVirtualAddress AllocateBlock(s32 index, bool random);
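The accounting in SetInitialUsedSize treats everything that is neither currently free nor explicitly reserved (for example the INI1 carve-out) as initially used. A hedged arithmetic sketch with made-up sizes, purely to illustrate the formula above:

    #include <cassert>
    #include <cstddef>

    int main() {
        /* Illustrative numbers only; real values come from the heap and the reserved carve-out. */
        const std::size_t heap_size     = 64 * 1024 * 1024; /* total space managed by the page heap   */
        const std::size_t free_size     = 48 * 1024 * 1024; /* pages currently sitting in free lists  */
        const std::size_t reserved_size = 12 * 1024 * 1024; /* e.g. the INI1 region opened elsewhere  */

        assert(heap_size >= free_size + reserved_size);

        /* Everything that is neither free nor explicitly reserved counts as initially used. */
        const std::size_t initial_used_size = heap_size - free_size - reserved_size;
        assert(initial_used_size == 4 * 1024 * 1024);
        return 0;
    }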
@@ -21,6 +21,7 @@ namespace ams::kern::board::nintendo::nx {

    namespace {

+       constexpr uintptr_t DramPhysicalAddress = 0x80000000;
        constexpr size_t SecureAlignment = 128_KB;

        /* Global variables for panic. */

@@ -348,6 +349,10 @@ namespace ams::kern::board::nintendo::nx {
            }
        }

+   KPhysicalAddress KSystemControl::Init::GetInitialProcessBinaryPhysicalAddress() {
+       return GetKernelPhysicalBaseAddress(DramPhysicalAddress) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax;
+   }
+
    bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
        return GetKernelConfigurationForInit().Get<smc::KernelConfiguration::IncreaseThreadResourceLimit>();
    }
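Per the new GetInitialProcessBinaryPhysicalAddress above, the INI1 image now has a fixed expected physical location: the end of the intended DRAM size, minus the kernel trace buffer, minus the maximum INI1 size. The sketch below only illustrates that subtraction; every size in it is a placeholder, not the value the kernel actually queries at runtime.

    #include <cstdint>
    #include <cstdio>

    int main() {
        /* Placeholder values for illustration only. */
        const uint64_t dram_base            = 0x80000000;   /* stand-in for DramPhysicalAddress          */
        const uint64_t intended_memory_size = 4ull << 30;   /* stand-in for GetIntendedMemorySize()      */
        const uint64_t trace_buffer_size    = 16ull << 20;  /* stand-in for KTraceBufferSize             */
        const uint64_t ini_size_max         = 12ull << 20;  /* stand-in for InitialProcessBinarySizeMax  */

        const uint64_t ini_phys_addr = dram_base + intended_memory_size - trace_buffer_size - ini_size_max;
        std::printf("INI1 expected at physical address 0x%llx\n", (unsigned long long)ini_phys_addr);
        return 0;
    }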
@@ -25,79 +25,129 @@ namespace ams::kern {
        s32 priority;
    };

-   KVirtualAddress GetInitialProcessBinaryAddress() {
-       const uintptr_t end_address = KMemoryLayout::GetPageTableHeapRegion().GetEndAddress();
-       MESOSPHERE_ABORT_UNLESS(end_address != 0);
-       return end_address - InitialProcessBinarySizeMax;
-   }
+   constinit KVirtualAddress g_initial_process_binary_address = Null<KVirtualAddress>;
+   constinit InitialProcessBinaryHeader g_initial_process_binary_header = {};
+   constinit size_t g_initial_process_secure_memory_size = 0;
+   constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
+   constinit u64 g_initial_process_id_max = std::numeric_limits<u64>::min();

-   void LoadInitialProcessBinaryHeader(InitialProcessBinaryHeader *header) {
-       if (header->magic != InitialProcessBinaryMagic) {
-           *header = *GetPointer<InitialProcessBinaryHeader>(GetInitialProcessBinaryAddress());
-       }
+   void LoadInitialProcessBinaryHeader() {
+       if (g_initial_process_binary_header.magic != InitialProcessBinaryMagic) {
+           /* Get the virtual address for the image. */
+           const KVirtualAddress virt_addr = GetInitialProcessBinaryAddress();

-       MESOSPHERE_ABORT_UNLESS(header->magic == InitialProcessBinaryMagic);
-       MESOSPHERE_ABORT_UNLESS(header->num_processes <= init::GetSlabResourceCounts().num_KProcess);
-   }
+           /* Copy and validate the header. */
+           g_initial_process_binary_header = *GetPointer<InitialProcessBinaryHeader>(virt_addr);
+           MESOSPHERE_ABORT_UNLESS(g_initial_process_binary_header.magic == InitialProcessBinaryMagic);
+           MESOSPHERE_ABORT_UNLESS(g_initial_process_binary_header.num_processes <= init::GetSlabResourceCounts().num_KProcess);

-   size_t GetProcessesSecureMemorySize(KVirtualAddress binary_address, const InitialProcessBinaryHeader &header) {
-       u8 *current = GetPointer<u8>(binary_address + sizeof(InitialProcessBinaryHeader));
-       const u8 * const end = GetPointer<u8>(binary_address + header.size - sizeof(KInitialProcessHeader));
+           /* Set the image address. */
+           g_initial_process_binary_address = virt_addr;

-       size_t size = 0;
-       const size_t num_processes = header.num_processes;
-       for (size_t i = 0; i < num_processes; i++) {
+           /* Process/calculate the secure memory size. */
+           KVirtualAddress current = g_initial_process_binary_address + sizeof(InitialProcessBinaryHeader);
+           const KVirtualAddress end = g_initial_process_binary_address + g_initial_process_binary_header.size;
+           const size_t num_processes = g_initial_process_binary_header.num_processes;
+           for (size_t i = 0; i < num_processes; ++i) {
                /* Validate that we can read the current KIP. */
-               MESOSPHERE_ABORT_UNLESS(current <= end);
+               MESOSPHERE_ABORT_UNLESS(current <= end - sizeof(KInitialProcessHeader));

+               /* Attach to the current KIP. */
                KInitialProcessReader reader;
-               MESOSPHERE_ABORT_UNLESS(reader.Attach(current));
+               MESOSPHERE_ABORT_UNLESS(reader.Attach(current) != Null<KVirtualAddress>);

                /* If the process uses secure memory, account for that. */
                if (reader.UsesSecureMemory()) {
-                   size += util::AlignUp(reader.GetSize(), PageSize);
+                   g_initial_process_secure_memory_size += reader.GetSize() + util::AlignUp(reader.GetStackSize(), PageSize);
+               }
+           }
+       }
    }

-               /* Advance the reader. */
-               current += reader.GetBinarySize();
-           }
-
-       return size;
-   }
-
-   void CreateProcesses(InitialProcessInfo *infos, KVirtualAddress binary_address, const InitialProcessBinaryHeader &header) {
-       u8 *current = GetPointer<u8>(binary_address + sizeof(InitialProcessBinaryHeader));
-       const u8 * const end = GetPointer<u8>(binary_address + header.size - sizeof(KInitialProcessHeader));
+   void CreateProcesses(InitialProcessInfo *infos) {
+       /* Determine process image extents. */
+       KVirtualAddress current = g_initial_process_binary_address + sizeof(InitialProcessBinaryHeader);
+       KVirtualAddress end = g_initial_process_binary_address + g_initial_process_binary_header.size;

        /* Decide on pools to use. */
        const auto unsafe_pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetCreateProcessMemoryPool());
        const auto secure_pool = (GetTargetFirmware() >= TargetFirmware_2_0_0) ? KMemoryManager::Pool_Secure : unsafe_pool;

-       const size_t num_processes = header.num_processes;
-       for (size_t i = 0; i < num_processes; i++) {
-           /* Validate that we can read the current KIP. */
-           MESOSPHERE_ABORT_UNLESS(current <= end);
-           KInitialProcessReader reader;
-           MESOSPHERE_ABORT_UNLESS(reader.Attach(current));
+       const size_t num_processes = g_initial_process_binary_header.num_processes;
+       for (size_t i = 0; i < num_processes; ++i) {
+           /* Validate that we can read the current KIP header. */
+           MESOSPHERE_ABORT_UNLESS(current <= end - sizeof(KInitialProcessHeader));

-           /* Parse process parameters and reserve memory. */
+           /* Attach to the current kip. */
+           KInitialProcessReader reader;
+           KVirtualAddress data = reader.Attach(current);
+           MESOSPHERE_ABORT_UNLESS(data != Null<KVirtualAddress>);
+
+           /* Ensure that the remainder of our parse is page aligned. */
+           if (!util::IsAligned(GetInteger(data), PageSize)) {
+               const KVirtualAddress aligned_data = util::AlignDown(GetInteger(data), PageSize);
+               std::memmove(GetVoidPointer(aligned_data), GetVoidPointer(data), end - data);
+
+               data = aligned_data;
+               end -= (data - aligned_data);
+           }
+
+           /* If we crossed a page boundary, free the pages we're done using. */
+           if (KVirtualAddress aligned_current = util::AlignDown(GetInteger(current), PageSize); aligned_current != data) {
+               const size_t freed_size = data - aligned_current;
+               Kernel::GetMemoryManager().Close(aligned_current, freed_size / PageSize);
+               Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, freed_size);
+           }
+
+           /* Parse process parameters. */
            ams::svc::CreateProcessParameter params;
            MESOSPHERE_R_ABORT_UNLESS(reader.MakeCreateProcessParameter(std::addressof(params), true));
-           MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, params.code_num_pages * PageSize));
+
+           /* Get the binary size for the kip. */
+           const size_t binary_size = reader.GetBinarySize();
+           const size_t binary_pages = binary_size / PageSize;
+
+           /* Get the pool for both the current (compressed) image, and the decompressed process. */
+           const auto src_pool = Kernel::GetMemoryManager().GetPool(data);
+           const auto dst_pool = reader.UsesSecureMemory() ? secure_pool : unsafe_pool;
+
+           /* Determine the process size, and how much memory isn't already reserved. */
+           const size_t process_size = params.code_num_pages * PageSize;
+           const size_t unreserved_size = process_size - (src_pool == dst_pool ? util::AlignDown(binary_size, PageSize) : 0);
+
+           /* Reserve however much memory we need to reserve. */
+           MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, unreserved_size));

            /* Create the process. */
            KProcess *new_process = nullptr;
            {
-               /* Declare page group to use for process memory. */
+               /* Make page groups to represent the data. */
                KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager()));
+               KPageGroup workaround_pg(std::addressof(Kernel::GetBlockInfoManager()));

-               /* Allocate memory for the process. */
-               auto &mm = Kernel::GetMemoryManager();
-               const auto pool = reader.UsesSecureMemory() ? secure_pool : unsafe_pool;
-               MESOSPHERE_R_ABORT_UNLESS(mm.AllocateAndOpen(std::addressof(pg), params.code_num_pages, KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront)));
-
+               /* Populate the page group to represent the data. */
                {
+                   /* Allocate the previously unreserved pages. */
+                   KPageGroup unreserve_pg(std::addressof(Kernel::GetBlockInfoManager()));
+                   MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+
+                   /* Add the previously reserved pages. */
+                   if (src_pool == dst_pool && binary_pages != 0) {
+                       /* NOTE: Nintendo does not check the result of this operation. */
+                       pg.AddBlock(data, binary_pages);
+                   }
+
+                   /* Add the previously unreserved pages. */
+                   for (const auto &block : unreserve_pg) {
+                       /* NOTE: Nintendo does not check the result of this operation. */
+                       pg.AddBlock(block.GetAddress(), block.GetNumPages());
+                   }
+               }
+               MESOSPHERE_ABORT_UNLESS(pg.GetNumPages() == static_cast<size_t>(params.code_num_pages));

                /* Ensure that we do not leak pages. */
-               ON_SCOPE_EXIT { pg.Close(); };
+               KPageGroup *process_pg = std::addressof(pg);
+               ON_SCOPE_EXIT { process_pg->Close(); };

                /* Get the temporary region. */
                const auto &temp_region = KMemoryLayout::GetTempRegion();

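One detail worth isolating from the hunk above is the reservation arithmetic: when the compressed KIP image already lives in the destination pool, only the memory not covered by the page-aligned image needs to be reserved against the resource limit. A small hedged sketch of that computation (names and sample numbers are mine, not the kernel's):

    #include <cstddef>

    /* Mirrors the unreserved_size expression in the diff: additional memory to reserve for a
       process of process_size bytes when aligned_binary_size bytes are already backed in-place. */
    constexpr std::size_t UnreservedSize(std::size_t process_size, std::size_t aligned_binary_size, bool same_pool) {
        return process_size - (same_pool ? aligned_binary_size : 0);
    }

    static_assert(UnreservedSize(0x100000, 0x40000, true)  == 0xC0000);
    static_assert(UnreservedSize(0x100000, 0x40000, false) == 0x100000);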
@@ -107,8 +157,29 @@ namespace ams::kern {
                KProcessAddress temp_address = Null<KProcessAddress>;
                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));

+               /* Setup the new page group's memory, so that we can load the process. */
+               {
+                   /* Copy the unaligned ending of the compressed binary. */
+                   if (const size_t unaligned_size = binary_size - util::AlignDown(binary_size, PageSize); unaligned_size != 0) {
+                       std::memcpy(GetVoidPointer(temp_address + process_size - unaligned_size), GetVoidPointer(data + binary_size - unaligned_size), unaligned_size);
+                   }
+
+                   /* Copy the aligned part of the compressed binary. */
+                   if (const size_t aligned_size = util::AlignDown(binary_size, PageSize); aligned_size != 0 && src_pool == dst_pool) {
+                       std::memmove(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(temp_address), aligned_size);
+                   } else {
+                       if (src_pool != dst_pool) {
+                           std::memcpy(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(data), aligned_size);
+                           Kernel::GetMemoryManager().Close(data, aligned_size / PageSize);
+                       }
+                   }
+
+                   /* Clear the first part of the memory. */
+                   std::memset(GetVoidPointer(temp_address), 0, process_size - binary_size);
+               }
+
                /* Load the process. */
-               MESOSPHERE_R_ABORT_UNLESS(reader.Load(temp_address, params));
+               MESOSPHERE_R_ABORT_UNLESS(reader.Load(temp_address, params, temp_address + process_size - binary_size));

                /* Unmap the temporary mapping. */
                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, pg, KMemoryState_Kernel));

@@ -117,9 +188,56 @@ namespace ams::kern {
                new_process = KProcess::Create();
                MESOSPHERE_ABORT_UNLESS(new_process != nullptr);

-               /* Initialize the process. */
-               MESOSPHERE_R_ABORT_UNLESS(new_process->Initialize(params, pg, reader.GetCapabilities(), reader.GetNumCapabilities(), std::addressof(Kernel::GetSystemResourceLimit()), pool, reader.IsImmortal()));
+               /* Ensure the page group is usable for the process. */
+               /* If the pool is the same, we need to use the workaround page group. */
+               if (src_pool == dst_pool) {
+                   /* Allocate a new, usable group for the process. */
+                   MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+
+                   /* Copy data from the working page group to the usable one. */
+                   auto work_it = pg.begin();
+                   MESOSPHERE_ABORT_UNLESS(work_it != pg.end());
+                   {
+                       auto work_address = work_it->GetAddress();
+                       auto work_remaining = work_it->GetNumPages();
+                       for (const auto &block : workaround_pg) {
+                           auto block_address = block.GetAddress();
+                           auto block_remaining = block.GetNumPages();
+                           while (block_remaining > 0) {
+                               if (work_remaining == 0) {
+                                   ++work_it;
+                                   work_address = work_it->GetAddress();
+                                   work_remaining = work_it->GetNumPages();
                                }

+                               const size_t cur_pages = std::min(block_remaining, work_remaining);
+                               const size_t cur_size = cur_pages * PageSize;
+                               std::memcpy(GetVoidPointer(block_address), GetVoidPointer(work_address), cur_size);
+
+                               block_address += cur_size;
+                               work_address += cur_size;
+
+                               block_remaining -= cur_pages;
+                               work_remaining -= cur_pages;
+                           }
+                       }
+
+                       ++work_it;
+                   }
+                   MESOSPHERE_ABORT_UNLESS(work_it == pg.end());
+
+                   /* We want to use the new page group. */
+                   process_pg = std::addressof(workaround_pg);
+                   pg.Close();
+               }
+
+               /* Initialize the process. */
+               MESOSPHERE_R_ABORT_UNLESS(new_process->Initialize(params, *process_pg, reader.GetCapabilities(), reader.GetNumCapabilities(), std::addressof(Kernel::GetSystemResourceLimit()), dst_pool, reader.IsImmortal()));
+           }
+
+           /* Release the memory that was previously reserved. */
+           if (const size_t aligned_bin_size = util::AlignDown(binary_size, PageSize); aligned_bin_size != 0 && src_pool != dst_pool) {
+               Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_bin_size);
            }

            /* Set the process's memory permissions. */

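The workaround path above copies the just-loaded image from one page group into a freshly allocated one by walking both block lists with two cursors and copying min(remaining) pages at a time. Below is a hedged, standalone sketch of that two-cursor copy over plain vectors; the Block type and CopyBlocks name are illustrative stand-ins, not the kernel's KPageGroup API, and both lists are assumed non-empty with equal total page counts.

    #include <algorithm>
    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct Block { unsigned char *address; std::size_t num_pages; };
    constexpr std::size_t PageSize = 0x1000;

    /* Copy the contents described by src blocks into dst blocks of equal total size. */
    void CopyBlocks(const std::vector<Block> &dst, const std::vector<Block> &src) {
        auto it = src.begin();
        unsigned char *src_address = it->address;
        std::size_t src_remaining  = it->num_pages;

        for (const auto &block : dst) {
            unsigned char *dst_address = block.address;
            std::size_t dst_remaining  = block.num_pages;
            while (dst_remaining > 0) {
                /* Advance to the next source block once the current one is exhausted. */
                if (src_remaining == 0) {
                    ++it;
                    src_address   = it->address;
                    src_remaining = it->num_pages;
                }
                const std::size_t cur_pages = std::min(dst_remaining, src_remaining);
                const std::size_t cur_size  = cur_pages * PageSize;
                std::memcpy(dst_address, src_address, cur_size);

                dst_address += cur_size;   src_address += cur_size;
                dst_remaining -= cur_pages; src_remaining -= cur_pages;
            }
        }
    }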
@@ -137,14 +255,17 @@ namespace ams::kern {
            infos[i].priority = reader.GetPriority();

            /* Advance the reader. */
-           current += reader.GetBinarySize();
-       }
+           current = data + binary_size;
        }

-   constinit KVirtualAddress g_initial_process_binary_address = Null<KVirtualAddress>;
-   constinit InitialProcessBinaryHeader g_initial_process_binary_header = {};
-   constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
-   constinit u64 g_initial_process_id_max = std::numeric_limits<u64>::min();
+       /* Release remaining memory used by the image. */
+       {
+           const size_t remaining_size = util::AlignUp(GetInteger(g_initial_process_binary_address) + g_initial_process_binary_header.size, PageSize) - util::AlignDown(GetInteger(current), PageSize);
+           const size_t remaining_pages = remaining_size / PageSize;
+           Kernel::GetMemoryManager().Close(util::AlignDown(GetInteger(current), PageSize), remaining_pages);
+           Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, remaining_size);
+       }
+   }

    }

@@ -156,32 +277,37 @@ namespace ams::kern {
        return g_initial_process_id_max;
    }

-   size_t GetInitialProcessesSecureMemorySize() {
-       LoadInitialProcessBinaryHeader(&g_initial_process_binary_header);
-
-       return GetProcessesSecureMemorySize(g_initial_process_binary_address != Null<KVirtualAddress> ? g_initial_process_binary_address : GetInitialProcessBinaryAddress(), g_initial_process_binary_header);
+   KVirtualAddress GetInitialProcessBinaryAddress() {
+       /* Get, validate the pool region. */
+       const auto *pool_region = KMemoryLayout::GetVirtualMemoryRegionTree().FindLastDerived(KMemoryRegionType_VirtualDramUserPool);
+       MESOSPHERE_INIT_ABORT_UNLESS(pool_region != nullptr);
+       MESOSPHERE_INIT_ABORT_UNLESS(pool_region->GetEndAddress() != 0);
+       MESOSPHERE_ABORT_UNLESS(pool_region->GetSize() >= InitialProcessBinarySizeMax);
+
+       return pool_region->GetEndAddress() - InitialProcessBinarySizeMax;
    }

-   void CopyInitialProcessBinaryToKernelMemory() {
-       LoadInitialProcessBinaryHeader(&g_initial_process_binary_header);
+   size_t GetInitialProcessesSecureMemorySize() {
+       LoadInitialProcessBinaryHeader();
+
+       return g_initial_process_secure_memory_size;
+   }
+
+   size_t CopyInitialProcessBinaryToKernelMemory() {
+       LoadInitialProcessBinaryHeader();

        if (g_initial_process_binary_header.num_processes > 0) {
            /* Reserve pages for the initial process binary from the system resource limit. */
-           auto &mm = Kernel::GetMemoryManager();
            const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
-           const size_t num_pages = total_size / PageSize;
            MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, total_size));

-           /* Allocate memory for the image. */
-           const KMemoryManager::Pool pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetCreateProcessMemoryPool());
-           const auto allocate_option = KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront);
-           KVirtualAddress allocated_memory = mm.AllocateAndOpenContinuous(num_pages, 1, allocate_option);
-           MESOSPHERE_ABORT_UNLESS(allocated_memory != Null<KVirtualAddress>);
+           /* The initial process binary is potentially over-allocated, so free any extra pages. */
+           if (total_size < InitialProcessBinarySizeMax) {
+               Kernel::GetMemoryManager().Close(g_initial_process_binary_address + total_size, (InitialProcessBinarySizeMax - total_size) / PageSize);
+           }

-           /* Relocate the image. */
-           std::memmove(GetVoidPointer(allocated_memory), GetVoidPointer(GetInitialProcessBinaryAddress()), g_initial_process_binary_header.size);
-           std::memset(GetVoidPointer(GetInitialProcessBinaryAddress()), 0, g_initial_process_binary_header.size);
-           g_initial_process_binary_address = allocated_memory;
+           return total_size;
+       } else {
+           return 0;
        }
    }

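With the image left in place in the user pool, CopyInitialProcessBinaryToKernelMemory no longer relocates anything; it just trims the over-allocated tail of the InitialProcessBinarySizeMax reservation down to the page-aligned actual image size. A hedged sketch of that trim arithmetic (the helper names and sample sizes below are illustrative only):

    #include <cstddef>

    constexpr std::size_t PageSize = 0x1000;
    constexpr std::size_t AlignUp(std::size_t v, std::size_t a) { return (v + a - 1) & ~(a - 1); }

    /* How many over-allocated pages get returned to the memory manager when the actual
       INI1 image is smaller than the maximum reservation. Illustrative, not kernel code. */
    constexpr std::size_t ExtraPages(std::size_t ini_header_size, std::size_t ini_size_max) {
        const std::size_t total_size = AlignUp(ini_header_size, PageSize);
        return total_size < ini_size_max ? (ini_size_max - total_size) / PageSize : 0;
    }

    static_assert(ExtraPages(0x123000 + 0x10, 0xC00000) == (0xC00000 - 0x124000) / 0x1000);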
@@ -190,15 +316,7 @@ namespace ams::kern {
        InitialProcessInfo *infos = static_cast<InitialProcessInfo *>(__builtin_alloca(sizeof(InitialProcessInfo) * g_initial_process_binary_header.num_processes));

        /* Create the processes. */
-       CreateProcesses(infos, g_initial_process_binary_address, g_initial_process_binary_header);
-
-       /* Release the memory used by the image. */
-       {
-           const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
-           const size_t num_pages = total_size / PageSize;
-           Kernel::GetMemoryManager().Close(g_initial_process_binary_address, num_pages);
-           Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, total_size);
-       }
+       CreateProcesses(infos);

        /* Determine the initial process id range. */
        for (size_t i = 0; i < g_initial_process_binary_header.num_processes; i++) {

@@ -77,14 +77,14 @@ namespace ams::kern {

    Result KInitialProcessReader::MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const {
        /* Get and validate addresses/sizes. */
-       const uintptr_t rx_address = m_kip_header->GetRxAddress();
-       const size_t rx_size = m_kip_header->GetRxSize();
-       const uintptr_t ro_address = m_kip_header->GetRoAddress();
-       const size_t ro_size = m_kip_header->GetRoSize();
-       const uintptr_t rw_address = m_kip_header->GetRwAddress();
-       const size_t rw_size = m_kip_header->GetRwSize();
-       const uintptr_t bss_address = m_kip_header->GetBssAddress();
-       const size_t bss_size = m_kip_header->GetBssSize();
+       const uintptr_t rx_address = m_kip_header.GetRxAddress();
+       const size_t rx_size = m_kip_header.GetRxSize();
+       const uintptr_t ro_address = m_kip_header.GetRoAddress();
+       const size_t ro_size = m_kip_header.GetRoSize();
+       const uintptr_t rw_address = m_kip_header.GetRwAddress();
+       const size_t rw_size = m_kip_header.GetRwSize();
+       const uintptr_t bss_address = m_kip_header.GetBssAddress();
+       const size_t bss_size = m_kip_header.GetBssSize();
        R_UNLESS(util::IsAligned(rx_address, PageSize), svc::ResultInvalidAddress());
        R_UNLESS(util::IsAligned(ro_address, PageSize), svc::ResultInvalidAddress());
        R_UNLESS(util::IsAligned(rw_address, PageSize), svc::ResultInvalidAddress());

@@ -115,13 +115,13 @@ namespace ams::kern {
        /* Set fields in parameter. */
        out->code_address = map_start + start_address;
        out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize;
-       out->program_id = m_kip_header->GetProgramId();
-       out->version = m_kip_header->GetVersion();
+       out->program_id = m_kip_header.GetProgramId();
+       out->version = m_kip_header.GetVersion();
        out->flags = 0;
        MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize));

        /* Copy name field. */
-       m_kip_header->GetName(out->name, sizeof(out->name));
+       m_kip_header.GetName(out->name, sizeof(out->name));

        /* Apply ASLR, if needed. */
        if (enable_aslr) {

@@ -146,39 +146,36 @@ namespace ams::kern {
        return ResultSuccess();
    }

-   Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const {
-       /* Clear memory at the address. */
-       std::memset(GetVoidPointer(address), 0, params.code_num_pages * PageSize);
-
+   Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params, KProcessAddress src) const {
        /* Prepare to layout the data. */
-       const KProcessAddress rx_address = address + m_kip_header->GetRxAddress();
-       const KProcessAddress ro_address = address + m_kip_header->GetRoAddress();
-       const KProcessAddress rw_address = address + m_kip_header->GetRwAddress();
-       const u8 *rx_binary = reinterpret_cast<const u8 *>(m_kip_header + 1);
-       const u8 *ro_binary = rx_binary + m_kip_header->GetRxCompressedSize();
-       const u8 *rw_binary = ro_binary + m_kip_header->GetRoCompressedSize();
+       const KProcessAddress rx_address = address + m_kip_header.GetRxAddress();
+       const KProcessAddress ro_address = address + m_kip_header.GetRoAddress();
+       const KProcessAddress rw_address = address + m_kip_header.GetRwAddress();
+       const u8 *rx_binary = GetPointer<const u8>(src);
+       const u8 *ro_binary = rx_binary + m_kip_header.GetRxCompressedSize();
+       const u8 *rw_binary = ro_binary + m_kip_header.GetRoCompressedSize();

        /* Copy text. */
-       if (util::AlignUp(m_kip_header->GetRxSize(), PageSize)) {
-           std::memcpy(GetVoidPointer(rx_address), rx_binary, m_kip_header->GetRxCompressedSize());
-           if (m_kip_header->IsRxCompressed()) {
-               BlzUncompress(GetVoidPointer(rx_address + m_kip_header->GetRxCompressedSize()));
+       if (util::AlignUp(m_kip_header.GetRxSize(), PageSize)) {
+           std::memmove(GetVoidPointer(rx_address), rx_binary, m_kip_header.GetRxCompressedSize());
+           if (m_kip_header.IsRxCompressed()) {
+               BlzUncompress(GetVoidPointer(rx_address + m_kip_header.GetRxCompressedSize()));
            }
        }

        /* Copy rodata. */
-       if (util::AlignUp(m_kip_header->GetRoSize(), PageSize)) {
-           std::memcpy(GetVoidPointer(ro_address), ro_binary, m_kip_header->GetRoCompressedSize());
-           if (m_kip_header->IsRoCompressed()) {
-               BlzUncompress(GetVoidPointer(ro_address + m_kip_header->GetRoCompressedSize()));
+       if (util::AlignUp(m_kip_header.GetRoSize(), PageSize)) {
+           std::memmove(GetVoidPointer(ro_address), ro_binary, m_kip_header.GetRoCompressedSize());
+           if (m_kip_header.IsRoCompressed()) {
+               BlzUncompress(GetVoidPointer(ro_address + m_kip_header.GetRoCompressedSize()));
            }
        }

        /* Copy rwdata. */
-       if (util::AlignUp(m_kip_header->GetRwSize(), PageSize)) {
-           std::memcpy(GetVoidPointer(rw_address), rw_binary, m_kip_header->GetRwCompressedSize());
-           if (m_kip_header->IsRwCompressed()) {
-               BlzUncompress(GetVoidPointer(rw_address + m_kip_header->GetRwCompressedSize()));
+       if (util::AlignUp(m_kip_header.GetRwSize(), PageSize)) {
+           std::memmove(GetVoidPointer(rw_address), rw_binary, m_kip_header.GetRwCompressedSize());
+           if (m_kip_header.IsRwCompressed()) {
+               BlzUncompress(GetVoidPointer(rw_address + m_kip_header.GetRwCompressedSize()));
            }
        }

@@ -192,27 +189,27 @@ namespace ams::kern {
    }

    Result KInitialProcessReader::SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const {
-       const size_t rx_size = m_kip_header->GetRxSize();
-       const size_t ro_size = m_kip_header->GetRoSize();
-       const size_t rw_size = m_kip_header->GetRwSize();
-       const size_t bss_size = m_kip_header->GetBssSize();
+       const size_t rx_size = m_kip_header.GetRxSize();
+       const size_t ro_size = m_kip_header.GetRoSize();
+       const size_t rw_size = m_kip_header.GetRwSize();
+       const size_t bss_size = m_kip_header.GetBssSize();

        /* Set R-X pages. */
        if (rx_size) {
-           const uintptr_t start = m_kip_header->GetRxAddress() + params.code_address;
+           const uintptr_t start = m_kip_header.GetRxAddress() + params.code_address;
            R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(rx_size, PageSize), ams::svc::MemoryPermission_ReadExecute));
        }

        /* Set R-- pages. */
        if (ro_size) {
-           const uintptr_t start = m_kip_header->GetRoAddress() + params.code_address;
+           const uintptr_t start = m_kip_header.GetRoAddress() + params.code_address;
            R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(ro_size, PageSize), ams::svc::MemoryPermission_Read));
        }

        /* Set RW- pages. */
        if (rw_size || bss_size) {
-           const uintptr_t start = (rw_size ? m_kip_header->GetRwAddress() : m_kip_header->GetBssAddress()) + params.code_address;
-           const uintptr_t end = (bss_size ? m_kip_header->GetBssAddress() + bss_size : m_kip_header->GetRwAddress() + rw_size) + params.code_address;
+           const uintptr_t start = (rw_size ? m_kip_header.GetRwAddress() : m_kip_header.GetBssAddress()) + params.code_address;
+           const uintptr_t end = (bss_size ? m_kip_header.GetBssAddress() + bss_size : m_kip_header.GetRwAddress() + rw_size) + params.code_address;
            R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(end - start, PageSize), ams::svc::MemoryPermission_ReadWrite));
        }

@@ -100,19 +100,48 @@ namespace ams::kern {
        }

        /* Free each region to its corresponding heap. */
+       size_t reserved_sizes[MaxManagerCount] = {};
+       const uintptr_t ini_start = GetInteger(GetInitialProcessBinaryAddress());
+       const uintptr_t ini_end = ini_start + InitialProcessBinarySizeMax;
+       const uintptr_t ini_last = ini_end - 1;
        for (const auto &it : KMemoryLayout::GetVirtualMemoryRegionTree()) {
            if (it.IsDerivedFrom(KMemoryRegionType_VirtualDramUserPool)) {
-               /* Check the region. */
+               /* Get the manager for the region. */
+               auto &manager = m_managers[it.GetAttributes()];
+
+               if (it.GetAddress() <= ini_start && ini_last <= it.GetLastAddress()) {
+                   /* Free memory before the ini to the heap. */
+                   if (it.GetAddress() != ini_start) {
+                       manager.Free(it.GetAddress(), (ini_start - it.GetAddress()) / PageSize);
+                   }
+
+                   /* Open/reserve the ini memory. */
+                   manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
+                   reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
+
+                   /* Free memory after the ini to the heap. */
+                   if (ini_last != it.GetLastAddress()) {
                        MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0);
+                       manager.Free(ini_end, it.GetEndAddress() - ini_end);
+                   }
+               } else {
+                   /* Ensure there's no partial overlap with the ini image. */
+                   if (it.GetAddress() <= ini_last) {
+                       MESOSPHERE_ABORT_UNLESS(it.GetLastAddress() < ini_start);
+                   } else {
+                       /* Otherwise, check the region for general validity. */
+                       MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0);
+                   }

                    /* Free the memory to the heap. */
-                   m_managers[it.GetAttributes()].Free(it.GetAddress(), it.GetSize() / PageSize);
+                   manager.Free(it.GetAddress(), it.GetSize() / PageSize);
+               }
            }
        }

        /* Update the used size for all managers. */
        for (size_t i = 0; i < m_num_managers; ++i) {
-           m_managers[i].UpdateUsedHeapSize();
+           m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
        }
    }

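The key idea in the hunk above is that a user-pool region fully containing the INI1 image is split three ways: pages before the image are freed to the heap, the image's pages are opened and tracked in reserved_sizes, and pages after it are freed. A hedged sketch of that splitting over plain integer ranges (the names below are mine, not the kernel's):

    #include <cstdint>

    struct Carve { uint64_t free_before; uint64_t reserved; uint64_t free_after; };

    /* Split a [region_start, region_end) range around a fully contained [ini_start, ini_end) range. */
    constexpr Carve SplitAroundIni(uint64_t region_start, uint64_t region_end, uint64_t ini_start, uint64_t ini_end) {
        return Carve{
            ini_start - region_start, /* freed to the heap before the image */
            ini_end - ini_start,      /* opened/reserved for the INI1 image */
            region_end - ini_end,     /* freed to the heap after the image  */
        };
    }

    static_assert(SplitAroundIni(0x1000, 0x9000, 0x4000, 0x7000).free_before == 0x3000);
    static_assert(SplitAroundIni(0x1000, 0x9000, 0x4000, 0x7000).reserved    == 0x3000);
    static_assert(SplitAroundIni(0x1000, 0x9000, 0x4000, 0x7000).free_after  == 0x2000);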
@@ -362,10 +362,9 @@ namespace ams::kern::init {
        /* NOTE: Nintendo does this only on 10.0.0+ */
        init_pt.PhysicallyRandomize(slab_region_start, slab_region_size, false);

-       /* Determine size available for kernel page table heaps, requiring > 8 MB. */
+       /* Determine size available for kernel page table heaps. */
        const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
        const size_t page_table_heap_size = GetInteger(resource_end_phys_addr) - GetInteger(slab_end_phys_addr);
-       MESOSPHERE_INIT_ABORT_UNLESS(page_table_heap_size / 4_MB > 2);

        /* Insert a physical region for the kernel page table heap region */
        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_end_phys_addr), page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));

@@ -53,7 +53,7 @@ __metadata_kernel_layout:
    .word __bss_start__ - _start /* rw_end_offset */
    .word __bss_start__ - _start /* bss_offset */
    .word __bss_end__ - _start /* bss_end_offset */
-   .word __end__ - _start /* ini_load_offset */
+   .word __end__ - _start /* resource_offset */
    .word _DYNAMIC - _start /* dynamic_offset */
    .word __init_array_start - _start /* init_array_offset */
    .word __init_array_end - _start /* init_array_end_offset */

@@ -172,7 +172,7 @@ namespace ams::kern::init::loader {
        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rw_offset, PageSize));
        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(bss_end_offset, PageSize));
        const uintptr_t bss_offset = layout->bss_offset;
-       const uintptr_t ini_load_offset = layout->ini_load_offset;
+       const uintptr_t resource_offset = layout->resource_offset;
        const uintptr_t dynamic_offset = layout->dynamic_offset;
        const uintptr_t init_array_offset = layout->init_array_offset;
        const uintptr_t init_array_end_offset = layout->init_array_end_offset;

@@ -181,8 +181,8 @@ namespace ams::kern::init::loader {
        const size_t resource_region_size = KMemoryLayout::GetResourceRegionSizeForInit();

        /* Setup the INI1 header in memory for the kernel. */
-       const uintptr_t ini_end_address = base_address + ini_load_offset + resource_region_size;
-       const uintptr_t ini_load_address = ini_end_address - InitialProcessBinarySizeMax;
+       const uintptr_t resource_end_address = base_address + resource_offset + resource_region_size;
+       const uintptr_t ini_load_address = GetInteger(KSystemControl::Init::GetInitialProcessBinaryPhysicalAddress());
        if (ini_base_address != ini_load_address) {
            /* The INI is not at the correct address, so we need to relocate it. */
            const InitialProcessBinaryHeader *ini_header = reinterpret_cast<const InitialProcessBinaryHeader *>(ini_base_address);

@@ -195,14 +195,14 @@ namespace ams::kern::init::loader {
            }
        }

-       /* We want to start allocating page tables at ini_end_address. */
-       g_initial_page_allocator.Initialize(ini_end_address);
+       /* We want to start allocating page tables at the end of the resource region. */
+       g_initial_page_allocator.Initialize(resource_end_address);

        /* Make a new page table for TTBR1_EL1. */
        KInitialPageTable init_pt(KernelBaseRangeStart, KernelBaseRangeLast, g_initial_page_allocator);

        /* Setup initial identity mapping. TTBR1 table passed by reference. */
-       SetupInitialIdentityMapping(init_pt, base_address, bss_end_offset, ini_end_address, InitialPageTableRegionSizeMax, g_initial_page_allocator);
+       SetupInitialIdentityMapping(init_pt, base_address, bss_end_offset, resource_end_address, InitialPageTableRegionSizeMax, g_initial_page_allocator);

        /* Generate a random slide for the kernel's base address. */
        const KVirtualAddress virtual_base_address = GetRandomKernelBaseAddress(init_pt, base_address, bss_end_offset);