Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-11-09 22:56:35 +00:00)

Commit cb6af379d8 (parent d9e6771e63)
kern: implement dynamic slab init + ini relocation

20 changed files with 851 additions and 22 deletions
@@ -49,6 +49,7 @@
#include <mesosphere/kern_k_slab_heap.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
#include <mesosphere/kern_kernel.hpp>
#include <mesosphere/kern_k_page_table_manager.hpp>

/* Auto Objects. */
#include <mesosphere/kern_k_auto_object.hpp>
@@ -47,10 +47,10 @@ namespace ams::kern::arm64 {
constexpr KGlobalInterruptEntry() : handler(nullptr), manually_cleared(false), needs_clear(false) { /* ... */ }
};
private:
static inline KSpinLock s_lock;
static inline KGlobalInterruptEntry s_global_interrupts[KInterruptController::NumGlobalInterrupts];
static inline KInterruptController::GlobalState s_global_state;
static inline bool s_global_state_saved;
static KSpinLock s_lock;
static std::array<KGlobalInterruptEntry, KInterruptController::NumGlobalInterrupts> s_global_interrupts;
static KInterruptController::GlobalState s_global_state;
static bool s_global_state_saved;
private:
KCoreLocalInterruptEntry core_local_interrupts[KInterruptController::NumLocalInterrupts];
KInterruptController interrupt_controller;
@@ -14,7 +14,7 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_common.hpp>

namespace ams::kern {
@@ -38,6 +38,7 @@ namespace ams::kern {
public:
/* Initialization. */
static NOINLINE void Initialize();
static NOINLINE u32 GetInitialProcessBinaryPool();

/* Randomness. */
static void GenerateRandomBytes(void *dst, size_t size);
@@ -28,4 +28,9 @@ namespace ams::kern {
u32 reserved;
};

NOINLINE void CopyInitialProcessBinaryToKernelMemory();

u64 GetInitialProcessIdMin();
u64 GetInitialProcessIdMax();

}
@@ -0,0 +1,130 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_slab_heap.hpp>
#include <mesosphere/kern_k_page_group.hpp>
#include <mesosphere/kern_k_memory_block.hpp>

namespace ams::kern {

namespace impl {

class DynamicSlabHeapPage {
private:
u8 buffer[PageSize];
};
static_assert(sizeof(DynamicSlabHeapPage) == PageSize);

};

template<typename T>
class KDynamicSlabHeap {
NON_COPYABLE(KDynamicSlabHeap);
NON_MOVEABLE(KDynamicSlabHeap);
private:
using Impl = impl::KSlabHeapImpl;
using PageBuffer = impl::DynamicSlabHeapPage;
private:
Impl impl;
KDynamicSlabHeap<PageBuffer> *next_allocator;
std::atomic<size_t> used;
std::atomic<size_t> peak;
std::atomic<size_t> count;
KVirtualAddress address;
size_t size;
private:
ALWAYS_INLINE Impl *GetImpl() {
return std::addressof(this->impl);
}
ALWAYS_INLINE const Impl *GetImpl() const {
return std::addressof(this->impl);
}
public:
constexpr KDynamicSlabHeap() : impl(), next_allocator(), used(), peak(), count(), address(), size() { /* ... */ }

constexpr KVirtualAddress GetAddress() const { return this->address; }
constexpr size_t GetSize() const { return this->size; }
constexpr size_t GetUsed() const { return this->used; }
constexpr size_t GetPeak() const { return this->peak; }
constexpr size_t GetCount() const { return this->count; }

constexpr bool IsInRange(KVirtualAddress addr) const {
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
}

void Initialize(KVirtualAddress memory, size_t sz) {
/* Set tracking fields. */
this->address = memory;
this->count = sz / sizeof(T);
this->size = this->count * sizeof(T);

/* Free blocks to memory. */
u8 *cur = GetPointer<u8>(this->address + this->size);
for (size_t i = 0; i < this->count; i++) {
cur -= sizeof(T);
this->GetImpl()->Free(cur);
}
}

void Initialize(KDynamicSlabHeap<PageBuffer> *next) {
this->next_allocator = next;
this->address = next->GetAddress();
this->size = next->GetSize();
}

T *Allocate() {
T *allocated = reinterpret_cast<T *>(this->GetImpl()->Allocate());

/* If we fail to allocate, try to get a new page from our next allocator. */
if (AMS_UNLIKELY(allocated == nullptr)) {
if (this->next_allocator != nullptr) {
allocated = reinterpret_cast<T *>(this->next_allocator->Allocate());
if (allocated != nullptr) {
/* If we succeeded in getting a page, free the rest to our slab. */
for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
this->GetImpl()->Free(allocated + i);
}
this->count += sizeof(PageBuffer) / sizeof(T);
}
}
}

/* Update our tracking. */
if (AMS_LIKELY(allocated != nullptr)) {
size_t used = ++this->used;
size_t peak = this->peak;
while (peak < used) {
if (this->peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
break;
}
}
}

return allocated;
}

void Free(T *t) {
this->GetImpl()->Free(t);
--this->used;
}
};

class KDynamicPageManager : public KDynamicSlabHeap<impl::DynamicSlabHeapPage>{};
class KBlockInfoManager : public KDynamicSlabHeap<KBlockInfo>{};
class KMemoryBlockSlabManager : public KDynamicSlabHeap<KMemoryBlock>{};

}
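The two Initialize overloads above give the heap its two operating modes: seeded directly from a raw block of kernel virtual memory, or chained to a page-level heap that it refills from one DynamicSlabHeapPage at a time on allocation miss. A minimal usage sketch, for illustration only (page_heap_addr and page_heap_size are hypothetical placeholders; in this commit the kernel actually seeds its managers directly from memory in Kernel::InitializeResourceManagers):

/* Illustrative sketch only; page_heap_addr/page_heap_size describe a hypothetical page-aligned buffer. */
KDynamicPageManager page_allocator;
KBlockInfoManager   info_allocator;

page_allocator.Initialize(page_heap_addr, page_heap_size);  /* Seed the page-level heap from raw memory. */
info_allocator.Initialize(std::addressof(page_allocator));  /* Chain: refill KBlockInfo slabs one page at a time. */

KBlockInfo *info = info_allocator.Allocate();  /* On miss, pulls a fresh page from page_allocator and splits it. */
info_allocator.Free(info);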
@@ -0,0 +1,187 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#include <mesosphere/kern_slab_helpers.hpp>

namespace ams::kern {

enum KMemoryState : u32 {
KMemoryState_None = 0,
KMemoryState_Mask = 0xFF,

KMemoryState_FlagCanReprotect = (1 << 8),
KMemoryState_FlagCanDebug = (1 << 9),
KMemoryState_FlagCanUseIpc = (1 << 10),
KMemoryState_FlagCanUseNonDeviceIpc = (1 << 11),
KMemoryState_FlagCanUseNonSecureIpc = (1 << 12),
KMemoryState_FlagMapped = (1 << 13),
KMemoryState_FlagCode = (1 << 14),
KMemoryState_FlagCanAlias = (1 << 15),
KMemoryState_FlagCanCodeAlias = (1 << 16),
KMemoryState_FlagCanTransfer = (1 << 17),
KMemoryState_FlagCanQueryPhysical = (1 << 18),
KMemoryState_FlagCanDeviceMap = (1 << 19),
KMemoryState_FlagCanAlignedDeviceMap = (1 << 20),
KMemoryState_FlagCanIpcUserBuffer = (1 << 21),
KMemoryState_FlagReferenceCounted = (1 << 22),
KMemoryState_FlagCanMapProcess = (1 << 23),
KMemoryState_FlagCanChangeAttribute = (1 << 24),
KMemoryState_FlagCanCodeMemory = (1 << 25),

KMemoryState_FlagsData = KMemoryState_FlagCanReprotect | KMemoryState_FlagCanUseIpc |
KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
KMemoryState_FlagMapped | KMemoryState_FlagCanAlias |
KMemoryState_FlagCanTransfer | KMemoryState_FlagCanQueryPhysical |
KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap |
KMemoryState_FlagCanIpcUserBuffer | KMemoryState_FlagReferenceCounted |
KMemoryState_FlagCanChangeAttribute,

KMemoryState_FlagsCode = KMemoryState_FlagCanDebug | KMemoryState_FlagCanUseIpc |
KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
KMemoryState_FlagMapped | KMemoryState_FlagCode |
KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap |
KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagReferenceCounted,

KMemoryState_FlagsMisc = KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted |
KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap,

KMemoryState_Free = ams::svc::MemoryState_Free,
KMemoryState_Io = ams::svc::MemoryState_Io | KMemoryState_FlagMapped,
KMemoryState_Static = ams::svc::MemoryState_Static | KMemoryState_FlagMapped | KMemoryState_FlagCanQueryPhysical,
KMemoryState_Code = ams::svc::MemoryState_Code | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess,
KMemoryState_CodeData = ams::svc::MemoryState_CodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory,
KMemoryState_Normal = ams::svc::MemoryState_Normal | KMemoryState_FlagsData | KMemoryState_FlagCanCodeMemory,
KMemoryState_Shared = ams::svc::MemoryState_Shared | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,

/* KMemoryState_Alias was removed after 1.0.0. */

KMemoryState_AliasCode = ams::svc::MemoryState_AliasCode | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias,
KMemoryState_AliasCodeData = ams::svc::MemoryState_AliasCodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias | KMemoryState_FlagCanCodeMemory,

KMemoryState_Ipc = ams::svc::MemoryState_Ipc | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

KMemoryState_Stack = ams::svc::MemoryState_Stack | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,

KMemoryState_Transfered = ams::svc::MemoryState_Transfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanChangeAttribute
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

KMemoryState_SharedTransfered = ams::svc::MemoryState_SharedTransfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

KMemoryState_SharedCode = ams::svc::MemoryState_SharedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted
| KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

KMemoryState_Inaccessible = ams::svc::MemoryState_Inaccessible,

KMemoryState_NonSecureIpc = ams::svc::MemoryState_NonSecureIpc | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,

KMemoryState_NonDeviceIpc = ams::svc::MemoryState_NonDeviceIpc | KMemoryState_FlagsMisc | KMemoryState_FlagCanUseNonDeviceIpc,

KMemoryState_Kernel = ams::svc::MemoryState_Kernel | KMemoryState_FlagMapped,

KMemoryState_GeneratedCode = ams::svc::MemoryState_GeneratedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug,
KMemoryState_CodeOut = ams::svc::MemoryState_CodeOut | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,
};

#if 1
static_assert(KMemoryState_Free == 0x00000000);
static_assert(KMemoryState_Io == 0x00002001);
static_assert(KMemoryState_Static == 0x00042002);
static_assert(KMemoryState_Code == 0x00DC7E03);
static_assert(KMemoryState_CodeData == 0x03FEBD04);
static_assert(KMemoryState_Normal == 0x037EBD05);
static_assert(KMemoryState_Shared == 0x00402006);

static_assert(KMemoryState_AliasCode == 0x00DD7E08);
static_assert(KMemoryState_AliasCodeData == 0x03FFBD09);
static_assert(KMemoryState_Ipc == 0x005C3C0A);
static_assert(KMemoryState_Stack == 0x005C3C0B);
static_assert(KMemoryState_ThreadLocal == 0x0040200C);
static_assert(KMemoryState_Transfered == 0x015C3C0D);
static_assert(KMemoryState_SharedTransfered == 0x005C380E);
static_assert(KMemoryState_SharedCode == 0x0040380F);
static_assert(KMemoryState_Inaccessible == 0x00000010);
static_assert(KMemoryState_NonSecureIpc == 0x005C3811);
static_assert(KMemoryState_NonDeviceIpc == 0x004C2812);
static_assert(KMemoryState_Kernel == 0x00002013);
static_assert(KMemoryState_GeneratedCode == 0x00402214);
static_assert(KMemoryState_CodeOut == 0x00402015);
#endif

enum KMemoryPermission : u8 {
KMemoryPermission_None = 0,

KMemoryPermission_UserRead = ams::svc::MemoryPermission_Read,
KMemoryPermission_UserWrite = ams::svc::MemoryPermission_Write,
KMemoryPermission_UserExecute = ams::svc::MemoryPermission_Execute,

KMemoryPermission_UserReadWrite = ams::svc::MemoryPermission_ReadWrite,
KMemoryPermission_UserReadExecute = ams::svc::MemoryPermission_ReadExecute,

KMemoryPermission_UserMask = KMemoryPermission_UserRead | KMemoryPermission_UserWrite | KMemoryPermission_UserExecute,

KMemoryPermission_KernelShift = 3,

KMemoryPermission_KernelRead = KMemoryPermission_UserRead << KMemoryPermission_KernelShift,
KMemoryPermission_KernelWrite = KMemoryPermission_UserWrite << KMemoryPermission_KernelShift,
KMemoryPermission_KernelExecute = KMemoryPermission_UserExecute << KMemoryPermission_KernelShift,

KMemoryPermission_KernelReadWrite = KMemoryPermission_KernelRead | KMemoryPermission_KernelWrite,
KMemoryPermission_KernelReadExecute = KMemoryPermission_KernelRead | KMemoryPermission_KernelExecute,
};

constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) {
return static_cast<KMemoryPermission>((perm & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((perm & KMemoryPermission_UserWrite) << KMemoryPermission_KernelShift));
}

enum KMemoryAttribute : u8 {
KMemoryAttribute_None = 0x00,

KMemoryAttribute_Locked = ams::svc::MemoryAttribute_Locked,
KMemoryAttribute_IpcLocked = ams::svc::MemoryAttribute_IpcLocked,
KMemoryAttribute_DeviceShared = ams::svc::MemoryAttribute_DeviceShared,
KMemoryAttribute_Uncached = ams::svc::MemoryAttribute_Uncached,
};

class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
private:
KProcessAddress address;
size_t num_pages;
KMemoryState memory_state;
u16 ipc_lock_count;
u16 device_use_count;
KMemoryPermission perm;
KMemoryPermission original_perm;
KMemoryAttribute attribute;
public:
constexpr KMemoryBlock()
: address(), num_pages(), memory_state(KMemoryState_None), ipc_lock_count(), device_use_count(), perm(), original_perm(), attribute()
{
/* ... */
}
};

}
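As a quick check that the flag encoding and the static_asserts in the file above agree: KMemoryState_Io is ams::svc::MemoryState_Io (0x01) combined with KMemoryState_FlagMapped (1 << 13 = 0x2000), i.e. 0x00002001, and KMemoryState_ThreadLocal is MemoryState_ThreadLocal (0x0C) | FlagMapped (0x2000) | FlagReferenceCounted (1 << 22 = 0x400000), i.e. 0x0040200C — both matching the asserted constants.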
@@ -467,6 +467,10 @@ namespace ams::kern {
return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_VirtualDramMetadataPool);
}

static NOINLINE KMemoryRegion &GetPageTableHeapRegion() {
return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_VirtualKernelPtHeap);
}

static NOINLINE KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) {
return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address));
}
@@ -64,6 +64,7 @@ namespace ams::kern {

void TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages);

constexpr size_t GetSize() const { return this->heap.GetSize(); }
constexpr KVirtualAddress GetEndAddress() const { return this->heap.GetEndAddress(); }

constexpr void SetNext(Impl *n) { this->next = n; }

@@ -77,8 +78,45 @@ namespace ams::kern {
size_t index = this->heap.GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
const RefCount ref_count = (++this->page_reference_counts[index++]);
const RefCount ref_count = (++this->page_reference_counts[index]);
MESOSPHERE_ABORT_UNLESS(ref_count > 0);

index++;
}
}

void Close(KLightLock *pool_locks, KVirtualAddress address, size_t num_pages) {
KScopedLightLock lk(pool_locks[this->pool]);

size_t index = this->heap.GetPageOffset(address);
const size_t end = index + num_pages;

size_t free_start = 0;
size_t free_count = 0;
while (index < end) {
MESOSPHERE_ABORT_UNLESS(this->page_reference_counts[index] > 0);
const RefCount ref_count = (--this->page_reference_counts[index]);

/* Keep track of how many zero refcounts we see in a row, to minimize calls to free. */
if (ref_count == 0) {
if (free_count > 0) {
free_count++;
} else {
free_start = index;
free_count = 1;
}
} else {
if (free_count > 0) {
this->Free(this->heap.GetAddress() + free_start * PageSize, free_count);
free_count = 0;
}
}

index++;
}

if (free_count > 0) {
this->Free(this->heap.GetAddress() + free_start * PageSize, free_count);
}
}
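To make the run-coalescing above concrete (illustrative numbers only): if Close() walks four pages whose reference counts drop to 0, 0, 1, 0, it issues two Free() calls — one for the leading two-page run and one for the trailing page — rather than one call per zero-refcount page.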
public:
@@ -117,6 +155,17 @@ namespace ams::kern {
address += cur_pages * PageSize;
}
}

void Close(KVirtualAddress address, size_t num_pages) {
/* Repeatedly close references until we've done so for all pages. */
while (num_pages) {
auto &manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, (manager.GetEndAddress() - address) / PageSize);
manager.Close(this->pool_locks, address, cur_pages);
num_pages -= cur_pages;
address += cur_pages * PageSize;
}
}
public:
static size_t CalculateMetadataOverheadSize(size_t region_size) {
return Impl::CalculateMetadataOverheadSize(region_size);
110  libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp  (new file)
@@ -0,0 +1,110 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#include <mesosphere/kern_slab_helpers.hpp>

namespace ams::kern {

class KBlockInfoManager;

class KBlockInfo : public util::IntrusiveListBaseNode<KBlockInfo> {
private:
KVirtualAddress address;
size_t num_pages;
public:
constexpr KBlockInfo() : address(), num_pages() { /* ... */ }

constexpr void Initialize(KVirtualAddress addr, size_t np) {
this->address = addr;
this->num_pages = np;
}

constexpr KVirtualAddress GetAddress() const { return this->address; }
constexpr size_t GetNumPages() const { return this->num_pages; }
constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; }
constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }

constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const {
return this->address == rhs.address && this->num_pages == rhs.num_pages;
}

constexpr bool operator==(const KBlockInfo &rhs) const {
return this->IsEquivalentTo(rhs);
}

constexpr bool operator!=(const KBlockInfo &rhs) const {
return !(*this == rhs);
}

constexpr bool IsStrictlyBefore(KVirtualAddress addr) const {
const KVirtualAddress end = this->GetEndAddress();

if (this->address != Null<KVirtualAddress> && end == Null<KVirtualAddress>) {
return false;
}

return end < addr;
}

constexpr bool operator<(KVirtualAddress addr) const {
return this->IsStrictlyBefore(addr);
}

constexpr bool TryConcatenate(KVirtualAddress addr, size_t np) {
if (addr != Null<KVirtualAddress> && addr == this->GetEndAddress()) {
this->num_pages += np;
return true;
}
return false;
}
};

class KPageGroup {
public:
using BlockInfoList = util::IntrusiveListBaseTraits<KBlockInfo>::ListType;
using iterator = BlockInfoList::const_iterator;
private:
BlockInfoList block_list;
KBlockInfoManager *manager;
public:
KPageGroup() : block_list(), manager() { /* ... */ }

void Initialize(KBlockInfoManager *m);
void Finalize();

iterator begin() const { return this->block_list.begin(); }
iterator end() const { return this->block_list.end(); }

Result AddBlock(KVirtualAddress addr, size_t num_pages);
void Open() const;
void Close() const;

size_t GetNumPages() const;

bool IsEquivalentTo(const KPageGroup &rhs) const;

bool operator==(const KPageGroup &rhs) const {
return this->IsEquivalentTo(rhs);
}

bool operator!=(const KPageGroup &rhs) const {
return !(*this == rhs);
}
};

}
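A short usage sketch of the interface above, for illustration only (block_info_manager, addr, and num_pages are hypothetical placeholders):

KPageGroup pg;
pg.Initialize(std::addressof(block_info_manager));
if (R_SUCCEEDED(pg.AddBlock(addr, num_pages))) {
    pg.Open();   /* Open a reference to every page in the group via the memory manager. */
    /* ... use the pages ... */
    pg.Close();  /* Drop those references again. */
}
pg.Finalize();   /* Return the KBlockInfo nodes to the manager. */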
@@ -300,8 +300,10 @@ namespace ams::kern {
public:
constexpr KPageHeap() : heap_address(), heap_size(), used_size(), num_blocks(), blocks() { /* ... */ }

constexpr KVirtualAddress GetEndAddress() const { return this->heap_address + this->heap_size; }
constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->heap_address) / PageSize; }
constexpr KVirtualAddress GetAddress() const { return this->heap_address; }
constexpr size_t GetSize() const { return this->heap_size; }
constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; }

void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size) {
return Initialize(heap_address, heap_size, metadata_address, metadata_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
@@ -0,0 +1,98 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_k_dynamic_slab_heap.hpp>

namespace ams::kern {

namespace impl {

class PageTablePage {
private:
u8 buffer[PageSize];
};
static_assert(sizeof(PageTablePage) == PageSize);

}

class KPageTableManager : public KDynamicSlabHeap<impl::PageTablePage> {
public:
using RefCount = u16;
private:
using BaseHeap = KDynamicSlabHeap<impl::PageTablePage>;
private:
RefCount *ref_counts;
public:
static constexpr size_t CalculateReferenceCountSize(size_t size) {
return (size / PageSize) * sizeof(RefCount);
}
public:
constexpr KPageTableManager() : BaseHeap(), ref_counts() { /* ... */ }
private:
void Initialize(RefCount *rc) {
this->ref_counts = rc;
for (size_t i = 0; i < this->GetSize() / PageSize; i++) {
this->ref_counts[i] = 0;
}
}

constexpr RefCount *GetRefCountPointer(KVirtualAddress addr) const {
return std::addressof(this->ref_counts[(addr - this->GetAddress()) / PageSize]);
}
public:
void Initialize(KDynamicPageManager *next_allocator, RefCount *rc) {
BaseHeap::Initialize(next_allocator);
this->Initialize(rc);
}

void Initialize(KVirtualAddress memory, size_t sz, RefCount *rc) {
BaseHeap::Initialize(memory, sz);
this->Initialize(rc);
}

KVirtualAddress Allocate() {
return KVirtualAddress(BaseHeap::Allocate());
}

void Free(KVirtualAddress addr) {
BaseHeap::Free(GetPointer<impl::PageTablePage>(addr));
}

RefCount GetRefCount(KVirtualAddress addr) const {
MESOSPHERE_ASSERT(this->IsInRange(addr));
return *this->GetRefCountPointer(addr);
}

void Open(KVirtualAddress addr, int count) {
MESOSPHERE_ASSERT(this->IsInRange(addr));

*this->GetRefCountPointer(addr) += count;

MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) > 0);
}

bool Close(KVirtualAddress addr, int count) {
MESOSPHERE_ASSERT(this->IsInRange(addr));
MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) >= count);

*this->GetRefCountPointer(addr) -= count;
return this->GetRefCount(addr) == 0;
}
};

}
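A minimal sketch of the intended reference-counting flow, for illustration only (pt_manager is assumed to be an already-initialized KPageTableManager):

KVirtualAddress page = pt_manager.Allocate();  /* A fresh page starts with a reference count of zero. */
pt_manager.Open(page, 1);                      /* Count becomes 1. */
if (pt_manager.Close(page, 1)) {               /* Returns true once the count is back to zero... */
    pt_manager.Free(page);                     /* ...at which point the page can be returned to the heap. */
}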
@@ -30,6 +30,9 @@ namespace ams::kern {
class KInterruptTaskManager;
class KScheduler;
class KMemoryManager;
class KPageTableManager;
class KMemoryBlockSlabManager;
class KBlockInfoManager;

class Kernel {
public:

@@ -38,12 +41,20 @@ namespace ams::kern {
Initializing = 1,
Initialized = 2,
};

static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000;
static constexpr size_t BlockInfoSlabHeapSize = 4000;
private:
static State s_state;
static KThread s_main_threads[cpu::NumCores];
static KThread s_idle_threads[cpu::NumCores];
static KResourceLimit s_system_resource_limit;
static KMemoryManager s_memory_manager;
static KPageTableManager s_page_table_manager;
static KMemoryBlockSlabManager s_app_memory_block_manager;
static KMemoryBlockSlabManager s_sys_memory_block_manager;
static KBlockInfoManager s_block_info_manager;
private:
static ALWAYS_INLINE KCoreLocalContext &GetCoreLocalContext() {
return reinterpret_cast<KCoreLocalRegion *>(cpu::GetCoreLocalRegionAddress())->current.context;

@@ -54,6 +65,7 @@ namespace ams::kern {
public:
static NOINLINE void InitializeCoreLocalRegion(s32 core_id);
static NOINLINE void InitializeMainAndIdleThreads(s32 core_id);
static NOINLINE void InitializeResourceManagers(KVirtualAddress address, size_t size);

static ALWAYS_INLINE State GetState() { return s_state; }
static ALWAYS_INLINE void SetState(State state) { s_state = state; }

@@ -86,12 +98,24 @@ namespace ams::kern {
return GetCoreLocalContext().hardware_timer;
}

static ALWAYS_INLINE KResourceLimit &GetSystemResourceLimit() {
return s_system_resource_limit;
}

static ALWAYS_INLINE KMemoryManager &GetMemoryManager() {
return s_memory_manager;
}

static ALWAYS_INLINE KResourceLimit &GetSystemResourceLimit() {
return s_system_resource_limit;
static ALWAYS_INLINE KMemoryBlockSlabManager &GetApplicationMemoryBlockManager() {
return s_app_memory_block_manager;
}

static ALWAYS_INLINE KMemoryBlockSlabManager &GetSystemMemoryBlockManager() {
return s_sys_memory_block_manager;
}

static ALWAYS_INLINE KBlockInfoManager &GetBlockInfoManager() {
return s_block_info_manager;
}
};

@@ -17,6 +17,12 @@

namespace ams::kern::arm64 {

/* Instantiate static members in specific translation unit. */
KSpinLock KInterruptManager::s_lock;
std::array<KInterruptManager::KGlobalInterruptEntry, KInterruptController::NumGlobalInterrupts> KInterruptManager::s_global_interrupts;
KInterruptController::GlobalState KInterruptManager::s_global_state;
bool KInterruptManager::s_global_state_saved;

void KInterruptManager::Initialize(s32 core_id) {
this->interrupt_controller.Initialize(core_id);
}

@@ -233,6 +233,10 @@ namespace ams::kern {
}
}

u32 KSystemControl::GetInitialProcessBinaryPool() {
return KMemoryManager::Pool_Application;
}

/* Randomness. */
void KSystemControl::GenerateRandomBytes(void *dst, size_t size) {
MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
74  libraries/libmesosphere/source/kern_initial_process.cpp  (new file)
@@ -0,0 +1,74 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern {

namespace {

KVirtualAddress GetInitialProcessBinaryAddress() {
return KMemoryLayout::GetPageTableHeapRegion().GetEndAddress() - InitialProcessBinarySizeMax;
}

void LoadInitialProcessBinaryHeader(InitialProcessBinaryHeader *header) {
if (header->magic != InitialProcessBinaryMagic) {
*header = *GetPointer<InitialProcessBinaryHeader>(GetInitialProcessBinaryAddress());
}

MESOSPHERE_ABORT_UNLESS(header->magic == InitialProcessBinaryMagic);
MESOSPHERE_ABORT_UNLESS(header->num_processes <= init::GetSlabResourceCounts().num_KProcess);
}

KVirtualAddress g_initial_process_binary_address;
InitialProcessBinaryHeader g_initial_process_binary_header;
u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
u64 g_initial_process_id_max = std::numeric_limits<u64>::min();

}

u64 GetInitialProcessIdMin() {
return g_initial_process_id_min;
}

u64 GetInitialProcessIdMax() {
return g_initial_process_id_max;
}

void CopyInitialProcessBinaryToKernelMemory() {
LoadInitialProcessBinaryHeader(&g_initial_process_binary_header);

if (g_initial_process_binary_header.num_processes > 0) {
/* Reserve pages for the initial process binary from the system resource limit. */
auto &mm = Kernel::GetMemoryManager();
const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
const size_t num_pages = total_size / PageSize;
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, total_size));

/* Allocate memory for the image. */
const KMemoryManager::Pool pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetInitialProcessBinaryPool());
const auto allocate_option = KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront);
KVirtualAddress allocated_memory = mm.AllocateContinuous(num_pages, 1, allocate_option);
MESOSPHERE_ABORT_UNLESS(allocated_memory != Null<KVirtualAddress>);
mm.Open(allocated_memory, num_pages);

/* Relocate the image. */
std::memmove(GetVoidPointer(allocated_memory), GetVoidPointer(GetInitialProcessBinaryAddress()), g_initial_process_binary_header.size);
std::memset(GetVoidPointer(GetInitialProcessBinaryAddress()), 0, g_initial_process_binary_header.size);
g_initial_process_binary_address = allocated_memory;
}
}

}
101  libraries/libmesosphere/source/kern_k_page_group.cpp  (new file)
@@ -0,0 +1,101 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern {

void KPageGroup::Initialize(KBlockInfoManager *m) {
this->manager = m;
}

void KPageGroup::Finalize() {
auto it = this->block_list.begin();
while (it != this->block_list.end()) {
KBlockInfo *info = std::addressof(*it);
it = this->block_list.erase(it);
this->manager->Free(info);
}
}

size_t KPageGroup::GetNumPages() const {
size_t num_pages = 0;

for (const auto &it : *this) {
num_pages += it.GetNumPages();
}

return num_pages;
}

Result KPageGroup::AddBlock(KVirtualAddress addr, size_t num_pages) {
/* Succeed immediately if we're adding no pages. */
R_UNLESS(num_pages != 0, ResultSuccess());

/* Check for overflow. */
MESOSPHERE_ASSERT(addr < addr + num_pages * PageSize);

/* Try to just append to the last block. */
if (!this->block_list.empty()) {
auto it = --(this->block_list.end());
R_UNLESS(!it->TryConcatenate(addr, num_pages), ResultSuccess());
}

/* Allocate a new block. */
KBlockInfo *new_block = this->manager->Allocate();
R_UNLESS(new_block != nullptr, svc::ResultOutOfResource());

/* Initialize the block. */
new_block->Initialize(addr, num_pages);
this->block_list.push_back(*new_block);

return ResultSuccess();
}

void KPageGroup::Open() const {
auto &mm = Kernel::GetMemoryManager();

for (const auto &it : *this) {
mm.Open(it.GetAddress(), it.GetNumPages());
}
}

void KPageGroup::Close() const {
auto &mm = Kernel::GetMemoryManager();

for (const auto &it : *this) {
mm.Close(it.GetAddress(), it.GetNumPages());
}
}

bool KPageGroup::IsEquivalentTo(const KPageGroup &rhs) const {
auto lit = this->block_list.cbegin();
auto rit = rhs.block_list.cbegin();
auto lend = this->block_list.cend();
auto rend = rhs.block_list.cend();

while (lit != lend && rit != rend) {
if (*lit != *rit) {
return false;
}

++lit;
++rit;
}

return lit == lend && rit == rend;
}

}
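Note how AddBlock() coalesces: when the new range starts exactly at the last block's GetEndAddress(), TryConcatenate() simply extends that block's num_pages and AddBlock() returns success without touching the KBlockInfoManager; only a non-contiguous range costs a fresh KBlockInfo allocation, which is also the only path that can fail (with svc::ResultOutOfResource()).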
@@ -55,8 +55,8 @@ namespace ams::kern {
const size_t needed_size = this->blocks[index].GetSize();

for (s32 i = index; i < static_cast<s32>(this->num_blocks); i++) {
if (const KVirtualAddress addr = this->blocks[index].PopBlock(); addr != Null<KVirtualAddress>) {
if (const size_t allocated_size = this->blocks[index].GetSize(); allocated_size > needed_size) {
if (const KVirtualAddress addr = this->blocks[i].PopBlock(); addr != Null<KVirtualAddress>) {
if (const size_t allocated_size = this->blocks[i].GetSize(); allocated_size > needed_size) {
this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
return addr;
@@ -18,11 +18,15 @@
namespace ams::kern {

/* Declare kernel data members in kernel TU. */
Kernel::State Kernel::s_state = Kernel::State::Invalid;
KThread Kernel::s_main_threads[cpu::NumCores];
KThread Kernel::s_idle_threads[cpu::NumCores];
KResourceLimit Kernel::s_system_resource_limit;
KMemoryManager Kernel::s_memory_manager;
Kernel::State Kernel::s_state = Kernel::State::Invalid;
KThread Kernel::s_main_threads[cpu::NumCores];
KThread Kernel::s_idle_threads[cpu::NumCores];
KResourceLimit Kernel::s_system_resource_limit;
KMemoryManager Kernel::s_memory_manager;
KPageTableManager Kernel::s_page_table_manager;
KMemoryBlockSlabManager Kernel::s_app_memory_block_manager;
KMemoryBlockSlabManager Kernel::s_sys_memory_block_manager;
KBlockInfoManager Kernel::s_block_info_manager;

void Kernel::InitializeCoreLocalRegion(s32 core_id) {
/* Construct the core local region object in place. */

@@ -68,10 +72,32 @@ namespace ams::kern {
SetCurrentThread(main_thread);
SetCurrentProcess(nullptr);

/* TODO: Initialize the interrupt manager. */
/* Initialize the interrupt manager, hardware timer, and scheduler */
GetInterruptManager().Initialize(core_id);
GetHardwareTimer().Initialize(core_id);
GetScheduler().Initialize(idle_thread);
}

void Kernel::InitializeResourceManagers(KVirtualAddress address, size_t size) {
/* Ensure that the buffer is suitable for our use. */
const size_t app_size = ApplicationMemoryBlockSlabHeapSize * sizeof(KMemoryBlock);
const size_t sys_size = SystemMemoryBlockSlabHeapSize * sizeof(KMemoryBlock);
const size_t info_size = BlockInfoSlabHeapSize * sizeof(KBlockInfo);
const size_t fixed_size = util::AlignUp(app_size + sys_size + info_size, PageSize);
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), PageSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
MESOSPHERE_ABORT_UNLESS(fixed_size < size);

size_t pt_size = size - fixed_size;
const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(pt_size), PageSize);
MESOSPHERE_ABORT_UNLESS(rc_size < pt_size);
pt_size -= rc_size;

/* Initialize the slabheaps. */
s_app_memory_block_manager.Initialize(address + pt_size, app_size);
s_sys_memory_block_manager.Initialize(address + pt_size + app_size, sys_size);
s_block_info_manager.Initialize(address + pt_size + app_size + sys_size, info_size);
s_page_table_manager.Initialize(address, pt_size, GetPointer<KPageTableManager::RefCount>(address + pt_size + fixed_size));
}

}
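Working through the arithmetic in InitializeResourceManagers() above: the fixed slab region of fixed_size = AlignUp(app_size + sys_size + info_size, PageSize) bytes and the page-aligned reference-count area of rc_size bytes are carved off the end of the buffer, so the page-table heap gets the first pt_size bytes starting at address; the application, system, and block-info slab heaps then sit at address + pt_size, address + pt_size + app_size, and address + pt_size + app_size + sys_size respectively; and the KPageTableManager reference counts occupy the final rc_size bytes starting at address + pt_size + fixed_size. The pieces add back up to the original size, which the aborts above make possible.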
@@ -45,10 +45,17 @@ namespace ams::kern {
init::InitializeKPageBufferSlabHeap();
}

/* Note: this is not actually done here, it's done later in main after more stuff is setup. */
/* However, for testing (and to manifest this code in the produced binary, this is here for now. */
/* TODO: Do this better. */
/* Copy the Initial Process Binary to safe memory. */
CopyInitialProcessBinaryToKernelMemory();

/* Initialize the KObject Slab Heaps. */
init::InitializeSlabHeaps();

/* Initialize the Dynamic Slab Heaps. */
{
const auto &pt_heap_region = KMemoryLayout::GetPageTableHeapRegion();
Kernel::InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
}
}

/* TODO: Implement more of Main() */
@@ -242,7 +242,7 @@ namespace ams::kern::init::loader {
MESOSPHERE_ABORT_UNLESS(util::IsAligned(rw_offset, 0x1000));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(bss_end_offset, 0x1000));
const uintptr_t bss_offset = layout->bss_offset;
const uintptr_t ini_load_offset = layout->ini_load_offset;
const uintptr_t ini_load_offset = layout->ini_load_offset;
const uintptr_t dynamic_offset = layout->dynamic_offset;
const uintptr_t init_array_offset = layout->init_array_offset;
const uintptr_t init_array_end_offset = layout->init_array_end_offset;