kern: implement KMemoryManager init

Michael Scire 2020-02-06 17:40:57 -08:00
parent 1de607c183
commit f7d3d50f33
9 changed files with 283 additions and 9 deletions

View file

@ -51,10 +51,11 @@ namespace ams::kern {
KMemoryRegionType_VirtualKernelInitPt = 0x19A,
KMemoryRegionType_VirtualDramMetadataPool = 0x29A,
KMemoryRegionType_VirtualDramManagedPool = 0x31A,
KMemoryRegionType_VirtualDramApplicationPool = 0x271A,
KMemoryRegionType_VirtualDramAppletPool = 0x1B1A,
KMemoryRegionType_VirtualDramSystemPool = 0x2B1A,
KMemoryRegionType_VirtualDramSystemNonSecurePool = 0x331A,
KMemoryRegionType_Uart = 0x1D,
KMemoryRegionType_InterruptDistributor = 0x4D | KMemoryRegionAttr_NoUserMap,
@ -462,6 +463,10 @@ namespace ams::kern {
return GetPhysicalMemoryRegionTree().FindFirstDerivedRegion(KMemoryRegionType_Uart)->GetPairAddress();
}
static NOINLINE KMemoryRegion &GetMetadataPoolRegion() {
return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_VirtualDramMetadataPool);
}
static NOINLINE auto GetCarveoutRegionExtents() {
return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionAttr_CarveoutProtected);
}

View file

@ -15,19 +15,91 @@
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
#include <mesosphere/kern_k_memory_layout.hpp>
#include <mesosphere/kern_k_page_heap.hpp>
namespace ams::kern {
class KMemoryManager {
public:
enum Pool {
Pool_Application = 0,
Pool_Applet = 1,
Pool_System = 2,
Pool_SystemNonSecure = 3,
Pool_Count,
Pool_Shift = 4,
Pool_Mask = (0xF << Pool_Shift),
};
enum Direction {
Direction_FromFront = 0,
Direction_FromBack = 1,
Direction_Shift = 0,
Direction_Mask = (0xF << Direction_Shift),
};
static constexpr size_t MaxManagerCount = 10;
private:
class Impl {
private:
using RefCount = u16;
private:
KPageHeap heap;
RefCount *page_reference_counts;
KVirtualAddress metadata_region;
Pool pool;
Impl *next;
Impl *prev;
public:
constexpr Impl() : heap(), page_reference_counts(), metadata_region(), pool(), next(), prev() { /* ... */ }
size_t Initialize(const KMemoryRegion *region, Pool pool, KVirtualAddress metadata_region, KVirtualAddress metadata_region_end);
constexpr ALWAYS_INLINE void SetNext(Impl *n) { this->next = n; }
constexpr ALWAYS_INLINE void SetPrev(Impl *n) { this->prev = n; }
public:
static size_t CalculateMetadataOverheadSize(size_t region_size);
};
private:
KLightLock pool_locks[Pool_Count];
Impl *pool_managers_head[Pool_Count];
Impl *pool_managers_tail[Pool_Count];
Impl managers[MaxManagerCount];
size_t num_managers;
u64 optimized_process_ids[Pool_Count];
bool has_optimized_process[Pool_Count];
public:
constexpr KMemoryManager()
: pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process()
{
/* ... */
}
void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size);
public:
static size_t CalculateMetadataOverheadSize(size_t region_size) {
return Impl::CalculateMetadataOverheadSize(region_size);
}
static constexpr ALWAYS_INLINE u32 EncodeOption(Pool pool, Direction dir) {
return (pool << Pool_Shift) | (dir << Direction_Shift);
}
static constexpr ALWAYS_INLINE Pool GetPool(u32 option) {
return static_cast<Pool>((option & Pool_Mask) >> Pool_Shift);
}
static constexpr ALWAYS_INLINE Direction GetDirection(u32 option) {
return static_cast<Direction>((option & Direction_Mask) >> Direction_Shift);
}
static constexpr ALWAYS_INLINE std::tuple<Pool, Direction> DecodeOption(u32 option) {
return std::make_tuple(GetPool(option), GetDirection(option));
}
};
}
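
Editor's note: as a hedged sanity check (not part of the commit), the option bit-packing above can be exercised with a few static_asserts; they rely only on the enum values and encoders declared in this header.

using ams::kern::KMemoryManager;
/* Pool_Applet (1) lands in bits 4-7, Direction_FromBack (1) in bits 0-3, so the encoded option is 0x11. */
static_assert(KMemoryManager::EncodeOption(KMemoryManager::Pool_Applet, KMemoryManager::Direction_FromBack) == 0x11);
static_assert(KMemoryManager::GetPool(0x11) == KMemoryManager::Pool_Applet);
static_assert(KMemoryManager::GetDirection(0x11) == KMemoryManager::Direction_FromBack);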

View file

@ -19,11 +19,39 @@
namespace ams::kern {
class KPageHeap {
private:
static constexpr inline size_t MemoryBlockPageShifts[] = { 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E };
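/* These shifts correspond to block sizes of 4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB, and 1 GiB. */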
static constexpr size_t NumMemoryBlockPageShifts = util::size(MemoryBlockPageShifts);
private:
class Block {
private:
class Bitmap {
/* A hierarchical bitmap (up to MaxDepth = 4 levels) tracking page usage. */
public:
static constexpr size_t MaxDepth = 4;
private:
u64 *bit_storages[MaxDepth];
size_t num_bits;
size_t depth;
public:
constexpr Bitmap() : bit_storages(), num_bits(), depth() { /* ... */ }
u64 *Initialize(u64 *storage, size_t size) {
/* Initially, everything is un-set. */
this->num_bits = 0;
/* Calculate the needed bitmap depth. */
this->depth = static_cast<size_t>(GetRequiredDepth(size));
MESOSPHERE_ASSERT(this->depth <= MaxDepth);
/* Set the bitmap pointers. */
for (s32 d = static_cast<s32>(this->depth) - 1; d >= 0; d--) {
this->bit_storages[d] = storage;
size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64);
storage += size;
}
return storage;
}
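/* Worked example (editor's note, under the assumption that GetRequiredDepth counts how */
/* many divisions by BITSIZEOF(u64) reduce the bit count to zero): a 300-bit map has */
/* depth 2. The loop then packs storage back-to-back: bit_storages[1] gets */
/* util::AlignUp(300, 64) / 64 = 5 leaf words, bit_storages[0] gets 1 summary word, */
/* and storage + 6 is returned as the start of the next block's metadata. */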
private:
static constexpr s32 GetRequiredDepth(size_t region_size) {
s32 depth = 0;
@ -45,6 +73,30 @@ namespace ams::kern {
return overhead_bits * sizeof(u64);
}
};
private:
Bitmap bitmap;
KVirtualAddress heap_address;
uintptr_t end_offset;
size_t block_shift;
size_t next_block_shift;
public:
constexpr Block() : bitmap(), heap_address(), end_offset(), block_shift(), next_block_shift() { /* ... */ }
u64 *Initialize(KVirtualAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
/* Set shifts. */
this->block_shift = bs;
this->next_block_shift = nbs;
/* Align up the address. */
KVirtualAddress end = addr + size;
const size_t align = (this->next_block_shift != 0) ? (1ul << this->next_block_shift) : (1ul << this->block_shift);
addr = util::AlignDown(GetInteger(addr), align);
end = util::AlignUp(GetInteger(end), align);
this->heap_address = addr;
this->end_offset = (end - addr) / (1ul << this->block_shift);
return this->bitmap.Initialize(bit_storage, this->end_offset);
}
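/* Worked example (editor's note): with bs = 12 (4 KiB blocks) and nbs = 16 (64 KiB), */
/* align = 0x10000; a span [0x1234000, 0x1C00000) widens to [0x1230000, 0x1C00000), */
/* so end_offset = 0x9D0000 >> 12 = 0x9D0 block slots are tracked by this->bitmap. */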
public:
static constexpr size_t CalculateMetadataOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) {
const size_t cur_block_size = (1ul << cur_block_shift);
@ -53,8 +105,26 @@ namespace ams::kern {
return Bitmap::CalculateMetadataOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size);
}
};
private:
KVirtualAddress heap_address;
size_t heap_size;
size_t used_size;
size_t num_blocks;
Block blocks[NumMemoryBlockPageShifts];
private:
void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size, const size_t *block_shifts, size_t num_block_shifts);
public:
constexpr KPageHeap() : heap_address(), heap_size(), used_size(), num_blocks(), blocks() { /* ... */ }
void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size) {
return Initialize(heap_address, heap_size, metadata_address, metadata_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
}
private:
static size_t CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
public:
static size_t CalculateMetadataOverheadSize(size_t region_size) {
return CalculateMetadataOverheadSize(region_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
}
};
}

View file

@ -29,6 +29,7 @@ namespace ams::kern {
class KInterruptManager;
class KInterruptTaskManager;
class KScheduler;
class KMemoryManager;
class Kernel {
public:
@ -42,6 +43,7 @@ namespace ams::kern {
static KThread s_main_threads[cpu::NumCores];
static KThread s_idle_threads[cpu::NumCores];
static KResourceLimit s_system_resource_limit;
static KMemoryManager s_memory_manager;
private:
static ALWAYS_INLINE KCoreLocalContext &GetCoreLocalContext() {
return reinterpret_cast<KCoreLocalRegion *>(cpu::GetCoreLocalRegionAddress())->current.context;
@ -84,6 +86,10 @@ namespace ams::kern {
return GetCoreLocalContext().hardware_timer;
}
static ALWAYS_INLINE KMemoryManager &GetMemoryManager() {
return s_memory_manager;
}
static ALWAYS_INLINE KResourceLimit &GetSystemResourceLimit() {
return s_system_resource_limit;
}

View file

@ -43,6 +43,7 @@ namespace ams::kern {
#define MESOSPHERE_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(expr, "Assertion failed: %s", #expr)
#define MESOSPHERE_R_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(R_SUCCEEDED(expr), "Result assertion failed: %s", #expr)
#define MESOSPHERE_UNREACHABLE_DEFAULT_CASE() default: MESOSPHERE_PANIC("Unreachable default case entered")
#ifdef MESOSPHERE_ENABLE_THIS_ASSERT
#define MESOSPHERE_ASSERT_THIS() MESOSPHERE_ASSERT(this != nullptr)

View file

@ -19,16 +19,103 @@ namespace ams::kern {
namespace {
constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
switch (type) {
case KMemoryRegionType_VirtualDramApplicationPool: return KMemoryManager::Pool_Application;
case KMemoryRegionType_VirtualDramAppletPool: return KMemoryManager::Pool_Applet;
case KMemoryRegionType_VirtualDramSystemPool: return KMemoryManager::Pool_System;
case KMemoryRegionType_VirtualDramSystemNonSecurePool: return KMemoryManager::Pool_SystemNonSecure;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
}
void KMemoryManager::Initialize(KVirtualAddress metadata_region, size_t metadata_region_size) {
/* Clear the metadata region to zero. */
const KVirtualAddress metadata_region_end = metadata_region + metadata_region_size;
std::memset(GetVoidPointer(metadata_region), 0, metadata_region_size);
/* Traverse the virtual memory layout tree, initializing each manager as appropriate. */
while (true) {
/* Locate the region that should initialize the current manager. */
const KMemoryRegion *region = nullptr;
for (const auto &it : KMemoryLayout::GetVirtualMemoryRegionTree()) {
/* We only care about regions that we need to create managers for. */
if (!it.IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) {
continue;
}
/* We want to initialize the managers in order. */
if (it.GetAttributes() != this->num_managers) {
continue;
}
region = std::addressof(it);
break;
}
/* If we didn't find a region, then we're done initializing managers. */
if (region == nullptr) {
break;
}
/* Ensure that the region is correct. */
MESOSPHERE_ASSERT(region->GetAddress() != Null<decltype(region->GetAddress())>);
MESOSPHERE_ASSERT(region->GetSize() > 0);
MESOSPHERE_ASSERT(region->GetEndAddress() >= region->GetAddress());
MESOSPHERE_ASSERT(region->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool));
MESOSPHERE_ASSERT(region->GetAttributes() == this->num_managers);
/* Initialize a new manager for the region. */
const Pool pool = GetPoolFromMemoryRegionType(region->GetType());
Impl *manager = std::addressof(this->managers[this->num_managers++]);
MESOSPHERE_ABORT_UNLESS(this->num_managers <= util::size(this->managers));
const size_t cur_size = manager->Initialize(region, pool, metadata_region, metadata_region_end);
metadata_region += cur_size;
MESOSPHERE_ABORT_UNLESS(metadata_region <= metadata_region_end);
/* Insert the manager into the pool list. */
if (this->pool_managers_tail[pool] == nullptr) {
this->pool_managers_head[pool] = manager;
} else {
this->pool_managers_tail[pool]->SetNext(manager);
manager->SetPrev(this->pool_managers_tail[pool]);
}
this->pool_managers_tail[pool] = manager;
}
}
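/* Note (editor's addition): each pass re-walks the region tree, which is quadratic in the */
/* number of managed regions, but MaxManagerCount = 10 keeps that bound trivial; matching */
/* GetAttributes() against num_managers guarantees managers initialize in index order. */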
size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) {
/* Calculate metadata sizes. */
const size_t ref_count_size = (region->GetSize() / PageSize) * sizeof(u16);
const size_t optimize_map_size = (util::AlignUp((region->GetSize() / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
const size_t manager_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
const size_t page_heap_size = KPageHeap::CalculateMetadataOverheadSize(region->GetSize());
const size_t total_metadata_size = manager_size + page_heap_size;
MESOSPHERE_ABORT_UNLESS(manager_size <= total_metadata_size);
MESOSPHERE_ABORT_UNLESS(metadata + total_metadata_size <= metadata_end);
MESOSPHERE_ABORT_UNLESS(util::IsAligned(total_metadata_size, PageSize));
/* Setup region. */
this->pool = p;
this->metadata_region = metadata;
this->page_reference_counts = GetPointer<RefCount>(metadata + optimize_map_size);
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(this->metadata_region), PageSize));
/* Initialize the manager's KPageHeap. */
this->heap.Initialize(region->GetAddress(), region->GetSize(), metadata + manager_size, page_heap_size);
return total_metadata_size;
}
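/* Worked example (editor's note): a 64 MiB region has 0x4000 pages, so ref_count_size */
/* = 0x4000 * sizeof(u16) = 32 KiB and optimize_map_size = (0x4000 / 64) * 8 = 2 KiB; */
/* manager_size rounds up to 36 KiB, and the KPageHeap metadata follows immediately after. */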
size_t KMemoryManager::Impl::CalculateMetadataOverheadSize(size_t region_size) {
const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
const size_t optimize_map_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
const size_t manager_meta_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
const size_t page_heap_size = KPageHeap::CalculateMetadataOverheadSize(region_size);
return manager_meta_size + page_heap_size;
}
}

View file

@ -17,10 +17,36 @@
namespace ams::kern {
void KPageHeap::Initialize(KVirtualAddress address, size_t size, KVirtualAddress metadata_address, size_t metadata_size, const size_t *block_shifts, size_t num_block_shifts) {
/* Check our assumptions. */
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
MESOSPHERE_ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
const KVirtualAddress metadata_end = metadata_address + metadata_size;
/* Set our members. */
this->heap_address = address;
this->heap_size = size;
this->num_blocks = num_block_shifts;
/* Setup bitmaps. */
u64 *cur_bitmap_storage = GetPointer<u64>(metadata_address);
for (size_t i = 0; i < num_block_shifts; i++) {
const size_t cur_block_shift = block_shifts[i];
const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
cur_bitmap_storage = this->blocks[i].Initialize(this->heap_address, this->heap_size, cur_block_shift, next_block_shift, cur_bitmap_storage);
}
/* Ensure we didn't overextend our bounds. */
MESOSPHERE_ABORT_UNLESS(KVirtualAddress(cur_bitmap_storage) <= metadata_end);
}
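/* Note (editor's addition): each Block::Initialize call returns the first storage word it */
/* did not consume, so the per-block bitmaps pack contiguously and the final pointer makes */
/* the bounds check above sufficient for the whole metadata region. */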
size_t KPageHeap::CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts) {
size_t overhead_size = 0;
for (size_t i = 0; i < num_block_shifts; i++) {
const size_t cur_block_shift = block_shifts[i];
const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
overhead_size += KPageHeap::Block::CalculateMetadataOverheadSize(region_size, cur_block_shift, next_block_shift);
}
return util::AlignUp(overhead_size, PageSize);
}

View file

@ -22,6 +22,7 @@ namespace ams::kern {
KThread Kernel::s_main_threads[cpu::NumCores];
KThread Kernel::s_idle_threads[cpu::NumCores];
KResourceLimit Kernel::s_system_resource_limit;
KMemoryManager Kernel::s_memory_manager;
void Kernel::InitializeCoreLocalRegion(s32 core_id) {
/* Construct the core local region object in place. */

View file

@ -38,6 +38,12 @@ namespace ams::kern {
/* Initialize KSystemControl. */
KSystemControl::Initialize();
/* Initialize the memory manager. */
{
const auto &metadata_region = KMemoryLayout::GetMetadataPoolRegion();
Kernel::GetMemoryManager().Initialize(metadata_region.GetAddress(), metadata_region.GetSize());
}
/* Note: this is not actually done here; it's done later in main, after more stuff is set up. */
/* However, for testing (and to manifest this code in the produced binary), this is here for now. */
/* TODO: Do this better. */