kern: fully implement slabheap init

This commit is contained in:
Michael Scire 2020-01-30 01:41:59 -08:00
parent 7d6b16d7fb
commit d5a4c17ee7
12 changed files with 319 additions and 13 deletions

View file

@ -32,6 +32,7 @@
/* Initialization headers. */
#include "mesosphere/init/kern_init_elf.hpp"
#include "mesosphere/init/kern_init_layout.hpp"
#include "mesosphere/init/kern_init_slab_setup.hpp"
#include "mesosphere/init/kern_init_page_table_select.hpp"
#include "mesosphere/init/kern_init_arguments_select.hpp"
#include "mesosphere/kern_k_memory_layout.hpp"

View file

@ -30,21 +30,21 @@ namespace ams::kern::arm64 {
__asm__ __volatile__(
" prfm pstl1keep, %[packed_tickets]\n"
"loop1:\n"
"1:\n"
" ldaxr %w[tmp0], %[packed_tickets]\n"
" add %w[tmp0], %w[tmp0], #0x10000\n"
" stxr %w[tmp1], %w[tmp0], %[packed_tickets]\n"
" cbnz %w[tmp1], loop1\n"
" cbnz %w[tmp1], 1b\n"
" \n"
" and %w[tmp1], %w[tmp0], #0xFFFF\n"
" cmp %w[tmp1], %w[tmp0], lsr #16\n"
" b.eq done"
" sevl\n"
"loop2:\n"
"2:\n"
" wfe\n"
" ldaxrh %w[tmp1], %[packed_tickets]\n"
" cmp %w[tmp1], %w[tmp0], lsr #16\n"
" b.ne loop2\n"
" b.ne 2b\n"
"done:\n"
: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [packed_tickets]"+Q"(this->packed_tickets)
:
@ -76,18 +76,18 @@ namespace ams::kern::arm64 {
__asm__ __volatile__(
" prfm pstl1keep, %[next_ticket]\n"
"loop1:\n"
"1:\n"
" ldaxrh %w[tmp0], %[next_ticket]\n"
" add %w[tmp1], %w[tmp0], #0x1\n"
" stxrh %w[got_lock], %w[tmp1], %[next_ticket]\n"
" cbnz %w[got_lock], loop1\n"
" cbnz %w[got_lock], 1b\n"
" \n"
" sevl\n"
"loop2:\n"
"2:\n"
" wfe\n"
" ldaxrh %w[tmp1], %[current_ticket]\n"
" cmp %w[tmp1], %w[tmp0]\n"
" b.ne loop2\n"
" b.ne 2b\n"
: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [next_ticket]"+Q"(this->next_ticket)
: [current_ticket]"Q"(this->current_ticket)
: "cc", "memory"

View file

@ -36,6 +36,10 @@ namespace ams::kern {
static u64 GenerateRandomRange(u64 min, u64 max);
};
public:
/* Randomness. */
static void GenerateRandomBytes(void *dst, size_t size);
static u64 GenerateRandomRange(u64 min, u64 max);
/* Panic. */
static NORETURN void StopSystem();
};

View file

@ -0,0 +1,45 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_k_slab_heap.hpp>
namespace ams::kern::init {
/* Counts of each slab-allocated kernel object type. Baseline values are */
/* compile-time constants; InitializeSlabResourceCounts() may adjust them */
/* dynamically (e.g. increasing num_KThread when system control requests it). */
struct KSlabResourceCounts {
size_t num_KProcess;
size_t num_KThread;
size_t num_KEvent;
size_t num_KInterruptEvent;
size_t num_KPort;
size_t num_KSharedMemory;
size_t num_KTransferMemory;
size_t num_KCodeMemory;
size_t num_KDeviceAddressSpace;
size_t num_KSession;
size_t num_KLightSession;
size_t num_KObjectName;
size_t num_KResourceLimit;
size_t num_KDebug;
};
/* Applies dynamic adjustments to the default slab counts; call once during kernel init. */
NOINLINE void InitializeSlabResourceCounts();
/* Returns the current slab resource counts. */
const KSlabResourceCounts &GetSlabResourceCounts();
/* Returns the total virtual size required for all slab heaps. */
size_t CalculateTotalSlabHeapSize();
/* Carves up the slab region and initializes each object type's slab heap. */
NOINLINE void InitializeSlabHeaps();
}

View file

@ -409,8 +409,12 @@ namespace ams::kern {
return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelMiscExceptionStack, static_cast<u32>(core_id))->GetAddress();
}
/* Returns the base virtual address of the memory region reserved for kernel slab heaps. */
static NOINLINE KVirtualAddress GetSlabRegionAddress() {
return GetVirtualMemoryBlockTree().FindFirstBlockByType(KMemoryRegionType_KernelSlab)->GetAddress();
}
static NOINLINE KVirtualAddress GetCoreLocalRegionAddress() {
return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_CoreLocal)->GetAddress();
return GetVirtualMemoryBlockTree().FindFirstBlockByType(KMemoryRegionType_CoreLocal)->GetAddress();
}
static void InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start);

View file

@ -45,6 +45,8 @@ namespace ams::kern {
ALWAYS_INLINE ~KScopedSpinLock() {
this->lock_ptr->Unlock();
}
explicit ALWAYS_INLINE KScopedSpinLock(KSpinLock &l) : KScopedSpinLock(std::addressof(l)) { /* ... */ }
};
class KScopedAlignedSpinLock {
@ -57,6 +59,7 @@ namespace ams::kern {
ALWAYS_INLINE ~KScopedAlignedSpinLock() {
this->lock_ptr->Unlock();
}
explicit ALWAYS_INLINE KScopedAlignedSpinLock(KAlignedSpinLock &l) : KScopedAlignedSpinLock(std::addressof(l)) { /* ... */ }
};
class KScopedNotAlignedSpinLock {
@ -69,6 +72,8 @@ namespace ams::kern {
ALWAYS_INLINE ~KScopedNotAlignedSpinLock() {
this->lock_ptr->Unlock();
}
explicit ALWAYS_INLINE KScopedNotAlignedSpinLock(KNotAlignedSpinLock &l) : KScopedNotAlignedSpinLock(std::addressof(l)) { /* ... */ }
};
}

View file

@ -20,6 +20,12 @@ namespace ams::kern {
namespace {
/* Global variables for randomness. */
/* Incredibly, N really does use std:: randomness... */
bool g_initialized_random_generator;
std::mt19937 g_random_generator;
KSpinLock g_random_lock;
ALWAYS_INLINE size_t GetRealMemorySizeForInit() {
/* TODO: Move this into a header for the MC in general. */
constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
@ -154,6 +160,26 @@ namespace ams::kern {
}
}
/* Randomness. */
/* Fills dst with size bytes of entropy obtained from the secure monitor. */
void KSystemControl::GenerateRandomBytes(void *dst, size_t size) {
/* The SMC returns entropy in 7 of its 8 argument registers, so at most 0x38 bytes fit. */
MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
smc::GenerateRandomBytes(dst, size);
}
/* Returns a uniformly-distributed random value in [min, max] (both bounds inclusive). */
u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
/* Interrupts must be disabled while holding the spin lock. */
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(g_random_lock);
/* Lazily seed the Mersenne Twister (std::mt19937) from secure-monitor entropy on first use. */
if (AMS_UNLIKELY(!g_initialized_random_generator)) {
u64 seed;
GenerateRandomBytes(&seed, sizeof(seed));
g_random_generator.seed(seed);
g_initialized_random_generator = true;
}
/* std::uniform_int_distribution produces values on the closed interval [min, max]. */
return (std::uniform_int_distribution<u64>(min, max))(g_random_generator);
}
void KSystemControl::StopSystem() {
/* Display a panic screen via exosphere. */
smc::Panic(0xF00);

View file

@ -55,7 +55,9 @@ namespace ams::kern::smc {
:
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* TODO: Restore X18 */
/* Restore the CoreLocalRegion into X18. */
cpu::SetCoreLocalRegionAddress(cpu::GetTpidrEl1());
}
/* Store arguments to output. */
@ -98,6 +100,9 @@ namespace ams::kern::smc {
args.x[7] = x7;
}
/* Global lock for generate random bytes. */
KSpinLock g_generate_random_lock;
}
/* SMC functionality needed for init. */
@ -119,9 +124,9 @@ namespace ams::kern::smc {
void GenerateRandomBytes(void *dst, size_t size) {
/* Call SmcGenerateRandomBytes() */
/* TODO: Lock this to ensure only one core calls at once. */
SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size };
MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0]));
CallPrivilegedSecureMonitorFunctionForInit(args);
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
@ -138,6 +143,24 @@ namespace ams::kern::smc {
}
/* Fills dst with size bytes of entropy via the GenerateRandomBytes secure monitor call. */
void GenerateRandomBytes(void *dst, size_t size) {
/* Setup for call. */
SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size };
/* Output arrives in args.x[1..7], so at most sizeof(args) - sizeof(args.x[0]) bytes fit. */
MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0]));
/* Make call. */
{
/* Serialize SMC invocations across cores; interrupts stay off while the lock is held. */
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(g_generate_random_lock);
CallPrivilegedSecureMonitorFunction(args);
}
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
/* Copy output. */
std::memcpy(dst, &args.x[1], size);
}
void NORETURN Panic(u32 color) {
SecureMonitorArguments args = { FunctionId_Panic, color };
CallPrivilegedSecureMonitorFunction(args);

View file

@ -84,6 +84,7 @@ namespace ams::kern::smc {
};
/* TODO: Rest of Secure Monitor API. */
void GenerateRandomBytes(void *dst, size_t size);
void NORETURN Panic(u32 color);
namespace init {

View file

@ -0,0 +1,190 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::init {
#define SLAB_COUNT(CLASS) g_slab_resource_counts.num_##CLASS
#define FOREACH_SLAB_TYPE(HANDLER, ...) \
HANDLER(KProcess, (SLAB_COUNT(KProcess)), ## __VA_ARGS__) \
HANDLER(KThread, (SLAB_COUNT(KThread)), ## __VA_ARGS__) \
HANDLER(KLinkedListNode, (SLAB_COUNT(KThread) * 17), ## __VA_ARGS__) \
HANDLER(KEvent, (SLAB_COUNT(KEvent)), ## __VA_ARGS__) \
HANDLER(KInterruptEvent, (SLAB_COUNT(KInterruptEvent)), ## __VA_ARGS__) \
HANDLER(KInterruptEventTask, (SLAB_COUNT(KInterruptEvent)), ## __VA_ARGS__) \
HANDLER(KPort, (SLAB_COUNT(KPort)), ## __VA_ARGS__) \
HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ## __VA_ARGS__) \
HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ## __VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ## __VA_ARGS__) \
HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ## __VA_ARGS__) \
HANDLER(KDeviceAddressSpace, (SLAB_COUNT(KDeviceAddressSpace)), ## __VA_ARGS__) \
HANDLER(KSession, (SLAB_COUNT(KSession)), ## __VA_ARGS__) \
HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ## __VA_ARGS__) \
HANDLER(KLightSession, (SLAB_COUNT(KLightSession)), ## __VA_ARGS__) \
HANDLER(KThreadLocalPage, (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), ## __VA_ARGS__) \
HANDLER(KObjectName, (SLAB_COUNT(KObjectName)), ## __VA_ARGS__) \
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ## __VA_ARGS__) \
HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ## __VA_ARGS__) \
HANDLER(KDebug, (SLAB_COUNT(KDebug)), ## __VA_ARGS__)
namespace {
/* Expands each FOREACH_SLAB_TYPE entry into an enumerator; KSlabType_Count is the total. */
#define DEFINE_SLAB_TYPE_ENUM_MEMBER(NAME, COUNT, ...) KSlabType_##NAME,
enum KSlabType : u32 {
FOREACH_SLAB_TYPE(DEFINE_SLAB_TYPE_ENUM_MEMBER)
KSlabType_Count,
};
#undef DEFINE_SLAB_TYPE_ENUM_MEMBER
/* Constexpr counts. */
/* Baseline object counts for each slab type. */
constexpr size_t SlabCountKProcess = 80;
constexpr size_t SlabCountKThread = 800;
constexpr size_t SlabCountKEvent = 700;
constexpr size_t SlabCountKInterruptEvent = 100;
constexpr size_t SlabCountKPort = 256;
constexpr size_t SlabCountKSharedMemory = 80;
constexpr size_t SlabCountKTransferMemory = 200;
constexpr size_t SlabCountKCodeMemory = 10;
constexpr size_t SlabCountKDeviceAddressSpace = 300;
constexpr size_t SlabCountKSession = 900;
constexpr size_t SlabCountKLightSession = 100;
constexpr size_t SlabCountKObjectName = 7;
constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = cpu::NumCores;
/* Additional KThreads granted when ShouldIncreaseThreadResourceLimit() is true. */
constexpr size_t SlabCountExtraKThread = 160;
/* This is used for gaps between the slab allocators. */
constexpr size_t SlabRegionReservedSize = 2_MB;
/* Global to hold our resource counts. */
/* Initialized to the compile-time defaults; adjusted by InitializeSlabResourceCounts(). */
KSlabResourceCounts g_slab_resource_counts = {
.num_KProcess = SlabCountKProcess,
.num_KThread = SlabCountKThread,
.num_KEvent = SlabCountKEvent,
.num_KInterruptEvent = SlabCountKInterruptEvent,
.num_KPort = SlabCountKPort,
.num_KSharedMemory = SlabCountKSharedMemory,
.num_KTransferMemory = SlabCountKTransferMemory,
.num_KCodeMemory = SlabCountKCodeMemory,
.num_KDeviceAddressSpace = SlabCountKDeviceAddressSpace,
.num_KSession = SlabCountKSession,
.num_KLightSession = SlabCountKLightSession,
.num_KObjectName = SlabCountKObjectName,
.num_KResourceLimit = SlabCountKResourceLimit,
.num_KDebug = SlabCountKDebug,
};
/* Carves a slab heap for T out of the slab region starting at address, returning the */
/* first address past the new heap. The heap start is aligned up for T; its size covers */
/* num_objects objects, rounded up to pointer alignment. */
template<typename T>
NOINLINE KVirtualAddress InitializeSlabHeap(KVirtualAddress address, size_t num_objects) {
const size_t size = util::AlignUp(sizeof(T) * num_objects, alignof(void *));
KVirtualAddress start = util::AlignUp(GetInteger(address), alignof(T));
if (size > 0) {
/* Verify the heap's final byte still lies within the kernel slab region. */
MESOSPHERE_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().FindContainingBlock(GetInteger(start) + size - 1)->IsDerivedFrom(KMemoryRegionType_KernelSlab));
T::InitializeSlabHeap(GetVoidPointer(start), size);
}
return start + size;
}
}
/* Returns the (possibly dynamically adjusted) slab object counts. */
const KSlabResourceCounts &GetSlabResourceCounts() {
return g_slab_resource_counts;
}
/* Applies dynamic adjustments to the slab resource counts. */
void InitializeSlabResourceCounts() {
    /* All baseline counts are established at compile-time (unlike Nintendo, who */
    /* fills in every field here); only dynamic adjustments remain to be applied. */
    const bool want_extra_threads = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
    if (want_extra_threads) {
        g_slab_resource_counts.num_KThread += SlabCountExtraKThread;
    }
}
/* Computes the total virtual size needed for every slab heap, including alignment padding. */
size_t CalculateTotalSlabHeapSize() {
size_t size = 0;
/* Each type contributes worst-case alignment slack plus its pointer-aligned object storage. */
#define ADD_SLAB_SIZE(NAME, COUNT, ...) ({ \
size += alignof(NAME); \
size += util::AlignUp(sizeof(NAME) * (COUNT), alignof(void *)); \
});
/* NOTE: This can't be used right now because we don't have all these types implemented. */
/* Once we do, uncomment the following and stop using the hardcoded size. */
/* TODO: FOREACH_SLAB_TYPE(ADD_SLAB_SIZE) */
size = 0x647000;
return size;
}
/* Lays out the kernel slab heaps within the slab region, in a randomized order with */
/* random gaps between them (a KASLR-style hardening of slab placement). */
void InitializeSlabHeaps() {
    /* Get the start of the slab region, since that's where we'll be working. */
    KVirtualAddress address = KMemoryLayout::GetSlabRegionAddress();

    /* Initialize slab type array to be in sorted order. */
    KSlabType slab_types[KSlabType_Count];
    for (size_t i = 0; i < util::size(slab_types); i++) { slab_types[i] = static_cast<KSlabType>(i); }

    /* N shuffles the slab type array with the following simple algorithm. */
    for (size_t i = 0; i < util::size(slab_types); i++) {
        /* GenerateRandomRange is inclusive on both bounds, so the upper bound must be */
        /* util::size(slab_types) - 1; passing util::size(slab_types) would permit an */
        /* out-of-bounds access to slab_types[KSlabType_Count]. */
        const size_t rnd = KSystemControl::GenerateRandomRange(0, util::size(slab_types) - 1);
        std::swap(slab_types[i], slab_types[rnd]);
    }

    /* Create an array to represent the gaps between the slabs. */
    size_t slab_gaps[util::size(slab_types)];
    for (size_t i = 0; i < util::size(slab_gaps); i++) {
        /* Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange is inclusive. */
        /* However, Nintendo also has the off-by-one error, and it's "harmless", so we will include it ourselves. */
        slab_gaps[i] = KSystemControl::GenerateRandomRange(0, SlabRegionReservedSize);
    }

    /* Sort the array, so that we can treat differences between values as offsets to the starts of slabs. */
    /* (Insertion sort: the array is tiny and no allocator is available this early in init.) */
    for (size_t i = 1; i < util::size(slab_gaps); i++) {
        for (size_t j = i; j > 0 && slab_gaps[j-1] > slab_gaps[j]; j--) {
            std::swap(slab_gaps[j], slab_gaps[j-1]);
        }
    }

    for (size_t i = 0; i < util::size(slab_types); i++) {
        /* Add the random gap to the address. */
        address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];

        #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...)              \
            case KSlabType_##NAME:                                  \
                address = InitializeSlabHeap<NAME>(address, COUNT); \
                break;

        /* Initialize the slabheap. */
        switch (slab_types[i]) {
            /* NOTE: This can't be used right now because we don't have all these types implemented. */
            /* Once we do, uncomment the following. */
            /* TODO: FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP) */
            case KSlabType_KThread:
                address = InitializeSlabHeap<KThread>(address, SLAB_COUNT(KThread));
                break;
            default:
                MESOSPHERE_ABORT();
        }
    }
}
}

View file

@ -34,6 +34,13 @@ namespace ams::kern {
cpu::SynchronizeAllCores();
}
if (core_id == 0) {
/* Note: this is not actually done here, it's done later in main after more stuff is setup. */
/* However, for testing (and to manifest this code in the produced binary), this is here for now. */
/* TODO: Do this better. */
init::InitializeSlabHeaps();
}
/* TODO: Implement more of Main() */
while (true) { /* ... */ }
}

View file

@ -96,7 +96,7 @@ namespace ams::kern::init {
KInitialPageTable ttbr1_table(util::AlignDown(cpu::GetTtbr1El1(), PageSize), KInitialPageTable::NoClear{});
/* Initialize the slab allocator counts. */
/* TODO */
InitializeSlabResourceCounts();
/* Insert the root block for the virtual memory tree, from which all other blocks will derive. */
KMemoryLayout::GetVirtualMemoryBlockTree().insert(*KMemoryLayout::GetMemoryBlockAllocator().Create(KernelVirtualAddressSpaceBase, KernelVirtualAddressSpaceSize, 0, 0));
@ -142,7 +142,7 @@ namespace ams::kern::init {
const size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);
/* Determine the size of the slab region. */
const size_t slab_region_size = 0x647000; /* TODO: Calculate this on the fly. */
const size_t slab_region_size = CalculateTotalSlabHeapSize();
MESOSPHERE_INIT_ABORT_UNLESS(slab_region_size <= resource_region_size);
/* Setup the slab region. */