Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-11-10 07:06:34 +00:00)
kern: improve resource region size definitions/calculations
commit 1a262c1063 (parent 79201428b0)
7 changed files with 52 additions and 47 deletions
@@ -74,6 +74,10 @@ namespace ams::kern::arch::arm64::init {
             static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address) {
                 ClearPhysicalMemory(address, PageSize);
             }
+        public:
+            static consteval size_t GetMaximumOverheadSize(size_t size) {
+                return (util::DivideUp(size, L1BlockSize) + util::DivideUp(size, L2BlockSize)) * PageSize;
+            }
         private:
             size_t NOINLINE GetBlockCount(KVirtualAddress virt_addr, size_t size, size_t block_size) {
                 const KVirtualAddress end_virt_addr = virt_addr + size;
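A gloss on the new helper (mine, not part of the commit): with a 4 KiB translation granule, mapping `size` bytes costs at most one L2 table per L1 block covered plus one L3 table per L2 block covered, one page apiece. Assuming the standard AArch64 block sizes (L1BlockSize = 1_GB, L2BlockSize = 2_MB, PageSize = 4_KB), GetMaximumOverheadSize(8_GB) = (8 + 4096) * 0x1000 = 0x1008000.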
@@ -20,7 +20,7 @@
 namespace ams::kern {

     constexpr u32 InitialProcessBinaryMagic = util::FourCC<'I','N','I','1'>::Code;
-    constexpr size_t InitialProcessBinarySizeMax = 0xC00000;
+    constexpr size_t InitialProcessBinarySizeMax = 12_MB;

     struct InitialProcessBinaryHeader {
         u32 magic;
@@ -39,6 +39,19 @@ namespace ams::kern {
     constexpr size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ul;
     constexpr size_t KernelPhysicalAddressSpaceSize = KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase;

+    constexpr size_t KernelPageTableHeapSize   = init::KInitialPageTable::GetMaximumOverheadSize(8_GB);
+    constexpr size_t KernelInitialPageHeapSize = 128_KB;
+
+    constexpr size_t KernelSlabHeapDataSize           = 5_MB;
+    constexpr size_t KernelSlabHeapGapsSize           = 2_MB - 64_KB;
+    constexpr size_t KernelSlabHeapGapsSizeDeprecated = 2_MB;
+    constexpr size_t KernelSlabHeapSize               = KernelSlabHeapDataSize + KernelSlabHeapGapsSize;
+
+    /* NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. */
+    constexpr size_t KernelSlabHeapAdditionalSize = 0x68000;
+
+    constexpr size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
+
     enum KMemoryRegionType : u32 {
         KMemoryRegionAttr_CarveoutProtected = 0x04000000,
         KMemoryRegionAttr_DidKernelMap      = 0x08000000,
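These definitions can be cross-checked against the hard-coded constants deleted later in this diff. A minimal standalone sketch, assuming the usual AArch64 4 KiB-granule sizes (PageSize = 4_KB, L1BlockSize = 1_GB, L2BlockSize = 2_MB); all names below are mine, for illustration only:

    #include <cstddef>

    namespace resource_size_check {

        constexpr size_t PageSize    = 0x1000;     /* 4_KB (assumed) */
        constexpr size_t L1BlockSize = 1ull << 30; /* 1_GB (assumed) */
        constexpr size_t L2BlockSize = 2ull << 20; /* 2_MB (assumed) */

        constexpr size_t DivideUp(size_t v, size_t d) { return (v + d - 1) / d; }

        /* GetMaximumOverheadSize(8_GB): 8 tables for L1 blocks + 4096 for L2 blocks, one page each. */
        constexpr size_t PageTableHeapSize = (DivideUp(8ull << 30, L1BlockSize) + DivideUp(8ull << 30, L2BlockSize)) * PageSize;
        static_assert(PageTableHeapSize == 0x1008000);

        /* Page table heap + 128_KB initial page heap + (5_MB data + (2_MB - 64_KB) gaps) slab heap. */
        constexpr size_t ResourceSize = PageTableHeapSize + 0x20000 + (0x500000 + 0x1F0000);
        static_assert(ResourceSize == 0x1718000);

        /* Pre-10.0.0, the 64_KB gap difference is added back, recovering the old
           KernelResourceRegionSize constant of 0x1728000 that this commit removes. */
        static_assert(ResourceSize + 0x10000 == 0x1728000);

    }

KernelSlabHeapAdditionalSize (0x68000) is likewise exactly the old ExtraKernelResourceSize, so 0x1728000 + 0x68000 still reproduces the old 0x1790000 total for the enlarged region.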
@@ -544,6 +557,7 @@ namespace ams::kern {
             }

             static void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start);
+            static size_t GetResourceRegionSizeForInit();

             static NOINLINE auto GetKernelRegionExtents()     { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); }
             static NOINLINE auto GetKernelCodeRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode); }
@@ -71,8 +71,12 @@ namespace ams::kern::init {

         constexpr size_t SlabCountExtraKThread = 160;

-        /* This is used for gaps between the slab allocators. */
-        constexpr size_t SlabRegionReservedSize = 2_MB - 64_KB;
+        namespace test {
+
+            constexpr size_t RequiredSizeForExtraThreadCount = SlabCountExtraKThread * (sizeof(KThread) + (sizeof(KLinkedListNode) * 17) + (sizeof(KThreadLocalPage) / 8) + sizeof(KEventInfo));
+            static_assert(RequiredSizeForExtraThreadCount <= KernelSlabHeapAdditionalSize);
+
+        }

         /* Global to hold our resource counts. */
         KSlabResourceCounts g_slab_resource_counts = {
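Back-of-the-envelope check on the new test namespace (my arithmetic, not from the diff): 0x68000 bytes over 160 extra threads is about 0xA66 bytes per thread; with sizeof(KThread) bounded by 0x860 per the note in the header, that leaves roughly 0x206 bytes each for the 17 KLinkedListNodes, the 1/8 share of a KThreadLocalPage, and the KEventInfo. The static_assert verifies exactly this budget at compile time against the real sizeof values, instead of trusting a hand-computed constant.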
@@ -121,6 +125,10 @@ namespace ams::kern::init {
             }
         }

+        size_t CalculateSlabHeapGapSize() {
+            return (kern::GetTargetFirmware() >= TargetFirmware_10_0_0) ? KernelSlabHeapGapsSize : KernelSlabHeapGapsSizeDeprecated;
+        }
+
         size_t CalculateTotalSlabHeapSize() {
             size_t size = 0;
@@ -135,7 +143,7 @@ namespace ams::kern::init {
             #undef ADD_SLAB_SIZE

             /* Add the reserved size. */
-            size += SlabRegionReservedSize;
+            size += CalculateSlabHeapGapSize();

             return size;
         }
@@ -175,11 +183,12 @@ namespace ams::kern::init {
         }

         /* Create an array to represent the gaps between the slabs. */
+        const size_t total_gap_size = CalculateSlabHeapGapSize();
         size_t slab_gaps[util::size(slab_types)];
         for (size_t i = 0; i < util::size(slab_gaps); i++) {
             /* Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange is inclusive. */
             /* However, Nintendo also has the off-by-one error, and it's "harmless", so we will include it ourselves. */
-            slab_gaps[i] = KSystemControl::GenerateRandomRange(0, SlabRegionReservedSize);
+            slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size);
         }

         /* Sort the array, so that we can treat differences between values as offsets to the starts of slabs. */
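The randomization scheme here is worth a note: drawing one random cut in [0, total_gap_size] per slab and then sorting the cuts partitions the gap budget into random-sized gaps whose sum never exceeds the budget. A standalone illustration of the idea (hypothetical slab count, and std::mt19937_64 standing in for KSystemControl::GenerateRandomRange; not kernel code):

    #include <algorithm>
    #include <array>
    #include <cstdio>
    #include <random>

    int main() {
        constexpr std::size_t total_gap_size = 0x1F0000; /* 2_MB - 64_KB */
        std::array<std::size_t, 4> cuts;                 /* pretend we have 4 slab types */

        std::mt19937_64 rng{std::random_device{}()};
        std::uniform_int_distribution<std::size_t> dist(0, total_gap_size); /* inclusive, like GenerateRandomRange */
        for (auto &cut : cuts) { cut = dist(rng); }

        /* After sorting, differences between consecutive cuts are the per-slab gaps. */
        std::sort(cuts.begin(), cuts.end());
        std::size_t prev = 0;
        for (std::size_t cut : cuts) {
            std::printf("gap: 0x%zx\n", cut - prev);
            prev = cut;
        }
        return 0;
    }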
@@ -168,6 +168,19 @@ namespace ams::kern {
         }
     }

+    size_t KMemoryLayout::GetResourceRegionSizeForInit() {
+        /* Calculate resource region size based on whether we allow extra threads. */
+        const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
+        size_t resource_region_size = KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0);
+
+        /* 10.0.0 reduced the slab heap gaps by 64K. */
+        if (kern::GetTargetFirmware() < ams::TargetFirmware_10_0_0) {
+            resource_region_size += (KernelSlabHeapGapsSizeDeprecated - KernelSlabHeapGapsSize);
+        }
+
+        return resource_region_size;
+    }
+
     namespace init {

         namespace {
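Note that the firmware check is inverted relative to the deleted GetResourceRegionSize() helpers below: the old code started from the pre-10.0.0 total (0x1728000) and subtracted 0x10000 on 10.0.0+, while the new code starts from the 10.0.0+ total (KernelResourceSize) and adds the gap difference (KernelSlabHeapGapsSizeDeprecated - KernelSlabHeapGapsSize = 64_KB) on older firmware. The two produce identical sizes on every firmware version.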
@@ -31,31 +31,12 @@ namespace ams::kern::init {

     namespace {

-        constexpr size_t KernelResourceRegionSize = 0x1728000;
-        constexpr size_t ExtraKernelResourceSize  = 0x68000;
-        static_assert(ExtraKernelResourceSize + KernelResourceRegionSize == 0x1790000);
-        constexpr size_t KernelResourceReduction_10_0_0 = 0x10000;
-
         /* Global Allocator. */
         KInitialPageAllocator g_initial_page_allocator;

         /* Global initial arguments array. */
         KPhysicalAddress g_init_arguments_phys_addr[cpu::NumCores];

-        size_t GetResourceRegionSize() {
-            /* Decide if Kernel should have enlarged resource region. */
-            const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
-            size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);
-            static_assert(KernelResourceRegionSize > InitialProcessBinarySizeMax);
-            static_assert(KernelResourceRegionSize + ExtraKernelResourceSize > InitialProcessBinarySizeMax);
-
-            /* 10.0.0 reduced the kernel resource region size by 64K. */
-            if (kern::GetTargetFirmware() >= ams::TargetFirmware_10_0_0) {
-                resource_region_size -= KernelResourceReduction_10_0_0;
-            }
-            return resource_region_size;
-        }
-
         /* Page table attributes. */
         constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
         constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
@@ -152,8 +133,8 @@ namespace ams::kern::init {
         const KVirtualAddress stack_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
         MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack));

-        /* Decide if Kernel should have enlarged resource region (slab region + page table heap region). */
-        const size_t resource_region_size = GetResourceRegionSize();
+        /* Determine the size of the resource region. */
+        const size_t resource_region_size = KMemoryLayout::GetResourceRegionSizeForInit();

         /* Determine the size of the slab region. */
         const size_t slab_region_size = util::AlignUp(CalculateTotalSlabHeapSize(), PageSize);
@@ -28,32 +28,16 @@ namespace ams::kern::init::loader {

     namespace {

-        constexpr size_t KernelResourceRegionSize = 0x1728000;
-        constexpr size_t ExtraKernelResourceSize  = 0x68000;
-        static_assert(ExtraKernelResourceSize + KernelResourceRegionSize == 0x1790000);
-        constexpr size_t KernelResourceReduction_10_0_0 = 0x10000;
+        static_assert(InitialProcessBinarySizeMax <= KernelResourceSize);

-        constexpr size_t InitialPageTableRegionSize = 0x200000;
+        constexpr size_t InitialPageTableRegionSizeMax = 2_MB;
+        static_assert(InitialPageTableRegionSizeMax < KernelPageTableHeapSize + KernelInitialPageHeapSize);

         /* Global Allocator. */
         KInitialPageAllocator g_initial_page_allocator;
         KInitialPageAllocator::State g_final_page_allocator_state;

-        size_t GetResourceRegionSize() {
-            /* Decide if Kernel should have enlarged resource region. */
-            const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
-            size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);
-            static_assert(KernelResourceRegionSize > InitialProcessBinarySizeMax);
-            static_assert(KernelResourceRegionSize + ExtraKernelResourceSize > InitialProcessBinarySizeMax);
-
-            /* 10.0.0 reduced the kernel resource region size by 64K. */
-            if (kern::GetTargetFirmware() >= ams::TargetFirmware_10_0_0) {
-                resource_region_size -= KernelResourceReduction_10_0_0;
-            }
-            return resource_region_size;
-        }
-
         void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout) {
             KPhysicalAddress correct_base = KSystemControl::Init::GetKernelPhysicalBaseAddress(base_address);
             if (correct_base != base_address) {
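The single static_assert here is slightly stronger than the pair it replaces: it bounds InitialProcessBinarySizeMax (12_MB = 0xC00000) by KernelResourceSize, the smallest resource region any firmware receives, rather than by the larger pre-10.0.0 constant. The InitialPageTableRegionSizeMax assert likewise ties the 2_MB identity-mapping window to the page table heap budget instead of leaving 0x200000 as an unexplained magic number.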
@@ -276,7 +260,7 @@ namespace ams::kern::init::loader {
         const uintptr_t init_array_end_offset = layout->init_array_end_offset;

         /* Determine the size of the resource region. */
-        const size_t resource_region_size = GetResourceRegionSize();
+        const size_t resource_region_size = KMemoryLayout::GetResourceRegionSizeForInit();

         /* Setup the INI1 header in memory for the kernel. */
         const uintptr_t ini_end_address = base_address + ini_load_offset + resource_region_size;
@@ -300,7 +284,7 @@ namespace ams::kern::init::loader {
         KInitialPageTable ttbr1_table(g_initial_page_allocator.Allocate());

         /* Setup initial identity mapping. TTBR1 table passed by reference. */
-        SetupInitialIdentityMapping(ttbr1_table, base_address, bss_end_offset, ini_end_address, InitialPageTableRegionSize, g_initial_page_allocator);
+        SetupInitialIdentityMapping(ttbr1_table, base_address, bss_end_offset, ini_end_address, InitialPageTableRegionSizeMax, g_initial_page_allocator);

         /* Generate a random slide for the kernel's base address. */
         const KVirtualAddress virtual_base_address = GetRandomKernelBaseAddress(ttbr1_table, base_address, bss_end_offset);