Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-12-22)

kern: move SecureAppletMemory/KPageBuffer heap into the ResourceRegion

Commit 5a918f3bc9 (parent ea82889e6c), 21 changed files with 282 additions and 100 deletions.
@@ -20,6 +20,9 @@
 namespace ams::kern::board::nintendo::nx {
 
     class KSystemControl : public KSystemControlBase {
+        public:
+            /* This can be overridden as needed. */
+            static constexpr size_t SecureAppletMemorySize = 4_MB;
         public:
             class Init : public KSystemControlBase::Init {
                 public:
@@ -42,7 +42,6 @@ namespace ams::kern::init {
     const KSlabResourceCounts &GetSlabResourceCounts();
 
     size_t CalculateTotalSlabHeapSize();
-    NOINLINE void InitializeKPageBufferSlabHeap();
     NOINLINE void InitializeSlabHeaps();
 
 }
@@ -38,30 +38,37 @@ namespace ams::kern {
             size_t m_peak;
             size_t m_count;
             KVirtualAddress m_address;
+            KVirtualAddress m_aligned_address;
             size_t m_size;
         public:
-            KDynamicPageManager() : m_lock(), m_page_bitmap(), m_used(), m_peak(), m_count(), m_address(Null<KVirtualAddress>), m_size() { /* ... */ }
+            KDynamicPageManager() : m_lock(), m_page_bitmap(), m_used(), m_peak(), m_count(), m_address(Null<KVirtualAddress>), m_aligned_address(Null<KVirtualAddress>), m_size() { /* ... */ }
 
-            Result Initialize(KVirtualAddress memory, size_t sz) {
+            Result Initialize(KVirtualAddress memory, size_t size, size_t align) {
                 /* We need to have positive size. */
-                R_UNLESS(sz > 0, svc::ResultOutOfMemory());
+                R_UNLESS(size > 0, svc::ResultOutOfMemory());
 
-                /* Calculate management overhead. */
-                const size_t management_size = KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
-                const size_t allocatable_size = sz - management_size;
+                /* Set addresses. */
+                m_address = memory;
+                m_aligned_address = util::AlignDown(GetInteger(memory), align);
+
+                /* Calculate extents. */
+                const size_t managed_size = m_address + size - m_aligned_address;
+                const size_t overhead_size = util::AlignUp(KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)), sizeof(PageBuffer));
+                R_UNLESS(overhead_size < size, svc::ResultOutOfMemory());
 
                 /* Set tracking fields. */
-                m_address = memory;
-                m_size = util::AlignDown(allocatable_size, sizeof(PageBuffer));
-                m_count = allocatable_size / sizeof(PageBuffer);
-                R_UNLESS(m_count > 0, svc::ResultOutOfMemory());
+                m_size = util::AlignDown(size - overhead_size, sizeof(PageBuffer));
+                m_count = m_size / sizeof(PageBuffer);
 
                 /* Clear the management region. */
-                u64 *management_ptr = GetPointer<u64>(m_address + allocatable_size);
-                std::memset(management_ptr, 0, management_size);
+                u64 *management_ptr = GetPointer<u64>(m_address + size - overhead_size);
+                std::memset(management_ptr, 0, overhead_size);
 
                 /* Initialize the bitmap. */
-                m_page_bitmap.Initialize(management_ptr, m_count);
+                const size_t allocatable_region_size = (GetInteger(m_address) + size - overhead_size) - GetInteger(m_aligned_address);
+                MESOSPHERE_ABORT_UNLESS(allocatable_region_size >= sizeof(PageBuffer));
+
+                m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));
 
                 /* Free the pages to the bitmap. */
                 for (size_t i = 0; i < m_count; i++) {
@@ -69,7 +76,7 @@ namespace ams::kern {
                     cpu::ClearPageToZero(GetPointer<PageBuffer>(m_address) + i);
 
                     /* Set the bit for the free page. */
-                    m_page_bitmap.SetBit(i);
+                    m_page_bitmap.SetBit((GetInteger(m_address) + (i * sizeof(PageBuffer)) - GetInteger(m_aligned_address)) / sizeof(PageBuffer));
                 }
 
                 R_SUCCEED();
@@ -98,7 +105,28 @@ namespace ams::kern {
                 m_page_bitmap.ClearBit(offset);
                 m_peak = std::max(m_peak, (++m_used));
 
-                return GetPointer<PageBuffer>(m_address) + offset;
+                return GetPointer<PageBuffer>(m_aligned_address) + offset;
+            }
+
+            PageBuffer *Allocate(size_t count) {
+                /* Take the lock. */
+                KScopedInterruptDisable di;
+                KScopedSpinLock lk(m_lock);
+
+                /* Find a random free block. */
+                ssize_t soffset = m_page_bitmap.FindFreeRange(count);
+                if (AMS_UNLIKELY(soffset < 0)) {
+                    return nullptr;
+                }
+
+                const size_t offset = static_cast<size_t>(soffset);
+
+                /* Update our tracking. */
+                m_page_bitmap.ClearRange(offset, count);
+                m_used += count;
+                m_peak = std::max(m_peak, m_used);
+
+                return GetPointer<PageBuffer>(m_aligned_address) + offset;
             }
 
             void Free(PageBuffer *pb) {
@@ -110,7 +138,7 @@ namespace ams::kern {
                 KScopedSpinLock lk(m_lock);
 
                 /* Set the bit for the free page. */
-                size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(m_address)) / sizeof(PageBuffer);
+                size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(m_aligned_address)) / sizeof(PageBuffer);
                 m_page_bitmap.SetBit(offset);
 
                 /* Decrement our used count. */
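
Note: the net effect of the hunks above is that the page manager's bitmap is now indexed relative to util::AlignDown(address, align) rather than the raw heap base, which is what lets the new Allocate(count) hand out runs whose offsets are meaningful modulo the alignment. A minimal standalone sketch of that offset round-trip, using hypothetical addresses and a 0x1000-byte page (not kernel code):

    #include <cassert>
    #include <cstdint>

    int main() {
        constexpr std::uintptr_t kPageSize = 0x1000;
        constexpr std::uintptr_t kAlign    = 0x4000;                    /* hypothetical alignment */
        const std::uintptr_t address         = 0x80003000;              /* hypothetical heap base */
        const std::uintptr_t aligned_address = address & ~(kAlign - 1); /* util::AlignDown analogue */

        /* Page i of the heap gets bit (address + i*page - aligned_address) / page... */
        const std::uintptr_t page = address + 3 * kPageSize;            /* i == 3 */
        const std::uintptr_t bit  = (page - aligned_address) / kPageSize;
        assert(bit == 6);                                               /* 3 pages of skew + 3 pages in */

        /* ...so Allocate() can invert purely from the aligned base, as in the diff. */
        assert(aligned_address + bit * kPageSize == page);
        return 0;
    }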
@@ -43,6 +43,7 @@ namespace ams::kern {
         KMemoryState_FlagCanMapProcess = (1 << 23),
         KMemoryState_FlagCanChangeAttribute = (1 << 24),
         KMemoryState_FlagCanCodeMemory = (1 << 25),
+        KMemoryState_FlagLinearMapped = (1 << 26),
 
         KMemoryState_FlagsData = KMemoryState_FlagCanReprotect | KMemoryState_FlagCanUseIpc |
                                  KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
@@ -50,16 +51,18 @@
                                  KMemoryState_FlagCanTransfer | KMemoryState_FlagCanQueryPhysical |
                                  KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap |
                                  KMemoryState_FlagCanIpcUserBuffer | KMemoryState_FlagReferenceCounted |
-                                 KMemoryState_FlagCanChangeAttribute,
+                                 KMemoryState_FlagCanChangeAttribute | KMemoryState_FlagLinearMapped,
 
         KMemoryState_FlagsCode = KMemoryState_FlagCanDebug | KMemoryState_FlagCanUseIpc |
                                  KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
                                  KMemoryState_FlagMapped | KMemoryState_FlagCode |
                                  KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap |
-                                 KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagReferenceCounted,
+                                 KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagReferenceCounted |
+                                 KMemoryState_FlagLinearMapped,
 
         KMemoryState_FlagsMisc = KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted |
-                                 KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap,
+                                 KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap |
+                                 KMemoryState_FlagLinearMapped,
 
 
         KMemoryState_Free = ams::svc::MemoryState_Free,
@@ -68,7 +71,7 @@
         KMemoryState_Code     = ams::svc::MemoryState_Code     | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess,
         KMemoryState_CodeData = ams::svc::MemoryState_CodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory,
         KMemoryState_Normal   = ams::svc::MemoryState_Normal   | KMemoryState_FlagsData | KMemoryState_FlagCanCodeMemory,
-        KMemoryState_Shared   = ams::svc::MemoryState_Shared   | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,
+        KMemoryState_Shared   = ams::svc::MemoryState_Shared   | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,
 
         /* KMemoryState_Alias was removed after 1.0.0. */
@@ -82,7 +85,7 @@
         KMemoryState_Stack = ams::svc::MemoryState_Stack | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
                            | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
 
-        KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,
+        KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagMapped | KMemoryState_FlagLinearMapped,
 
         KMemoryState_Transfered = ams::svc::MemoryState_Transfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanChangeAttribute
                                 | KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
@@ -90,7 +93,7 @@
         KMemoryState_SharedTransfered = ams::svc::MemoryState_SharedTransfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
                                       | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
 
-        KMemoryState_SharedCode = ams::svc::MemoryState_SharedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted
+        KMemoryState_SharedCode = ams::svc::MemoryState_SharedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped
                                 | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
 
         KMemoryState_Inaccessible = ams::svc::MemoryState_Inaccessible,
@@ -103,8 +106,8 @@
 
         KMemoryState_Kernel = ams::svc::MemoryState_Kernel | KMemoryState_FlagMapped,
 
-        KMemoryState_GeneratedCode = ams::svc::MemoryState_GeneratedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug,
-        KMemoryState_CodeOut       = ams::svc::MemoryState_CodeOut       | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted,
+        KMemoryState_GeneratedCode = ams::svc::MemoryState_GeneratedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug | KMemoryState_FlagLinearMapped,
+        KMemoryState_CodeOut       = ams::svc::MemoryState_CodeOut       | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,
 
         KMemoryState_Coverage = ams::svc::MemoryState_Coverage | KMemoryState_FlagMapped,
     };
@@ -113,25 +116,25 @@
     static_assert(KMemoryState_Free             == 0x00000000);
     static_assert(KMemoryState_Io               == 0x00182001);
     static_assert(KMemoryState_Static           == 0x00042002);
-    static_assert(KMemoryState_Code             == 0x00DC7E03);
-    static_assert(KMemoryState_CodeData         == 0x03FEBD04);
-    static_assert(KMemoryState_Normal           == 0x037EBD05);
-    static_assert(KMemoryState_Shared           == 0x00402006);
+    static_assert(KMemoryState_Code             == 0x04DC7E03);
+    static_assert(KMemoryState_CodeData         == 0x07FEBD04);
+    static_assert(KMemoryState_Normal           == 0x077EBD05);
+    static_assert(KMemoryState_Shared           == 0x04402006);
 
-    static_assert(KMemoryState_AliasCode        == 0x00DD7E08);
-    static_assert(KMemoryState_AliasCodeData    == 0x03FFBD09);
-    static_assert(KMemoryState_Ipc              == 0x005C3C0A);
-    static_assert(KMemoryState_Stack            == 0x005C3C0B);
-    static_assert(KMemoryState_ThreadLocal      == 0x0040200C);
-    static_assert(KMemoryState_Transfered       == 0x015C3C0D);
-    static_assert(KMemoryState_SharedTransfered == 0x005C380E);
-    static_assert(KMemoryState_SharedCode       == 0x0040380F);
+    static_assert(KMemoryState_AliasCode        == 0x04DD7E08);
+    static_assert(KMemoryState_AliasCodeData    == 0x07FFBD09);
+    static_assert(KMemoryState_Ipc              == 0x045C3C0A);
+    static_assert(KMemoryState_Stack            == 0x045C3C0B);
+    static_assert(KMemoryState_ThreadLocal      == 0x0400200C);
+    static_assert(KMemoryState_Transfered       == 0x055C3C0D);
+    static_assert(KMemoryState_SharedTransfered == 0x045C380E);
+    static_assert(KMemoryState_SharedCode       == 0x0440380F);
     static_assert(KMemoryState_Inaccessible     == 0x00000010);
-    static_assert(KMemoryState_NonSecureIpc     == 0x005C3811);
-    static_assert(KMemoryState_NonDeviceIpc     == 0x004C2812);
+    static_assert(KMemoryState_NonSecureIpc     == 0x045C3811);
+    static_assert(KMemoryState_NonDeviceIpc     == 0x044C2812);
     static_assert(KMemoryState_Kernel           == 0x00002013);
-    static_assert(KMemoryState_GeneratedCode    == 0x00402214);
-    static_assert(KMemoryState_CodeOut          == 0x00402015);
+    static_assert(KMemoryState_GeneratedCode    == 0x04402214);
+    static_assert(KMemoryState_CodeOut          == 0x04402015);
     static_assert(KMemoryState_Coverage         == 0x00002016);
 #endif
 
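
Note: a quick way to read the static_assert churn above is that FlagLinearMapped is bit 26, so most constants simply gain 0x04000000; ThreadLocal additionally drops bit 22, because FlagReferenceCounted (which the old constant 0x0040200C implies is 1 << 22) was replaced rather than supplemented. A hypothetical standalone check:

    static_assert((1u << 26) == 0x04000000);                                /* KMemoryState_FlagLinearMapped */
    static_assert((0x04DC7E03 ^ 0x00DC7E03) == (1u << 26));                 /* Code: flag added */
    static_assert((0x0400200C ^ 0x0040200C) == ((1u << 26) | (1u << 22)));  /* ThreadLocal: LinearMapped replaces ReferenceCounted */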
@@ -50,9 +50,11 @@ namespace ams::kern {
     constexpr size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;
 
     /* NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x800. */
+    constexpr size_t KernelPageBufferHeapSize = 0x3E0000;
     constexpr size_t KernelSlabHeapAdditionalSize = 0x148000;
+    constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
 
-    constexpr size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
+    constexpr size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize + KernelPageBufferHeapSize;
 
     class KMemoryLayout {
         private:
@@ -150,6 +152,8 @@ namespace ams::kern {
 
             static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetKernelTraceBufferRegion() { return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelTraceBuffer)); }
 
+            static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetSecureAppletMemoryRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelSecureAppletMemory)); }
+
             static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) { return Dereference(FindLinear(address)); }
             static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetPhysicalLinearRegion(KPhysicalAddress address) { return Dereference(FindLinear(address)); }
 
@@ -209,6 +213,7 @@ namespace ams::kern {
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelBase); }
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelCodeRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelCode); }
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelSlabRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelSlab); }
+            static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelSecureAppletMemoryRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelSecureAppletMemory); }
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelPageTableHeapRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelPtHeap); }
             static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelInitPageTableRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelInitPt); }
 
@@ -211,6 +211,8 @@ namespace ams::kern {
     static_assert(KMemoryRegionType_DramKernelPtHeap.GetValue() == (0x24E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
     static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
 
+    constexpr inline const auto KMemoryRegionType_DramKernelSecureAppletMemory = KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(KMemoryRegionAttr_LinearMapped);
+    static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
+
     constexpr inline const auto KMemoryRegionType_DramReservedEarly = KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
     static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == (0x16 | KMemoryRegionAttr_NoUserMap));
@@ -251,6 +253,9 @@ namespace ams::kern {
     constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug = KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
     static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
 
+    constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
+    static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
+
     constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt   = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
     constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
     constexpr inline const auto KMemoryRegionType_VirtualDramUserPool       = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
@@ -327,6 +332,8 @@ namespace ams::kern {
             return KMemoryRegionType_VirtualDramKernelTraceBuffer;
         } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
             return KMemoryRegionType_VirtualDramKernelPtHeap;
+        } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
+            return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
         } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
             return KMemoryRegionType_VirtualDramUnknownDebug;
         } else {
@@ -113,11 +113,12 @@ namespace ams::kern {
             static constexpr size_t MaxDepth = 4;
         private:
             u64 *m_bit_storages[MaxDepth];
+            u64 *m_end_storages[MaxDepth];
             RandomBitGenerator m_rng;
             size_t m_num_bits;
             size_t m_used_depths;
         public:
-            KPageBitmap() : m_bit_storages(), m_rng(), m_num_bits(), m_used_depths() { /* ... */ }
+            KPageBitmap() : m_bit_storages(), m_end_storages(), m_rng(), m_num_bits(), m_used_depths() { /* ... */ }
 
             constexpr size_t GetNumBits() const { return m_num_bits; }
             constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(m_used_depths) - 1; }
@@ -135,6 +136,7 @@ namespace ams::kern {
                     m_bit_storages[depth] = storage;
                     size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64);
                     storage += size;
+                    m_end_storages[depth] = storage;
                 }
 
                 return storage;
@@ -171,6 +173,45 @@ namespace ams::kern {
                 return static_cast<ssize_t>(offset);
             }
 
+            ssize_t FindFreeRange(size_t count) {
+                /* Check that it is possible to find a range. */
+                const u64 * const storage_start = m_bit_storages[m_used_depths - 1];
+                const u64 * const storage_end   = m_end_storages[m_used_depths - 1];
+
+                /* If we don't have a storage to iterate (or want more blocks than fit in a single storage), we can't find a free range. */
+                if (!(storage_start < storage_end && count <= BITSIZEOF(u64))) {
+                    return -1;
+                }
+
+                /* Walk the storages to select a random free range. */
+                const size_t options_per_storage = std::max<size_t>(BITSIZEOF(u64) / count, 1);
+                const size_t num_entries         = std::max<size_t>(storage_end - storage_start, 1);
+
+                const u64 free_mask = (static_cast<u64>(1) << count) - 1;
+
+                size_t num_valid_options = 0;
+                ssize_t chosen_offset = -1;
+                for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) {
+                    u64 storage = storage_start[storage_index];
+                    for (size_t option = 0; option < options_per_storage; ++option) {
+                        if ((storage & free_mask) == free_mask) {
+                            /* We've found a new valid option. */
+                            ++num_valid_options;
+
+                            /* Select the Kth valid option with probability 1/K. This leads to an overall uniform distribution. */
+                            if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) {
+                                /* This is our first option, so select it. */
+                                chosen_offset = storage_index * BITSIZEOF(u64) + option * count;
+                            }
+                        }
+                        storage >>= count;
+                    }
+                }
+
+                /* Return the random offset we chose. */
+                return chosen_offset;
+            }
+
             void SetBit(size_t offset) {
                 this->SetBit(this->GetHighestDepthIndex(), offset);
                 m_num_bits++;
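
Note: the selection loop added above is reservoir sampling with a reservoir of one. Keeping the Kth valid candidate with probability 1/K means that after N candidates have been seen, each one has been chosen with probability exactly 1/N, without knowing N up front. A standalone user-space sketch of the same idea, with std::mt19937_64 standing in for the kernel's RandomBitGenerator (assumed names, not kernel code):

    #include <cstddef>
    #include <random>
    #include <vector>

    /* Keep the i-th candidate with probability 1/i; the result is uniform
     * over all candidates. Caller must ensure candidates is non-empty. */
    std::size_t PickUniform(const std::vector<std::size_t> &candidates, std::mt19937_64 &rng) {
        std::size_t chosen = candidates.front();
        std::size_t seen = 0;
        for (std::size_t c : candidates) {
            ++seen;
            /* Analogue of m_rng.GenerateRandom(seen): uniform integer in [0, seen). */
            if (std::uniform_int_distribution<std::size_t>(0, seen - 1)(rng) == 0) {
                chosen = c;
            }
        }
        return chosen;
    }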
@@ -16,12 +16,33 @@
 #pragma once
 #include <mesosphere/kern_slab_helpers.hpp>
 #include <mesosphere/kern_k_memory_layout.hpp>
+#include <mesosphere/kern_k_dynamic_page_manager.hpp>
 
 namespace ams::kern {
 
-    class KPageBuffer : public KSlabAllocated<KPageBuffer> {
+    class KDynamicPageManager;
+
+    class KPageBuffer;
+
+    class KPageBufferSlabHeap : protected impl::KSlabHeapImpl {
+        public:
+            static constexpr size_t BufferSize = PageSize;
+            static constinit inline size_t s_buffer_count = 0;
         private:
-            alignas(PageSize) u8 m_buffer[PageSize];
+            size_t m_obj_size{};
+        public:
+            constexpr KPageBufferSlabHeap() = default;
+
+            /* See kern_init_slab_setup.cpp for definition. */
+            void Initialize(KDynamicPageManager &allocator);
+
+            KPageBuffer *Allocate();
+            void Free(KPageBuffer *pb);
+    };
+
+    class KPageBuffer {
+        private:
+            u8 m_buffer[KPageBufferSlabHeap::BufferSize];
         public:
             KPageBuffer() {
                 std::memset(m_buffer, 0, sizeof(m_buffer));
@@ -39,8 +60,49 @@ namespace ams::kern {
 
                 return GetPointer<KPageBuffer>(virt_addr);
             }
+        private:
+            static constinit inline KPageBufferSlabHeap s_slab_heap;
+        public:
+            static void InitializeSlabHeap(KDynamicPageManager &allocator) {
+                s_slab_heap.Initialize(allocator);
+            }
+
+            static KPageBuffer *Allocate() {
+                return s_slab_heap.Allocate();
+            }
+
+            static void Free(KPageBuffer *obj) {
+                s_slab_heap.Free(obj);
+            }
+
+            template<size_t ExpectedSize>
+            static ALWAYS_INLINE KPageBuffer *AllocateChecked() {
+                /* Check that the allocation is valid. */
+                MESOSPHERE_ABORT_UNLESS(sizeof(KPageBuffer) == ExpectedSize);
+
+                return Allocate();
+            }
+
+            template<size_t ExpectedSize>
+            static ALWAYS_INLINE void FreeChecked(KPageBuffer *obj) {
+                /* Check that the free is valid. */
+                MESOSPHERE_ABORT_UNLESS(sizeof(KPageBuffer) == ExpectedSize);
+
+                return Free(obj);
+            }
     };
-    static_assert(sizeof(KPageBuffer) == PageSize);
-    static_assert(alignof(KPageBuffer) == PageSize);
+    static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize);
+
+    ALWAYS_INLINE KPageBuffer *KPageBufferSlabHeap::Allocate() {
+        KPageBuffer *pb = static_cast<KPageBuffer *>(KSlabHeapImpl::Allocate());
+        if (AMS_LIKELY(pb != nullptr)) {
+            std::construct_at(pb);
+        }
+        return pb;
+    }
+
+    ALWAYS_INLINE void KPageBufferSlabHeap::Free(KPageBuffer *pb) {
+        KSlabHeapImpl::Free(pb);
+    }
+
 }
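
Note: the AllocateChecked/FreeChecked templates above make each call site assert the object size it was compiled against, so a translation unit built against a stale KPageBuffer layout aborts instead of silently corrupting the slab. A standalone model of the pattern (hypothetical type, assert standing in for MESOSPHERE_ABORT_UNLESS):

    #include <cassert>
    #include <cstddef>

    struct PageBufferModel { unsigned char storage[0x1000]; };

    template<std::size_t ExpectedSize>
    PageBufferModel *AllocateChecked() {
        /* The caller bakes its size expectation into the template argument. */
        assert(sizeof(PageBufferModel) == ExpectedSize);
        return new PageBufferModel();
    }

    int main() {
        PageBufferModel *pb = AllocateChecked<0x1000>(); /* ok; <0x800> would assert */
        delete pb;
        return 0;
    }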
@@ -26,6 +26,9 @@ namespace ams::kern {
 namespace ams::kern {
 
     class KSystemControlBase {
+        public:
+            /* This can be overridden as needed. */
+            static constexpr size_t SecureAppletMemorySize = 0;
         protected:
             /* Nintendo uses std::mt19937_t for randomness. */
             /* To save space (and because mt19337_t isn't secure anyway), */
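
Note: making SecureAppletMemorySize a per-board compile-time constant (0 in this base class, 4_MB in the nintendo/nx override earlier in the commit) is what lets the generic code later in the diff guard secure-applet paths with if constexpr and compile them out entirely on boards without the region. A standalone sketch of that pattern, with hypothetical board types:

    #include <cstddef>
    #include <cstdio>

    struct BoardBase  { static constexpr std::size_t SecureAppletMemorySize = 0; };
    struct NintendoNx { static constexpr std::size_t SecureAppletMemorySize = 4 * 1024 * 1024; };

    template<typename Board>
    void PrintRegions() {
        /* Discarded statement: no code is generated when the size is 0. */
        if constexpr (Board::SecureAppletMemorySize > 0) {
            std::printf("SecureApplet: %zu bytes\n", Board::SecureAppletMemorySize);
        }
    }

    int main() {
        PrintRegions<BoardBase>();  /* prints nothing */
        PrintRegions<NintendoNx>(); /* prints the 4 MB region */
        return 0;
    }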
@@ -63,7 +63,7 @@ namespace ams::kern {
             static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
             static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000;
             static constexpr size_t BlockInfoSlabHeapSize = 4000;
-            static constexpr size_t ReservedDynamicPageCount = 70;
+            static constexpr size_t ReservedDynamicPageCount = 64;
         private:
             static State s_state;
             static KResourceLimit s_system_resource_limit;
@@ -29,7 +29,6 @@ namespace ams::kern::board::nintendo::nx {
     constinit bool g_call_smc_on_panic;
 
     /* Global variables for secure memory. */
-    constexpr size_t SecureAppletMemorySize = 4_MB;
     constinit KSpinLock g_secure_applet_lock;
     constinit bool g_secure_applet_memory_used = false;
     constinit KVirtualAddress g_secure_applet_memory_address = Null<KVirtualAddress>;
@@ -246,8 +245,8 @@ namespace ams::kern::board::nintendo::nx {
 
     Result AllocateSecureMemoryForApplet(KVirtualAddress *out, size_t size) {
         /* Verify that the size is valid. */
         R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
-        R_UNLESS(size <= SecureAppletMemorySize, svc::ResultOutOfMemory());
+        R_UNLESS(size <= KSystemControl::SecureAppletMemorySize, svc::ResultOutOfMemory());
 
         /* Disable interrupts and acquire the secure applet lock. */
         KScopedInterruptDisable di;
@@ -273,7 +272,7 @@ namespace ams::kern::board::nintendo::nx {
 
         /* Verify that the memory being freed is correct. */
         MESOSPHERE_ABORT_UNLESS(address == g_secure_applet_memory_address);
-        MESOSPHERE_ABORT_UNLESS(size <= SecureAppletMemorySize);
+        MESOSPHERE_ABORT_UNLESS(size <= KSystemControl::SecureAppletMemorySize);
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
         MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_used);
 
@@ -451,17 +450,11 @@ namespace ams::kern::board::nintendo::nx {
         /* Initialize the sleep manager. */
         KSleepManager::Initialize();
 
-        /* Reserve secure applet memory. */
-        if (GetTargetFirmware() >= TargetFirmware_5_0_0) {
-            MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address == Null<KVirtualAddress>);
-            MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, SecureAppletMemorySize));
-
-            constexpr auto SecureAppletAllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
-            const KPhysicalAddress secure_applet_memory_phys_addr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption);
-            MESOSPHERE_ABORT_UNLESS(secure_applet_memory_phys_addr != Null<KPhysicalAddress>);
-
-            g_secure_applet_memory_address = KMemoryLayout::GetLinearVirtualAddress(secure_applet_memory_phys_addr);
-        }
+        /* Get the secure applet memory. */
+        const auto &secure_applet_memory = KMemoryLayout::GetSecureAppletMemoryRegion();
+        MESOSPHERE_INIT_ABORT_UNLESS(secure_applet_memory.GetSize() == SecureAppletMemorySize);
+        g_secure_applet_memory_address = secure_applet_memory.GetAddress();
 
         /* Initialize KTrace (and potentially other init). */
         KSystemControlBase::InitializePhase2();
@@ -79,6 +79,9 @@ namespace ams::kern::init {
         constexpr size_t RequiredSizeForExtraThreadCount = SlabCountExtraKThread * (sizeof(KThread) + (sizeof(KThreadLocalPage) / 8) + sizeof(KEventInfo));
         static_assert(RequiredSizeForExtraThreadCount <= KernelSlabHeapAdditionalSize);
 
+        static_assert(KernelPageBufferHeapSize == 2 * PageSize + (SlabCountKProcess + SlabCountKThread + (SlabCountKProcess + SlabCountKThread) / 8) * PageSize);
+        static_assert(KernelPageBufferAdditionalSize == (SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize);
+
     }
 
     /* Global to hold our resource counts. */
@@ -131,7 +134,7 @@ namespace ams::kern::init {
     }
 
     size_t CalculateSlabHeapGapSize() {
-        constexpr size_t KernelSlabHeapGapSize = 2_MB - 296_KB;
+        constexpr size_t KernelSlabHeapGapSize = 2_MB - 320_KB;
         static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
         return KernelSlabHeapGapSize;
     }
@@ -155,23 +158,6 @@ namespace ams::kern::init {
         return size;
     }
 
-    void InitializeKPageBufferSlabHeap() {
-        const auto &counts = GetSlabResourceCounts();
-        const size_t num_pages = counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
-        const size_t slab_size = num_pages * PageSize;
-
-        /* Reserve memory from the system resource limit. */
-        MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, slab_size));
-
-        /* Allocate memory for the slab. */
-        constexpr auto AllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
-        const KPhysicalAddress slab_address = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
-        MESOSPHERE_ABORT_UNLESS(slab_address != Null<KPhysicalAddress>);
-
-        /* Initialize the slabheap. */
-        KPageBuffer::InitializeSlabHeap(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(slab_address)), slab_size);
-    }
-
     void InitializeSlabHeaps() {
         /* Get the slab region, since that's where we'll be working. */
         const KMemoryRegion &slab_region = KMemoryLayout::GetSlabRegion();
@@ -241,3 +227,33 @@ namespace ams::kern::init {
     }
 
 }
+
+namespace ams::kern {
+
+    void KPageBufferSlabHeap::Initialize(KDynamicPageManager &allocator) {
+        /* Get slab resource counts. */
+        const auto &counts = init::GetSlabResourceCounts();
+
+        /* If size is correct, account for thread local pages. */
+        if (BufferSize == PageSize) {
+            s_buffer_count += counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
+        }
+
+        /* Set our object size. */
+        m_obj_size = BufferSize;
+
+        /* Initialize the base allocator. */
+        KSlabHeapImpl::Initialize();
+
+        /* Allocate the desired page count. */
+        for (size_t i = 0; i < s_buffer_count; ++i) {
+            /* Allocate an appropriate buffer. */
+            auto * const pb = (BufferSize <= PageSize) ? allocator.Allocate() : allocator.Allocate(BufferSize / PageSize);
+            MESOSPHERE_ABORT_UNLESS(pb != nullptr);
+
+            /* Free to our slab. */
+            KSlabHeapImpl::Free(pb);
+        }
+    }
+
+}
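
Note: the two new static_asserts pin the header constants to the slab counts. Assuming mesosphere's default counts (SlabCountKProcess = 80, SlabCountKThread = 800, SlabCountExtraKThread = 736, values not shown in this diff), the arithmetic works out as a hypothetical spot-check:

    static_assert(2 * 0x1000 + (80 + 800 + (80 + 800) / 8) * 0x1000 == 0x3E0000); /* KernelPageBufferHeapSize: 992 pages */
    static_assert((736 + 736 / 8) * 0x1000 == 0x33C000);                          /* KernelPageBufferAdditionalSize: 828 pages */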
@@ -332,7 +332,6 @@ namespace ams::kern::KDumpObject {
         MESOSPHERE_RELEASE_LOG(#__OBJECT__ "\n"); \
         MESOSPHERE_RELEASE_LOG(" Cur=%3zu Peak=%3zu Max=%3zu\n", __OBJECT__::GetSlabHeapSize() - __OBJECT__::GetNumRemaining(), __OBJECT__::GetPeakIndex(), __OBJECT__::GetSlabHeapSize())
 
-        DUMP_KSLABOBJ(KPageBuffer);
         DUMP_KSLABOBJ(KEvent);
         DUMP_KSLABOBJ(KInterruptEvent);
         DUMP_KSLABOBJ(KProcess);
@@ -127,7 +127,7 @@ namespace ams::kern {
     }
 
     size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) {
-        return KernelResourceSize + (use_extra_resource ? KernelSlabHeapAdditionalSize : 0);
+        return KernelResourceSize + KSystemControl::SecureAppletMemorySize + (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0);
    }
 
 }
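
Note: putting the header constants and this function together, the resource region requested at init now has this shape (names from earlier hunks in this commit):

    // size = KernelPageTableHeapSize + KernelInitialPageHeapSize
    //      + KernelSlabHeapSize + KernelPageBufferHeapSize         // == KernelResourceSize
    //      + KSystemControl::SecureAppletMemorySize                // 0 on boards without it
    //      + (use_extra_resource ? KernelSlabHeapAdditionalSize
    //                            + KernelPageBufferAdditionalSize : 0);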
@@ -88,6 +88,10 @@ namespace ams::kern {
             return m_recv_list_count > ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset;
         }
 
+        constexpr ALWAYS_INLINE bool IsToMessageBuffer() const {
+            return m_recv_list_count == ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer;
+        }
+
         void GetBuffer(uintptr_t &out, size_t size, int &key) const {
             switch (m_recv_list_count) {
                 case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_None:
@@ -264,12 +268,12 @@ namespace ams::kern {
                                                          static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
                                                          KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_Locked,
                                                          src_pointer,
-                                                         KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
+                                                         KMemoryState_FlagLinearMapped, KMemoryState_FlagLinearMapped,
                                                          KMemoryPermission_UserRead,
                                                          KMemoryAttribute_Uncached, KMemoryAttribute_None));
         } else {
             R_TRY(src_page_table.CopyMemoryFromLinearToUser(recv_pointer, recv_size, src_pointer,
-                                                            KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
+                                                            KMemoryState_FlagLinearMapped, KMemoryState_FlagLinearMapped,
                                                             KMemoryPermission_UserRead,
                                                             KMemoryAttribute_Uncached, KMemoryAttribute_None));
         }
@@ -642,12 +646,15 @@ namespace ams::kern {
         const size_t max_fast_size = std::min<size_t>(offset_words + raw_size, PageSize);
         const size_t fast_size = max_fast_size - offset_words;
 
+        /* Determine source state; if user buffer, we require heap, and otherwise only linear mapped (to enable tls use). */
+        const auto src_state = src_user ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
+
         /* Determine the source permission. User buffer should be unmapped + read, TLS should be user readable. */
         const KMemoryPermission src_perm = static_cast<KMemoryPermission>(src_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelRead : KMemoryPermission_UserRead);
 
         /* Perform the fast part of the copy. */
         R_TRY(src_page_table.CopyMemoryFromLinearToKernel(reinterpret_cast<uintptr_t>(dst_msg_ptr) + offset_words, fast_size, src_message_buffer + offset_words,
-                                                          KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
+                                                          src_state, src_state,
                                                           src_perm,
                                                           KMemoryAttribute_Uncached, KMemoryAttribute_None));
 
@@ -658,7 +665,7 @@ namespace ams::kern {
                                                             static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
                                                             KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_Locked,
                                                             src_message_buffer + max_fast_size,
-                                                            KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
+                                                            src_state, src_state,
                                                             src_perm,
                                                             KMemoryAttribute_Uncached, KMemoryAttribute_None));
         }
@@ -744,9 +751,11 @@ namespace ams::kern {
         R_UNLESS(recv_pointer != 0, svc::ResultOutOfResource());
 
         /* Perform the pointer data copy. */
-        const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite);
+        const bool dst_heap = dst_user && dst_recv_list.IsToMessageBuffer();
+        const auto dst_state = dst_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
+        const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_heap ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite);
         R_TRY(dst_page_table.CopyMemoryFromUserToLinear(recv_pointer, recv_size,
-                                                        KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
+                                                        dst_state, dst_state,
                                                         dst_perm,
                                                         KMemoryAttribute_Uncached, KMemoryAttribute_None,
                                                         src_pointer));
@@ -898,12 +907,15 @@ namespace ams::kern {
         const size_t max_fast_size = std::min<size_t>(offset_words + raw_size, PageSize);
         const size_t fast_size = max_fast_size - offset_words;
 
+        /* Determine dst state; if user buffer, we require heap, and otherwise only linear mapped (to enable tls use). */
+        const auto dst_state = dst_user ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
+
         /* Determine the dst permission. User buffer should be unmapped + read, TLS should be user readable. */
         const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite);
 
         /* Perform the fast part of the copy. */
         R_TRY(dst_page_table.CopyMemoryFromKernelToLinear(dst_message_buffer + offset_words, fast_size,
-                                                          KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
+                                                          dst_state, dst_state,
                                                           dst_perm,
                                                           KMemoryAttribute_Uncached, KMemoryAttribute_None,
                                                           reinterpret_cast<uintptr_t>(src_msg_ptr) + offset_words));
@@ -911,7 +923,7 @@ namespace ams::kern {
         /* If the fast part of the copy didn't get everything, perform the slow part of the copy. */
         if (fast_size < raw_size) {
             R_TRY(dst_page_table.CopyMemoryFromHeapToHeap(dst_page_table, dst_message_buffer + max_fast_size, raw_size - fast_size,
-                                                          KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
+                                                          dst_state, dst_state,
                                                           dst_perm,
                                                           KMemoryAttribute_Uncached, KMemoryAttribute_None,
                                                           src_message_buffer + max_fast_size,
@@ -42,7 +42,7 @@ namespace ams::kern {
         R_UNLESS(m_resource_size > rc_size, svc::ResultOutOfMemory());
 
         /* Initialize slab heaps. */
-        m_dynamic_page_manager.Initialize(m_resource_address + rc_size, m_resource_size - rc_size);
+        m_dynamic_page_manager.Initialize(m_resource_address + rc_size, m_resource_size - rc_size, PageSize);
         m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, GetPointer<KPageTableManager::RefCount>(m_resource_address));
         m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
         m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
@@ -46,7 +46,7 @@ namespace ams::kern {
         MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPages(stack_bottom, 1, KMemoryState_Kernel));
 
         /* Free the stack page. */
-        KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(stack_paddr));
+        KPageBuffer::FreeChecked<PageSize>(KPageBuffer::FromPhysicalAddress(stack_paddr));
     }
 
     class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { /* ... */ };
@@ -334,7 +334,7 @@ namespace ams::kern {
         MESOSPHERE_ABORT_UNLESS(stack_region.GetEndAddress() != 0);
 
         /* Allocate a page to use as the thread. */
-        KPageBuffer *page = KPageBuffer::Allocate();
+        KPageBuffer *page = KPageBuffer::AllocateChecked<PageSize>();
         R_UNLESS(page != nullptr, svc::ResultOutOfResource());
 
         /* Map the stack page. */
@@ -24,7 +24,7 @@ namespace ams::kern {
         m_owner = process;
 
         /* Allocate a new page. */
-        KPageBuffer *page_buf = KPageBuffer::Allocate();
+        KPageBuffer *page_buf = KPageBuffer::AllocateChecked<PageSize>();
         R_UNLESS(page_buf != nullptr, svc::ResultOutOfMemory());
         ON_RESULT_FAILURE { KPageBuffer::Free(page_buf); };
 
@@ -43,7 +43,7 @@ namespace ams::kern {
         R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState_ThreadLocal));
 
         /* Free the page. */
-        KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(phys_addr));
+        KPageBuffer::FreeChecked<PageSize>(KPageBuffer::FromPhysicalAddress(phys_addr));
         R_SUCCEED();
     }
 
@@ -75,7 +75,10 @@ namespace ams::kern {
         size -= rc_size;
 
         /* Initialize the resource managers' shared page manager. */
-        g_resource_manager_page_manager.Initialize(address, size);
+        g_resource_manager_page_manager.Initialize(address, size, std::max<size_t>(PageSize, KPageBufferSlabHeap::BufferSize));
+
+        /* Initialize the KPageBuffer slab heap. */
+        KPageBuffer::InitializeSlabHeap(g_resource_manager_page_manager);
 
         /* Initialize the fixed-size slabheaps. */
         s_app_memory_block_heap.Initialize(std::addressof(g_resource_manager_page_manager), ApplicationMemoryBlockSlabHeapSize);
@@ -143,6 +146,9 @@ namespace ams::kern {
         PrintMemoryRegion(" KernelRegion", KMemoryLayout::GetKernelRegionPhysicalExtents());
         PrintMemoryRegion(" Code", KMemoryLayout::GetKernelCodeRegionPhysicalExtents());
         PrintMemoryRegion(" Slab", KMemoryLayout::GetKernelSlabRegionPhysicalExtents());
+        if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
+            PrintMemoryRegion(" SecureApplet", KMemoryLayout::GetKernelSecureAppletMemoryRegionPhysicalExtents());
+        }
         PrintMemoryRegion(" PageTableHeap", KMemoryLayout::GetKernelPageTableHeapRegionPhysicalExtents());
         PrintMemoryRegion(" InitPageTable", KMemoryLayout::GetKernelInitPageTableRegionPhysicalExtents());
         if constexpr (IsKTraceEnabled) {
@@ -58,7 +58,6 @@ namespace ams::kern {
             MESOSPHERE_ABORT_UNLESS(management_region.GetEndAddress() != 0);
 
             Kernel::GetMemoryManager().Initialize(management_region.GetAddress(), management_region.GetSize());
-            init::InitializeKPageBufferSlabHeap();
         }
 
         /* Copy the Initial Process Binary to safe memory. */
@@ -418,12 +418,18 @@ namespace ams::kern::init {
         /* NOTE: Nintendo does this only on 10.0.0+ */
         init_pt.PhysicallyRandomize(slab_region_start, slab_region_size, false);
 
+        /* Insert a physical region for the secure applet memory. */
+        const auto secure_applet_end_phys_addr = slab_end_phys_addr + KSystemControl::SecureAppletMemorySize;
+        if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
+            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_end_phys_addr), KSystemControl::SecureAppletMemorySize, KMemoryRegionType_DramKernelSecureAppletMemory));
+        }
+
         /* Determine size available for kernel page table heaps. */
         const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
-        const size_t page_table_heap_size = GetInteger(resource_end_phys_addr) - GetInteger(slab_end_phys_addr);
+        const size_t page_table_heap_size = GetInteger(resource_end_phys_addr) - GetInteger(secure_applet_end_phys_addr);
 
         /* Insert a physical region for the kernel page table heap region */
-        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_end_phys_addr), page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
+        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(secure_applet_end_phys_addr), page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
 
         /* All DRAM regions that we haven't tagged by this point will be mapped under the linear mapping. Tag them. */
         for (auto &region : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
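
Note: for orientation, the resulting physical layout of the kernel resource carveout after this hunk is (a sketch; names as in the code above):

    slab_start_phys_addr                                            resource_end_phys_addr
    |-- kernel slab heap --|-- secure applet memory --|-- page table heap (DramKernelPtHeap) --|
                           ^ slab_end_phys_addr       ^ secure_applet_end_phys_addr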