kern: implement enough of KPageTable to initialize a thread

commit 8c93eb5712 (parent c6d1579265)
31 changed files with 1475 additions and 270 deletions
@@ -18,169 +18,9 @@
 #include <mesosphere/kern_common.hpp>
 #include <mesosphere/kern_k_typed_address.hpp>
 #include <mesosphere/kern_select_cpu.hpp>
+#include <mesosphere/arch/arm64/kern_k_page_table_entry.hpp>
 
-namespace ams::kern::init {
+namespace ams::kern::arm64::init {
 
-    constexpr size_t L1BlockSize = 1_GB;
-    constexpr size_t L2BlockSize = 2_MB;
-    constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
-    constexpr size_t L3BlockSize = PageSize;
-    constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;
-
-    class PageTableEntry {
-        public:
-            enum Permission : u64 {
-                Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
-                Permission_KernelRX  = ((0ul << 53) | (1ul << 54) | (2ul << 6)),
-                Permission_KernelR   = ((1ul << 53) | (1ul << 54) | (2ul << 6)),
-                Permission_KernelRW  = ((1ul << 53) | (1ul << 54) | (0ul << 6)),
-
-                Permission_UserRX = ((1ul << 53) | (0ul << 54) | (3ul << 6)),
-                Permission_UserR  = ((1ul << 53) | (1ul << 54) | (3ul << 6)),
-                Permission_UserRW = ((1ul << 53) | (1ul << 54) | (1ul << 6)),
-            };
-
-            enum Shareable : u64 {
-                Shareable_NonShareable   = (0 << 8),
-                Shareable_OuterShareable = (2 << 8),
-                Shareable_InnerShareable = (3 << 8),
-            };
-
-            /* Official attributes are: */
-            /* 0x00, 0x04, 0xFF, 0x44. 4-7 are unused. */
-            enum PageAttribute : u64 {
-                PageAttribute_Device_nGnRnE            = (0 << 2),
-                PageAttribute_Device_nGnRE             = (1 << 2),
-                PageAttribute_NormalMemory             = (2 << 2),
-                PageAttribute_NormalMemoryNotCacheable = (3 << 2),
-            };
-
-            enum AccessFlag : u64 {
-                AccessFlag_NotAccessed = (0 << 10),
-                AccessFlag_Accessed    = (1 << 10),
-            };
-        protected:
-            u64 attributes;
-        public:
-            /* Take in a raw attribute. */
-            constexpr ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }
-
-            /* Extend a previous attribute. */
-            constexpr ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
-
-            /* Construct a new attribute. */
-            constexpr ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share)
-                : attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share))
-            {
-                /* ... */
-            }
-        protected:
-            constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
-                return (this->attributes >> offset) & ((1ul << count) - 1);
-            }
-
-            constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
-                return this->attributes & (((1ul << count) - 1) << offset);
-            }
-        public:
-            constexpr ALWAYS_INLINE bool IsUserExecuteNever() const { return this->GetBits(54, 1) != 0; }
-            constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
-            constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
-            constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->GetBits(10, 1)); }
-            constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->GetBits(8, 2)); }
-            constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->GetBits(2, 3)); }
-            constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
-            constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x1; }
-            constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBits(0, 2) == 0x3; }
-
-            /* Should not be called except by derived classes. */
-            constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
-                return this->attributes;
-            }
-    };
-    static_assert(sizeof(PageTableEntry) == sizeof(u64));
-
-    constexpr PageTableEntry InvalidPageTableEntry = PageTableEntry(0);
-
-    constexpr size_t MaxPageTableEntries = PageSize / sizeof(PageTableEntry);
-
-    class L1PageTableEntry : public PageTableEntry {
-        public:
-            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
-                : PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
-            {
-                /* ... */
-            }
-
-            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
-            {
-                /* ... */
-            }
-
-            constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
-                return this->SelectBits(30, 18);
-            }
-
-            constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
-                return this->SelectBits(12, 36);
-            }
-
-            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
-                /* Check whether this has the same permission/etc as the desired attributes. */
-                return L1PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
-            }
-    };
-
-    class L2PageTableEntry : public PageTableEntry {
-        public:
-            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
-                : PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
-            {
-                /* ... */
-            }
-
-            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
-            {
-                /* ... */
-            }
-
-            constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
-                return this->SelectBits(21, 27);
-            }
-
-            constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
-                return this->SelectBits(12, 36);
-            }
-
-            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
-                /* Check whether this has the same permission/etc as the desired attributes. */
-                return L2PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
-            }
-    };
-
-    class L3PageTableEntry : public PageTableEntry {
-        public:
-            constexpr ALWAYS_INLINE L3PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x3)
-            {
-                /* ... */
-            }
-
-            constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x3; }
-
-            constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
-                return this->SelectBits(12, 36);
-            }
-
-            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
-                /* Check whether this has the same permission/etc as the desired attributes. */
-                return L3PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
-            }
-    };
-
 
     class KInitialPageTable {
         public:
@@ -124,11 +124,28 @@ namespace ams::kern::arm64::cpu {
         ClearPageToZeroImpl(page);
     }
 
+    ALWAYS_INLINE void InvalidateTlbByAsid(u32 asid) {
+        const u64 value = (static_cast<u64>(asid) << 48);
+        __asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(value) : "memory");
+        EnsureInstructionConsistency();
+    }
+
+    ALWAYS_INLINE void InvalidateTlbByAsidAndVa(u32 asid, KProcessAddress virt_addr) {
+        const u64 value = (static_cast<u64>(asid) << 48) | ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul);
+        __asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(value) : "memory");
+        EnsureInstructionConsistency();
+    }
+
     ALWAYS_INLINE void InvalidateEntireTlb() {
         __asm__ __volatile__("tlbi vmalle1is" ::: "memory");
         EnsureInstructionConsistency();
     }
 
+    ALWAYS_INLINE void InvalidateEntireTlbDataOnly() {
+        __asm__ __volatile__("tlbi vmalle1is" ::: "memory");
+        DataSynchronizationBarrier();
+    }
+
     ALWAYS_INLINE uintptr_t GetCoreLocalRegionAddress() {
         register uintptr_t x18 asm("x18");
         __asm__ __volatile__("" : [x18]"=r"(x18));
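
Note on the operand encoding above: for these tlbi forms the ASID occupies bits 63:48 of the source register, and the VA-based form packs VA[55:12] into bits 43:0, which is why InvalidateTlbByAsidAndVa masks the shifted address with 0xFFFFFFFFFFF. A small stand-alone sketch of the encoding (hypothetical helper, not part of the commit):

    #include <cstdint>

    /* Hypothetical helper mirroring the operand layout used by the functions above. */
    constexpr uint64_t EncodeTlbiOperand(uint32_t asid, uint64_t virt_addr) {
        const uint64_t asid_bits = static_cast<uint64_t>(asid) << 48;   /* ASID in bits 63:48. */
        const uint64_t va_bits   = (virt_addr >> 12) & 0xFFFFFFFFFFFul; /* VA[55:12] in bits 43:0. */
        return asid_bits | va_bits;
    }

    static_assert(EncodeTlbiOperand(0x42, 0x80001000) == ((0x42ul << 48) | 0x80001), "encoding check");
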
@@ -97,7 +97,13 @@ namespace ams::kern::arm64::cpu {
         constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
             const u64 mask = ((1ul << count) - 1) << offset;
             this->value &= ~mask;
-            this->value |= (value & mask) << offset;
+            this->value |= (value & (mask >> offset)) << offset;
+        }
+
+        constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
+            const u64 mask = ((1ul << count) - 1) << offset;
+            this->value &= ~mask;
+            this->value |= (value & mask);
         }
 
         constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
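
The SetBits change above fixes a real masking bug: the old body masked the field value against the already-shifted mask and then shifted it again, so for any nonzero offset the bits never landed inside the cleared field. A worked example (stand-alone restatement, assuming a u64 `value` member as above):

    #include <cstddef>
    #include <cstdint>

    struct Reg {
        uint64_t value = 0;

        /* Old body: (v & mask) << offset. For offset 8, count 2: mask == 0x300,
         * so a field value of 0x3 is zeroed by the mask and nothing is written. */
        constexpr void SetBitsOld(size_t offset, size_t count, uint64_t v) {
            const uint64_t mask = ((1ul << count) - 1) << offset;
            this->value &= ~mask;
            this->value |= (v & mask) << offset;
        }

        /* Fixed body: mask in field coordinates, then shift into position. */
        constexpr void SetBitsNew(size_t offset, size_t count, uint64_t v) {
            const uint64_t mask = ((1ul << count) - 1) << offset;
            this->value &= ~mask;
            this->value |= (v & (mask >> offset)) << offset;
        }
    };

    constexpr uint64_t TestOld() { Reg r; r.SetBitsOld(8, 2, 0x3); return r.value; }
    constexpr uint64_t TestNew() { Reg r; r.SetBitsNew(8, 2, 0x3); return r.value; }
    static_assert(TestOld() == 0x000, "old code drops the bits");
    static_assert(TestNew() == 0x300, "fixed code writes bits 9:8");
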
@@ -29,6 +29,127 @@ namespace ams::kern::arm64 {
             KPageTableManager *manager;
             u64 ttbr;
             u8 asid;
+        private:
+            enum BlockType {
+                BlockType_L3Block,
+                BlockType_L3ContiguousBlock,
+                BlockType_L2Block,
+
+#ifdef ATMOSPHERE_BOARD_NINTENDO_SWITCH
+                BlockType_L2TegraSmmuBlock,
+#endif
+
+                BlockType_L2ContiguousBlock,
+                BlockType_L1Block,
+
+                BlockType_Count,
+            };
+
+            static_assert(L3BlockSize == PageSize);
+            static constexpr size_t ContiguousPageSize = L3ContiguousBlockSize;
+
+#ifdef ATMOSPHERE_BOARD_NINTENDO_SWITCH
+            static constexpr size_t L2TegraSmmuBlockSize = 2 * L2BlockSize;
+#endif
+            static constexpr size_t BlockSizes[BlockType_Count] = {
+                [BlockType_L3Block]           = L3BlockSize,
+                [BlockType_L3ContiguousBlock] = L3ContiguousBlockSize,
+                [BlockType_L2Block]           = L2BlockSize,
+#ifdef ATMOSPHERE_BOARD_NINTENDO_SWITCH
+                [BlockType_L2TegraSmmuBlock]  = L2TegraSmmuBlockSize,
+#endif
+                [BlockType_L2ContiguousBlock] = L2ContiguousBlockSize,
+                [BlockType_L1Block]           = L1BlockSize,
+            };
+
+            static constexpr size_t GetBlockSize(BlockType type) {
+                return BlockSizes[type];
+            }
+
+            static constexpr BlockType GetBlockType(size_t size) {
+                switch (size) {
+                    case L3BlockSize:           return BlockType_L3Block;
+                    case L3ContiguousBlockSize: return BlockType_L3ContiguousBlock;
+                    case L2BlockSize:           return BlockType_L2Block;
+#ifdef ATMOSPHERE_BOARD_NINTENDO_SWITCH
+                    case L2TegraSmmuBlockSize:  return BlockType_L2TegraSmmuBlock;
+#endif
+                    case L2ContiguousBlockSize: return BlockType_L2ContiguousBlock;
+                    case L1BlockSize:           return BlockType_L1Block;
+                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+                }
+            }
+        protected:
+            virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
+            virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
+            virtual void FinalizeUpdate(PageLinkedList *page_list) override;
+
+            KPageTableManager &GetPageTableManager() { return *this->manager; }
+            const KPageTableManager &GetPageTableManager() const { return *this->manager; }
+        private:
+            constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
+                /* Set basic attributes. */
+                PageTableEntry entry;
+                entry.SetPrivilegedExecuteNever(true);
+                entry.SetAccessFlag(PageTableEntry::AccessFlag_Accessed);
+                entry.SetShareable(PageTableEntry::Shareable_InnerShareable);
+
+                if (!this->IsKernel()) {
+                    entry.SetGlobal(false);
+                }
+
+                /* Set page attribute. */
+                if (properties.io) {
+                    MESOSPHERE_ABORT_UNLESS(!properties.uncached);
+                    MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
+
+                    entry.SetPageAttribute(PageTableEntry::PageAttribute_Device_nGnRnE)
+                         .SetUserExecuteNever(true);
+                } else if (properties.uncached) {
+                    MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
+
+                    entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable);
+                } else {
+                    entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemory);
+                }
+
+                /* Set user execute never bit. */
+                if (properties.perm != KMemoryPermission_UserReadExecute) {
+                    MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
+                    entry.SetUserExecuteNever(true);
+                }
+
+                /* Set can be contiguous. */
+                entry.SetContiguousAllowed(!properties.non_contiguous);
+
+                /* Set AP[1] based on perm. */
+                switch (properties.perm & KMemoryPermission_UserReadWrite) {
+                    case KMemoryPermission_UserReadWrite:
+                    case KMemoryPermission_UserRead:
+                        entry.SetUserAccessible(true);
+                        break;
+                    case KMemoryPermission_KernelReadWrite:
+                    case KMemoryPermission_KernelRead:
+                        entry.SetUserAccessible(false);
+                        break;
+                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+                }
+
+                /* Set AP[2] based on perm. */
+                switch (properties.perm & KMemoryPermission_UserReadWrite) {
+                    case KMemoryPermission_UserReadWrite:
+                    case KMemoryPermission_KernelReadWrite:
+                        entry.SetReadOnly(false);
+                        break;
+                    case KMemoryPermission_KernelRead:
+                    case KMemoryPermission_UserRead:
+                        entry.SetReadOnly(true);
+                        break;
+                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+                }
+
+                return entry;
+            }
         public:
             constexpr KPageTable() : KPageTableBase(), manager(), ttbr(), asid() { /* ... */ }
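
For orientation, the two switches in GetEntryTemplate map the software permission onto the architectural access-permission bits: AP[1] (bit 6) grants EL0 access, AP[2] (bit 7) makes the mapping read-only. A condensed restatement of that mapping (my summary, not code from the commit):

    /* Condensed restatement of the AP[1]/AP[2] selection in GetEntryTemplate. */
    struct ApBits { bool user_accessible; bool read_only; };

    constexpr ApBits SelectApBits(bool user, bool writable) {
        return ApBits{ user, !writable };   /* AP[1] = user, AP[2] = !writable. */
    }

    static_assert(SelectApBits(true, true).user_accessible);  /* UserReadWrite: AP[1]=1, AP[2]=0. */
    static_assert(!SelectApBits(true, true).read_only);
    static_assert(SelectApBits(false, false).read_only);      /* KernelRead: AP[1]=0, AP[2]=1. */
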
@@ -41,6 +162,63 @@ namespace ams::kern::arm64 {
 
             NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
             Result Finalize();
+        private:
+            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
+            Result Unmap(KProcessAddress virt_addr, size_t num_pages, KPageGroup *pg, PageLinkedList *page_list, bool force, bool reuse_ll);
+
+            Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
+
+            bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);
+
+            static void PteDataSynchronizationBarrier() {
+                cpu::DataSynchronizationBarrierInnerShareable();
+            }
+
+            static void ClearPageTable(KVirtualAddress table) {
+                cpu::ClearPageToZero(GetVoidPointer(table));
+            }
+
+            void OnTableUpdated() const {
+                cpu::InvalidateTlbByAsid(this->asid);
+            }
+
+            void OnKernelTableUpdated() const {
+                cpu::InvalidateEntireTlbDataOnly();
+            }
+
+            void NoteUpdated() const {
+                cpu::DataSynchronizationBarrier();
+
+                if (this->IsKernel()) {
+                    this->OnKernelTableUpdated();
+                } else {
+                    this->OnTableUpdated();
+                }
+            }
+
+            KVirtualAddress AllocatePageTable(PageLinkedList *page_list, bool reuse_ll) {
+                KVirtualAddress table = this->GetPageTableManager().Allocate();
+
+                if (table == Null<KVirtualAddress>) {
+                    if (reuse_ll && page_list->Peek()) {
+                        table = KVirtualAddress(reinterpret_cast<uintptr_t>(page_list->Pop()));
+                    } else {
+                        return Null<KVirtualAddress>;
+                    }
+                }
+
+                ClearPageTable(table);
+
+                MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(table) == 0);
+
+                return table;
+            }
+
+            void FreePageTable(PageLinkedList *page_list, KVirtualAddress table) const {
+                MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(table));
+                MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(table) == 0);
+                page_list->Push(table);
+            }
     };
 
 }
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_select_cpu.hpp>
+#include <mesosphere/kern_k_typed_address.hpp>
+
+namespace ams::kern::arm64 {
+
+    constexpr size_t L1BlockSize = 1_GB;
+    constexpr size_t L2BlockSize = 2_MB;
+    constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
+    constexpr size_t L3BlockSize = PageSize;
+    constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;
+
+    class PageTableEntry {
+        public:
+            struct InvalidTag{};
+
+            enum Permission : u64 {
+                Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
+                Permission_KernelRX  = ((0ul << 53) | (1ul << 54) | (2ul << 6)),
+                Permission_KernelR   = ((1ul << 53) | (1ul << 54) | (2ul << 6)),
+                Permission_KernelRW  = ((1ul << 53) | (1ul << 54) | (0ul << 6)),
+
+                Permission_UserRX = ((1ul << 53) | (0ul << 54) | (3ul << 6)),
+                Permission_UserR  = ((1ul << 53) | (1ul << 54) | (3ul << 6)),
+                Permission_UserRW = ((1ul << 53) | (1ul << 54) | (1ul << 6)),
+            };
+
+            enum Shareable : u64 {
+                Shareable_NonShareable   = (0 << 8),
+                Shareable_OuterShareable = (2 << 8),
+                Shareable_InnerShareable = (3 << 8),
+            };
+
+            /* Official attributes are: */
+            /* 0x00, 0x04, 0xFF, 0x44. 4-7 are unused. */
+            enum PageAttribute : u64 {
+                PageAttribute_Device_nGnRnE            = (0 << 2),
+                PageAttribute_Device_nGnRE             = (1 << 2),
+                PageAttribute_NormalMemory             = (2 << 2),
+                PageAttribute_NormalMemoryNotCacheable = (3 << 2),
+            };
+
+            enum AccessFlag : u64 {
+                AccessFlag_NotAccessed = (0 << 10),
+                AccessFlag_Accessed    = (1 << 10),
+            };
+
+            enum Type : u64 {
+                Type_None    = 0x0,
+                Type_L1Block = 0x1,
+                Type_L1Table = 0x3,
+                Type_L2Block = 0x1,
+                Type_L2Table = 0x3,
+                Type_L3Block = 0x3,
+            };
+
+            enum ContigType : u64 {
+                ContigType_NotContiguous = (0x0ul << 52),
+                ContigType_Contiguous    = (0x1ul << 52),
+            };
+        protected:
+            u64 attributes;
+        public:
+            /* Take in a raw attribute. */
+            constexpr ALWAYS_INLINE PageTableEntry() : attributes() { /* ... */ }
+            constexpr ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }
+
+            constexpr ALWAYS_INLINE PageTableEntry(InvalidTag) : attributes(0) { /* ... */ }
+
+            /* Extend a previous attribute. */
+            constexpr ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
+
+            /* Construct a new attribute. */
+            constexpr ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share)
+                : attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share))
+            {
+                /* ... */
+            }
+        protected:
+            constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
+                return (this->attributes >> offset) & ((1ul << count) - 1);
+            }
+
+            constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
+                return this->attributes & (((1ul << count) - 1) << offset);
+            }
+
+            constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
+                const u64 mask = ((1ul << count) - 1) << offset;
+                this->attributes &= ~mask;
+                this->attributes |= (value & (mask >> offset)) << offset;
+            }
+
+            constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
+                const u64 mask = ((1ul << count) - 1) << offset;
+                this->attributes &= ~mask;
+                this->attributes |= (value & mask);
+            }
+
+            constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
+                const u64 mask = 1ul << offset;
+                if (enabled) {
+                    this->attributes |= mask;
+                } else {
+                    this->attributes &= ~mask;
+                }
+            }
+        public:
+            constexpr ALWAYS_INLINE bool IsContiguousAllowed() const { return this->GetBits(55, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsUserExecuteNever() const { return this->GetBits(54, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsGlobal() const { return this->GetBits(11, 1) == 0; }
+            constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->GetBits(10, 1)); }
+            constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->GetBits(8, 2)); }
+            constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->GetBits(2, 3)); }
+            constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
+            constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x1; }
+            constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBits(0, 2) == 0x3; }
+
+            constexpr ALWAYS_INLINE decltype(auto) SetContiguousAllowed(bool en) { this->SetBit(55, !en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetPrivilegedExecuteNever(bool en) { this->SetBit(53, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetContiguous(bool en) { this->SetBit(52, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetGlobal(bool en) { this->SetBit(11, !en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetAccessFlag(AccessFlag f) { this->SetBitsDirect(10, 1, f); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetShareable(Shareable s) { this->SetBitsDirect(8, 2, s); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetReadOnly(bool en) { this->SetBit(7, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetUserAccessible(bool en) { this->SetBit(6, en); return *this; }
+            constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
+
+            constexpr ALWAYS_INLINE u64 GetEntryTemplate() const {
+                constexpr u64 Mask = (0xFFF0000000000FFFul & ~u64(0x3ul | (0x1ul << 52)));
+                return this->attributes & Mask;
+            }
+
+            constexpr ALWAYS_INLINE bool Is(u64 attr) const {
+                return this->attributes == attr;
+            }
+        protected:
+            constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
+                return this->attributes;
+            }
+    };
+    static_assert(sizeof(PageTableEntry) == sizeof(u64));
+
+    constexpr inline PageTableEntry InvalidPageTableEntry = PageTableEntry(PageTableEntry::InvalidTag{});
+
+    constexpr inline size_t MaxPageTableEntries = PageSize / sizeof(PageTableEntry);
+
+    class L1PageTableEntry : public PageTableEntry {
+        public:
+            constexpr ALWAYS_INLINE L1PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
+
+            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
+                : PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
+                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
+                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
+                return this->SelectBits(30, 18);
+            }
+
+            constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
+                return this->SelectBits(12, 36);
+            }
+
+            constexpr ALWAYS_INLINE bool GetTable(KPhysicalAddress &out) const {
+                if (this->IsTable()) {
+                    out = this->GetTable();
+                    return true;
+                } else {
+                    return false;
+                }
+            }
+
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
+                /* Check whether this has the same permission/etc as the desired attributes. */
+                return L1PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
+            }
+    };
+
+    class L2PageTableEntry : public PageTableEntry {
+        public:
+            constexpr ALWAYS_INLINE L2PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
+
+            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
+                : PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
+                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
+                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
+                return this->SelectBits(21, 27);
+            }
+
+            constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
+                return this->SelectBits(12, 36);
+            }
+
+            constexpr ALWAYS_INLINE bool GetTable(KPhysicalAddress &out) const {
+                if (this->IsTable()) {
+                    out = this->GetTable();
+                    return true;
+                } else {
+                    return false;
+                }
+            }
+
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
+                /* Check whether this has the same permission/etc as the desired attributes. */
+                return L2PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
+            }
+    };
+
+    class L3PageTableEntry : public PageTableEntry {
+        public:
+            constexpr ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
+
+            constexpr ALWAYS_INLINE L3PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
+                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x3)
+            {
+                /* ... */
+            }
+
+            constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x3; }
+
+            constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
+                return this->SelectBits(12, 36);
+            }
+
+            constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
+                /* Check whether this has the same permission/etc as the desired attributes. */
+                return L3PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
+            }
+    };
+
+    constexpr inline L1PageTableEntry InvalidL1PageTableEntry = L1PageTableEntry(PageTableEntry::InvalidTag{});
+    constexpr inline L2PageTableEntry InvalidL2PageTableEntry = L2PageTableEntry(PageTableEntry::InvalidTag{});
+    constexpr inline L3PageTableEntry InvalidL3PageTableEntry = L3PageTableEntry(PageTableEntry::InvalidTag{});
+
+}
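
A minimal usage sketch for the new header (illustrative only; assumes KPhysicalAddress is constructible from an integer, as elsewhere in mesosphere):

    using namespace ams::kern::arm64;

    /* Build an attribute set, then stamp it into a single-page (L3) entry. */
    constexpr PageTableEntry attr(PageTableEntry::Permission_KernelRW,
                                  PageTableEntry::PageAttribute_NormalMemory,
                                  PageTableEntry::Shareable_InnerShareable);

    constexpr L3PageTableEntry entry(KPhysicalAddress(0x80000000), attr, false);
    static_assert(entry.IsBlock());        /* L3 "block" descriptors use type bits 0b11. */
    static_assert(!entry.IsContiguous());
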
@@ -18,6 +18,7 @@
 #include <mesosphere/kern_select_cpu.hpp>
 #include <mesosphere/kern_k_typed_address.hpp>
 #include <mesosphere/kern_k_memory_layout.hpp>
+#include <mesosphere/arch/arm64/kern_k_page_table_entry.hpp>
 
 namespace ams::kern::arm64 {
 
@@ -28,15 +29,48 @@ namespace ams::kern::arm64 {
         NON_COPYABLE(KPageTableImpl);
         NON_MOVEABLE(KPageTableImpl);
         private:
-            u64 *table;
+            static constexpr size_t PageBits  = __builtin_ctzll(PageSize);
+            static constexpr size_t NumLevels = 3;
+            static constexpr size_t LevelBits = 9;
+            static_assert(NumLevels > 0);
+
+            static constexpr size_t AddressBits = (NumLevels - 1) * LevelBits + PageBits;
+            static_assert(AddressBits <= BITSIZEOF(u64));
+            static constexpr size_t AddressSpaceSize = (1ull << AddressBits);
+        private:
+            L1PageTableEntry *table;
             bool is_kernel;
             u32 num_entries;
+        public:
+            ALWAYS_INLINE KVirtualAddress GetTableEntry(KVirtualAddress table, size_t index) {
+                return table + index * sizeof(PageTableEntry);
+            }
+
+            ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KProcessAddress address) {
+                return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(this->table), (GetInteger(address) >> (PageBits + LevelBits * 2)) & (this->num_entries - 1)));
+            }
+
+            ALWAYS_INLINE L2PageTableEntry *GetL2EntryFromTable(KVirtualAddress table, KProcessAddress address) {
+                return GetPointer<L2PageTableEntry>(GetTableEntry(table, (GetInteger(address) >> (PageBits + LevelBits * 1)) & ((1ul << LevelBits) - 1)));
+            }
+
+            ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KProcessAddress address) {
+                return GetL2EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
+            }
+
+            ALWAYS_INLINE L3PageTableEntry *GetL3EntryFromTable(KVirtualAddress table, KProcessAddress address) {
+                return GetPointer<L3PageTableEntry>(GetTableEntry(table, (GetInteger(address) >> (PageBits + LevelBits * 0)) & ((1ul << LevelBits) - 1)));
+            }
+
+            ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) {
+                return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
+            }
         public:
             constexpr KPageTableImpl() : table(), is_kernel(), num_entries() { /* ... */ }
 
             NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
 
-            u64 *Finalize();
+            L1PageTableEntry *Finalize();
     };
 
 }
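
The shift-and-mask expressions in the accessors above implement the standard AArch64 4KB-granule walk: with PageBits = 12 and LevelBits = 9, each level consumes nine virtual-address bits. A worked example of the index math (hypothetical helpers, not from the commit):

    #include <cstddef>
    #include <cstdint>

    /* Restatement of the level-index math in GetL2/GetL3EntryFromTable. */
    constexpr size_t L2IndexOf(uintptr_t va) { return (va >> (12 + 9)) & 0x1FF; }
    constexpr size_t L3IndexOf(uintptr_t va) { return (va >> 12) & 0x1FF; }

    /* VA 0x403000 == (2 << 21) | (3 << 12): L2 slot 2, L3 slot 3. */
    static_assert(L2IndexOf(0x403000) == 2);
    static_assert(L3IndexOf(0x403000) == 3);
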
@@ -30,6 +30,10 @@ namespace ams::kern::arm64 {
             NOINLINE void Initialize(s32 core_id);
             NOINLINE void Activate();
             void Finalize(s32 core_id);
+
+            Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+                return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
+            }
     };
 
 }
@@ -14,9 +14,16 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #pragma once
+#include <mesosphere/kern_common.hpp>
+
 #ifdef ATMOSPHERE_ARCH_ARM64
     #include <mesosphere/arch/arm64/init/kern_k_init_page_table.hpp>
+
+    namespace ams::kern::init {
+        using ams::kern::arm64::PageTableEntry;
+        using ams::kern::arm64::init::KInitialPageTable;
+        using ams::kern::arm64::init::KInitialPageAllocator;
+    }
 #else
     #error "Unknown architecture for KInitialPageTable"
 #endif
@@ -103,8 +103,11 @@ namespace ams::kern {
                 }
             }
 
-            /* Update our tracking. */
             if (AMS_LIKELY(allocated != nullptr)) {
+                /* Construct the object. */
+                new (allocated) T();
+
+                /* Update our tracking. */
                 size_t used = ++this->used;
                 size_t peak = this->peak;
                 while (peak < used) {
@@ -59,6 +59,9 @@ namespace ams::kern {
 
             void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
             void UnlockSlowPath(uintptr_t cur_thread);
+
+            bool IsLocked() const { return this->tag != 0; }
+            bool IsLockedByCurrentThread() const { return (this->tag | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
     };
 
     using KScopedLightLock = KScopedLock<KLightLock>;
@@ -23,6 +23,7 @@ namespace ams::kern {
     enum KMemoryState : u32 {
         KMemoryState_None = 0,
         KMemoryState_Mask = 0xFF,
+        KMemoryState_All  = ~KMemoryState_None,
 
         KMemoryState_FlagCanReprotect = (1 << 8),
         KMemoryState_FlagCanDebug     = (1 << 9),
@@ -133,24 +134,25 @@ namespace ams::kern {
 
     enum KMemoryPermission : u8 {
         KMemoryPermission_None = 0,
+        KMemoryPermission_All  = static_cast<u8>(~KMemoryPermission_None),
 
-        KMemoryPermission_UserRead    = ams::svc::MemoryPermission_Read,
-        KMemoryPermission_UserWrite   = ams::svc::MemoryPermission_Write,
-        KMemoryPermission_UserExecute = ams::svc::MemoryPermission_Execute,
-
-        KMemoryPermission_UserReadWrite   = ams::svc::MemoryPermission_ReadWrite,
-        KMemoryPermission_UserReadExecute = ams::svc::MemoryPermission_ReadExecute,
-
-        KMemoryPermission_UserMask = KMemoryPermission_UserRead | KMemoryPermission_UserWrite | KMemoryPermission_UserExecute,
-
         KMemoryPermission_KernelShift = 3,
 
-        KMemoryPermission_KernelRead    = KMemoryPermission_UserRead << KMemoryPermission_KernelShift,
-        KMemoryPermission_KernelWrite   = KMemoryPermission_UserWrite << KMemoryPermission_KernelShift,
-        KMemoryPermission_KernelExecute = KMemoryPermission_UserExecute << KMemoryPermission_KernelShift,
+        KMemoryPermission_KernelRead    = ams::svc::MemoryPermission_Read << KMemoryPermission_KernelShift,
+        KMemoryPermission_KernelWrite   = ams::svc::MemoryPermission_Write << KMemoryPermission_KernelShift,
+        KMemoryPermission_KernelExecute = ams::svc::MemoryPermission_Execute << KMemoryPermission_KernelShift,
 
         KMemoryPermission_KernelReadWrite   = KMemoryPermission_KernelRead | KMemoryPermission_KernelWrite,
         KMemoryPermission_KernelReadExecute = KMemoryPermission_KernelRead | KMemoryPermission_KernelExecute,
+
+        KMemoryPermission_UserRead    = ams::svc::MemoryPermission_Read | KMemoryPermission_KernelRead,
+        KMemoryPermission_UserWrite   = ams::svc::MemoryPermission_Write | KMemoryPermission_KernelWrite,
+        KMemoryPermission_UserExecute = ams::svc::MemoryPermission_Execute,
+
+        KMemoryPermission_UserReadWrite   = KMemoryPermission_UserRead | KMemoryPermission_UserWrite,
+        KMemoryPermission_UserReadExecute = KMemoryPermission_UserRead | KMemoryPermission_UserExecute,
+
+        KMemoryPermission_UserMask = ams::svc::MemoryPermission_Read | ams::svc::MemoryPermission_Write | ams::svc::MemoryPermission_Execute,
     };
 
     constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) {
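
The point of the reshuffle above is that user permissions now include the corresponding kernel bits, so a mapping readable by userspace is, by construction, also readable by the kernel; permission checks against the kernel bits no longer need special-casing. A quick arithmetic check (assuming svc MemoryPermission Read/Write/Execute are 1/2/4, as in the svc definitions):

    /* Bit-value check under the assumed svc values. */
    constexpr unsigned SvcRead = 1, SvcWrite = 2;
    constexpr unsigned KernelShift = 3;
    constexpr unsigned KernelRead  = SvcRead  << KernelShift;  /* 0x08 */
    constexpr unsigned KernelWrite = SvcWrite << KernelShift;  /* 0x10 */
    constexpr unsigned UserRead    = SvcRead  | KernelRead;    /* 0x09 */
    constexpr unsigned UserWrite   = SvcWrite | KernelWrite;   /* 0x12 */

    static_assert((UserRead & KernelRead) == KernelRead);    /* user-readable => kernel-readable */
    static_assert((UserWrite & KernelWrite) == KernelWrite); /* user-writable => kernel-writable */
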
@@ -160,6 +162,7 @@ namespace ams::kern {
     enum KMemoryAttribute : u8 {
         KMemoryAttribute_None         = 0x00,
         KMemoryAttribute_Mask         = 0x7F,
+        KMemoryAttribute_All          = KMemoryAttribute_Mask,
         KMemoryAttribute_DontCareMask = 0x80,
 
         KMemoryAttribute_Locked       = ams::svc::MemoryAttribute_Locked,
@@ -182,17 +185,15 @@ namespace ams::kern {
             u16 device_use_count;
 
             constexpr ams::svc::MemoryInfo GetSvcMemoryInfo() const {
-                ams::svc::MemoryInfo svc_info = {};
-
-                svc_info.addr            = this->address;
-                svc_info.size            = this->size;
-                svc_info.state           = static_cast<ams::svc::MemoryState>(this->state & KMemoryState_Mask);
-                svc_info.attr            = static_cast<ams::svc::MemoryAttribute>(this->attribute & KMemoryAttribute_Mask);
-                svc_info.perm            = static_cast<ams::svc::MemoryPermission>(this->perm & KMemoryPermission_UserMask);
-                svc_info.ipc_refcount    = this->ipc_lock_count;
-                svc_info.device_refcount = this->device_use_count;
-
-                return svc_info;
+                return {
+                    .addr            = this->address,
+                    .size            = this->size,
+                    .state           = static_cast<ams::svc::MemoryState>(this->state & KMemoryState_Mask),
+                    .attr            = static_cast<ams::svc::MemoryAttribute>(this->attribute & KMemoryAttribute_Mask),
+                    .perm            = static_cast<ams::svc::MemoryPermission>(this->perm & KMemoryPermission_UserMask),
+                    .ipc_refcount    = this->ipc_lock_count,
+                    .device_refcount = this->device_use_count,
+                };
             }
 
             constexpr uintptr_t GetAddress() const {
@@ -258,18 +259,16 @@ namespace ams::kern {
             }
 
             constexpr KMemoryInfo GetMemoryInfo() const {
-                KMemoryInfo info = {};
-
-                info.address          = GetInteger(this->GetAddress());
-                info.size             = this->GetSize();
-                info.state            = this->memory_state;
-                info.perm             = this->perm;
-                info.attribute        = this->attribute;
-                info.original_perm    = this->original_perm;
-                info.ipc_lock_count   = this->ipc_lock_count;
-                info.device_use_count = this->device_use_count;
-
-                return info;
+                return {
+                    .address          = GetInteger(this->GetAddress()),
+                    .size             = this->GetSize(),
+                    .state            = this->memory_state,
+                    .perm             = this->perm,
+                    .attribute        = this->attribute,
+                    .original_perm    = this->original_perm,
+                    .ipc_lock_count   = this->ipc_lock_count,
+                    .device_use_count = this->device_use_count,
+                };
             }
         public:
             constexpr KMemoryBlock()
@@ -79,16 +79,21 @@ namespace ams::kern {
         using const_iterator = MemoryBlockTree::const_iterator;
         private:
             MemoryBlockTree memory_block_tree;
-            KProcessAddress start;
-            KProcessAddress end;
+            KProcessAddress start_address;
+            KProcessAddress end_address;
         public:
-            constexpr KMemoryBlockManager() : memory_block_tree(), start(), end() { /* ... */ }
+            constexpr KMemoryBlockManager() : memory_block_tree(), start_address(), end_address() { /* ... */ }
+
+            iterator end() { return this->memory_block_tree.end(); }
+            const_iterator end() const { return this->memory_block_tree.end(); }
+            const_iterator cend() const { return this->memory_block_tree.cend(); }
 
             Result Initialize(KProcessAddress st, KProcessAddress nd, KMemoryBlockSlabManager *slab_manager);
             void Finalize(KMemoryBlockSlabManager *slab_manager);
 
+            KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const;
+
             void Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr);
 
             iterator FindIterator(KProcessAddress address) const {
                 return this->memory_block_tree.find(KMemoryBlock(address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None));
@@ -131,10 +131,10 @@ namespace ams::kern {
         static constexpr ALWAYS_INLINE int Compare(const KMemoryRegion &lhs, const KMemoryRegion &rhs) {
             if (lhs.GetAddress() < rhs.GetAddress()) {
                 return -1;
-            } else if (lhs.GetLastAddress() > rhs.GetLastAddress()) {
-                return 1;
-            } else {
+            } else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
                 return 0;
+            } else {
+                return 1;
             }
         }
         public:
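
The rewritten comparator changes the meaning of "equal": any probe whose address falls within [rhs.GetAddress(), rhs.GetLastAddress()] now compares equal to that region, which is exactly what lets the simplified FindContainingRegion below return find() directly. A stand-alone restatement (hypothetical, for illustration):

    #include <cstdint>

    /* Restatement of KMemoryRegion::Compare's three outcomes. */
    constexpr int CompareRegions(uintptr_t lhs_addr, uintptr_t rhs_addr, uintptr_t rhs_last) {
        if (lhs_addr < rhs_addr) {
            return -1;                 /* lhs starts before rhs. */
        } else if (lhs_addr <= rhs_last) {
            return 0;                  /* lhs starts inside rhs: treated as a match. */
        } else {
            return 1;                  /* lhs starts after rhs ends. */
        }
    }

    static_assert(CompareRegions(0x1800, 0x1000, 0x1FFF) == 0); /* one-byte probe inside the region */
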
@@ -246,11 +246,7 @@ namespace ams::kern {
         constexpr ALWAYS_INLINE KMemoryRegionTree() : tree() { /* ... */ }
         public:
             iterator FindContainingRegion(uintptr_t address) {
-                auto it = this->find(KMemoryRegion(address, 1, 0, 0));
-                MESOSPHERE_INIT_ABORT_UNLESS(it != this->end());
-                MESOSPHERE_INIT_ABORT_UNLESS(it->Contains(address));
-
-                return it;
+                return this->find(KMemoryRegion(address, 1, 0, 0));
             }
 
             iterator FindFirstRegionByTypeAttr(u32 type_id, u32 attr = 0) {
@@ -483,6 +479,16 @@ namespace ams::kern {
             return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address));
         }
 
+        static NOINLINE bool IsHeapPhysicalAddress(KMemoryRegion **out, KPhysicalAddress address) {
+            if (auto it = GetPhysicalLinearMemoryRegionTree().FindContainingRegion(GetInteger(address)); it != GetPhysicalLinearMemoryRegionTree().end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
+                if (out) {
+                    *out = std::addressof(*it);
+                }
+                return true;
+            }
+            return false;
+        }
+
         static NOINLINE std::tuple<size_t, size_t> GetTotalAndKernelMemorySizes() {
             size_t total_size = 0, kernel_size = 0;
             for (auto it = GetPhysicalMemoryRegionTree().cbegin(); it != GetPhysicalMemoryRegionTree().cend(); it++) {
@@ -21,6 +21,8 @@
 
 namespace ams::kern {
 
+    class KPageGroup;
+
     class KMemoryManager {
         public:
             enum Pool {
@@ -135,6 +137,18 @@ namespace ams::kern {
             Impl &GetManager(KVirtualAddress address) {
                 return this->managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
            }
+
+            constexpr Impl *GetFirstManager(Pool pool, Direction dir) {
+                return dir == Direction_FromBack ? this->pool_managers_tail[pool] : this->pool_managers_head[pool];
+            }
+
+            constexpr Impl *GetNextManager(Impl *cur, Direction dir) {
+                if (dir == Direction_FromBack) {
+                    return cur->GetPrev();
+                } else {
+                    return cur->GetNext();
+                }
+            }
         public:
             constexpr KMemoryManager()
                 : pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process()
@@ -144,7 +158,8 @@ namespace ams::kern {
 
             NOINLINE void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size);
 
-            KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option);
+            NOINLINE KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option);
+            NOINLINE Result Allocate(KPageGroup *out, size_t num_pages, u32 option);
 
             void Open(KVirtualAddress address, size_t num_pages) {
                 /* Repeatedly open references until we've done so for all pages. */
@@ -27,6 +27,10 @@ namespace ams::kern {
             std::memset(buffer, 0, sizeof(buffer));
         }
 
+        ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const {
+            return KMemoryLayout::GetLinearPhysicalAddress(KVirtualAddress(this));
+        }
+
         static ALWAYS_INLINE KPageBuffer *FromPhysicalAddress(KPhysicalAddress phys_addr) {
             const KVirtualAddress virt_addr = KMemoryLayout::GetLinearVirtualAddress(phys_addr);
@@ -27,7 +27,7 @@ namespace ams::kern {
             KVirtualAddress address;
             size_t num_pages;
         public:
-            constexpr KBlockInfo() : address(), num_pages() { /* ... */ }
+            constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), address(), num_pages() { /* ... */ }
 
             constexpr void Initialize(KVirtualAddress addr, size_t np) {
                 this->address = addr;
@@ -82,9 +82,9 @@ namespace ams::kern {
             BlockInfoList block_list;
             KBlockInfoManager *manager;
         public:
-            KPageGroup() : block_list(), manager() { /* ... */ }
+            explicit KPageGroup(KBlockInfoManager *m) : block_list(), manager(m) { /* ... */ }
+            ~KPageGroup() { this->Finalize(); }
 
-            void Initialize(KBlockInfoManager *m);
             void Finalize();
 
             iterator begin() const { return this->block_list.begin(); }
@@ -107,4 +107,14 @@ namespace ams::kern {
         }
     };
 
+    class KScopedPageGroup {
+        private:
+            KPageGroup *group;
+        public:
+            explicit ALWAYS_INLINE KScopedPageGroup(KPageGroup *gp) : group(gp) { group->Open(); }
+            explicit ALWAYS_INLINE KScopedPageGroup(KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ... */ }
+            ALWAYS_INLINE ~KScopedPageGroup() { group->Close(); }
+    };
+
 }
@@ -34,9 +34,9 @@ namespace ams::kern {
         }
 
         static constexpr s32 GetBlockIndex(size_t num_pages) {
-            for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
+            for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
                 if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
-                    return static_cast<s32>(i);
+                    return i;
                 }
             }
             return -1;
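
Reversing the loop direction matters because the shifts are ordered smallest to largest: the old ascending loop returned index 0 for nearly every request, while the descending loop returns the largest block that still fits. A worked check (the shifts and page size here are assumptions for illustration):

    #include <cstddef>

    constexpr size_t PageSizeEx = 0x1000;
    constexpr size_t ShiftsEx[] = { 12, 16, 21 };   /* 4KB, 64KB, 2MB blocks. */

    constexpr int BlockIndexLargestFirst(size_t num_pages) {
        for (int i = 2; i >= 0; i--) {
            if (num_pages >= (size_t(1) << ShiftsEx[i]) / PageSizeEx) {
                return i;
            }
        }
        return -1;
    }

    static_assert(BlockIndexLargestFirst(16)  == 1);  /* 16 pages fill a 64KB block.    */
    static_assert(BlockIndexLargestFirst(512) == 2);  /* 512 pages fill a 2MB block.    */
    static_assert(BlockIndexLargestFirst(0)   == -1); /* nothing fits an empty request. */
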
@@ -24,6 +24,14 @@
 
 namespace ams::kern {
 
+    struct KPageProperties {
+        KMemoryPermission perm;
+        bool io;
+        bool uncached;
+        bool non_contiguous;
+    };
+    static_assert(std::is_trivial<KPageProperties>::value);
+
     class KPageTableBase {
         NON_COPYABLE(KPageTableBase);
         NON_MOVEABLE(KPageTableBase);
@@ -34,6 +42,60 @@ namespace ams::kern {
            MemoryFillValue_Ipc  = 'Y',
            MemoryFillValue_Heap = 'Z',
        };

+       enum OperationType {
+           OperationType_Map      = 0,
+           OperationType_MapGroup = 1,
+           OperationType_Unmap    = 2,
+           /* TODO: perm/attr operations */
+       };
+
+       struct PageLinkedList {
+           private:
+               struct Node {
+                   Node *next;
+                   u8 buffer[PageSize - sizeof(Node *)];
+               };
+               static_assert(std::is_pod<Node>::value);
+           private:
+               Node *root;
+           public:
+               constexpr PageLinkedList() : root(nullptr) { /* ... */ }
+
+               void Push(Node *n) {
+                   MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
+                   n->next    = this->root;
+                   this->root = n;
+               }
+
+               void Push(KVirtualAddress addr) {
+                   this->Push(GetPointer<Node>(addr));
+               }
+
+               Node *Peek() const { return this->root; }
+
+               Node *Pop() {
+                   Node *r    = this->root;
+                   this->root = this->root->next;
+                   return r;
+               }
+       };
+       static_assert(std::is_trivially_destructible<PageLinkedList>::value);
+
+       static constexpr u32 DefaultMemoryIgnoreAttr = KMemoryAttribute_DontCareMask | KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;
+   private:
+       class KScopedPageTableUpdater {
+           private:
+               KPageTableBase *page_table;
+               PageLinkedList ll;
+           public:
+               ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase *pt) : page_table(pt), ll() { /* ... */ }
+               ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase &pt) : KScopedPageTableUpdater(std::addressof(pt)) { /* ... */ }
+               ALWAYS_INLINE ~KScopedPageTableUpdater() { this->page_table->FinalizeUpdate(this->GetPageList()); }
+
+               PageLinkedList *GetPageList() { return std::addressof(this->ll); }
+       };
    private:
        KProcessAddress address_space_start;
        KProcessAddress address_space_end;
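PageLinkedList is worth pausing on: it stores the free list inside the free pages themselves. The first machine word of each page serves as the next pointer, so holding any number of spare page-table pages costs no metadata at all. A minimal standalone analogue of the idea (using heap chunks in place of real pages, purely for illustration):

    #include <cstddef>

    constexpr size_t PageSize = 0x1000;

    struct FreePageList {
        struct Node { Node *next; };
        Node *root = nullptr;

        void Push(void *page) {
            Node *n = static_cast<Node *>(page);
            n->next = root;   /* the page's own first word links the list */
            root    = n;
        }

        void *Pop() {
            Node *r = root;
            if (r != nullptr) { root = r->next; }
            return r;
        }
    };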
@@ -63,7 +125,7 @@ namespace ams::kern {
        KMemoryBlockSlabManager *memory_block_slab_manager;
        KBlockInfoManager *block_info_manager;
        KMemoryRegion *cached_physical_linear_region;
-       KMemoryRegion *cached_physical_non_kernel_dram_region;
+       KMemoryRegion *cached_physical_heap_region;
        KMemoryRegion *cached_virtual_managed_pool_dram_region;
        MemoryFillValue heap_fill_value;
        MemoryFillValue ipc_fill_value;

@@ -75,7 +137,7 @@ namespace ams::kern {
            kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(),
            max_heap_size(), max_physical_memory_size(), general_lock(), map_physical_memory_lock(), impl(), memory_block_manager(),
            allocate_option(), address_space_size(), is_kernel(), enable_aslr(), memory_block_slab_manager(), block_info_manager(),
-           cached_physical_linear_region(), cached_physical_non_kernel_dram_region(), cached_virtual_managed_pool_dram_region(),
+           cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_managed_pool_dram_region(),
            heap_fill_value(), ipc_fill_value(), stack_fill_value()
        {
            /* ... */
@@ -84,6 +146,59 @@ namespace ams::kern {
        NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);

        void Finalize();

+       constexpr bool IsKernel() const { return this->is_kernel; }
+       constexpr bool IsAslrEnabled() const { return this->enable_aslr; }
+
+       constexpr bool Contains(KProcessAddress addr) const {
+           return this->address_space_start <= addr && addr <= this->address_space_end - 1;
+       }
+
+       constexpr bool Contains(KProcessAddress addr, size_t size) const {
+           return this->address_space_start <= addr && addr < addr + size && addr + size - 1 <= this->address_space_end - 1;
+       }
+
+       KProcessAddress GetRegionAddress(KMemoryState state) const;
+       size_t GetRegionSize(KMemoryState state) const;
+       bool Contains(KProcessAddress addr, size_t size, KMemoryState state) const;
+   protected:
+       virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
+       virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
+       virtual void FinalizeUpdate(PageLinkedList *page_list) = 0;
+
+       KPageTableImpl &GetImpl() { return this->impl; }
+       const KPageTableImpl &GetImpl() const { return this->impl; }
+
+       bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); }
+
+       bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
+           if (this->cached_physical_heap_region && this->cached_physical_heap_region->Contains(GetInteger(phys_addr))) {
+               return true;
+           }
+           return KMemoryLayout::IsHeapPhysicalAddress(&this->cached_physical_heap_region, phys_addr);
+       }
+
+       bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
+           return (this->address_space_start <= addr) && (num_pages <= (this->address_space_end - this->address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= this->address_space_end - 1);
+       }
+   private:
+       constexpr size_t GetNumGuardPages() const { return this->IsKernel() ? 1 : 4; }
+       ALWAYS_INLINE KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const;
+
+       Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
+       Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
+       Result CheckMemoryState(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
+           return this->CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
+       }
+
+       Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const;
+       Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties);
+
+       NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
+   public:
+       Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+           return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm);
+       }
    public:
        static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress addr) {
            return KMemoryLayout::GetLinearVirtualAddress(addr);
@@ -92,6 +207,18 @@ namespace ams::kern {
        static ALWAYS_INLINE KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress addr) {
            return KMemoryLayout::GetLinearPhysicalAddress(addr);
        }

+       static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
+           return GetLinearVirtualAddress(addr);
+       }
+
+       static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
+           return GetLinearVirtualAddress(addr);
+       }
+
+       static ALWAYS_INLINE KPhysicalAddress GetPageTablePhysicalAddress(KVirtualAddress addr) {
+           return GetLinearPhysicalAddress(addr);
+       }
    };

 }
@@ -93,6 +93,10 @@ namespace ams::kern {
            *this->GetRefCountPointer(addr) -= count;
            return this->GetRefCount(addr) == 0;
        }

+       constexpr bool IsInPageTableHeap(KVirtualAddress addr) const {
+           return this->IsInRange(addr);
+       }
    };

 }
@@ -35,7 +35,7 @@ namespace ams::kern::svc {
    /* 103 */ using ::ams::svc::ResultOutOfResource;
    /* 104 */ using ::ams::svc::ResultOutOfMemory;
    /* 105 */ using ::ams::svc::ResultOutOfHandles;
-   /* 106 */ using ::ams::svc::ResultInvalidCurrentMemoryState;
+   /* 106 */ using ::ams::svc::ResultInvalidCurrentMemory;

    /* 108 */ using ::ams::svc::ResultInvalidNewMemoryPermissions;
@@ -42,4 +42,309 @@ namespace ams::kern::arm64 {
    Result KPageTable::Finalize() {
        MESOSPHERE_TODO_IMPLEMENT();
    }

+   Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
+       /* Check validity of parameters. */
+       MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+       MESOSPHERE_ASSERT(num_pages > 0);
+       MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
+       MESOSPHERE_ASSERT(this->ContainsPages(virt_addr, num_pages));
+
+       if (operation == OperationType_Map) {
+           MESOSPHERE_ABORT_UNLESS(is_pa_valid);
+           MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
+       } else {
+           MESOSPHERE_ABORT_UNLESS(!is_pa_valid);
+       }
+
+       if (operation == OperationType_Unmap) {
+           MESOSPHERE_TODO("operation == OperationType_Unmap");
+       } else {
+           auto entry_template = this->GetEntryTemplate(properties);
+
+           switch (operation) {
+               case OperationType_Map:
+                   return this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
+               MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+           }
+       }
+   }
+   Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) {
+       MESOSPHERE_TODO_IMPLEMENT();
+   }
+
+   Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
+       MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+       MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
+       MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
+
+       auto &impl = this->GetImpl();
+       KVirtualAddress l2_virt = Null<KVirtualAddress>;
+       KVirtualAddress l3_virt = Null<KVirtualAddress>;
+       int l2_open_count = 0;
+       int l3_open_count = 0;
+
+       /* Iterate, mapping each page. */
+       for (size_t i = 0; i < num_pages; i++) {
+           KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
+           bool l2_allocated = false;
+
+           /* If we have no L3 table, we should get or allocate one. */
+           if (l3_virt == Null<KVirtualAddress>) {
+               KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
+
+               /* If we have no L2 table, we should get or allocate one. */
+               if (l2_virt == Null<KVirtualAddress>) {
+                   if (L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); !l1_entry->GetTable(l2_phys)) {
+                       /* Allocate table. */
+                       l2_virt = AllocatePageTable(page_list, reuse_ll);
+                       R_UNLESS(l2_virt != Null<KVirtualAddress>, svc::ResultOutOfResource());
+
+                       /* Set the entry. */
+                       l2_phys = GetPageTablePhysicalAddress(l2_virt);
+                       PteDataSynchronizationBarrier();
+                       *l1_entry = L1PageTableEntry(l2_phys, this->IsKernel(), true);
+                       PteDataSynchronizationBarrier();
+                       l2_allocated = true;
+                   } else {
+                       l2_virt = GetPageTableVirtualAddress(l2_phys);
+                   }
+               }
+               MESOSPHERE_ASSERT(l2_virt != Null<KVirtualAddress>);
+
+               if (L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr); !l2_entry->GetTable(l3_phys)) {
+                   /* Allocate table. */
+                   l3_virt = AllocatePageTable(page_list, reuse_ll);
+                   if (l3_virt == Null<KVirtualAddress>) {
+                       /* Cleanup the L2 entry. */
+                       if (l2_allocated) {
+                           *impl.GetL1Entry(virt_addr) = InvalidL1PageTableEntry;
+                           this->NoteUpdated();
+                           FreePageTable(page_list, l2_virt);
+                       } else if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
+                           this->GetPageTableManager().Open(l2_virt, l2_open_count);
+                       }
+                       return svc::ResultOutOfResource();
+                   }
+
+                   /* Set the entry. */
+                   l3_phys = GetPageTablePhysicalAddress(l3_virt);
+                   PteDataSynchronizationBarrier();
+                   *l2_entry = L2PageTableEntry(l3_phys, this->IsKernel(), true);
+                   PteDataSynchronizationBarrier();
+                   l2_open_count++;
+               } else {
+                   l3_virt = GetPageTableVirtualAddress(l3_phys);
+               }
+           }
+           MESOSPHERE_ASSERT(l3_virt != Null<KVirtualAddress>);
+
+           /* Map the page. */
+           *impl.GetL3EntryFromTable(l3_virt, virt_addr) = L3PageTableEntry(phys_addr, entry_template, false);
+           l3_open_count++;
+           virt_addr += PageSize;
+           phys_addr += PageSize;
+
+           /* Account for hitting end of table. */
+           if (util::IsAligned(GetInteger(virt_addr), L2BlockSize)) {
+               if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
+                   this->GetPageTableManager().Open(l3_virt, l3_open_count);
+               }
+               l3_virt = Null<KVirtualAddress>;
+               l3_open_count = 0;
+
+               if (util::IsAligned(GetInteger(virt_addr), L1BlockSize)) {
+                   if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
+                       this->GetPageTableManager().Open(l2_virt, l2_open_count);
+                   }
+                   l2_virt = Null<KVirtualAddress>;
+                   l2_open_count = 0;
+               }
+           }
+       }
+
+       /* Perform any remaining opens. */
+       if (l2_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
+           this->GetPageTableManager().Open(l2_virt, l2_open_count);
+       }
+       if (l3_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
+           this->GetPageTableManager().Open(l3_virt, l3_open_count);
+       }
+
+       return ResultSuccess();
+   }
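One detail worth calling out in Map(): references to the L2/L3 tables are not opened one page at a time. The loop accumulates l2_open_count/l3_open_count and applies them in a single Open(addr, count) call whenever it crosses a table boundary, or after the loop, so the refcount traffic is proportional to the number of tables touched rather than the number of pages mapped. A standalone sketch of the batching idea (the types are invented for illustration):

    struct TableRefs {
        int count = 0;
        void Open(int n) { count += n; } /* one bulk update instead of n single updates */
    };

    void MapRange(TableRefs &table, int num_pages) {
        int pending = 0;
        for (int i = 0; i < num_pages; ++i) {
            /* ... install one page-table entry ... */
            ++pending;
        }
        if (pending > 0) {
            table.Open(pending); /* flush the batched opens at the boundary */
        }
    }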
+   Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, KPageGroup *pg, PageLinkedList *page_list, bool force, bool reuse_ll) {
+       MESOSPHERE_TODO_IMPLEMENT();
+   }
+
+   Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
+       MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+       MESOSPHERE_LOG("KPageTable::MapContiguous(%016lx, %016lx, %zu)\n", GetInteger(virt_addr), GetInteger(phys_addr), num_pages);
+
+       /* Cache initial addresses for use on cleanup. */
+       const KProcessAddress orig_virt_addr  = virt_addr;
+       const KPhysicalAddress orig_phys_addr = phys_addr;
+
+       size_t remaining_pages = num_pages;
+
+       if (num_pages < ContiguousPageSize / PageSize) {
+           auto guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, nullptr, page_list, true, true)); };
+           R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll));
+           guard.Cancel();
+       } else {
+           MESOSPHERE_TODO("Contiguous mapping");
+           (void)remaining_pages;
+       }
+
+       /* Perform what coalescing we can. */
+       this->MergePages(orig_virt_addr, page_list);
+       if (num_pages > 1) {
+           this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
+       }
+
+       /* Open references to the pages, if we should. */
+       if (IsHeapPhysicalAddress(orig_phys_addr)) {
+           Kernel::GetMemoryManager().Open(GetHeapVirtualAddress(orig_phys_addr), num_pages);
+       }
+
+       return ResultSuccess();
+   }
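MapContiguous() relies on the SCOPE_GUARD / Cancel() idiom: the cleanup (a forced unmap) is armed before the fallible Map() call and disarmed only on success, so an error return automatically rolls back the partial mapping. Atmosphere's real macro lives in its util library; this is just a minimal standalone sketch of the shape of the pattern:

    #include <utility>

    template<typename F>
    class ScopeGuard {
        private:
            F f;
            bool active;
        public:
            explicit ScopeGuard(F func) : f(std::move(func)), active(true) { /* ... */ }
            ~ScopeGuard() { if (active) { f(); } }
            void Cancel() { active = false; }
            ScopeGuard(const ScopeGuard &) = delete;
            ScopeGuard &operator=(const ScopeGuard &) = delete;
    };

    /* Usage: the guard fires on early return; Cancel() disarms it on the success path. */
    bool DoMap() {
        ScopeGuard guard([] { /* undo the partial mapping */ });
        if (/* map failed */ false) { return false; } /* guard runs here */
        guard.Cancel();
        return true;
    }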
+   bool KPageTable::MergePages(KProcessAddress virt_addr, PageLinkedList *page_list) {
+       MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+       auto &impl = this->GetImpl();
+       bool merged = false;
+
+       /* If there's no L1 table, don't bother. */
+       L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
+       if (!l1_entry->IsTable()) {
+           return merged;
+       }
+
+       /* Examine and try to merge the L2 table. */
+       L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr);
+       if (l2_entry->IsTable()) {
+           /* We have an L3 entry. */
+           L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr);
+           if (!l3_entry->IsBlock() || !l3_entry->IsContiguousAllowed()) {
+               return merged;
+           }
+
+           /* If it's not contiguous, try to make it so. */
+           if (!l3_entry->IsContiguous()) {
+               virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize);
+               KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize);
+               const u64 entry_template = l3_entry->GetEntryTemplate();
+
+               /* Validate that we can merge. */
+               for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
+                   if (!impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + PageSize * i) | PageTableEntry::Type_L3Block)) {
+                       return merged;
+                   }
+               }
+
+               /* Merge! */
+               for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
+                   impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->SetContiguous(true);
+               }
+
+               /* Note that we updated. */
+               this->NoteUpdated();
+               merged = true;
+           }
+
+           /* We might be able to upgrade a contiguous set of L3 entries into an L2 block. */
+           virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize);
+           KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L2BlockSize);
+           const u64 entry_template = l3_entry->GetEntryTemplate();
+
+           /* Validate that we can merge. */
+           for (size_t i = 0; i < L2BlockSize / L3ContiguousBlockSize; i++) {
+               if (!impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * i)->Is(entry_template | GetInteger(phys_addr + L3ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous)) {
+                   return merged;
+               }
+           }
+
+           /* Merge! */
+           PteDataSynchronizationBarrier();
+           *l2_entry = L2PageTableEntry(phys_addr, entry_template, false);
+
+           /* Note that we updated. */
+           this->NoteUpdated();
+           merged = true;
+
+           /* Free the L3 table. */
+           KVirtualAddress l3_table = util::AlignDown(reinterpret_cast<uintptr_t>(l3_entry), PageSize);
+           if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
+               this->GetPageTableManager().Close(l3_table, L2BlockSize / L3BlockSize);
+               this->FreePageTable(page_list, l3_table);
+           }
+       }
+       if (l2_entry->IsBlock()) {
+           /* If it's not contiguous, try to make it so. */
+           if (!l2_entry->IsContiguous()) {
+               virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
+               KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize);
+               const u64 entry_template = l2_entry->GetEntryTemplate();
+
+               /* Validate that we can merge. */
+               for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
+                   if (!impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2BlockSize * i) | PageTableEntry::Type_L2Block)) {
+                       return merged;
+                   }
+               }
+
+               /* Merge! */
+               for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
+                   impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->SetContiguous(true);
+               }
+
+               /* Note that we updated. */
+               this->NoteUpdated();
+               merged = true;
+           }
+
+           /* We might be able to upgrade a contiguous set of L2 entries into an L1 block. */
+           virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize);
+           KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L1BlockSize);
+           const u64 entry_template = l2_entry->GetEntryTemplate();
+
+           /* Validate that we can merge. */
+           for (size_t i = 0; i < L1BlockSize / L2ContiguousBlockSize; i++) {
+               if (!impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous)) {
+                   return merged;
+               }
+           }
+
+           /* Merge! */
+           PteDataSynchronizationBarrier();
+           *l1_entry = L1PageTableEntry(phys_addr, entry_template, false);
+
+           /* Note that we updated. */
+           this->NoteUpdated();
+           merged = true;
+
+           /* Free the L2 table. */
+           KVirtualAddress l2_table = util::AlignDown(reinterpret_cast<uintptr_t>(l2_entry), PageSize);
+           if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
+               this->GetPageTableManager().Close(l2_table, L1BlockSize / L2BlockSize);
+               this->FreePageTable(page_list, l2_table);
+           }
+       }
+
+       return merged;
+   }
+
+   void KPageTable::FinalizeUpdate(PageLinkedList *page_list) {
+       while (page_list->Peek()) {
+           KVirtualAddress page = KVirtualAddress(page_list->Pop());
+           MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
+           MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
+           this->GetPageTableManager().Free(page);
+       }
+   }

 }
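MergePages() climbs a ladder of block sizes one rung at a time: 16 adjacent L3 entries gain the contiguous hint, a fully populated L3 table collapses into an L2 block, 16 L2 blocks gain the hint, and a full L2 table collapses into an L1 block. The arithmetic behind the constants it uses, for the standard AArch64 4 KiB granule (values here restate the kernel's configuration as a checkable sketch):

    constexpr size_t PageSize              = 0x1000;              /* 4 KiB page = L3 block        */
    constexpr size_t L3ContiguousBlockSize = 0x10 * PageSize;     /* 64 KiB: 16 PTEs share a hint */
    constexpr size_t L2BlockSize           = 0x200000;            /* 2 MiB                        */
    constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;  /* 32 MiB                       */
    constexpr size_t L1BlockSize           = 0x40000000;          /* 1 GiB                        */

    static_assert(L2BlockSize / PageSize == 512);    /* one full L3 table -> one L2 block */
    static_assert(L1BlockSize / L2BlockSize == 512); /* one full L2 table -> one L1 block */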
@@ -17,26 +17,13 @@

 namespace ams::kern::arm64 {

-   namespace {
-
-       constexpr size_t PageBits  = __builtin_ctzll(PageSize);
-       constexpr size_t NumLevels = 3;
-       constexpr size_t LevelBits = 9;
-       static_assert(NumLevels > 0);
-
-       constexpr size_t AddressBits = (NumLevels - 1) * LevelBits + PageBits;
-       static_assert(AddressBits <= BITSIZEOF(u64));
-       constexpr size_t AddressSpaceSize = (1ull << AddressBits);
-
-   }
-
    void KPageTableImpl::InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end) {
-       this->table       = static_cast<u64 *>(tb);
+       this->table       = static_cast<L1PageTableEntry *>(tb);
        this->is_kernel   = true;
        this->num_entries = util::AlignUp(end - start, AddressSpaceSize) / AddressSpaceSize;
    }

-   u64 *KPageTableImpl::Finalize() {
+   L1PageTableEntry *KPageTableImpl::Finalize() {
        return this->table;
    }
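The removed constants are worth a second look, since the surviving code still depends on AddressSpaceSize (presumably now defined in a header): with a 4 KiB granule, PageBits is 12 and each of the two lower translation levels resolves 9 bits, so a single top-level entry covers (3 - 1) * 9 + 12 = 30 bits of address, i.e. 1 GiB, and num_entries is simply the table's span divided by that. As a checkable sketch:

    constexpr size_t PageBits  = 12; /* log2(4 KiB) */
    constexpr size_t NumLevels = 3;
    constexpr size_t LevelBits = 9;  /* 512 entries per 4 KiB table */

    constexpr size_t AddressBits      = (NumLevels - 1) * LevelBits + PageBits; /* 30 */
    constexpr size_t AddressSpaceSize = size_t(1) << AddressBits;

    static_assert(AddressSpaceSize == 0x40000000); /* 1 GiB per L1 entry */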
@@ -23,13 +23,13 @@ namespace ams::kern {
        R_UNLESS(start_block != nullptr, svc::ResultOutOfResource());

        /* Set our start and end. */
-       this->start = st;
-       this->end   = nd;
-       MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->start), PageSize));
-       MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->end), PageSize));
+       this->start_address = st;
+       this->end_address   = nd;
+       MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->start_address), PageSize));
+       MESOSPHERE_ASSERT(util::IsAligned(GetInteger(this->end_address), PageSize));

        /* Initialize and insert the block. */
-       start_block->Initialize(this->start, (this->end - this->start) / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
+       start_block->Initialize(this->start_address, (this->end_address - this->start_address) / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
        this->memory_block_tree.insert(*start_block);

        return ResultSuccess();
@@ -47,6 +47,37 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(this->memory_block_tree.empty());
    }

+   KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
+       if (num_pages > 0) {
+           const KProcessAddress region_end  = region_start + region_num_pages * PageSize;
+           const KProcessAddress region_last = region_end - 1;
+           for (const_iterator it = this->FindIterator(region_start); it != this->memory_block_tree.cend(); it++) {
+               const KMemoryInfo info = it->GetMemoryInfo();
+               if (region_last < info.GetAddress()) {
+                   break;
+               }
+               if (info.state != KMemoryState_Free) {
+                   continue;
+               }
+
+               KProcessAddress area = (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress();
+               area += guard_pages * PageSize;
+
+               const KProcessAddress offset_area = util::AlignDown(GetInteger(area), alignment) + offset;
+               area = (area <= offset_area) ? offset_area : offset_area + alignment;
+
+               const KProcessAddress area_end  = area + num_pages * PageSize + guard_pages * PageSize;
+               const KProcessAddress area_last = area_end - 1;
+
+               if (info.GetAddress() <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= info.GetLastAddress()) {
+                   return area;
+               }
+           }
+       }
+
+       return Null<KProcessAddress>;
+   }
+
    void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr) {
        /* Ensure for auditing that we never end up with an invalid tree. */
        KScopedMemoryBlockManagerAuditor auditor(this);

@@ -101,7 +132,7 @@ namespace ams::kern {

        /* Find the iterator now that we've updated. */
        it = this->FindIterator(address);
-       if (address != this->start) {
+       if (address != this->start_address) {
            it--;
        }
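The candidate computation in FindFreeArea() is the standard "align with offset" trick: align the cursor down to the requested alignment, add the offset, and if that lands below the cursor, bump up by one alignment unit. A standalone sketch with a worked example:

    #include <cstdint>

    constexpr uint64_t AlignDown(uint64_t value, uint64_t align) {
        return value & ~(align - 1); /* align must be a power of two */
    }

    constexpr uint64_t FirstCandidateAtOrAbove(uint64_t area, uint64_t align, uint64_t offset) {
        const uint64_t candidate = AlignDown(area, align) + offset;
        return (area <= candidate) ? candidate : candidate + align;
    }

    /* area = 0x1234, align = 0x1000, offset = 0x100: 0x1100 is below the cursor, so 0x2100. */
    static_assert(FirstCandidateAtOrAbove(0x1234, 0x1000, 0x100) == 0x2100);
    static_assert(FirstCandidateAtOrAbove(0x1034, 0x1000, 0x100) == 0x1100);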
@@ -135,7 +135,6 @@ namespace ams::kern {

    namespace {

        constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);

        constexpr size_t CarveoutAlignment = 0x20000;
@@ -103,19 +103,10 @@ namespace ams::kern {
        /* Loop, trying to iterate from each block. */
        Impl *chosen_manager = nullptr;
        KVirtualAddress allocated_block = Null<KVirtualAddress>;
-       if (dir == Direction_FromBack) {
-           for (chosen_manager = this->pool_managers_tail[pool]; chosen_manager != nullptr; chosen_manager = chosen_manager->GetPrev()) {
-               allocated_block = chosen_manager->AllocateBlock(heap_index);
-               if (allocated_block != Null<KVirtualAddress>) {
-                   break;
-               }
-           }
-       } else {
-           for (chosen_manager = this->pool_managers_head[pool]; chosen_manager != nullptr; chosen_manager = chosen_manager->GetNext()) {
-               allocated_block = chosen_manager->AllocateBlock(heap_index);
-               if (allocated_block != Null<KVirtualAddress>) {
-                   break;
-               }
-           }
+       for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) {
+           allocated_block = chosen_manager->AllocateBlock(heap_index);
+           if (allocated_block != Null<KVirtualAddress>) {
+               break;
+           }
        }
@@ -138,6 +129,70 @@ namespace ams::kern {
        return allocated_block;
    }

+   Result KMemoryManager::Allocate(KPageGroup *out, size_t num_pages, u32 option) {
+       MESOSPHERE_ASSERT(out != nullptr);
+       MESOSPHERE_ASSERT(out->GetNumPages() == 0);
+
+       /* Early return if we're allocating no pages. */
+       if (num_pages == 0) {
+           return ResultSuccess();
+       }
+
+       /* Lock the pool that we're allocating from. */
+       const auto [pool, dir] = DecodeOption(option);
+       KScopedLightLock lk(this->pool_locks[pool]);
+
+       /* Choose a heap based on our page size request. */
+       const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
+       R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
+
+       /* Ensure that we don't leave anything un-freed. */
+       auto group_guard = SCOPE_GUARD {
+           for (const auto &it : *out) {
+               auto &manager = this->GetManager(it.GetAddress());
+               const size_t num_pages = std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
+               manager.Free(it.GetAddress(), num_pages);
+           }
+           out->Finalize();
+       };
+
+       /* Keep allocating until we've allocated all our pages. */
+       for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
+           const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
+           for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
+               while (num_pages >= pages_per_alloc) {
+                   /* Allocate a block. */
+                   KVirtualAddress allocated_block = cur_manager->AllocateBlock(index);
+                   if (allocated_block == Null<KVirtualAddress>) {
+                       break;
+                   }
+
+                   /* Safely add it to our group. */
+                   {
+                       auto block_guard = SCOPE_GUARD { cur_manager->Free(allocated_block, pages_per_alloc); };
+                       R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
+                       block_guard.Cancel();
+                   }
+
+                   /* Maintain the optimized memory bitmap, if we should. */
+                   if (this->has_optimized_process[pool]) {
+                       cur_manager->TrackAllocationForOptimizedProcess(allocated_block, pages_per_alloc);
+                   }
+
+                   num_pages -= pages_per_alloc;
+               }
+           }
+       }
+
+       /* Only succeed if we allocated as many pages as we wanted. */
+       MESOSPHERE_ASSERT(num_pages >= 0);
+       R_UNLESS(num_pages == 0, svc::ResultOutOfMemory());
+
+       /* We succeeded! */
+       group_guard.Cancel();
+       return ResultSuccess();
+   }
+
    size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) {
        /* Calculate metadata sizes. */
        const size_t ref_count_size = (region->GetSize() / PageSize) * sizeof(u16);
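Allocate() is a greedy, largest-first allocator: starting from the biggest block size that does not exceed the request, it takes as many blocks of each size as still fit and lets the remainder fall through to the next smaller size. A standalone sketch of the splitting logic (the block sizes here are invented for illustration):

    #include <cstddef>
    #include <vector>

    struct Alloc { size_t block_pages; size_t count; };

    /* Greedily cover num_pages with descending block sizes; returns the plan. */
    std::vector<Alloc> Plan(size_t num_pages, const std::vector<size_t> &sizes_desc) {
        std::vector<Alloc> plan;
        for (size_t block : sizes_desc) {
            if (num_pages >= block) {
                plan.push_back({ block, num_pages / block });
                num_pages %= block;
            }
        }
        /* num_pages is now zero whenever the size list ends in 1. */
        return plan;
    }

For example, Plan(600, {512, 16, 1}) yields one 512-page block, five 16-page blocks, and eight single pages.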
@@ -17,10 +17,6 @@

 namespace ams::kern {

-   void KPageGroup::Initialize(KBlockInfoManager *m) {
-       this->manager = m;
-   }
-
    void KPageGroup::Finalize() {
        auto it = this->block_list.begin();
        while (it != this->block_list.end()) {
@@ -104,7 +104,7 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(big_index >= 0);

        /* Free space before the big blocks. */
-       for (s32 i = big_index; i >= 0; i--) {
+       for (s32 i = big_index - 1; i >= 0; i--) {
            const size_t block_size = this->blocks[i].GetSize();
            while (before_start + block_size <= before_end) {
                before_end -= block_size;

@@ -113,11 +113,11 @@ namespace ams::kern {
        }

        /* Free space after the big blocks. */
-       for (s32 i = big_index; i >= 0; i--) {
+       for (s32 i = big_index - 1; i >= 0; i--) {
            const size_t block_size = this->blocks[i].GetSize();
            while (after_start + block_size <= after_end) {
-               after_start += block_size;
                this->FreeBlock(after_start, i);
+               after_start += block_size;
            }
        }
    }
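Two fixes land in this hunk. The loops now start at big_index - 1 because the leftover space on either side of the big blocks is, by construction, smaller than one big block, so the big size can never match again. More importantly, the "after" loop previously advanced after_start before freeing, so each iteration freed the block one stride beyond the one it stood on and the first block was never freed. A tiny standalone illustration of the corrected ordering (the print stands in for the real free):

    #include <cstdint>
    #include <cstdio>

    void FreeRange(uint64_t start, uint64_t end, uint64_t block_size) {
        while (start + block_size <= end) {
            std::printf("free block at %#llx\n", (unsigned long long)start); /* free first... */
            start += block_size;                                             /* ...then advance */
        }
    }

With start = 0x100000 and block_size = 0x10000, the old order would have skipped 0x100000 entirely and touched one block past the intended range.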
@@ -51,7 +51,7 @@ namespace ams::kern {
        this->stack_fill_value = MemoryFillValue_Zero;

        this->cached_physical_linear_region           = nullptr;
-       this->cached_physical_non_kernel_dram_region  = nullptr;
+       this->cached_physical_heap_region             = nullptr;
        this->cached_virtual_managed_pool_dram_region = nullptr;

        /* Initialize our implementation. */
@@ -67,4 +67,279 @@ namespace ams::kern {
        this->memory_block_manager.Finalize(this->memory_block_slab_manager);
        MESOSPHERE_TODO("cpu::InvalidateEntireInstructionCache();");
    }

+   KProcessAddress KPageTableBase::GetRegionAddress(KMemoryState state) const {
+       switch (state) {
+           case KMemoryState_Free:
+           case KMemoryState_Kernel:
+               return this->address_space_start;
+           case KMemoryState_Normal:
+               return this->heap_region_start;
+           case KMemoryState_Ipc:
+           case KMemoryState_NonSecureIpc:
+           case KMemoryState_NonDeviceIpc:
+               return this->alias_region_start;
+           case KMemoryState_Stack:
+               return this->stack_region_start;
+           case KMemoryState_Io:
+           case KMemoryState_Static:
+           case KMemoryState_ThreadLocal:
+               return this->kernel_map_region_start;
+           case KMemoryState_Shared:
+           case KMemoryState_AliasCode:
+           case KMemoryState_AliasCodeData:
+           case KMemoryState_Transfered:
+           case KMemoryState_SharedTransfered:
+           case KMemoryState_SharedCode:
+           case KMemoryState_GeneratedCode:
+           case KMemoryState_CodeOut:
+               return this->alias_code_region_start;
+           case KMemoryState_Code:
+           case KMemoryState_CodeData:
+               return this->code_region_start;
+           MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+       }
+   }
+
+   size_t KPageTableBase::GetRegionSize(KMemoryState state) const {
+       switch (state) {
+           case KMemoryState_Free:
+           case KMemoryState_Kernel:
+               return this->address_space_end - this->address_space_start;
+           case KMemoryState_Normal:
+               return this->heap_region_end - this->heap_region_start;
+           case KMemoryState_Ipc:
+           case KMemoryState_NonSecureIpc:
+           case KMemoryState_NonDeviceIpc:
+               return this->alias_region_end - this->alias_region_start;
+           case KMemoryState_Stack:
+               return this->stack_region_end - this->stack_region_start;
+           case KMemoryState_Io:
+           case KMemoryState_Static:
+           case KMemoryState_ThreadLocal:
+               return this->kernel_map_region_end - this->kernel_map_region_start;
+           case KMemoryState_Shared:
+           case KMemoryState_AliasCode:
+           case KMemoryState_AliasCodeData:
+           case KMemoryState_Transfered:
+           case KMemoryState_SharedTransfered:
+           case KMemoryState_SharedCode:
+           case KMemoryState_GeneratedCode:
+           case KMemoryState_CodeOut:
+               return this->alias_code_region_end - this->alias_code_region_start;
+           case KMemoryState_Code:
+           case KMemoryState_CodeData:
+               return this->code_region_end - this->code_region_start;
+           MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+       }
+   }
+   bool KPageTableBase::Contains(KProcessAddress addr, size_t size, KMemoryState state) const {
+       const KProcessAddress end  = addr + size;
+       const KProcessAddress last = end - 1;
+
+       const KProcessAddress region_start = this->GetRegionAddress(state);
+       const size_t region_size           = this->GetRegionSize(state);
+
+       const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1;
+       const bool is_in_heap   = !(end <= this->heap_region_start || this->heap_region_end <= addr);
+       const bool is_in_alias  = !(end <= this->alias_region_start || this->alias_region_end <= addr);
+       switch (state) {
+           case KMemoryState_Free:
+           case KMemoryState_Kernel:
+               return is_in_region;
+           case KMemoryState_Io:
+           case KMemoryState_Static:
+           case KMemoryState_Code:
+           case KMemoryState_CodeData:
+           case KMemoryState_Shared:
+           case KMemoryState_AliasCode:
+           case KMemoryState_AliasCodeData:
+           case KMemoryState_Stack:
+           case KMemoryState_ThreadLocal:
+           case KMemoryState_Transfered:
+           case KMemoryState_SharedTransfered:
+           case KMemoryState_SharedCode:
+           case KMemoryState_GeneratedCode:
+           case KMemoryState_CodeOut:
+               return is_in_region && !is_in_heap && !is_in_alias;
+           case KMemoryState_Normal:
+               MESOSPHERE_ASSERT(is_in_heap);
+               return is_in_region && !is_in_alias;
+           case KMemoryState_Ipc:
+           case KMemoryState_NonSecureIpc:
+           case KMemoryState_NonDeviceIpc:
+               MESOSPHERE_ASSERT(is_in_alias);
+               return is_in_region && !is_in_heap;
+           default:
+               return false;
+       }
+   }
+
+   Result KPageTableBase::CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
+       /* Validate the states match expectation. */
+       R_UNLESS((info.state & state_mask) == state, svc::ResultInvalidCurrentMemory());
+       R_UNLESS((info.perm & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
+       R_UNLESS((info.attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory());
+
+       return ResultSuccess();
+   }
+
+   Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
+       MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+       /* Get information about the first block. */
+       const KProcessAddress last_addr = addr + size - 1;
+       KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(addr);
+       KMemoryInfo info = it->GetMemoryInfo();
+
+       /* Validate all blocks in the range have correct state. */
+       const KMemoryState      first_state = info.state;
+       const KMemoryPermission first_perm  = info.perm;
+       const KMemoryAttribute  first_attr  = info.attribute;
+       while (true) {
+           /* Validate the current block. */
+           R_UNLESS(info.state == first_state, svc::ResultInvalidCurrentMemory());
+           R_UNLESS(info.perm == first_perm, svc::ResultInvalidCurrentMemory());
+           R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory());
+
+           /* Validate against the provided masks. */
+           R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
+
+           /* Break once we're done. */
+           if (last_addr <= info.GetLastAddress()) {
+               break;
+           }
+
+           /* Advance our iterator. */
+           it++;
+           MESOSPHERE_ASSERT(it != this->memory_block_manager.cend());
+           info = it->GetMemoryInfo();
+       }
+
+       /* Write output state. */
+       if (out_state) {
+           *out_state = first_state;
+       }
+       if (out_perm) {
+           *out_perm = first_perm;
+       }
+       if (out_attr) {
+           *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
+       }
+       return ResultSuccess();
+   }
+   Result KPageTableBase::QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const {
+       MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+       MESOSPHERE_ASSERT(out_info != nullptr);
+       MESOSPHERE_ASSERT(out_page != nullptr);
+
+       const KMemoryBlock *block = this->memory_block_manager.FindBlock(address);
+       R_UNLESS(block != nullptr, svc::ResultInvalidCurrentMemory());
+
+       *out_info = block->GetMemoryInfo();
+       out_page->flags = 0;
+       return ResultSuccess();
+   }
+   KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
+       KProcessAddress address = Null<KProcessAddress>;
+
+       if (num_pages <= region_num_pages) {
+           if (this->IsAslrEnabled()) {
+               /* Try to directly find a free area up to 8 times. */
+               for (size_t i = 0; i < 8; i++) {
+                   const size_t random_offset = KSystemControl::GenerateRandomRange(0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * alignment;
+                   const KProcessAddress candidate = util::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;
+
+                   KMemoryInfo info;
+                   ams::svc::PageInfo page_info;
+                   MESOSPHERE_R_ABORT_UNLESS(this->QueryInfoImpl(&info, &page_info, candidate));
+
+                   if (info.state != KMemoryState_Free) { continue; }
+                   if (!(region_start <= candidate)) { continue; }
+                   if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; }
+                   if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= info.GetLastAddress())) { continue; }
+                   if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= region_start + region_num_pages * PageSize - 1)) { continue; }
+
+                   address = candidate;
+                   break;
+               }
+               /* Fall back to finding the first free area with a random offset. */
+               if (address == Null<KProcessAddress>) {
+                   /* NOTE: Nintendo does not account for guard pages here. */
+                   /* This may theoretically cause an offset to be chosen that cannot be mapped. */
+                   /* TODO: Should we account for guard pages? */
+                   const size_t offset_pages = KSystemControl::GenerateRandomRange(0, region_num_pages - num_pages);
+                   address = this->memory_block_manager.FindFreeArea(region_start + offset_pages * PageSize, region_num_pages - offset_pages, num_pages, alignment, offset, guard_pages);
+               }
+           }
+           /* Find the first free area. */
+           if (address == Null<KProcessAddress>) {
+               address = this->memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, alignment, offset, guard_pages);
+           }
+       }
+
+       return address;
+   }
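FindFreeArea() tries randomization before determinism: up to eight random, aligned candidates are probed against the block tree, and only if all of them miss does it fall back to a linear first-fit scan, itself started at a random page offset. A standalone sketch of the shape of the strategy (the RNG, seed, and free-range predicate are stand-ins, and region_size >= size is assumed):

    #include <cstdint>
    #include <optional>
    #include <random>

    using Addr = uint64_t;

    std::optional<Addr> FindFree(Addr region_start, Addr region_size, Addr size, Addr align,
                                 bool (*is_free)(Addr, Addr)) {
        static std::mt19937_64 rng{0xC0FFEE}; /* hypothetical seed; the kernel uses KSystemControl */
        const Addr max_off = region_size - size;
        for (int attempt = 0; attempt < 8; ++attempt) {
            const Addr candidate = region_start + (rng() % (max_off / align + 1)) * align;
            if (is_free(candidate, size)) {
                return candidate; /* random candidate hit a free range */
            }
        }
        /* Fall back to a linear scan (omitted): first aligned free range wins. */
        return std::nullopt;
    }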
+   Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties) {
+       /* Create a page group to hold the pages we allocate. */
+       KPageGroup pg(this->block_info_manager);
+
+       /* Allocate the pages. */
+       R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), num_pages, this->allocate_option));
+
+       /* Ensure that the page group is open while we work with it. */
+       KScopedPageGroup spg(pg);
+
+       /* Clear all pages. */
+       for (const auto &it : pg) {
+           std::memset(GetVoidPointer(it.GetAddress()), this->heap_fill_value, it.GetSize());
+       }
+
+       /* Map the pages. */
+       return this->Operate(page_list, address, num_pages, std::addressof(pg), properties, OperationType_MapGroup, false);
+   }
+   Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+       MESOSPHERE_ASSERT(util::IsAligned(alignment, PageSize) && alignment >= PageSize);
+
+       /* Ensure this is a valid map request. */
+       R_UNLESS(this->Contains(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
+       R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory());
+
+       /* Lock the table. */
+       KScopedLightLock lk(this->general_lock);
+
+       /* Find a random address to map at. */
+       KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
+       R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
+       MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), alignment));
+       MESOSPHERE_ASSERT(this->Contains(addr, num_pages * PageSize, state));
+       MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_All, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
+
+       /* Create an update allocator. */
+       KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+       R_TRY(allocator.GetResult());
+
+       /* We're going to perform an update, so create a helper. */
+       KScopedPageTableUpdater updater(this);
+
+       /* Perform mapping operation. */
+       const KPageProperties properties = { perm, false, false, false };
+       if (is_pa_valid) {
+           R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
+       } else {
+           R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, properties));
+       }
+
+       /* Update the blocks. */
+       this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None);
+
+       /* We successfully mapped the pages. */
+       *out_addr = addr;
+       return ResultSuccess();
+   }

 }
@@ -200,14 +200,18 @@ namespace ams::kern {
        KPageBuffer *page = KPageBuffer::Allocate();
        R_UNLESS(page != nullptr, svc::ResultOutOfResource());

        /* Map the stack page. */
        KProcessAddress stack_top = Null<KProcessAddress>;
        {
+           KProcessAddress stack_bottom = Null<KProcessAddress>;
            auto page_guard = SCOPE_GUARD { KPageBuffer::Free(page); };
-           MESOSPHERE_TODO("R_TRY(Kernel::GetSupervisorPageTable().Map); ...");
-           (void)(stack_region);
+           R_TRY(Kernel::GetKernelPageTable().MapPages(std::addressof(stack_bottom), 1, PageSize, page->GetPhysicalAddress(), stack_region.GetAddress(),
+                                                       stack_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
            page_guard.Cancel();
+
+           /* Calculate top of the stack. */
+           stack_top = stack_bottom + PageSize;
        }

        /* Initialize the thread. */
@@ -21,4 +21,8 @@ void operator delete (void *deleted) throw() {

 void operator delete (void *deleted, size_t size) throw() {
     MESOSPHERE_PANIC("operator delete(void *, size_t) was called: %p %zu", deleted, size);
 }

+void abort() {
+    MESOSPHERE_PANIC("abort() was called");
+}
@@ -90,7 +90,7 @@ namespace ams::kern::init {
        g_initial_page_allocator.Initialize(initial_page_allocator_state);

        /* Ensure that the T1SZ is correct (and what we expect). */
-       MESOSPHERE_INIT_ABORT_UNLESS((cpu::TranslationControlRegisterAccessor().GetT1Size() / L1BlockSize) == MaxPageTableEntries);
+       MESOSPHERE_INIT_ABORT_UNLESS((cpu::TranslationControlRegisterAccessor().GetT1Size() / arm64::L1BlockSize) == arm64::MaxPageTableEntries);

        /* Create page table object for use during initialization. */
        KInitialPageTable ttbr1_table(util::AlignDown(cpu::GetTtbr1El1(), PageSize), KInitialPageTable::NoClear{});