Mirror of https://github.com/Atmosphere-NX/Atmosphere, synced 2025-02-05 23:12:51 +00:00
Compare commits (1 commit): f38d5fa15e...f3b2c5a6fe
7 changed files with 834 additions and 565 deletions

@@ -201,18 +201,40 @@ namespace ams::kern::arch::arm64 {
             NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index);
             Result Finalize();
         private:
+            Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
+            Result MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
+            Result MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);

             Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);

-            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll);
+            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
+                switch (page_size) {
+                    case L1BlockSize:
+                        R_RETURN(this->MapL1Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
+                    case L2ContiguousBlockSize:
+                        entry_template.SetContiguous(true);
+                        [[fallthrough]];
+#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+                    case L2TegraSmmuBlockSize:
+#endif
+                    case L2BlockSize:
+                        R_RETURN(this->MapL2Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
+                    case L3ContiguousBlockSize:
+                        entry_template.SetContiguous(true);
+                        [[fallthrough]];
+                    case L3BlockSize:
+                        R_RETURN(this->MapL3Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
+                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+                }
+            }

             Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
             Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);

-            bool MergePages(TraversalContext *context, PageLinkedList *page_list);
-            void MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list);
+            bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);

-            Result SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
-            Result SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll);
+            ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
+            Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);

             Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll);
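
Note: the new inline Map() is pure dispatch on the requested block size: it picks the level-specific mapper and, for the contiguous variants, sets the contiguous bit in the entry template before falling through. A standalone sketch of the same dispatch shape (simplified constants and return type, not the kernel's; assumes 4 KiB pages and 16-entry contiguous groups):

    #include <cstddef>
    #include <cstdio>

    // Simplified model of the block-size dispatch in the inline Map() above.
    constexpr size_t PageSize              = 0x1000;
    constexpr size_t L3BlockSize           = PageSize;            // 4 KiB
    constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;  // 64 KiB
    constexpr size_t L2BlockSize           = 0x200000;            // 2 MiB
    constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;  // 32 MiB
    constexpr size_t L1BlockSize           = 0x40000000;          // 1 GiB

    struct MapRequest { int level; bool contiguous; };

    constexpr MapRequest Dispatch(size_t block_size) {
        switch (block_size) {
            case L1BlockSize:           return {1, false};
            case L2ContiguousBlockSize: return {2, true};  // contiguous bit set, then L2 path
            case L2BlockSize:           return {2, false};
            case L3ContiguousBlockSize: return {3, true};  // contiguous bit set, then L3 path
            case L3BlockSize:           return {3, false};
            default:                    return {0, false}; // unreachable in the kernel's switch
        }
    }

    int main() {
        constexpr MapRequest r = Dispatch(L2ContiguousBlockSize);
        std::printf("level L%d, contiguous=%d\n", r.level, r.contiguous); // level L2, contiguous=1
    }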

@@ -222,6 +244,7 @@ namespace ams::kern::arch::arm64 {

            static ALWAYS_INLINE void ClearPageTable(KVirtualAddress table) {
                cpu::ClearPageToZero(GetVoidPointer(table));
+                cpu::DataSynchronizationBarrierInnerShareable();
            }

            ALWAYS_INLINE void OnTableUpdated() const {
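
Note: the added DataSynchronizationBarrierInnerShareable() makes the zero-fill of a fresh table observable before any later store links that table into the translation hierarchy; without the ordering, a walker could follow the new table descriptor and read stale entries. A loosely analogous user-space sketch, with a C++ release fence standing in for the ARM DSB (an illustration of the ordering requirement, not of the hardware semantics):

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    alignas(0x1000) static uint64_t table[512];
    static std::atomic<uint64_t *> parent_entry{nullptr};

    void PublishClearedTable() {
        std::memset(table, 0, sizeof(table));                 // ClearPageToZero analogue
        std::atomic_thread_fence(std::memory_order_release);  // ordering point (DSB analogue)
        parent_entry.store(table, std::memory_order_relaxed); // only then link the table
    }

    int main() { PublishClearedTable(); }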

@@ -20,22 +20,18 @@

 namespace ams::kern::arch::arm64 {

-    constexpr size_t BlocksPerContiguousBlock = 0x10;
-    constexpr size_t BlocksPerTable = PageSize / sizeof(u64);
-
     constexpr size_t L1BlockSize = 1_GB;
-    constexpr size_t L1ContiguousBlockSize = BlocksPerContiguousBlock * L1BlockSize;
+    constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize;
     constexpr size_t L2BlockSize = 2_MB;
-    constexpr size_t L2ContiguousBlockSize = BlocksPerContiguousBlock * L2BlockSize;
+    constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
     constexpr size_t L3BlockSize = PageSize;
-    constexpr size_t L3ContiguousBlockSize = BlocksPerContiguousBlock * L3BlockSize;
+    constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;

     class PageTableEntry {
         public:
             struct InvalidTag{};
             struct TableTag{};
             struct BlockTag{};
-            struct SeparateContiguousTag{};

             enum Permission : u64 {
                 Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
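
Note: with the named BlocksPerContiguousBlock/BlocksPerTable constants gone, the 0x10 literal is the ARM "contiguous hint" group of 16 adjacent entries. The resulting sizes, checked with plain arithmetic (assuming the kernel's 4 KiB page size):

    using u64 = unsigned long long;

    constexpr u64 KiB = 1024, MiB = 1024 * KiB, GiB = 1024 * MiB;
    constexpr u64 PageSize = 4 * KiB;

    static_assert(0x10 * PageSize  ==    64 * KiB, "L3ContiguousBlockSize");
    static_assert(0x10 * (2 * MiB) ==    32 * MiB, "L2ContiguousBlockSize");
    static_assert(0x10 * (1 * GiB) == 16ull * GiB, "L1ContiguousBlockSize");

    int main() { return 0; }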

@@ -126,25 +122,6 @@ namespace ams::kern::arch::arm64 {
             {
                 /* ... */
             }

-            /* Construct a table. */
-            constexpr explicit ALWAYS_INLINE PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn, size_t num_blocks)
-                : PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | (num_blocks << 2) | 0x3)
-            {
-                /* ... */
-            }
-
-            /* Construct a block. */
-            constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig, bool page)
-                : PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | (page ? ExtensionFlag_TestTableMask : ExtensionFlag_Valid))
-            {
-                /* ... */
-            }
-
-            constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, SeparateContiguousTag)
-                : PageTableEntry(attr, GetInteger(phys_addr))
-            {
-                /* ... */
-            }
-
         protected:
             constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
                 return (m_attributes >> offset) & ((1ul << count) - 1);

@@ -188,7 +165,7 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->SelectBits(8, 2)); }
             constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->SelectBits(2, 3)); }
             constexpr ALWAYS_INLINE int GetAccessFlagInteger() const { return static_cast<int>(this->GetBits(10, 1)); }
             constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
             constexpr ALWAYS_INLINE int GetPageAttributeInteger() const { return static_cast<int>(this->GetBits(2, 3)); }
             constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }

@@ -217,14 +194,8 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
             constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast<u64>(MappingFlag_Mapped == (1 << 0))); this->SetBit(0, m); return *this; }

-            constexpr ALWAYS_INLINE size_t GetTableNumEntries() const { return this->GetBits(2, 10); }
-            constexpr ALWAYS_INLINE decltype(auto) SetTableNumEntries(size_t num) { this->SetBits(2, 10, num); }
-
-            constexpr ALWAYS_INLINE decltype(auto) AddTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() + num); }
-            constexpr ALWAYS_INLINE decltype(auto) RemoveTableEntries(size_t num) { return this->SetTableNumEntries(this->GetTableNumEntries() - num); }
-
             constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
-                constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
+                constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
                 return m_attributes & BaseMask;
             }

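
Note: the only substantive change in this hunk is the top of BaseMask: 0xFFFF000000000FFF keeps bits 63..48 plus 11..0, while 0xFFF0000000000FFF keeps only bits 63..52 plus 11..0, so bits 51..48 no longer survive into the merge template. A plausible reading is that those four bits are treated as output-address bits rather than attributes, but the diff itself does not say; the snippet below only confirms which bits differ:

    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr uint64_t old_mask = 0xFFFF000000000FFFull;
        constexpr uint64_t new_mask = 0xFFF0000000000FFFull;
        const uint64_t dropped = old_mask & ~new_mask; // 0x000F000000000000
        for (int bit = 0; bit < 64; ++bit) {
            if ((dropped >> bit) & 1) std::printf("bit %d no longer masked in\n", bit); // prints 48..51
        }
    }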

@@ -233,38 +204,6 @@ namespace ams::kern::arch::arm64 {
                 return (m_attributes & BaseMaskForMerge) == attr;
             }

-            static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguousMask(size_t idx) {
-                constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
-                if (idx == 0) {
-                    return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
-                } else if (idx < BlocksPerContiguousBlock - 1) {
-                    return BaseMask;
-                } else {
-                    return BaseMask | ExtensionFlag_DisableMergeTail;
-                }
-            }
-
-            constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguous(size_t idx) const {
-                return m_attributes & GetEntryTemplateForSeparateContiguousMask(idx);
-            }
-
-            static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateMask(size_t idx) {
-                constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
-                if (idx == 0) {
-                    return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
-                } else if (idx < BlocksPerContiguousBlock) {
-                    return BaseMask | ExtensionFlag_DisableMergeHeadAndBody;
-                } else if (idx < BlocksPerTable - 1) {
-                    return BaseMask;
-                } else {
-                    return BaseMask | ExtensionFlag_DisableMergeTail;
-                }
-            }
-
-            constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparate(size_t idx) const {
-                return m_attributes & GetEntryTemplateForSeparateMask(idx);
-            }
-
             constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafe() const {
                 return m_attributes;
             }

@@ -45,7 +45,7 @@ namespace ams::kern::arch::arm64 {
         };

         struct TraversalContext {
-            PageTableEntry *level_entries[EntryLevel_Count];
+            const PageTableEntry *level_entries[EntryLevel_Count];
             EntryLevel level;
             bool is_contiguous;
         };

@@ -78,6 +78,8 @@ namespace ams::kern::arch::arm64 {
         static constexpr ALWAYS_INLINE uintptr_t GetL2Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 2), LevelBits>(GetInteger(addr)); }
         static constexpr ALWAYS_INLINE uintptr_t GetL3Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 3), LevelBits>(GetInteger(addr)); }

+        static constexpr ALWAYS_INLINE uintptr_t GetLevelIndex(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), PageBits + LevelBits * level, LevelBits); }
+
         static constexpr ALWAYS_INLINE uintptr_t GetL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1)>(GetInteger(addr)); }
         static constexpr ALWAYS_INLINE uintptr_t GetL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2)>(GetInteger(addr)); }
         static constexpr ALWAYS_INLINE uintptr_t GetL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3)>(GetInteger(addr)); }
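
Note: the added GetLevelIndex generalizes the fixed GetL1/L2/L3Index helpers to a runtime level: the index at a given level is the LevelBits-wide field starting at bit PageBits + LevelBits * level. A worked example under the usual 4 KiB-granule parameters (PageBits = 12, LevelBits = 9) and the level numbering implied by the surrounding code (L3 = 0, L2 = 1, L1 = 2; an inference, not stated in this hunk):

    #include <cstdint>
    #include <cstdio>

    constexpr unsigned PageBits  = 12; // 4 KiB granule (assumed)
    constexpr unsigned LevelBits = 9;  // 512 entries per table

    constexpr uint64_t GetLevelIndex(uint64_t addr, unsigned level) {
        return (addr >> (PageBits + LevelBits * level)) & ((1u << LevelBits) - 1);
    }

    int main() {
        constexpr uint64_t addr = 0x3FC0202000ull;
        std::printf("L1 index (bits 38..30): %llu\n", (unsigned long long)GetLevelIndex(addr, 2)); // 255
        std::printf("L2 index (bits 29..21): %llu\n", (unsigned long long)GetLevelIndex(addr, 1)); // 1
        std::printf("L3 index (bits 20..12): %llu\n", (unsigned long long)GetLevelIndex(addr, 0)); // 2
    }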

@@ -91,8 +93,10 @@ namespace ams::kern::arch::arm64 {
            static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
                return KMemoryLayout::GetLinearVirtualAddress(addr);
            }
-        public:
-            static constexpr ALWAYS_INLINE uintptr_t GetLevelIndex(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), PageBits + LevelBits * level, LevelBits); }
+
+            //ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const;
+            //ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
+            //ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
        private:
            L1PageTableEntry *m_table;
            bool m_is_kernel;

@@ -121,17 +125,11 @@ namespace ams::kern::arch::arm64 {
            ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) const {
                return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
            }

-            static constexpr size_t GetBlockSize(EntryLevel level, bool contiguous = false) {
-                return 1 << (PageBits + LevelBits * level + 4 * contiguous);
-            }
        public:
            constexpr explicit KPageTableImpl(util::ConstantInitializeTag) : m_table(), m_is_kernel(), m_num_entries() { /* ... */ }

            explicit KPageTableImpl() { /* ... */ }

-            size_t GetNumL1Entries() const { return m_num_entries; }
-
            NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
            NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
            L1PageTableEntry *Finalize();

@@ -143,17 +141,6 @@ namespace ams::kern::arch::arm64 {
            bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;

            bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;

-            static bool MergePages(KVirtualAddress *out, TraversalContext *context);
-            void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const;
-
-            KProcessAddress GetAddressForContext(const TraversalContext *context) const {
-                KProcessAddress addr = m_is_kernel ? static_cast<uintptr_t>(-GetBlockSize(EntryLevel_L1)) * m_num_entries : 0;
-                for (u32 level = context->level; level <= EntryLevel_L1; ++level) {
-                    addr += ((reinterpret_cast<uintptr_t>(context->level_entries[level]) / sizeof(PageTableEntry)) & (BlocksPerTable - 1)) << (PageBits + LevelBits * level);
-                }
-                return addr;
-            }
        };

}
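
Note on the deleted GetAddressForContext: it rebuilds the virtual address purely from the PTE pointers saved in the traversal context, using the fact that a pointer's offset inside its page-aligned, 512-entry table is the translation index at that level. A simplified standalone model (hypothetical function name; 4 KiB granule assumed, and the kernel's negative base term for kernel tables omitted):

    #include <cstdint>
    #include <cstdio>

    constexpr unsigned PageBits = 12, LevelBits = 9;
    constexpr uint64_t EntriesPerTable = 1ull << LevelBits; // 512 eight-byte entries

    // level 0 = L3 .. level 2 = L1, as in the deleted helper.
    uint64_t AddressFromEntryPointers(const uint64_t *entries[3]) {
        uint64_t addr = 0;
        for (unsigned level = 0; level < 3; ++level) {
            const uint64_t index = (reinterpret_cast<uintptr_t>(entries[level]) / sizeof(uint64_t)) & (EntriesPerTable - 1);
            addr += index << (PageBits + LevelBits * level);
        }
        return addr;
    }

    int main() {
        alignas(0x1000) static uint64_t l1[512], l2[512], l3[512];
        const uint64_t *ctx[3] = { &l3[2], &l2[1], &l1[255] }; // indices 2, 1, 255
        std::printf("0x%llx\n", (unsigned long long)AddressFromEntryPointers(ctx)); // 0x3fc0202000
    }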

@@ -20,7 +20,7 @@ endif
 DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE
 SETTINGS := $(ATMOSPHERE_SETTINGS) $(ATMOSPHERE_OPTIMIZATION_FLAG) -mgeneral-regs-only -ffixed-x18 -Wextra -Werror -fno-non-call-exceptions
 CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE)
-CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit
+CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit -flto
 ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE)

 SOURCES += $(foreach v,$(call ALL_SOURCE_DIRS,../libvapours/source),$(if $(findstring ../libvapours/source/sdmmc,$v),,$v))

(File diff suppressed because it is too large.)

@@ -33,6 +33,94 @@ namespace ams::kern::arch::arm64 {
            return m_table;
        }

+        // bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const {
+        //     /* Set the L3 entry. */
+        //     out_context->l3_entry = l3_entry;
+        //
+        //     if (l3_entry->IsBlock()) {
+        //         /* Set the output entry. */
+        //         out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1));
+        //         if (l3_entry->IsContiguous()) {
+        //             out_entry->block_size = L3ContiguousBlockSize;
+        //         } else {
+        //             out_entry->block_size = L3BlockSize;
+        //         }
+        //         out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
+        //         out_entry->attr = 0;
+        //
+        //         return true;
+        //     } else {
+        //         out_entry->phys_addr = Null<KPhysicalAddress>;
+        //         out_entry->block_size = L3BlockSize;
+        //         out_entry->sw_reserved_bits = 0;
+        //         out_entry->attr = 0;
+        //         return false;
+        //     }
+        // }
+        //
+        // bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const {
+        //     /* Set the L2 entry. */
+        //     out_context->l2_entry = l2_entry;
+        //
+        //     if (l2_entry->IsBlock()) {
+        //         /* Set the output entry. */
+        //         out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1));
+        //         if (l2_entry->IsContiguous()) {
+        //             out_entry->block_size = L2ContiguousBlockSize;
+        //         } else {
+        //             out_entry->block_size = L2BlockSize;
+        //         }
+        //         out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
+        //         out_entry->attr = 0;
+        //
+        //         /* Set the output context. */
+        //         out_context->l3_entry = nullptr;
+        //         return true;
+        //     } else if (l2_entry->IsTable()) {
+        //         return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr);
+        //     } else {
+        //         out_entry->phys_addr = Null<KPhysicalAddress>;
+        //         out_entry->block_size = L2BlockSize;
+        //         out_entry->sw_reserved_bits = 0;
+        //         out_entry->attr = 0;
+        //
+        //         out_context->l3_entry = nullptr;
+        //         return false;
+        //     }
+        // }
+        //
+        // bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const {
+        //     /* Set the L1 entry. */
+        //     out_context->level_entries[EntryLevel_L1] = l1_entry;
+        //
+        //     if (l1_entry->IsBlock()) {
+        //         /* Set the output entry. */
+        //         out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1));
+        //         if (l1_entry->IsContiguous()) {
+        //             out_entry->block_size = L1ContiguousBlockSize;
+        //         } else {
+        //             out_entry->block_size = L1BlockSize;
+        //         }
+        //         out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits();
+        //
+        //         /* Set the output context. */
+        //         out_context->l2_entry = nullptr;
+        //         out_context->l3_entry = nullptr;
+        //         return true;
+        //     } else if (l1_entry->IsTable()) {
+        //         return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr);
+        //     } else {
+        //         out_entry->phys_addr = Null<KPhysicalAddress>;
+        //         out_entry->block_size = L1BlockSize;
+        //         out_entry->sw_reserved_bits = 0;
+        //         out_entry->attr = 0;
+        //
+        //         out_context->l2_entry = nullptr;
+        //         out_context->l3_entry = nullptr;
+        //         return false;
+        //     }
+        // }

        bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const {
            /* Setup invalid defaults. */
            *out_entry = {};

@@ -88,8 +176,9 @@ namespace ams::kern::arch::arm64 {
        bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const {
            /* Advance entry. */
+
            auto *cur_pte = context->level_entries[context->level];
-            auto *next_pte = reinterpret_cast<PageTableEntry *>(context->is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(cur_pte), BlocksPerContiguousBlock * sizeof(PageTableEntry)) + BlocksPerContiguousBlock * sizeof(PageTableEntry) : reinterpret_cast<uintptr_t>(cur_pte) + sizeof(PageTableEntry));
+            auto *next_pte = reinterpret_cast<PageTableEntry *>(context->is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(cur_pte), 0x10 * sizeof(PageTableEntry)) + 0x10 * sizeof(PageTableEntry) : reinterpret_cast<uintptr_t>(cur_pte) + sizeof(PageTableEntry));

            /* Set the pte. */
            context->level_entries[context->level] = next_pte;

@@ -166,123 +255,6 @@ namespace ams::kern::arch::arm64 {
            return is_block;
        }

-        bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context) {
-            /* We want to upgrade the pages by one step. */
-            if (context->is_contiguous) {
-                /* We can't merge an L1 table. */
-                if (context->level == EntryLevel_L1) {
-                    return false;
-                }
-
-                /* We want to upgrade a contiguous mapping in a table to a block. */
-                PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerTable * sizeof(PageTableEntry)));
-                const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(static_cast<EntryLevel>(context->level + 1), false));
-
-                /* First, check that all entries are valid for us to merge. */
-                const u64 entry_template = pte->GetEntryTemplateForMerge();
-                for (size_t i = 0; i < BlocksPerTable; ++i) {
-                    if (!pte[i].IsForMerge(entry_template | GetInteger(phys_addr + (i << (PageBits + LevelBits * context->level))) | PageTableEntry::ContigType_Contiguous | pte->GetTestTableMask())) {
-                        return false;
-                    }
-                    if (i > 0 && pte[i].IsHeadOrHeadAndBodyMergeDisabled()) {
-                        return false;
-                    }
-                    if (i < BlocksPerTable - 1 && pte[i].IsTailMergeDisabled()) {
-                        return false;
-                    }
-                }
-
-                /* The entries are valid for us to merge, so merge them. */
-                const auto *head_pte = pte;
-                const auto *tail_pte = pte + BlocksPerTable - 1;
-                const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled());
-
-                *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false, false);
-
-                /* Update our context. */
-                context->is_contiguous = false;
-                context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) + 1);
-
-                /* Set the output to the table we just freed. */
-                *out = KVirtualAddress(pte);
-            } else {
-                /* We want to upgrade a non-contiguous mapping to a contiguous mapping. */
-                PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
-                const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(context->level, true));
-
-                /* First, check that all entries are valid for us to merge. */
-                const u64 entry_template = pte->GetEntryTemplateForMerge();
-                for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
-                    if (!pte[i].IsForMerge(entry_template | GetInteger(phys_addr + (i << (PageBits + LevelBits * context->level))) | pte->GetTestTableMask())) {
-                        return false;
-                    }
-                    if (i > 0 && pte[i].IsHeadOrHeadAndBodyMergeDisabled()) {
-                        return false;
-                    }
-                    if (i < BlocksPerContiguousBlock - 1 && pte[i].IsTailMergeDisabled()) {
-                        return false;
-                    }
-                }
-
-                /* The entries are valid for us to merge, so merge them. */
-                const auto *head_pte = pte;
-                const auto *tail_pte = pte + BlocksPerContiguousBlock - 1;
-                const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled());
-
-                for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
-                    pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + (i << (PageBits + LevelBits * context->level)), PageTableEntry(entry_template), sw_reserved_bits, true, context->level == EntryLevel_L3);
-                }
-
-                /* Update our context. */
-                context->level_entries[context->level] = pte;
-                context->is_contiguous = true;
-            }
-
-            return true;
-        }
-
-        void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const {
-            /* We want to downgrade the pages by one step. */
-            if (context->is_contiguous) {
-                /* We want to downgrade a contiguous mapping to a non-contiguous mapping. */
-                pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
-
-                auto * const first = pte;
-                const KPhysicalAddress block = this->GetBlock(first, context->level);
-                for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
-                    pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * context->level)), PageTableEntry(first->GetEntryTemplateForSeparateContiguous(i)), PageTableEntry::SeparateContiguousTag{});
-                }
-
-                context->is_contiguous = false;
-
-                context->level_entries[context->level] = pte + (this->GetLevelIndex(address, context->level) & (BlocksPerContiguousBlock - 1));
-            } else {
-                /* We want to downgrade a block into a table. */
-                auto * const first = context->level_entries[context->level];
-                const KPhysicalAddress block = this->GetBlock(first, context->level);
-                for (size_t i = 0; i < BlocksPerTable; ++i) {
-                    pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * (context->level - 1))), PageTableEntry(first->GetEntryTemplateForSeparate(i)), PageTableEntry::SoftwareReservedBit_None, true, context->level - 1 == EntryLevel_L3);
-                }
-
-                context->is_contiguous = true;
-                context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
-
-                /* Wait for pending stores to complete. */
-                cpu::DataSynchronizationBarrierInnerShareableStore();
-
-                /* Update the block entry to be a table entry. */
-                *context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(pte)), m_is_kernel, true, BlocksPerTable);
-
-                context->level_entries[context->level] = pte + this->GetLevelIndex(address, context->level);
-            }
-
-            entry->sw_reserved_bits = 0;
-            entry->attr = 0;
-            entry->phys_addr = this->GetBlock(context->level_entries[context->level], context->level) + this->GetOffset(address, context->level);
-            entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);
-        }

        void KPageTableImpl::Dump(uintptr_t start, size_t size) const {
            /* If zero size, there's nothing to dump. */
            if (size == 0) {
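
Note on the deleted MergePages: both branches enforce the same precondition before rewriting anything. Every candidate entry must carry the identical attribute template (IsForMerge) with a physical address stepping by exactly one block, and no entry may have a merge-disable software bit set where the merge forbids it. A minimal model of the address/template part of that check over raw u64 entries (hypothetical encoding with attributes in the low 12 bits):

    #include <cstdint>

    // Merge precondition model: one shared attribute template, physical
    // addresses advancing by block_size, base aligned to the merged size.
    bool CanMerge(const uint64_t *pte, unsigned count, uint64_t block_size) {
        const uint64_t attr_mask = 0xFFFull;               // hypothetical attribute bits
        const uint64_t tmpl = pte[0] & attr_mask;
        const uint64_t base = pte[0] & ~attr_mask;
        if (base & (count * block_size - 1)) return false; // must align to merged size
        for (unsigned i = 0; i < count; ++i) {
            if (pte[i] != (tmpl | (base + i * block_size))) return false;
        }
        return true;
    }

    int main() {
        uint64_t ptes[16];
        for (unsigned i = 0; i < 16; ++i) ptes[i] = 0x40000000ull + i * 0x1000 + 0x7CB;
        return CanMerge(ptes, 16, 0x1000) ? 0 : 1; // 16 x 4 KiB -> one 64 KiB contiguous run
    }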

@@ -1438,10 +1438,7 @@ namespace ams::kern {
            this->SetState(ThreadState_Waiting);

            /* Set our wait queue. */
-            #pragma GCC diagnostic push
-            #pragma GCC diagnostic ignored "-Wdangling-pointer"
            m_wait_queue = queue;
-            #pragma GCC diagnostic pop
        }

        void KThread::NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result) {
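
Note on the dropped pragmas: GCC 12's -Wdangling-pointer fires when the address of a stack object (here the wait queue, which lives in the waiter's frame) is stored into an object that outlives the call; the store is actually safe because the thread does not run past the wait until the queue entry is unlinked, which is why the suppression had been added upstream. A tiny sketch of the pattern that can provoke the warning (hypothetical types; whether it warns depends on inlining and GCC version):

    // g++ -O2 -Wall example.cpp   (GCC 12+ enables -Wdangling-pointer with -Wall)
    struct WaitQueue { int waiters; };

    struct Thread {
        WaitQueue *wait_queue = nullptr;
        void BeginWait(WaitQueue *queue) { wait_queue = queue; }
    };

    static Thread g_thread;

    void WaitOnStackQueue() {
        WaitQueue queue{};          // lives only in this frame
        g_thread.BeginWait(&queue); // stores a pointer to it in a global: may warn
        // safe only if g_thread stops using the pointer before we return
    }

    int main() { WaitOnStackQueue(); }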