kern: implement 10.x perm change, fix many page table bugs

Michael Scire 2020-04-19 17:16:19 -07:00
parent dcfb3bc9b5
commit b39b6f0d5b
8 changed files with 262 additions and 194 deletions

View file

@@ -292,7 +292,7 @@ namespace ams::kern::arch::arm64::init {
/* Can we make an L1 block? */
if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
*l1_entry = L1PageTableEntry(phys_addr, attr, false);
*l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, false);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L1BlockSize;
@@ -305,7 +305,7 @@ namespace ams::kern::arch::arm64::init {
if (!l1_entry->IsTable()) {
KPhysicalAddress new_table = allocator.Allocate();
ClearNewPageTable(new_table);
*l1_entry = L1PageTableEntry(new_table, attr.IsPrivilegedExecuteNever());
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
cpu::DataSynchronizationBarrierInnerShareable();
}
@@ -314,7 +314,7 @@ namespace ams::kern::arch::arm64::init {
/* Can we make a contiguous L2 block? */
if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) {
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
l2_entry[i] = L2PageTableEntry(phys_addr, attr, true);
l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, true);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L2BlockSize;
@@ -326,7 +326,7 @@ namespace ams::kern::arch::arm64::init {
/* Can we make an L2 block? */
if (util::IsAligned(GetInteger(virt_addr), L2BlockSize) && util::IsAligned(GetInteger(phys_addr), L2BlockSize) && size >= L2BlockSize) {
*l2_entry = L2PageTableEntry(phys_addr, attr, false);
*l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, false);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L2BlockSize;
@@ -339,7 +339,7 @@ namespace ams::kern::arch::arm64::init {
if (!l2_entry->IsTable()) {
KPhysicalAddress new_table = allocator.Allocate();
ClearNewPageTable(new_table);
*l2_entry = L2PageTableEntry(new_table, attr.IsPrivilegedExecuteNever());
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
cpu::DataSynchronizationBarrierInnerShareable();
}
@@ -348,7 +348,7 @@ namespace ams::kern::arch::arm64::init {
/* Can we make a contiguous L3 block? */
if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) {
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
l3_entry[i] = L3PageTableEntry(phys_addr, attr, true);
l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, true);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L3BlockSize;
@@ -359,7 +359,7 @@ namespace ams::kern::arch::arm64::init {
}
/* Make an L3 block. */
*l3_entry = L3PageTableEntry(phys_addr, attr, false);
*l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, false);
cpu::DataSynchronizationBarrierInnerShareable();
virt_addr += L3BlockSize;
phys_addr += L3BlockSize;
@@ -537,7 +537,7 @@ namespace ams::kern::arch::arm64::init {
cpu::InvalidateEntireTlb();
/* Create new L1 block. */
*l1_entry = L1PageTableEntry(block, attr_after, false);
*l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, false);
virt_addr += L1BlockSize;
size -= L1BlockSize;
@@ -568,7 +568,7 @@ namespace ams::kern::arch::arm64::init {
/* Create a new contiguous L2 block. */
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
l2_entry[i] = L2PageTableEntry(block + L2BlockSize * i, attr_after, true);
l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, block + L2BlockSize * i, attr_after, true);
}
virt_addr += L2ContiguousBlockSize;
@@ -586,7 +586,7 @@ namespace ams::kern::arch::arm64::init {
cpu::InvalidateEntireTlb();
/* Create new L2 block. */
*l2_entry = L2PageTableEntry(block, attr_after, false);
*l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, false);
virt_addr += L2BlockSize;
size -= L2BlockSize;
@@ -620,7 +620,7 @@ namespace ams::kern::arch::arm64::init {
/* Create a new contiguous L3 block. */
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
l3_entry[i] = L3PageTableEntry(block + L3BlockSize * i, attr_after, true);
l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, block + L3BlockSize * i, attr_after, true);
}
virt_addr += L3ContiguousBlockSize;
@@ -638,7 +638,7 @@ namespace ams::kern::arch::arm64::init {
cpu::InvalidateEntireTlb();
/* Create new L3 block. */
*l3_entry = L3PageTableEntry(block, attr_after, false);
*l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, false);
virt_addr += L3BlockSize;
size -= L3BlockSize;

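The change running through this file replaces the old ambiguous constructors with tag-dispatched ones: a caller must now pass PageTableEntry::BlockTag{} or PageTableEntry::TableTag{} to state which kind of descriptor it is building. A minimal sketch of the pattern, with hypothetical names and simplified bit layouts (not code from this commit):

#include <cstdint>

class Entry {
    public:
        struct TableTag{};
        struct BlockTag{};
    private:
        uint64_t attributes;
    public:
        /* A table descriptor sets low bits 0b11. */
        constexpr explicit Entry(TableTag, uint64_t phys) : attributes(phys | 0x3) { /* ... */ }
        /* A block descriptor sets only the low valid bit. */
        constexpr explicit Entry(BlockTag, uint64_t phys) : attributes(phys | 0x1) { /* ... */ }
        constexpr uint64_t GetRawAttributes() const { return attributes; }
};

static_assert(Entry(Entry::TableTag{}, 0x40000000).GetRawAttributes() == 0x40000003);
static_assert(Entry(Entry::BlockTag{}, 0x40000000).GetRawAttributes() == 0x40000001);

With the tags, accidentally constructing a table entry where a block entry was intended no longer compiles, which is what the widespread call-site changes above enforce.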
View file

@@ -104,7 +104,7 @@ namespace ams::kern::arch::arm64 {
private:
constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
/* Set basic attributes. */
PageTableEntry entry;
PageTableEntry entry{PageTableEntry::ExtensionFlag_Valid};
entry.SetPrivilegedExecuteNever(true);
entry.SetAccessFlag(PageTableEntry::AccessFlag_Accessed);
entry.SetShareable(PageTableEntry::Shareable_InnerShareable);
@@ -163,6 +163,9 @@ namespace ams::kern::arch::arm64 {
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
/* Set the fault bit based on whether the page is mapped. */
entry.SetMapped((properties.perm & KMemoryPermission_NotMapped) == 0);
return entry;
}
public:

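The new fault bit works because the template keeps the software validity marker in bit 56 while bit 0 alone decides whether the MMU treats the descriptor as present. A rough sketch of the idea, with the two flags reduced to bare constants (illustrative only):

#include <cstdint>

constexpr uint64_t ExtensionFlag_Valid = 1ull << 56; /* software: entry is in use  */
constexpr uint64_t MappingFlag_Mapped  = 1ull << 0;  /* hardware: descriptor valid */

constexpr uint64_t MakeTemplate(bool mapped) {
    uint64_t entry = ExtensionFlag_Valid;
    if (mapped) {
        entry |= MappingFlag_Mapped;
    }
    return entry;
}

/* An unmapped template faults on access but still records that the page is owned. */
static_assert((MakeTemplate(false) & 1) == 0);
static_assert(MakeTemplate(true) == (ExtensionFlag_Valid | MappingFlag_Mapped));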
View file

@@ -30,12 +30,8 @@ namespace ams::kern::arch::arm64 {
class PageTableEntry {
public:
struct InvalidTag{};
enum ExtensionTag : u64 {
ExtensionTag_IsValidBit = (1ul << 56),
ExtensionTag_IsValid = (ExtensionTag_IsValidBit | (1ul << 0)),
ExtensionTag_IsBlockMask = (ExtensionTag_IsValidBit | (1ul << 1)),
};
struct TableTag{};
struct BlockTag{};
enum Permission : u64 {
Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
@@ -68,13 +64,26 @@ namespace ams::kern::arch::arm64 {
AccessFlag_Accessed = (1 << 10),
};
enum MappingFlag : u64 {
MappingFlag_NotMapped = (0 << 0),
MappingFlag_Mapped = (1 << 0),
};
enum ExtensionFlag : u64 {
ExtensionFlag_NotContiguous = (1ul << 55),
ExtensionFlag_Valid = (1ul << 56),
ExtensionFlag_ValidAndMapped = (ExtensionFlag_Valid | MappingFlag_Mapped),
ExtensionFlag_TestTableMask = (ExtensionFlag_Valid | (1ul << 1)),
};
enum Type : u64 {
Type_None = 0x0,
Type_L1Block = 0x1,
Type_L1Table = 0x3,
Type_L2Block = 0x1,
Type_L2Table = 0x3,
Type_L3Block = 0x3,
Type_L1Block = ExtensionFlag_Valid,
Type_L1Table = 0x2,
Type_L2Block = ExtensionFlag_Valid,
Type_L2Table = 0x2,
Type_L3Block = ExtensionFlag_TestTableMask,
};
enum ContigType : u64 {
@@ -85,17 +94,17 @@ namespace ams::kern::arch::arm64 {
u64 attributes;
public:
/* Take in a raw attribute. */
constexpr ALWAYS_INLINE PageTableEntry() : attributes() { /* ... */ }
constexpr ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }
constexpr explicit ALWAYS_INLINE PageTableEntry() : attributes() { /* ... */ }
constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }
constexpr ALWAYS_INLINE PageTableEntry(InvalidTag) : attributes(0) { /* ... */ }
constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : attributes(0) { /* ... */ }
/* Extend a previous attribute. */
constexpr ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
/* Construct a new attribute. */
constexpr ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share)
: attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionTag_IsValid))
constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m)
: attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
{
/* ... */
}
@@ -129,7 +138,7 @@ namespace ams::kern::arch::arm64 {
}
}
public:
constexpr ALWAYS_INLINE bool IsContiguousAllowed() const { return this->GetBits(55, 1) != 0; }
constexpr ALWAYS_INLINE bool IsContiguousAllowed() const { return this->GetBits(55, 1) == 0; }
constexpr ALWAYS_INLINE bool IsUserExecuteNever() const { return this->GetBits(54, 1) != 0; }
constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
@@ -140,9 +149,10 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
constexpr ALWAYS_INLINE bool IsBlock() const { return (this->attributes & ExtensionTag_IsBlockMask) == ExtensionTag_IsValidBit; }
constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBits(0, 2) == 0x3; }
constexpr ALWAYS_INLINE bool IsEmpty() const { return this->GetBits(0, 2) == 0x0; }
constexpr ALWAYS_INLINE bool IsBlock() const { return (this->attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
constexpr ALWAYS_INLINE bool IsTable() const { return (this->attributes & ExtensionFlag_TestTableMask) == 2; }
constexpr ALWAYS_INLINE bool IsEmpty() const { return (this->attributes & ExtensionFlag_TestTableMask) == 0; }
constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }
constexpr ALWAYS_INLINE decltype(auto) SetContiguousAllowed(bool en) { this->SetBit(55, !en); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
@@ -154,9 +164,10 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE decltype(auto) SetReadOnly(bool en) { this->SetBit(7, en); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetUserAccessible(bool en) { this->SetBit(6, en); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast<u64>(MappingFlag_Mapped) == (1 << 0)); this->SetBit(0, m); return *this; }
constexpr ALWAYS_INLINE u64 GetEntryTemplate() const {
constexpr u64 Mask = (0xFFF0000000000FFFul & ~u64(0x3ul | (0x1ul << 52)));
constexpr u64 Mask = (0xFFF0000000000FFFul & ~u64((0x1ul << 52) | ExtensionFlag_TestTableMask));
return this->attributes & Mask;
}
@@ -182,22 +193,22 @@ namespace ams::kern::arch::arm64 {
class L1PageTableEntry : public PageTableEntry {
public:
constexpr ALWAYS_INLINE L1PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr explicit ALWAYS_INLINE L1PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
constexpr explicit ALWAYS_INLINE L1PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool pxn)
: PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
constexpr explicit ALWAYS_INLINE L1PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
: PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
: PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionTag_IsValid)
constexpr explicit ALWAYS_INLINE L1PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
: PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
{
/* ... */
}
@@ -221,28 +232,28 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
/* Check whether this has the same permission/etc as the desired attributes. */
return L1PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
return L1PageTableEntry(BlockTag{}, this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
}
};
class L2PageTableEntry : public PageTableEntry {
public:
constexpr ALWAYS_INLINE L2PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr explicit ALWAYS_INLINE L2PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool pxn)
constexpr explicit ALWAYS_INLINE L2PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool pxn)
: PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
constexpr explicit ALWAYS_INLINE L2PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
: PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
: PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionTag_IsValid)
constexpr explicit ALWAYS_INLINE L2PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
: PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
{
/* ... */
}
@@ -266,21 +277,21 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
/* Check whether this has the same permission/etc as the desired attributes. */
return L2PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
return L2PageTableEntry(BlockTag{}, this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
}
};
class L3PageTableEntry : public PageTableEntry {
public:
constexpr ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr explicit ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr ALWAYS_INLINE L3PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
: PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x2 | PageTableEntry::ExtensionTag_IsValid)
constexpr explicit ALWAYS_INLINE L3PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
: PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | static_cast<u64>(ExtensionFlag_TestTableMask))
{
/* ... */
}
constexpr ALWAYS_INLINE bool IsBlock() const { return (GetRawAttributes() & ExtensionTag_IsBlockMask) == ExtensionTag_IsBlockMask; }
constexpr ALWAYS_INLINE bool IsBlock() const { return (GetRawAttributes() & ExtensionFlag_TestTableMask) == ExtensionFlag_TestTableMask; }
constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
return this->SelectBits(12, 36);
@@ -288,7 +299,7 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const {
/* Check whether this has the same permission/etc as the desired attributes. */
return L3PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
return L3PageTableEntry(BlockTag{}, this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes();
}
};

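With the enum rework, entry classification no longer depends on the hardware valid bit: bit 56 (the software valid flag) and bit 1 (the ARM table bit) are enough to tell empty, block, and table entries apart, which is what lets a "valid but unmapped" block keep bit 0 clear. A condensed sketch of the three predicates (constants simplified; the L3 override above, which treats both bits set as a block, is omitted):

#include <cstdint>

constexpr uint64_t Valid         = 1ull << 56;
constexpr uint64_t TestTableMask = Valid | (1ull << 1);

constexpr bool IsBlock(uint64_t attr) { return (attr & TestTableMask) == Valid; }
constexpr bool IsTable(uint64_t attr) { return (attr & TestTableMask) == (1ull << 1); }
constexpr bool IsEmpty(uint64_t attr) { return (attr & TestTableMask) == 0; }

static_assert(IsBlock(Valid | 0x1)); /* mapped L1/L2 block      */
static_assert(IsBlock(Valid));       /* unmapped L1/L2 block    */
static_assert(IsTable(0x3));         /* table descriptor (0b11) */
static_assert(IsEmpty(0x0));         /* free entry              */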
View file

@@ -142,6 +142,8 @@ namespace ams::kern {
KMemoryPermission_KernelWrite = ams::svc::MemoryPermission_Write << KMemoryPermission_KernelShift,
KMemoryPermission_KernelExecute = ams::svc::MemoryPermission_Execute << KMemoryPermission_KernelShift,
KMemoryPermission_NotMapped = (1 << (2 * KMemoryPermission_KernelShift)),
KMemoryPermission_KernelReadWrite = KMemoryPermission_KernelRead | KMemoryPermission_KernelWrite,
KMemoryPermission_KernelReadExecute = KMemoryPermission_KernelRead | KMemoryPermission_KernelExecute,
@@ -156,7 +158,7 @@ namespace ams::kern {
};
constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) {
return static_cast<KMemoryPermission>((perm & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((perm & KMemoryPermission_UserWrite) << KMemoryPermission_KernelShift));
return static_cast<KMemoryPermission>((perm & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((perm & KMemoryPermission_UserWrite) << KMemoryPermission_KernelShift) | (perm == ams::svc::MemoryPermission_None ? KMemoryPermission_NotMapped : KMemoryPermission_None));
}
enum KMemoryAttribute : u8 {

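The conversion change is the core of the 10.0.0 behavior: svc::MemoryPermission_None no longer produces a plain kernel-read mapping, it additionally tags the block as NotMapped so the page-table code can clear the hardware valid bit. A simplified model of the conversion (constants are stand-ins for the real enum values):

#include <cstdint>

constexpr uint32_t UserRead    = 1u << 0;
constexpr uint32_t UserWrite   = 1u << 1;
constexpr uint32_t UserMask    = 0x7;
constexpr uint32_t KernelShift = 3;
constexpr uint32_t KernelRead  = UserRead << KernelShift;
constexpr uint32_t NotMapped   = 1u << (2 * KernelShift);

constexpr uint32_t Convert(uint32_t perm) {
    return (perm & UserMask) | KernelRead
         | ((perm & UserWrite) << KernelShift)
         | (perm == 0 ? NotMapped : 0);
}

/* None converts to kernel-read plus the NotMapped marker; everything else is unchanged. */
static_assert(Convert(0) == (KernelRead | NotMapped));
static_assert(Convert(UserRead | UserWrite) == (UserRead | UserWrite | KernelRead | (UserWrite << KernelShift)));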
View file

@@ -292,7 +292,7 @@ namespace ams::kern::arch::arm64 {
/* Set the entry. */
l2_phys = GetPageTablePhysicalAddress(l2_virt);
PteDataSynchronizationBarrier();
*l1_entry = L1PageTableEntry(l2_phys, this->IsKernel(), true);
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
PteDataSynchronizationBarrier();
l2_allocated = true;
} else {
@@ -319,7 +319,7 @@ namespace ams::kern::arch::arm64 {
/* Set the entry. */
l3_phys = GetPageTablePhysicalAddress(l3_virt);
PteDataSynchronizationBarrier();
*l2_entry = L2PageTableEntry(l3_phys, this->IsKernel(), true);
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
PteDataSynchronizationBarrier();
l2_open_count++;
} else {
@@ -329,7 +329,7 @@ namespace ams::kern::arch::arm64 {
MESOSPHERE_ASSERT(l3_virt != Null<KVirtualAddress>);
/* Map the page. */
*impl.GetL3EntryFromTable(l3_virt, virt_addr) = L3PageTableEntry(phys_addr, entry_template, false);
*impl.GetL3EntryFromTable(l3_virt, virt_addr) = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false);
l3_open_count++;
virt_addr += PageSize;
phys_addr += PageSize;
@@ -706,6 +706,8 @@ namespace ams::kern::arch::arm64 {
/* If there's no L1 table, don't bother. */
L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
if (!l1_entry->IsTable()) {
/* Ensure the table is not corrupted. */
MESOSPHERE_ABORT_UNLESS(l1_entry->IsBlock() || l1_entry->IsEmpty());
return merged;
}
@@ -726,7 +728,7 @@ namespace ams::kern::arch::arm64 {
/* Validate that we can merge. */
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
if (!impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + PageSize * i) | PageTableEntry::Type_L3Block)) {
if (!impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L3BlockSize * i) | PageTableEntry::Type_L3Block)) {
return merged;
}
}
@@ -748,14 +750,14 @@ namespace ams::kern::arch::arm64 {
/* Validate that we can merge. */
for (size_t i = 0; i < L2BlockSize / L3ContiguousBlockSize; i++) {
if (!impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L3ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous)) {
if (!impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * i)->Is(entry_template | GetInteger(phys_addr + L3ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L3Block)) {
return merged;
}
}
/* Merge! */
PteDataSynchronizationBarrier();
*l2_entry = L2PageTableEntry(phys_addr, entry_template, false);
*l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false);
/* Note that we updated. */
this->NoteUpdated();
@@ -769,59 +771,63 @@ namespace ams::kern::arch::arm64 {
this->FreePageTable(page_list, l3_table);
}
}
if (l2_entry->IsBlock()) {
/* If it's not contiguous, try to make it so. */
if (!l2_entry->IsContiguous()) {
virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize);
const u64 entry_template = l2_entry->GetEntryTemplate();
/* Validate that we can merge. */
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
if (!impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->Is(entry_template | GetInteger(phys_addr + PageSize * i) | PageTableEntry::Type_L2Block)) {
return merged;
}
}
/* If the l2 entry is not a block or we can't make it contiguous, we're done. */
if (!l2_entry->IsBlock() || !l2_entry->IsContiguousAllowed()) {
return merged;
}
/* Merge! */
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->SetContiguous(true);
}
/* Note that we updated. */
this->NoteUpdated();
merged = true;
}
/* We might be able to upgrade a contiguous set of L2 entries into an L1 block. */
virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize);
KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L1BlockSize);
/* If it's not contiguous, try to make it so. */
if (!l2_entry->IsContiguous()) {
virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize);
const u64 entry_template = l2_entry->GetEntryTemplate();
/* Validate that we can merge. */
for (size_t i = 0; i < L1BlockSize / L2ContiguousBlockSize; i++) {
if (!impl.GetL2Entry(l1_entry, virt_addr + L3BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous)) {
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
if (!impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2BlockSize * i) | PageTableEntry::Type_L2Block)) {
return merged;
}
}
/* Merge! */
PteDataSynchronizationBarrier();
*l1_entry = L1PageTableEntry(phys_addr, entry_template, false);
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i)->SetContiguous(true);
}
/* Note that we updated. */
this->NoteUpdated();
merged = true;
}
/* Free the L2 table. */
KVirtualAddress l2_table = util::AlignDown(reinterpret_cast<uintptr_t>(l2_entry), PageSize);
if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
this->GetPageTableManager().Close(l2_table, L1BlockSize / L2BlockSize);
ClearPageTable(l2_table);
this->FreePageTable(page_list, l2_table);
/* We might be able to upgrade a contiguous set of L2 entries into an L1 block. */
virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize);
KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L1BlockSize);
const u64 entry_template = l2_entry->GetEntryTemplate();
/* Validate that we can merge. */
for (size_t i = 0; i < L1BlockSize / L2ContiguousBlockSize; i++) {
if (!impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * i)->Is(entry_template | GetInteger(phys_addr + L2ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L2Block)) {
return merged;
}
}
/* Merge! */
PteDataSynchronizationBarrier();
*l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false);
/* Note that we updated. */
this->NoteUpdated();
merged = true;
/* Free the L2 table. */
KVirtualAddress l2_table = util::AlignDown(reinterpret_cast<uintptr_t>(l2_entry), PageSize);
if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
this->GetPageTableManager().Close(l2_table, L1BlockSize / L2BlockSize);
ClearPageTable(l2_table);
this->FreePageTable(page_list, l2_table);
}
return merged;
}
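For reference, the merge ladder above walks fixed ratios between granule sizes; with the standard 4KB-granule AArch64 layout the sizes involved are as follows (values as conventionally used for this translation regime, not quoted from the diff):

#include <cstddef>

constexpr size_t L3BlockSize           = 0x1000;     /*   4KB page       */
constexpr size_t L3ContiguousBlockSize = 0x10000;    /*  64KB = 16 pages */
constexpr size_t L2BlockSize           = 0x200000;   /*   2MB            */
constexpr size_t L2ContiguousBlockSize = 0x2000000;  /*  32MB            */
constexpr size_t L1BlockSize           = 0x40000000; /*   1GB            */

/* Each merge step folds a fixed number of entries into the next size up. */
static_assert(L3ContiguousBlockSize / L3BlockSize == 16);
static_assert(L2BlockSize / L3ContiguousBlockSize == 32);
static_assert(L2ContiguousBlockSize / L2BlockSize == 16);
static_assert(L1BlockSize / L2ContiguousBlockSize == 32);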
@@ -848,7 +854,7 @@ namespace ams::kern::arch::arm64 {
/* Set the entries in the L2 table. */
const u64 entry_template = l1_entry->GetEntryTemplate();
for (size_t i = 0; i < L1BlockSize / L2BlockSize; i++) {
*(impl.GetL2EntryFromTable(l2_table, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(block_phys_addr + L2BlockSize * i, entry_template, true);
*(impl.GetL2EntryFromTable(l2_table, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), true);
}
/* Open references to the L2 table. */
@@ -856,11 +862,12 @@ namespace ams::kern::arch::arm64 {
/* Replace the L1 entry with one to the new table. */
PteDataSynchronizationBarrier();
*l1_entry = L1PageTableEntry(l2_phys, this->IsKernel(), true);
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
this->NoteUpdated();
}
/* If we don't have an l1 table, we're done. */
MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable() || l1_entry->IsEmpty());
R_SUCCEED_IF(!l1_entry->IsTable());
/* We want to separate L2 contiguous blocks into L2 blocks, so check that our size permits that. */
@@ -894,7 +901,7 @@ namespace ams::kern::arch::arm64 {
/* Set the entries in the L3 table. */
const u64 entry_template = l2_entry->GetEntryTemplate();
for (size_t i = 0; i < L2BlockSize / L3BlockSize; i++) {
*(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(block_phys_addr + L3BlockSize * i, entry_template, true);
*(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), true);
}
/* Open references to the L3 table. */
@@ -902,11 +909,12 @@ namespace ams::kern::arch::arm64 {
/* Replace the L2 entry with one to the new table. */
PteDataSynchronizationBarrier();
*l2_entry = L2PageTableEntry(l3_phys, this->IsKernel(), true);
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
this->NoteUpdated();
}
/* If we don't have an L3 table, we're done. */
MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable() || l2_entry->IsEmpty());
R_SUCCEED_IF(!l2_entry->IsTable());
/* We want to separate L3 contiguous blocks into L2 blocks, so check that our size permits that. */
@@ -942,8 +950,6 @@ namespace ams::kern::arch::arm64 {
Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
auto &impl = this->GetImpl();
/* Separate pages before we change permissions. */
const size_t size = num_pages * PageSize;
R_TRY(this->SeparatePages(virt_addr, std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size), page_list, reuse_ll));
@@ -956,117 +962,163 @@ namespace ams::kern::arch::arm64 {
merge_guard.Cancel();
}
/* Cache initial addresses for use on cleanup. */
const KProcessAddress orig_virt_addr = virt_addr;
size_t remaining_pages = num_pages;
/* ===================================================== */
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr));
/* Define a helper function which will apply our template to entries. */
/* Continue changing properties until we've changed them for all pages. */
while (remaining_pages > 0) {
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
MESOSPHERE_ABORT_UNLESS(next_entry.block_size <= remaining_pages * PageSize);
enum ApplyOption : u32 {
ApplyOption_None = 0,
ApplyOption_FlushDataCache = (1u << 0),
ApplyOption_MergeMappings = (1u << 1),
};
L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
switch (next_entry.block_size) {
case L1BlockSize:
{
/* Clear the entry, if we should. */
if (refresh_mapping) {
*l1_entry = InvalidL1PageTableEntry;
this->NoteUpdated();
if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), L1BlockSize);
}
}
auto ApplyEntryTemplate = [this, virt_addr, num_pages, page_list](PageTableEntry entry_template, u32 apply_option) -> void {
/* Create work variables for us to use. */
KProcessAddress cur_virt_addr = virt_addr;
size_t remaining_pages = num_pages;
/* Write the updated entry. */
*l1_entry = L1PageTableEntry(next_entry.phys_addr, entry_template, false);
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr));
/* Continue changing properties until we've changed them for all pages. */
while (remaining_pages > 0) {
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
MESOSPHERE_ABORT_UNLESS(next_entry.block_size <= remaining_pages * PageSize);
/* If we should flush entries, do so. */
if ((apply_option & ApplyOption_FlushDataCache) != 0) {
if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size);
}
break;
case L2ContiguousBlockSize:
case L2BlockSize:
{
/* Get the number of L2 blocks. */
const size_t num_l2_blocks = next_entry.block_size / L2BlockSize;
}
/* Get the L2 entry. */
KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
/* Apply the entry template. */
L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_virt_addr);
switch (next_entry.block_size) {
case L1BlockSize:
{
/* Write the updated entry. */
*l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr, entry_template, false);
}
break;
case L2ContiguousBlockSize:
case L2BlockSize:
{
/* Get the number of L2 blocks. */
const size_t num_l2_blocks = next_entry.block_size / L2BlockSize;
/* Clear the entry, if we should. */
if (refresh_mapping) {
/* Get the L2 entry. */
KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
/* Write the updated entry. */
const bool contig = next_entry.block_size == L2ContiguousBlockSize;
for (size_t i = 0; i < num_l2_blocks; i++) {
*impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry;
}
this->NoteUpdated();
if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size);
*impl.GetL2EntryFromTable(l2_virt, cur_virt_addr + L2BlockSize * i) = L2PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L2BlockSize * i, entry_template, contig);
}
}
break;
case L3ContiguousBlockSize:
case L3BlockSize:
{
/* Get the number of L3 blocks. */
const size_t num_l3_blocks = next_entry.block_size / L3BlockSize;
/* Write the updated entry. */
const bool contig = next_entry.block_size == L2ContiguousBlockSize;
for (size_t i = 0; i < num_l2_blocks; i++) {
*impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = L2PageTableEntry(next_entry.phys_addr + L2BlockSize * i, entry_template, contig);
}
}
break;
case L3ContiguousBlockSize:
case L3BlockSize:
{
/* Get the number of L3 blocks. */
const size_t num_l3_blocks = next_entry.block_size / L3BlockSize;
/* Get the L2 entry. */
KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, cur_virt_addr);
/* Get the L2 entry. */
KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr);
/* Get the L3 entry. */
KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys));
const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys);
/* Get the L3 entry. */
KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys));
const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys);
/* Clear the entry, if we should. */
if (refresh_mapping) {
/* Write the updated entry. */
const bool contig = next_entry.block_size == L3ContiguousBlockSize;
for (size_t i = 0; i < num_l3_blocks; i++) {
*impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry;
}
this->NoteUpdated();
if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size);
*impl.GetL3EntryFromTable(l3_virt, cur_virt_addr + L3BlockSize * i) = L3PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L3BlockSize * i, entry_template, contig);
}
}
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
/* Write the updated entry. */
const bool contig = next_entry.block_size == L3ContiguousBlockSize;
for (size_t i = 0; i < num_l3_blocks; i++) {
*impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = L3PageTableEntry(next_entry.phys_addr + L3BlockSize * i, entry_template, contig);
/* If our option asks us to, try to merge mappings. */
bool merge = ((apply_option & ApplyOption_MergeMappings) != 0) && next_entry.block_size < L1BlockSize;
if (merge) {
const size_t larger_align = GetLargerAlignment(next_entry.block_size);
if (util::IsAligned(GetInteger(cur_virt_addr) + next_entry.block_size, larger_align)) {
const uintptr_t aligned_start = util::AlignDown(GetInteger(cur_virt_addr), larger_align);
if (virt_addr <= aligned_start && aligned_start + larger_align - 1 < GetInteger(virt_addr) + (num_pages * PageSize) - 1) {
merge = this->MergePages(cur_virt_addr, page_list);
} else {
merge = false;
}
} else {
merge = false;
}
}
/* If we merged, correct the traversal to a sane state. */
if (merge) {
/* NOTE: Nintendo does not verify the result of this BeginTraversal call. */
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr));
/* The actual size needs to not take into account the portion of the block before our virtual address. */
const size_t actual_size = next_entry.block_size - (GetInteger(next_entry.phys_addr) & (next_entry.block_size - 1));
remaining_pages -= std::min(remaining_pages, actual_size / PageSize);
cur_virt_addr += actual_size;
} else {
/* If we didn't merge, just advance. */
remaining_pages -= next_entry.block_size / PageSize;
cur_virt_addr += next_entry.block_size;
}
/* Continue our traversal. */
if (remaining_pages == 0) {
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
MESOSPHERE_ABORT_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
}
};
/* ===================================================== */
/* If we don't need to refresh the pages, we can just apply the mappings. */
if (!refresh_mapping) {
ApplyEntryTemplate(entry_template, ApplyOption_None);
this->NoteUpdated();
} else {
/* We need to refresh the mappings. */
/* First, apply the changes without the mapped bit. This will cause all entries to page fault if accessed. */
{
PageTableEntry unmapped_template = entry_template;
unmapped_template.SetMapped(false);
ApplyEntryTemplate(unmapped_template, ApplyOption_MergeMappings);
this->NoteUpdated();
}
/* Advance. */
virt_addr += next_entry.block_size;
remaining_pages -= next_entry.block_size / PageSize;
if (remaining_pages == 0) {
break;
/* Next, take and immediately release the scheduler lock. This will force a reschedule. */
{
KScopedSchedulerLock sl;
}
MESOSPHERE_ABORT_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
/* Finally, apply the changes as directed, flushing the mappings before they're applied. */
ApplyEntryTemplate(entry_template, ApplyOption_FlushDataCache);
}
/* We've succeeded, now perform what coalescing we can. */
this->MergePages(orig_virt_addr, page_list);
this->MergePages(virt_addr, page_list);
if (num_pages > 1) {
this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
this->MergePages(virt_addr + (num_pages - 1) * PageSize, page_list);
}
return ResultSuccess();

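Taken together, the rewritten ChangePermissions turns a permission refresh into a three-step protocol: apply an unmapped copy of the template (so every access faults), force a reschedule through the scheduler lock, then flush and apply the final template. A schematic outline with stand-in stubs (none of these are the real kernel APIs):

#include <cstdio>

/* Stand-in stubs, for illustration only. */
void ApplyEntryTemplate(bool mapped, bool flush_dc) { std::printf("apply template: mapped=%d flush=%d\n", mapped, flush_dc); }
void NoteUpdated()     { std::printf("invalidate TLB\n"); }
void ForceReschedule() { std::printf("acquire+release scheduler lock\n"); }

void ChangePermissions(bool refresh_mapping) {
    if (!refresh_mapping) {
        /* No refresh needed: apply the template directly. */
        ApplyEntryTemplate(true, false);
        NoteUpdated();
    } else {
        /* 1) Apply the template with the mapped bit clear; accesses now fault. */
        ApplyEntryTemplate(false, false);
        NoteUpdated();
        /* 2) Force every core through the scheduler so nothing keeps running on a stale translation. */
        ForceReschedule();
        /* 3) Flush the data cache for each block, then write the final template. */
        ApplyEntryTemplate(true, true);
    }
}

int main() { ChangePermissions(true); return 0; }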
View file

@@ -135,7 +135,7 @@ namespace ams::kern {
namespace {
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
constexpr size_t CarveoutAlignment = 0x20000;
constexpr size_t CarveoutSizeMax = 512_MB - CarveoutAlignment;

View file

@@ -57,9 +57,9 @@ namespace ams::kern::init {
}
/* Page table attributes. */
constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry KernelMmioAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_Device_nGnRE, PageTableEntry::Shareable_OuterShareable);
constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
constexpr PageTableEntry KernelMmioAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_Device_nGnRE, PageTableEntry::Shareable_OuterShareable, PageTableEntry::MappingFlag_Mapped);
void MapStackForCore(KInitialPageTable &page_table, KMemoryRegionType type, u32 core_id) {
constexpr size_t StackSize = PageSize;

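Every constexpr attribute in the kernel now spells out the fourth argument, because the widened PageTableEntry constructor ORs the mapping flag into the raw descriptor alongside the permission, memory attribute, shareability, access flag, and software valid bit. A reduced model of that composition (bit positions illustrative except for bits 0 and 56):

#include <cstdint>

constexpr uint64_t AccessFlag_Accessed = 1ull << 10;
constexpr uint64_t ExtensionFlag_Valid = 1ull << 56;
constexpr uint64_t MappingFlag_Mapped  = 1ull << 0;

struct Entry {
    uint64_t attributes;
    constexpr explicit Entry(uint64_t perm, uint64_t page_attr, uint64_t share, uint64_t mapped)
        : attributes(perm | AccessFlag_Accessed | page_attr | share | ExtensionFlag_Valid | mapped) { /* ... */ }
};

/* A kernel-RW, inner-shareable, mapped data attribute (field values illustrative). */
constexpr Entry KernelRwData(/*perm*/ 0x0, /*attr*/ 1ull << 2, /*share*/ 3ull << 8, MappingFlag_Mapped);
static_assert((KernelRwData.attributes & MappingFlag_Mapped) != 0);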
View file

@@ -93,17 +93,17 @@ namespace ams::kern::init::loader {
KInitialPageTable ttbr0_table(allocator.Allocate());
/* Map in an RWX identity mapping for the kernel. */
constexpr PageTableEntry KernelRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry KernelRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
ttbr0_table.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator);
/* Map in an RWX identity mapping for ourselves. */
constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
const uintptr_t kernel_ldr_base = util::AlignDown(reinterpret_cast<uintptr_t>(__start__), PageSize);
const uintptr_t kernel_ldr_size = util::AlignUp(reinterpret_cast<uintptr_t>(__end__), PageSize) - kernel_ldr_base;
ttbr0_table.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelRWXIdentityAttribute, allocator);
/* Map in the page table region as RW- for ourselves. */
constexpr PageTableEntry PageTableRegionRWAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry PageTableRegionRWAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
ttbr0_table.Map(page_table_region, page_table_region_size, page_table_region, PageTableRegionRWAttribute, allocator);
/* Place the L1 table addresses in the relevant system registers. */
@@ -294,13 +294,13 @@ namespace ams::kern::init::loader {
const KVirtualAddress virtual_base_address = GetRandomKernelBaseAddress(ttbr1_table, base_address, bss_end_offset);
/* Map kernel .text as R-X. */
constexpr PageTableEntry KernelTextAttribute(PageTableEntry::Permission_KernelRX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry KernelTextAttribute(PageTableEntry::Permission_KernelRX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
ttbr1_table.Map(virtual_base_address + rx_offset, rx_end_offset - rx_offset, base_address + rx_offset, KernelTextAttribute, g_initial_page_allocator);
/* Map kernel .rodata and .rwdata as RW-. */
/* Note that we will later reprotect .rodata as R-- */
constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
ttbr1_table.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator);
ttbr1_table.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator);