kern: implement new attr tracking for memory range/traversal context

Michael Scire 2024-03-28 02:30:24 -07:00
parent cfb12deb51
commit af7a200865
5 changed files with 26 additions and 7 deletions

View file

@@ -30,6 +30,7 @@ namespace ams::kern::arch::arm64 {
         KPhysicalAddress phys_addr;
         size_t block_size;
         u8 sw_reserved_bits;
+        u8 attr;

         constexpr bool IsHeadMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHead) != 0; }
         constexpr bool IsHeadAndBodyMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadAndBody) != 0; }

View file

@@ -62,18 +62,21 @@ namespace ams::kern {
             KPhysicalAddress m_address;
             size_t m_size;
             bool m_heap;
+            u8 m_attr;
         public:
-            constexpr MemoryRange() : m_address(Null<KPhysicalAddress>), m_size(0), m_heap(false) { /* ... */ }
+            constexpr MemoryRange() : m_address(Null<KPhysicalAddress>), m_size(0), m_heap(false), m_attr(0) { /* ... */ }

-            void Set(KPhysicalAddress address, size_t size, bool heap) {
+            void Set(KPhysicalAddress address, size_t size, bool heap, u8 attr) {
                 m_address = address;
                 m_size = size;
                 m_heap = heap;
+                m_attr = attr;
             }

             constexpr KPhysicalAddress GetAddress() const { return m_address; }
             constexpr size_t GetSize() const { return m_size; }
             constexpr bool IsHeap() const { return m_heap; }
+            constexpr u8 GetAttribute() const { return m_attr; }

             void Open();
             void Close();
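
For reference, a minimal standalone sketch of how a caller might use the extended MemoryRange: the traversal attribute is passed to Set() alongside the existing address/size/heap fields and read back through the new GetAttribute() accessor. This is not the kernel code itself; KPhysicalAddress is modeled as uintptr_t and the Open()/Close() reference-counting members are omitted so the sketch compiles on its own.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for ams::kern::MemoryRange, with KPhysicalAddress as uintptr_t.
class MemoryRange {
    private:
        std::uintptr_t m_address;
        std::size_t m_size;
        bool m_heap;
        std::uint8_t m_attr;
    public:
        constexpr MemoryRange() : m_address(0), m_size(0), m_heap(false), m_attr(0) { /* ... */ }

        void Set(std::uintptr_t address, std::size_t size, bool heap, std::uint8_t attr) {
            m_address = address;
            m_size    = size;
            m_heap    = heap;
            m_attr    = attr;
        }

        constexpr std::uintptr_t GetAddress() const { return m_address; }
        constexpr std::size_t GetSize() const { return m_size; }
        constexpr bool IsHeap() const { return m_heap; }
        constexpr std::uint8_t GetAttribute() const { return m_attr; }
};

int main() {
    MemoryRange range;
    range.Set(0x80000000, 0x1000, true, 0);

    // The attribute now travels with the range and can be read by whoever consumes it.
    std::printf("addr=%#zx size=%#zx heap=%d attr=%u\n",
                static_cast<std::size_t>(range.GetAddress()), range.GetSize(),
                range.IsHeap(), static_cast<unsigned>(range.GetAttribute()));
    return 0;
}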

View file

@@ -258,7 +258,7 @@ namespace ams::kern::arch::arm64 {
             /* Begin the traversal. */
             TraversalContext context;
-            TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
+            TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
             bool cur_valid = false;
             TraversalEntry next_entry;
             bool next_valid;
@@ -268,7 +268,9 @@ namespace ams::kern::arch::arm64 {
             /* Iterate over entries. */
             while (true) {
-                if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+                /* NOTE: Nintendo really does check next_entry.attr == (cur_entry.attr != 0)...but attr is always zero as of 18.0.0, and this is "probably" for the new console or debug-only anyway, */
+                /* so we'll implement the weird logic verbatim even though it doesn't match the GetContiguousRange logic. */
+                if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size && next_entry.attr == (cur_entry.attr ? 1 : 0))) {
                     cur_entry.block_size += next_entry.block_size;
                 } else {
                     if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
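
To make the NOTE above concrete, here is a small standalone sketch (hypothetical names and plain integer types, not the kernel's) contrasting the two attribute checks this commit introduces. While attr is always zero, both predicates agree; they only diverge once attribute values other than 0 and 1 show up.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Minimal stand-in for the traversal entry.
struct Entry {
    std::uintptr_t phys_addr;
    std::size_t    block_size;
    std::uint8_t   attr;
};

// Merge condition used in the finalize path above: the next attribute is compared
// against 1-if-nonzero / 0-if-zero of the current attribute (the "weird logic").
bool CanMergeForFinalize(const Entry &cur, const Entry &next) {
    return next.phys_addr == cur.phys_addr + cur.block_size
        && next.attr == (cur.attr ? 1 : 0);
}

// Contiguity condition used by GetContiguousRange: the attribute must match exactly.
bool CanExtendContiguousRange(const Entry &cur, const Entry &next) {
    return next.phys_addr == cur.phys_addr + cur.block_size
        && next.attr == cur.attr;
}

int main() {
    const Entry cur  = { 0x80000000, 0x1000, 2 };
    const Entry next = { 0x80001000, 0x1000, 1 };

    // With cur.attr == 2 and next.attr == 1, the finalize predicate merges the blocks,
    // while the exact-equality predicate used for contiguous ranges does not.
    std::printf("finalize merge: %d, contiguous range: %d\n",
                CanMergeForFinalize(cur, next), CanExtendContiguousRange(cur, next));
    return 0;
}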

View file

@@ -46,12 +46,14 @@ namespace ams::kern::arch::arm64 {
                 out_entry->block_size = L3BlockSize;
             }
             out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
+            out_entry->attr = 0;
             return true;
         } else {
             out_entry->phys_addr = Null<KPhysicalAddress>;
             out_entry->block_size = L3BlockSize;
             out_entry->sw_reserved_bits = 0;
+            out_entry->attr = 0;
             return false;
         }
     }
@@ -69,6 +71,7 @@ namespace ams::kern::arch::arm64 {
             out_entry->block_size = L2BlockSize;
         }
         out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
+        out_entry->attr = 0;

         /* Set the output context. */
         out_context->l3_entry = nullptr;
@@ -79,6 +82,8 @@ namespace ams::kern::arch::arm64 {
         out_entry->phys_addr = Null<KPhysicalAddress>;
         out_entry->block_size = L2BlockSize;
         out_entry->sw_reserved_bits = 0;
+        out_entry->attr = 0;
+
         out_context->l3_entry = nullptr;
         return false;
     }
@@ -108,6 +113,8 @@ namespace ams::kern::arch::arm64 {
         out_entry->phys_addr = Null<KPhysicalAddress>;
         out_entry->block_size = L1BlockSize;
         out_entry->sw_reserved_bits = 0;
+        out_entry->attr = 0;
+
         out_context->l2_entry = nullptr;
         out_context->l3_entry = nullptr;
         return false;
@@ -119,6 +126,7 @@ namespace ams::kern::arch::arm64 {
         out_entry->phys_addr = Null<KPhysicalAddress>;
         out_entry->block_size = L1BlockSize;
         out_entry->sw_reserved_bits = 0;
+        out_entry->attr = 0;
         out_context->l1_entry = m_table + m_num_entries;
         out_context->l2_entry = nullptr;
         out_context->l3_entry = nullptr;
@@ -220,6 +228,7 @@ namespace ams::kern::arch::arm64 {
         out_entry->phys_addr = Null<KPhysicalAddress>;
         out_entry->block_size = L1BlockSize;
         out_entry->sw_reserved_bits = 0;
+        out_entry->attr = 0;
         context->l1_entry = m_table + m_num_entries;
         context->l2_entry = nullptr;
         context->l3_entry = nullptr;
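
Every hunk in this file maintains the same invariant: any traversal path that reports an unmapped or out-of-range entry clears the new attr field along with phys_addr, block_size, and sw_reserved_bits. A hypothetical helper (not part of the commit, using simplified types) capturing that reset pattern could look like this:

#include <cstddef>
#include <cstdint>

// Simplified stand-in for the arm64 TraversalEntry, with KPhysicalAddress as uintptr_t.
struct TraversalEntry {
    std::uintptr_t phys_addr;
    std::size_t    block_size;
    std::uint8_t   sw_reserved_bits;
    std::uint8_t   attr;
};

// Reset an entry to the "unmapped" state for a given block size; every field,
// including the newly added attr, is cleared.
inline void ResetTraversalEntry(TraversalEntry *out_entry, std::size_t block_size) {
    out_entry->phys_addr        = 0; /* stands in for Null<KPhysicalAddress> */
    out_entry->block_size       = block_size;
    out_entry->sw_reserved_bits = 0;
    out_entry->attr             = 0;
}

int main() {
    TraversalEntry entry = { 0x80000000, 0x1000, 1, 3 };
    ResetTraversalEntry(&entry, 0x200000); /* e.g. an L2 block size */
    return entry.attr; /* now 0 */
}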

View file

@@ -916,7 +916,7 @@ namespace ams::kern {
             /* Begin traversal. */
             TraversalContext context;
-            TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
+            TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
             bool cur_valid = false;
             TraversalEntry next_entry;
             bool next_valid;
@@ -1687,11 +1687,12 @@ namespace ams::kern {
         /* Begin a traversal. */
         TraversalContext context;
-        TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
+        TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
         R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), svc::ResultInvalidCurrentMemory());

         /* Traverse until we have enough size or we aren't contiguous any more. */
         const KPhysicalAddress phys_address = cur_entry.phys_addr;
+        const u8 entry_attr = cur_entry.attr;
         size_t contig_size;
         for (contig_size = cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); contig_size < size; contig_size += cur_entry.block_size) {
             if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
@@ -1700,6 +1701,9 @@ namespace ams::kern {
             if (cur_entry.phys_addr != phys_address + contig_size) {
                 break;
             }
+            if (cur_entry.attr != entry_attr) {
+                break;
+            }
         }

         /* Take the minimum size for our region. */
/* Take the minimum size for our region. */ /* Take the minimum size for our region. */
@@ -1713,7 +1717,7 @@ namespace ams::kern {
         }

         /* The memory is contiguous, so set the output range. */
-        out->Set(phys_address, size, is_heap);
+        out->Set(phys_address, size, is_heap, attr);
         R_SUCCEED();
     }
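
As a simplified model of the contiguity walk above (hypothetical names; the real code drives impl.ContinueTraversal and handles the alignment of the first block, both omitted here), the loop now stops either when the next physical address is no longer adjacent or when the traversal attribute changes, and the first entry's attribute is what ends up stored in the output MemoryRange via Set:

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-in for the traversal entry.
struct Entry {
    std::uintptr_t phys_addr;
    std::size_t    block_size;
    std::uint8_t   attr;
};

// Walk pre-collected traversal entries starting at entries[0] and return how many
// bytes are physically contiguous *and* share the first entry's attribute.
std::size_t GetContiguousSize(const std::vector<Entry> &entries) {
    if (entries.empty()) {
        return 0;
    }

    const std::uintptr_t phys_address = entries[0].phys_addr;
    const std::uint8_t   entry_attr   = entries[0].attr;

    std::size_t contig_size = entries[0].block_size;
    for (std::size_t i = 1; i < entries.size(); ++i) {
        // Stop if the next block is not physically adjacent.
        if (entries[i].phys_addr != phys_address + contig_size) {
            break;
        }
        // New in this commit: also stop if the attribute differs from the first entry's.
        if (entries[i].attr != entry_attr) {
            break;
        }
        contig_size += entries[i].block_size;
    }
    return contig_size;
}

int main() {
    const std::vector<Entry> entries = {
        { 0x80000000, 0x1000, 0 },
        { 0x80001000, 0x1000, 0 },
        { 0x80002000, 0x1000, 1 }, // attribute change ends the contiguous range here
    };
    return GetContiguousSize(entries) == 0x2000 ? 0 : 1;
}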