diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index 569b8062c..17e768dd1 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -149,6 +149,7 @@ namespace ams::kern { static NOINLINE const KMemoryRegion &GetKernelTraceBufferRegion() { return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelTraceBuffer)); } static NOINLINE const KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) { return Dereference(FindLinear(address)); } + static NOINLINE const KMemoryRegion &GetPhysicalLinearRegion(KPhysicalAddress address) { return Dereference(FindLinear(address)); } static NOINLINE const KMemoryRegion *GetPhysicalKernelTraceBufferRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer); } static NOINLINE const KMemoryRegion *GetPhysicalOnMemoryBootImageRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_OnMemoryBootImage); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp index 4b17911e4..bd647eb08 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp @@ -70,37 +70,37 @@ namespace ams::kern { public: Impl() : m_heap(), m_page_reference_counts(), m_management_region(), m_pool(), m_next(), m_prev() { /* ... */ } - size_t Initialize(uintptr_t address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p); + size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p); - KVirtualAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); } - void Free(KVirtualAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); } + KPhysicalAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); } + void Free(KPhysicalAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); } void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); } void InitializeOptimizedMemory() { std::memset(GetVoidPointer(m_management_region), 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); } - void TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages); - void TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages); + void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages); + void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages); - bool ProcessOptimizedAllocation(KVirtualAddress block, size_t num_pages, u8 fill_pattern); + bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern); constexpr Pool GetPool() const { return m_pool; } constexpr size_t GetSize() const { return m_heap.GetSize(); } - constexpr KVirtualAddress GetEndAddress() const { return m_heap.GetEndAddress(); } + constexpr KPhysicalAddress GetEndAddress() const { return m_heap.GetEndAddress(); } size_t GetFreeSize() const { return m_heap.GetFreeSize(); } void DumpFreeList() const { return m_heap.DumpFreeList(); } - constexpr size_t GetPageOffset(KVirtualAddress address) const { return m_heap.GetPageOffset(address); } - constexpr size_t 
GetPageOffsetToEnd(KVirtualAddress address) const { return m_heap.GetPageOffsetToEnd(address); } + constexpr size_t GetPageOffset(KPhysicalAddress address) const { return m_heap.GetPageOffset(address); } + constexpr size_t GetPageOffsetToEnd(KPhysicalAddress address) const { return m_heap.GetPageOffsetToEnd(address); } constexpr void SetNext(Impl *n) { m_next = n; } constexpr void SetPrev(Impl *n) { m_prev = n; } constexpr Impl *GetNext() const { return m_next; } constexpr Impl *GetPrev() const { return m_prev; } - void OpenFirst(KVirtualAddress address, size_t num_pages) { + void OpenFirst(KPhysicalAddress address, size_t num_pages) { size_t index = this->GetPageOffset(address); const size_t end = index + num_pages; while (index < end) { @@ -111,7 +111,7 @@ namespace ams::kern { } } - void Open(KVirtualAddress address, size_t num_pages) { + void Open(KPhysicalAddress address, size_t num_pages) { size_t index = this->GetPageOffset(address); const size_t end = index + num_pages; while (index < end) { @@ -122,7 +122,7 @@ namespace ams::kern { } } - void Close(KVirtualAddress address, size_t num_pages) { + void Close(KPhysicalAddress address, size_t num_pages) { size_t index = this->GetPageOffset(address); const size_t end = index + num_pages; @@ -164,12 +164,12 @@ namespace ams::kern { u64 m_optimized_process_ids[Pool_Count]; bool m_has_optimized_process[Pool_Count]; private: - Impl &GetManager(KVirtualAddress address) { - return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()]; + Impl &GetManager(KPhysicalAddress address) { + return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()]; } - const Impl &GetManager(KVirtualAddress address) const { - return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()]; + const Impl &GetManager(KPhysicalAddress address) const { + return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()]; } constexpr Impl *GetFirstManager(Pool pool, Direction dir) { @@ -197,15 +197,15 @@ namespace ams::kern { NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool); NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool); - NOINLINE KVirtualAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); + NOINLINE KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option); NOINLINE Result AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern); - Pool GetPool(KVirtualAddress address) const { + Pool GetPool(KPhysicalAddress address) const { return this->GetManager(address).GetPool(); } - void Open(KVirtualAddress address, size_t num_pages) { + void Open(KPhysicalAddress address, size_t num_pages) { /* Repeatedly open references until we've done so for all pages. */ while (num_pages) { auto &manager = this->GetManager(address); @@ -221,7 +221,7 @@ namespace ams::kern { } } - void Close(KVirtualAddress address, size_t num_pages) { + void Close(KPhysicalAddress address, size_t num_pages) { /* Repeatedly close references until we've done so for all pages. 
*/
                    while (num_pages) {
                        auto &manager = this->GetManager(address);
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp
index 2d2e2d081..7e6ca96f5 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp
@@ -22,78 +22,121 @@ namespace ams::kern {
     class KBlockInfoManager;
 
-    class KBlockInfo : public util::IntrusiveListBaseNode<KBlockInfo> {
+    class KPageGroup;
+
+    class KBlockInfo {
         private:
-            KVirtualAddress m_address;
-            size_t m_num_pages;
+            friend class KPageGroup;
+        private:
+            KBlockInfo *m_next{};
+            u32 m_page_index{};
+            u32 m_num_pages{};
         public:
-            constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), m_address(), m_num_pages() { /* ... */ }
+            constexpr KBlockInfo() = default;
 
-            constexpr void Initialize(KVirtualAddress addr, size_t np) {
-                m_address = addr;
-                m_num_pages = np;
+            constexpr ALWAYS_INLINE void Initialize(KPhysicalAddress addr, size_t np) {
+                MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), PageSize));
+                MESOSPHERE_ASSERT(static_cast<u32>(np) == np);
+
+                m_page_index = GetInteger(addr) / PageSize;
+                m_num_pages = np;
             }
 
-            constexpr KVirtualAddress GetAddress() const { return m_address; }
-            constexpr size_t GetNumPages() const { return m_num_pages; }
-            constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; }
-            constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
-            constexpr KVirtualAddress GetLastAddress() const { return this->GetEndAddress() - 1; }
+            constexpr ALWAYS_INLINE KPhysicalAddress GetAddress() const { return m_page_index * PageSize; }
+            constexpr ALWAYS_INLINE size_t GetNumPages() const { return m_num_pages; }
+            constexpr ALWAYS_INLINE size_t GetSize() const { return this->GetNumPages() * PageSize; }
+            constexpr ALWAYS_INLINE KPhysicalAddress GetEndAddress() const { return (m_page_index + m_num_pages) * PageSize; }
+            constexpr ALWAYS_INLINE KPhysicalAddress GetLastAddress() const { return this->GetEndAddress() - 1; }
 
-            constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const {
-                return m_address == rhs.m_address && m_num_pages == rhs.m_num_pages;
+            constexpr ALWAYS_INLINE KBlockInfo *GetNext() const { return m_next; }
+
+            constexpr ALWAYS_INLINE bool IsEquivalentTo(const KBlockInfo &rhs) const {
+                return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages;
             }
 
-            constexpr bool operator==(const KBlockInfo &rhs) const {
+            constexpr ALWAYS_INLINE bool operator==(const KBlockInfo &rhs) const {
                 return this->IsEquivalentTo(rhs);
             }
 
-            constexpr bool operator!=(const KBlockInfo &rhs) const {
+            constexpr ALWAYS_INLINE bool operator!=(const KBlockInfo &rhs) const {
                 return !(*this == rhs);
             }
 
-            constexpr bool IsStrictlyBefore(KVirtualAddress addr) const {
-                const KVirtualAddress end = this->GetEndAddress();
+            constexpr ALWAYS_INLINE bool IsStrictlyBefore(KPhysicalAddress addr) const {
+                const KPhysicalAddress end = this->GetEndAddress();
 
-                if (m_address != Null<KVirtualAddress> && end == Null<KVirtualAddress>) {
+                if (m_page_index != 0 && end == Null<KPhysicalAddress>) {
                     return false;
                 }
 
                 return end < addr;
             }
 
-            constexpr bool operator<(KVirtualAddress addr) const {
+            constexpr ALWAYS_INLINE bool operator<(KPhysicalAddress addr) const {
                 return this->IsStrictlyBefore(addr);
             }
 
-            constexpr bool TryConcatenate(KVirtualAddress addr, size_t np) {
-                if (addr != Null<KVirtualAddress> && addr == this->GetEndAddress()) {
+            constexpr ALWAYS_INLINE bool TryConcatenate(KPhysicalAddress addr, size_t np) {
+                if (addr != Null<KPhysicalAddress> && addr == this->GetEndAddress()) {
                     m_num_pages += np;
                     return true;
                 }
 
                 return false;
             }
+        private:
+            constexpr ALWAYS_INLINE void SetNext(KBlockInfo *next) {
+                m_next = next;
+            }
     };
+    static_assert(sizeof(KBlockInfo) <= 0x10);
 
     class KPageGroup {
         public:
-            using BlockInfoList = util::IntrusiveListBaseTraits<KBlockInfo>::ListType;
-            using iterator = BlockInfoList::const_iterator;
+            class Iterator {
+                public:
+                    using iterator_category = std::forward_iterator_tag;
+                    using value_type = const KBlockInfo;
+                    using difference_type = std::ptrdiff_t;
+                    using pointer = value_type *;
+                    using reference = value_type &;
+                private:
+                    pointer m_node;
+                public:
+                    constexpr explicit ALWAYS_INLINE Iterator(pointer n) : m_node(n) { /* ... */ }
+
+                    constexpr ALWAYS_INLINE bool operator==(const Iterator &rhs) const { return m_node == rhs.m_node; }
+                    constexpr ALWAYS_INLINE bool operator!=(const Iterator &rhs) const { return !(*this == rhs); }
+
+                    constexpr ALWAYS_INLINE pointer operator->() const { return m_node; }
+                    constexpr ALWAYS_INLINE reference operator*() const { return *m_node; }
+
+                    constexpr ALWAYS_INLINE Iterator &operator++() {
+                        m_node = m_node->GetNext();
+                        return *this;
+                    }
+
+                    constexpr ALWAYS_INLINE Iterator operator++(int) {
+                        const Iterator it{*this};
+                        ++(*this);
+                        return it;
+                    }
+            };
         private:
-            BlockInfoList m_block_list;
+            KBlockInfo *m_first_block;
+            KBlockInfo *m_last_block;
             KBlockInfoManager *m_manager;
         public:
-            explicit KPageGroup(KBlockInfoManager *m) : m_block_list(), m_manager(m) { /* ... */ }
+            explicit KPageGroup(KBlockInfoManager *m) : m_first_block(), m_last_block(), m_manager(m) { /* ... */ }
             ~KPageGroup() { this->Finalize(); }
 
             void CloseAndReset();
             void Finalize();
 
-            iterator begin() const { return m_block_list.begin(); }
-            iterator end() const { return m_block_list.end(); }
-            bool empty() const { return m_block_list.empty(); }
+            ALWAYS_INLINE Iterator begin() const { return Iterator{m_first_block}; }
+            ALWAYS_INLINE Iterator end() const { return Iterator{nullptr}; }
+            ALWAYS_INLINE bool empty() const { return m_first_block == nullptr; }
 
-            Result AddBlock(KVirtualAddress addr, size_t num_pages);
+            Result AddBlock(KPhysicalAddress addr, size_t num_pages);
             void Open() const;
             void Close() const;
@@ -101,11 +144,11 @@ namespace ams::kern {
 
             bool IsEquivalentTo(const KPageGroup &rhs) const;
 
-            bool operator==(const KPageGroup &rhs) const {
+            ALWAYS_INLINE bool operator==(const KPageGroup &rhs) const {
                 return this->IsEquivalentTo(rhs);
             }
 
-            bool operator!=(const KPageGroup &rhs) const {
+            ALWAYS_INLINE bool operator!=(const KPageGroup &rhs) const {
                 return !(*this == rhs);
             }
     };
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
index 563bcb3be..2c53fd5e4 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
@@ -54,7 +54,7 @@ namespace ams::kern {
             class Block {
                 private:
                     KPageBitmap m_bitmap;
-                    KVirtualAddress m_heap_address;
+                    KPhysicalAddress m_heap_address;
                     uintptr_t m_end_offset;
                     size_t m_block_shift;
                    size_t m_next_block_shift;
@@ -68,13 +68,13 @@ namespace ams::kern {
                    constexpr size_t GetNumFreeBlocks() const { return m_bitmap.GetNumBits(); }
                    constexpr size_t GetNumFreePages() const { return this->GetNumFreeBlocks() * this->GetNumPages(); }
 
-                    u64 *Initialize(KVirtualAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
+                    u64 *Initialize(KPhysicalAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
                        /* Set shifts. */
                        m_block_shift = bs;
                        m_next_block_shift = nbs;
 
                        /* Align up the address. */
-                        KVirtualAddress end = addr + size;
+                        KPhysicalAddress end = addr + size;
                        const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift) : (u64(1) << m_block_shift);
                        addr = util::AlignDown(GetInteger(addr), align);
                        end = util::AlignUp(GetInteger(end), align);
@@ -84,7 +84,7 @@ namespace ams::kern {
                        return m_bitmap.Initialize(bit_storage, m_end_offset);
                    }
 
-                    KVirtualAddress PushBlock(KVirtualAddress address) {
+                    KPhysicalAddress PushBlock(KPhysicalAddress address) {
                        /* Set the bit for the free block. */
                        size_t offset = (address - m_heap_address) >> this->GetShift();
                        m_bitmap.SetBit(offset);
@@ -99,14 +99,14 @@ namespace ams::kern {
                        }
 
                        /* We couldn't coalesce, or we're already as big as possible. */
-                        return Null<KVirtualAddress>;
+                        return Null<KPhysicalAddress>;
                    }
 
-                    KVirtualAddress PopBlock(bool random) {
+                    KPhysicalAddress PopBlock(bool random) {
                        /* Find a free block. */
                        ssize_t soffset = m_bitmap.FindFreeBlock(random);
                        if (soffset < 0) {
-                            return Null<KVirtualAddress>;
+                            return Null<KPhysicalAddress>;
                        }
                        const size_t offset = static_cast<size_t>(soffset);
@@ -123,27 +123,27 @@ namespace ams::kern {
                    }
            };
        private:
-            KVirtualAddress m_heap_address;
+            KPhysicalAddress m_heap_address;
            size_t m_heap_size;
            size_t m_initial_used_size;
            size_t m_num_blocks;
            Block m_blocks[NumMemoryBlockPageShifts];
        private:
-            void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
+            void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
            size_t GetNumFreePages() const;
 
-            void FreeBlock(KVirtualAddress block, s32 index);
+            void FreeBlock(KPhysicalAddress block, s32 index);
        public:
            KPageHeap() : m_heap_address(), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks() { /* ... 
*/ } - constexpr KVirtualAddress GetAddress() const { return m_heap_address; } + constexpr KPhysicalAddress GetAddress() const { return m_heap_address; } constexpr size_t GetSize() const { return m_heap_size; } - constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); } - constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; } - constexpr size_t GetPageOffsetToEnd(KVirtualAddress block) const { return (this->GetEndAddress() - block) / PageSize; } + constexpr KPhysicalAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); } + constexpr size_t GetPageOffset(KPhysicalAddress block) const { return (block - this->GetAddress()) / PageSize; } + constexpr size_t GetPageOffsetToEnd(KPhysicalAddress block) const { return (this->GetEndAddress() - block) / PageSize; } - void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size) { - return Initialize(heap_address, heap_size, management_address, management_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts); + void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size) { + return this->Initialize(heap_address, heap_size, management_address, management_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts); } size_t GetFreeSize() const { return this->GetNumFreePages() * PageSize; } @@ -158,8 +158,8 @@ namespace ams::kern { m_initial_used_size = m_heap_size - free_size - reserved_size; } - KVirtualAddress AllocateBlock(s32 index, bool random); - void Free(KVirtualAddress addr, size_t num_pages); + KPhysicalAddress AllocateBlock(s32 index, bool random); + void Free(KPhysicalAddress addr, size_t num_pages); private: static size_t CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts); public: diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp index 4a9116f1f..964791fdd 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp @@ -57,7 +57,7 @@ namespace ams::kern { using TraversalContext = KPageTableImpl::TraversalContext; struct MemoryRange { - KVirtualAddress address; + KPhysicalAddress address; size_t size; void Close(); @@ -178,7 +178,6 @@ namespace ams::kern { KResourceLimit *m_resource_limit{}; const KMemoryRegion *m_cached_physical_linear_region{}; const KMemoryRegion *m_cached_physical_heap_region{}; - const KMemoryRegion *m_cached_virtual_heap_region{}; MemoryFillValue m_heap_fill_value{}; MemoryFillValue m_ipc_fill_value{}; MemoryFillValue m_stack_fill_value{}; @@ -257,18 +256,6 @@ namespace ams::kern { return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); } - ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr) { - MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - - return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr); - } - - ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) { - MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); - - return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr, size); - } - ALWAYS_INLINE bool ContainsPages(KProcessAddress addr, size_t num_pages) const { return (m_address_space_start <= 
addr) && (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp index 2b8898eed..6e3fc2c96 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -247,7 +247,7 @@ namespace ams::kern::arch::arm64 { cur_entry.block_size += next_entry.block_size; } else { if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) { - mm.Close(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize); + mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize); } /* Update tracking variables. */ @@ -265,7 +265,7 @@ namespace ams::kern::arch::arm64 { /* Handle the last block. */ if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) { - mm.Close(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize); + mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize); } } @@ -696,11 +696,10 @@ namespace ams::kern::arch::arm64 { /* Close the blocks. */ if (!force && IsHeapPhysicalAddress(next_entry.phys_addr)) { - const KVirtualAddress block_virt_addr = GetHeapVirtualAddress(next_entry.phys_addr); const size_t block_num_pages = next_entry.block_size / PageSize; - if (R_FAILED(pages_to_close.AddBlock(block_virt_addr, block_num_pages))) { + if (R_FAILED(pages_to_close.AddBlock(next_entry.phys_addr, block_num_pages))) { this->NoteUpdated(); - Kernel::GetMemoryManager().Close(block_virt_addr, block_num_pages); + Kernel::GetMemoryManager().Close(next_entry.phys_addr, block_num_pages); pages_to_close.CloseAndReset(); } } @@ -792,7 +791,7 @@ namespace ams::kern::arch::arm64 { /* Open references to the pages, if we should. */ if (IsHeapPhysicalAddress(orig_phys_addr)) { - Kernel::GetMemoryManager().Open(GetHeapVirtualAddress(orig_phys_addr), num_pages); + Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages); } return ResultSuccess(); @@ -815,7 +814,7 @@ namespace ams::kern::arch::arm64 { if (num_pages < ContiguousPageSize / PageSize) { for (const auto &block : pg) { - const KPhysicalAddress block_phys_addr = GetLinearMappedPhysicalAddress(block.GetAddress()); + const KPhysicalAddress block_phys_addr = block.GetAddress(); const size_t cur_pages = block.GetNumPages(); R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll)); @@ -827,7 +826,7 @@ namespace ams::kern::arch::arm64 { AlignedMemoryBlock virt_block(GetInteger(virt_addr), num_pages, L1BlockSize); for (const auto &block : pg) { /* Create a block representing this physical group, synchronize its alignment to our virtual block. 
*/ - const KPhysicalAddress block_phys_addr = GetLinearMappedPhysicalAddress(block.GetAddress()); + const KPhysicalAddress block_phys_addr = block.GetAddress(); size_t cur_pages = block.GetNumPages(); AlignedMemoryBlock phys_block(GetInteger(block_phys_addr), cur_pages, virt_block.GetAlignment()); diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp index 41ae80e01..4ab0bc52e 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp @@ -1051,7 +1051,7 @@ namespace ams::kern::board::nintendo::nx { SmmuSynchronizationBarrier(); /* Open references to the pages. */ - mm.Open(GetHeapVirtualAddress(phys_addr), DeviceLargePageSize / PageSize); + mm.Open(phys_addr, DeviceLargePageSize / PageSize); /* Advance. */ phys_addr += DeviceLargePageSize; @@ -1112,7 +1112,7 @@ namespace ams::kern::board::nintendo::nx { SmmuSynchronizationBarrier(); /* Open references to the pages. */ - mm.Open(GetHeapVirtualAddress(phys_addr), (map_count * DevicePageSize) / PageSize); + mm.Open(phys_addr, (map_count * DevicePageSize) / PageSize); /* Advance. */ phys_addr += map_count * DevicePageSize; @@ -1151,7 +1151,7 @@ namespace ams::kern::board::nintendo::nx { /* Map the device page. */ size_t mapped_size = 0; - R_TRY(this->MapDevicePage(std::addressof(mapped_size), num_pt, max_pt, GetHeapPhysicalAddress(contig_range.address), contig_range.size, cur_addr, device_perm)); + R_TRY(this->MapDevicePage(std::addressof(mapped_size), num_pt, max_pt, contig_range.address, contig_range.size, cur_addr, device_perm)); /* Advance. */ cur_addr += contig_range.size; @@ -1245,7 +1245,7 @@ namespace ams::kern::board::nintendo::nx { contig_count = contig_phys_addr != Null ? 1 : 0; } else if (phys_addr == Null || phys_addr != (contig_phys_addr + (contig_count * DevicePageSize))) { /* If we're no longer contiguous, close the range we've been building. */ - mm.Close(GetHeapVirtualAddress(contig_phys_addr), (contig_count * DevicePageSize) / PageSize); + mm.Close(contig_phys_addr, (contig_count * DevicePageSize) / PageSize); contig_phys_addr = phys_addr; contig_count = contig_phys_addr != Null ? 1 : 0; @@ -1255,7 +1255,7 @@ namespace ams::kern::board::nintendo::nx { } if (contig_count > 0) { - mm.Close(GetHeapVirtualAddress(contig_phys_addr), (contig_count * DevicePageSize) / PageSize); + mm.Close(contig_phys_addr, (contig_count * DevicePageSize) / PageSize); } } @@ -1294,7 +1294,7 @@ namespace ams::kern::board::nintendo::nx { SmmuSynchronizationBarrier(); /* Close references. */ - mm.Close(GetHeapVirtualAddress(phys_addr), DeviceLargePageSize / PageSize); + mm.Close(phys_addr, DeviceLargePageSize / PageSize); /* Advance. */ address += DeviceLargePageSize; @@ -1320,7 +1320,7 @@ namespace ams::kern::board::nintendo::nx { /* Walk the directory. 
*/
        KProcessAddress cur_process_address = process_address;
        size_t remaining_size = size;
-        KPhysicalAddress cur_phys_address = GetHeapPhysicalAddress(contig_range.address);
+        KPhysicalAddress cur_phys_address = contig_range.address;
        size_t remaining_in_range = contig_range.size;
        bool first = true;
        u32 first_attr = 0;
@@ -1367,7 +1367,7 @@ namespace ams::kern::board::nintendo::nx {
                }
                range_open = true;
 
-                cur_phys_address = GetHeapPhysicalAddress(contig_range.address);
+                cur_phys_address = contig_range.address;
                remaining_in_range = contig_range.size;
            }
 
@@ -1410,7 +1410,7 @@ namespace ams::kern::board::nintendo::nx {
                }
                range_open = true;
 
-                cur_phys_address = GetHeapPhysicalAddress(contig_range.address);
+                cur_phys_address = contig_range.address;
                remaining_in_range = contig_range.size;
            }
 
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
index 220857520..7dc699e46 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
@@ -511,8 +511,10 @@ namespace ams::kern::board::nintendo::nx {
            MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, SecureAppletMemorySize));
 
            constexpr auto SecureAppletAllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
-            g_secure_applet_memory_address = Kernel::GetMemoryManager().AllocateAndOpenContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption);
-            MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address != Null<KVirtualAddress>);
+            const KPhysicalAddress secure_applet_memory_phys_addr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption);
+            MESOSPHERE_ABORT_UNLESS(secure_applet_memory_phys_addr != Null<KPhysicalAddress>);
+
+            g_secure_applet_memory_address = KMemoryLayout::GetLinearVirtualAddress(secure_applet_memory_phys_addr);
        }
 
        /* Initialize KTrace. */
@@ -690,9 +692,7 @@ namespace ams::kern::board::nintendo::nx {
                    MESOSPHERE_ASSERT(it != page_groups[i].end());
                    MESOSPHERE_ASSERT(it->GetNumPages() == 1);
 
-                    KPhysicalAddress phys_addr = page_table.GetHeapPhysicalAddress(it->GetAddress());
-
-                    args->r[reg_id] = GetInteger(phys_addr) | (GetInteger(virt_addr) & (PageSize - 1));
+                    args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
                } else {
                    /* If we couldn't map, we should clear the address. */
                    args->r[reg_id] = 0;
@@ -729,25 +729,21 @@ namespace ams::kern::board::nintendo::nx {
 
        /* Allocate the memory. */
        const size_t num_pages = size / PageSize;
-        const KVirtualAddress vaddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
-        R_UNLESS(vaddr != Null<KVirtualAddress>, svc::ResultOutOfMemory());
+        const KPhysicalAddress paddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
+        R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
 
        /* Ensure we don't leak references to the memory on error. */
-        auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(vaddr, num_pages); };
+        auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(paddr, num_pages); };
 
        /* If the memory isn't already secure, set it as secure. 
*/ if (pool != KMemoryManager::Pool_System) { - /* Get the physical address. */ - const KPhysicalAddress paddr = KPageTable::GetHeapPhysicalAddress(vaddr); - MESOSPHERE_ABORT_UNLESS(paddr != Null); - /* Set the secure region. */ R_UNLESS(SetSecureRegion(paddr, size), svc::ResultOutOfMemory()); } /* We succeeded. */ mem_guard.Cancel(); - *out = vaddr; + *out = KPageTable::GetHeapVirtualAddress(paddr); return ResultSuccess(); } @@ -779,7 +775,7 @@ namespace ams::kern::board::nintendo::nx { } /* Close the secure region's pages. */ - Kernel::GetMemoryManager().Close(address, size / PageSize); + Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize); } } \ No newline at end of file diff --git a/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp index c52eff90a..16fab4703 100644 --- a/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp +++ b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp @@ -165,11 +165,11 @@ namespace ams::kern::init { /* Allocate memory for the slab. */ constexpr auto AllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront); - const KVirtualAddress slab_address = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); - MESOSPHERE_ABORT_UNLESS(slab_address != Null); + const KPhysicalAddress slab_address = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); + MESOSPHERE_ABORT_UNLESS(slab_address != Null); /* Initialize the slabheap. */ - KPageBuffer::InitializeSlabHeap(GetVoidPointer(slab_address), slab_size); + KPageBuffer::InitializeSlabHeap(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(slab_address)), slab_size); } void InitializeSlabHeaps() { diff --git a/libraries/libmesosphere/source/kern_initial_process.cpp b/libraries/libmesosphere/source/kern_initial_process.cpp index af198ee67..a8e2f2b16 100644 --- a/libraries/libmesosphere/source/kern_initial_process.cpp +++ b/libraries/libmesosphere/source/kern_initial_process.cpp @@ -101,7 +101,7 @@ namespace ams::kern { /* If we crossed a page boundary, free the pages we're done using. */ if (KVirtualAddress aligned_current = util::AlignDown(GetInteger(current), PageSize); aligned_current != data) { const size_t freed_size = data - aligned_current; - Kernel::GetMemoryManager().Close(aligned_current, freed_size / PageSize); + Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(aligned_current), freed_size / PageSize); Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, freed_size); } @@ -114,7 +114,7 @@ namespace ams::kern { const size_t binary_pages = binary_size / PageSize; /* Get the pool for both the current (compressed) image, and the decompressed process. */ - const auto src_pool = Kernel::GetMemoryManager().GetPool(data); + const auto src_pool = Kernel::GetMemoryManager().GetPool(KMemoryLayout::GetLinearPhysicalAddress(data)); const auto dst_pool = reader.UsesSecureMemory() ? secure_pool : unsafe_pool; /* Determine the process size, and how much memory isn't already reserved. */ @@ -140,7 +140,7 @@ namespace ams::kern { /* Add the previously reserved pages. */ if (src_pool == dst_pool && binary_pages != 0) { /* NOTE: Nintendo does not check the result of this operation. 
*/ - pg.AddBlock(data, binary_pages); + pg.AddBlock(KMemoryLayout::GetLinearPhysicalAddress(data), binary_pages); } /* Add the previously unreserved pages. */ @@ -176,7 +176,7 @@ namespace ams::kern { } else { if (src_pool != dst_pool) { std::memcpy(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(data), aligned_size); - Kernel::GetMemoryManager().Close(data, aligned_size / PageSize); + Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(data), aligned_size / PageSize); } } @@ -218,7 +218,7 @@ namespace ams::kern { const size_t cur_pages = std::min(block_remaining, work_remaining); const size_t cur_size = cur_pages * PageSize; - std::memcpy(GetVoidPointer(block_address), GetVoidPointer(work_address), cur_size); + std::memcpy(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_address)), GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(work_address)), cur_size); block_address += cur_size; work_address += cur_size; @@ -268,7 +268,7 @@ namespace ams::kern { { const size_t remaining_size = util::AlignUp(GetInteger(g_initial_process_binary_address) + g_initial_process_binary_header.size, PageSize) - util::AlignDown(GetInteger(current), PageSize); const size_t remaining_pages = remaining_size / PageSize; - Kernel::GetMemoryManager().Close(util::AlignDown(GetInteger(current), PageSize), remaining_pages); + Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(util::AlignDown(GetInteger(current), PageSize)), remaining_pages); Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, remaining_size); } } @@ -312,7 +312,7 @@ namespace ams::kern { /* The initial process binary is potentially over-allocated, so free any extra pages. */ if (total_size < InitialProcessBinarySizeMax) { - Kernel::GetMemoryManager().Close(g_initial_process_binary_address + total_size, (InitialProcessBinarySizeMax - total_size) / PageSize); + Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(g_initial_process_binary_address + total_size), (InitialProcessBinarySizeMax - total_size) / PageSize); } return total_size; diff --git a/libraries/libmesosphere/source/kern_k_code_memory.cpp b/libraries/libmesosphere/source/kern_k_code_memory.cpp index 8fc88e13a..9b47ca22f 100644 --- a/libraries/libmesosphere/source/kern_k_code_memory.cpp +++ b/libraries/libmesosphere/source/kern_k_code_memory.cpp @@ -35,8 +35,9 @@ namespace ams::kern { /* Clear the memory. */ for (const auto &block : GetReference(m_page_group)) { /* Clear and store cache. */ - std::memset(GetVoidPointer(block.GetAddress()), 0xFF, block.GetSize()); - cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize()); + void * const block_address = GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress())); + std::memset(block_address, 0xFF, block.GetSize()); + cpu::StoreDataCache(block_address, block.GetSize()); } /* Set remaining tracking members. 
*/ diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp index 47e124a56..9c5618e78 100644 --- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp @@ -20,13 +20,13 @@ namespace ams::kern { namespace { constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) { - if ((type | KMemoryRegionType_VirtualDramApplicationPool) == type) { + if ((type | KMemoryRegionType_DramApplicationPool) == type) { return KMemoryManager::Pool_Application; - } else if ((type | KMemoryRegionType_VirtualDramAppletPool) == type) { + } else if ((type | KMemoryRegionType_DramAppletPool) == type) { return KMemoryManager::Pool_Applet; - } else if ((type | KMemoryRegionType_VirtualDramSystemPool) == type) { + } else if ((type | KMemoryRegionType_DramSystemPool) == type) { return KMemoryManager::Pool_System; - } else if ((type | KMemoryRegionType_VirtualDramSystemNonSecurePool) == type) { + } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { return KMemoryManager::Pool_SystemNonSecure; } else { MESOSPHERE_PANIC("InvalidMemoryRegionType for conversion to Pool"); @@ -37,19 +37,21 @@ namespace ams::kern { void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) { /* Clear the management region to zero. */ - const KVirtualAddress management_region_end = management_region + management_region_size; std::memset(GetVoidPointer(management_region), 0, management_region_size); + /* Reset our manager count. */ + m_num_managers = 0; + /* Traverse the virtual memory layout tree, initializing each manager as appropriate. */ while (m_num_managers != MaxManagerCount) { /* Locate the region that should initialize the current manager. */ - uintptr_t region_address = 0; + KPhysicalAddress region_address = Null; size_t region_size = 0; Pool region_pool = Pool_Count; - for (const auto &it : KMemoryLayout::GetVirtualMemoryRegionTree()) { + for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) { /* We only care about regions that we need to create managers for. */ - if (!it.IsDerivedFrom(KMemoryRegionType_VirtualDramUserPool)) { + if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { continue; } @@ -58,21 +60,24 @@ namespace ams::kern { continue; } + const KPhysicalAddress cur_start = it.GetAddress(); + const KPhysicalAddress cur_end = it.GetEndAddress(); + /* Validate the region. */ - MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0); - MESOSPHERE_ASSERT(it.GetAddress() != Null); - MESOSPHERE_ASSERT(it.GetSize() > 0); + MESOSPHERE_ABORT_UNLESS(cur_end != Null); + MESOSPHERE_ASSERT(cur_start != Null); + MESOSPHERE_ASSERT(it.GetSize() > 0); /* Update the region's extents. */ - if (region_address == 0) { - region_address = it.GetAddress(); + if (region_address == Null) { + region_address = cur_start; region_size = it.GetSize(); region_pool = GetPoolFromMemoryRegionType(it.GetType()); } else { - MESOSPHERE_ASSERT(it.GetAddress() == region_address + region_size); + MESOSPHERE_ASSERT(cur_start == region_address + region_size); /* Update the size. */ - region_size = it.GetEndAddress() - region_address; + region_size = cur_end - region_address; MESOSPHERE_ABORT_UNLESS(GetPoolFromMemoryRegionType(it.GetType()) == region_pool); } } @@ -102,18 +107,22 @@ namespace ams::kern { /* Free each region to its corresponding heap. 
*/ size_t reserved_sizes[MaxManagerCount] = {}; - const uintptr_t ini_start = GetInteger(GetInitialProcessBinaryAddress()); - const uintptr_t ini_end = ini_start + InitialProcessBinarySizeMax; - const uintptr_t ini_last = ini_end - 1; - for (const auto &it : KMemoryLayout::GetVirtualMemoryRegionTree()) { - if (it.IsDerivedFrom(KMemoryRegionType_VirtualDramUserPool)) { + const KPhysicalAddress ini_start = KMemoryLayout::GetLinearPhysicalAddress(GetInitialProcessBinaryAddress()); + const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax; + const KPhysicalAddress ini_last = ini_end - 1; + for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) { + if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { /* Get the manager for the region. */ auto &manager = m_managers[it.GetAttributes()]; - if (it.GetAddress() <= ini_start && ini_last <= it.GetLastAddress()) { + const KPhysicalAddress cur_start = it.GetAddress(); + const KPhysicalAddress cur_last = it.GetLastAddress(); + const KPhysicalAddress cur_end = it.GetEndAddress(); + + if (cur_start <= ini_start && ini_last <= cur_last) { /* Free memory before the ini to the heap. */ - if (it.GetAddress() != ini_start) { - manager.Free(it.GetAddress(), (ini_start - it.GetAddress()) / PageSize); + if (cur_start != ini_start) { + manager.Free(cur_start, (ini_start - cur_start) / PageSize); } /* Open/reserve the ini memory. */ @@ -121,21 +130,21 @@ namespace ams::kern { reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax; /* Free memory after the ini to the heap. */ - if (ini_last != it.GetLastAddress()) { - MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0); - manager.Free(ini_end, it.GetEndAddress() - ini_end); + if (ini_last != cur_last) { + MESOSPHERE_ABORT_UNLESS(cur_end != Null); + manager.Free(ini_end, cur_end - ini_end); } } else { /* Ensure there's no partial overlap with the ini image. */ - if (it.GetAddress() <= ini_last) { - MESOSPHERE_ABORT_UNLESS(it.GetLastAddress() < ini_start); + if (cur_start <= ini_last) { + MESOSPHERE_ABORT_UNLESS(cur_last < ini_start); } else { /* Otherwise, check the region for general validity. */ - MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0); + MESOSPHERE_ABORT_UNLESS(cur_end != Null); } /* Free the memory to the heap. */ - manager.Free(it.GetAddress(), it.GetSize() / PageSize); + manager.Free(cur_start, it.GetSize() / PageSize); } } } @@ -176,10 +185,10 @@ namespace ams::kern { } - KVirtualAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { + KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { /* Early return if we're allocating no pages. */ if (num_pages == 0) { - return Null; + return Null; } /* Lock the pool that we're allocating from. */ @@ -191,17 +200,17 @@ namespace ams::kern { /* Loop, trying to iterate from each block. */ Impl *chosen_manager = nullptr; - KVirtualAddress allocated_block = Null; + KPhysicalAddress allocated_block = Null; for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) { allocated_block = chosen_manager->AllocateBlock(heap_index, true); - if (allocated_block != Null) { + if (allocated_block != Null) { break; } } /* If we failed to allocate, quit now. */ - if (allocated_block == Null) { - return Null; + if (allocated_block == Null) { + return Null; } /* If we allocated more than we need, free some. 
*/ @@ -242,8 +251,8 @@ namespace ams::kern { for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) { while (num_pages >= pages_per_alloc) { /* Allocate a block. */ - KVirtualAddress allocated_block = cur_manager->AllocateBlock(index, random); - if (allocated_block == Null) { + KPhysicalAddress allocated_block = cur_manager->AllocateBlock(index, random); + if (allocated_block == Null) { break; } @@ -288,8 +297,8 @@ namespace ams::kern { /* Open the first reference to the pages. */ for (const auto &block : *out) { - KVirtualAddress cur_address = block.GetAddress(); - size_t remaining_pages = block.GetNumPages(); + KPhysicalAddress cur_address = block.GetAddress(); + size_t remaining_pages = block.GetNumPages(); while (remaining_pages > 0) { /* Get the manager for the current address. */ auto &manager = this->GetManager(cur_address); @@ -332,8 +341,8 @@ namespace ams::kern { /* Open the first reference to the pages. */ for (const auto &block : *out) { - KVirtualAddress cur_address = block.GetAddress(); - size_t remaining_pages = block.GetNumPages(); + KPhysicalAddress cur_address = block.GetAddress(); + size_t remaining_pages = block.GetNumPages(); while (remaining_pages > 0) { /* Get the manager for the current address. */ auto &manager = this->GetManager(cur_address); @@ -354,8 +363,8 @@ namespace ams::kern { /* Iterate over the allocated blocks. */ for (const auto &block : *out) { /* Get the block extents. */ - const KVirtualAddress block_address = block.GetAddress(); - const size_t block_pages = block.GetNumPages(); + const KPhysicalAddress block_address = block.GetAddress(); + const size_t block_pages = block.GetNumPages(); /* If it has no pages, we don't need to do anything. */ if (block_pages == 0) { @@ -365,8 +374,8 @@ namespace ams::kern { /* Fill all the pages that we need to fill. */ bool any_new = false; { - KVirtualAddress cur_address = block_address; - size_t remaining_pages = block_pages; + KPhysicalAddress cur_address = block_address; + size_t remaining_pages = block_pages; while (remaining_pages > 0) { /* Get the manager for the current address. */ auto &manager = this->GetManager(cur_address); @@ -384,8 +393,8 @@ namespace ams::kern { /* If there are new pages, update tracking for the allocation. */ if (any_new) { /* Update tracking for the allocation. */ - KVirtualAddress cur_address = block_address; - size_t remaining_pages = block_pages; + KPhysicalAddress cur_address = block_address; + size_t remaining_pages = block_pages; while (remaining_pages > 0) { /* Get the manager for the current address. */ auto &manager = this->GetManager(cur_address); @@ -406,14 +415,14 @@ namespace ams::kern { } else { /* Set all the allocated memory. */ for (const auto &block : *out) { - std::memset(GetVoidPointer(block.GetAddress()), fill_pattern, block.GetSize()); + std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress())), fill_pattern, block.GetSize()); } } return ResultSuccess(); } - size_t KMemoryManager::Impl::Initialize(uintptr_t address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p) { + size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p) { /* Calculate management sizes. 
*/ const size_t ref_count_size = (size / PageSize) * sizeof(u16); const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size); @@ -436,7 +445,7 @@ namespace ams::kern { return total_management_size; } - void KMemoryManager::Impl::TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages) { + void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) { /* Get the range we're tracking. */ size_t offset = this->GetPageOffset(block); const size_t last = offset + num_pages - 1; @@ -451,7 +460,7 @@ namespace ams::kern { } } - void KMemoryManager::Impl::TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages) { + void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) { /* Get the range we're tracking. */ size_t offset = this->GetPageOffset(block); const size_t last = offset + num_pages - 1; @@ -466,7 +475,7 @@ namespace ams::kern { } } - bool KMemoryManager::Impl::ProcessOptimizedAllocation(KVirtualAddress block, size_t num_pages, u8 fill_pattern) { + bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern) { /* We want to return whether any pages were newly allocated. */ bool any_new = false; @@ -483,7 +492,7 @@ namespace ams::kern { any_new = true; /* Fill the page. */ - std::memset(GetVoidPointer(m_heap.GetAddress() + offset * PageSize), fill_pattern, PageSize); + std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(m_heap.GetAddress()) + offset * PageSize), fill_pattern, PageSize); } offset++; diff --git a/libraries/libmesosphere/source/kern_k_page_group.cpp b/libraries/libmesosphere/source/kern_k_page_group.cpp index 0b51446c7..1b8e3e576 100644 --- a/libraries/libmesosphere/source/kern_k_page_group.cpp +++ b/libraries/libmesosphere/source/kern_k_page_group.cpp @@ -18,24 +18,30 @@ namespace ams::kern { void KPageGroup::Finalize() { - auto it = m_block_list.begin(); - while (it != m_block_list.end()) { - KBlockInfo *info = std::addressof(*it); - it = m_block_list.erase(it); - m_manager->Free(info); + KBlockInfo *cur = m_first_block; + while (cur != nullptr) { + KBlockInfo *next = cur->GetNext(); + m_manager->Free(cur); + cur = next; } + + m_first_block = nullptr; + m_last_block = nullptr; } void KPageGroup::CloseAndReset() { auto &mm = Kernel::GetMemoryManager(); - auto it = m_block_list.begin(); - while (it != m_block_list.end()) { - KBlockInfo *info = std::addressof(*it); - it = m_block_list.erase(it); - mm.Close(info->GetAddress(), info->GetNumPages()); - m_manager->Free(info); + KBlockInfo *cur = m_first_block; + while (cur != nullptr) { + KBlockInfo *next = cur->GetNext(); + mm.Close(cur->GetAddress(), cur->GetNumPages()); + m_manager->Free(cur); + cur = next; } + + m_first_block = nullptr; + m_last_block = nullptr; } size_t KPageGroup::GetNumPages() const { @@ -48,7 +54,7 @@ namespace ams::kern { return num_pages; } - Result KPageGroup::AddBlock(KVirtualAddress addr, size_t num_pages) { + Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) { /* Succeed immediately if we're adding no pages. */ R_SUCCEED_IF(num_pages == 0); @@ -56,9 +62,8 @@ namespace ams::kern { MESOSPHERE_ASSERT(addr < addr + num_pages * PageSize); /* Try to just append to the last block. 
*/
-        if (!m_block_list.empty()) {
-            auto it = --(m_block_list.end());
-            R_SUCCEED_IF(it->TryConcatenate(addr, num_pages));
+        if (m_last_block != nullptr) {
+            R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
        }
 
        /* Allocate a new block. */
@@ -67,7 +72,14 @@ namespace ams::kern {
 
        /* Initialize the block. */
        new_block->Initialize(addr, num_pages);
-        m_block_list.push_back(*new_block);
+
+        /* Add the block to our list. */
+        if (m_last_block != nullptr) {
+            m_last_block->SetNext(new_block);
+        } else {
+            m_first_block = new_block;
+        }
+        m_last_block = new_block;
 
        return ResultSuccess();
    }
@@ -89,10 +101,10 @@ namespace ams::kern {
    }
 
    bool KPageGroup::IsEquivalentTo(const KPageGroup &rhs) const {
-        auto lit = m_block_list.cbegin();
-        auto rit = rhs.m_block_list.cbegin();
-        auto lend = m_block_list.cend();
-        auto rend = rhs.m_block_list.cend();
+        auto lit = this->begin();
+        auto rit = rhs.begin();
+        auto lend = this->end();
+        auto rend = rhs.end();
 
        while (lit != lend && rit != rend) {
            if (*lit != *rit) {
diff --git a/libraries/libmesosphere/source/kern_k_page_heap.cpp b/libraries/libmesosphere/source/kern_k_page_heap.cpp
index ee55bb01d..d7eec9b67 100644
--- a/libraries/libmesosphere/source/kern_k_page_heap.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_heap.cpp
@@ -17,7 +17,7 @@ namespace ams::kern {
 
-    void KPageHeap::Initialize(KVirtualAddress address, size_t size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts) {
+    void KPageHeap::Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts) {
        /* Check our assumptions. */
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
@@ -51,11 +51,11 @@ namespace ams::kern {
        return num_free;
    }
 
-    KVirtualAddress KPageHeap::AllocateBlock(s32 index, bool random) {
+    KPhysicalAddress KPageHeap::AllocateBlock(s32 index, bool random) {
        const size_t needed_size = m_blocks[index].GetSize();
 
        for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
-            if (const KVirtualAddress addr = m_blocks[i].PopBlock(random); addr != Null<KVirtualAddress>) {
+            if (const KPhysicalAddress addr = m_blocks[i].PopBlock(random); addr != Null<KPhysicalAddress>) {
                if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
                    this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
                }
@@ -63,16 +63,16 @@ namespace ams::kern {
            }
        }
 
-        return Null<KVirtualAddress>;
+        return Null<KPhysicalAddress>;
    }
 
-    void KPageHeap::FreeBlock(KVirtualAddress block, s32 index) {
+    void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
        do {
            block = m_blocks[index++].PushBlock(block);
-        } while (block != Null<KVirtualAddress>);
+        } while (block != Null<KPhysicalAddress>);
    }
 
-    void KPageHeap::Free(KVirtualAddress addr, size_t num_pages) {
+    void KPageHeap::Free(KPhysicalAddress addr, size_t num_pages) {
        /* Freeing no pages is a no-op. */
        if (num_pages == 0) {
            return;
        }
 
        /* Find the largest block size that we can free, and free as many as possible. 
*/ s32 big_index = static_cast(m_num_blocks) - 1; - const KVirtualAddress start = addr; - const KVirtualAddress end = addr + num_pages * PageSize; - KVirtualAddress before_start = start; - KVirtualAddress before_end = start; - KVirtualAddress after_start = end; - KVirtualAddress after_end = end; + const KPhysicalAddress start = addr; + const KPhysicalAddress end = addr + num_pages * PageSize; + KPhysicalAddress before_start = start; + KPhysicalAddress before_end = start; + KPhysicalAddress after_start = end; + KPhysicalAddress after_end = end; while (big_index >= 0) { const size_t block_size = m_blocks[big_index].GetSize(); - const KVirtualAddress big_start = util::AlignUp(GetInteger(start), block_size); - const KVirtualAddress big_end = util::AlignDown(GetInteger(end), block_size); + const KPhysicalAddress big_start = util::AlignUp(GetInteger(start), block_size); + const KPhysicalAddress big_end = util::AlignDown(GetInteger(end), block_size); if (big_start < big_end) { /* Free as many big blocks as we can. */ for (auto block = big_start; block < big_end; block += block_size) { diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index ecbbb0cfd..87665115a 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -116,7 +116,6 @@ namespace ams::kern { m_cached_physical_linear_region = nullptr; m_cached_physical_heap_region = nullptr; - m_cached_virtual_heap_region = nullptr; /* Initialize our implementation. */ m_impl.InitializeForKernel(table, start, end); @@ -1145,7 +1144,7 @@ namespace ams::kern { /* Clear all pages. */ for (const auto &it : pg) { - std::memset(GetVoidPointer(it.GetAddress()), m_heap_fill_value, it.GetSize()); + std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize()); } /* Map the pages. */ @@ -1171,13 +1170,9 @@ namespace ams::kern { /* Iterate, mapping all pages in the group. */ for (const auto &block : pg) { - /* We only allow mapping pages in the heap, and we require we're mapping non-empty blocks. */ - MESOSPHERE_ABORT_UNLESS(block.GetAddress() < block.GetLastAddress()); - MESOSPHERE_ABORT_UNLESS(IsHeapVirtualAddress(block.GetAddress(), block.GetSize())); - /* Map and advance. */ const KPageProperties cur_properties = (cur_address == start_address) ? properties : KPageProperties{ properties.perm, properties.io, properties.uncached, DisableMergeAttribute_None }; - R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), GetHeapPhysicalAddress(block.GetAddress()), true, cur_properties, OperationType_Map, reuse_ll)); + R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true, cur_properties, OperationType_Map, reuse_ll)); cur_address += block.GetSize(); } @@ -1198,7 +1193,7 @@ namespace ams::kern { auto pg_it = pg.begin(); MESOSPHERE_ABORT_UNLESS(pg_it != pg.end()); - KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); + KPhysicalAddress pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); auto it = m_memory_block_manager.FindIterator(start_address); @@ -1228,7 +1223,7 @@ namespace ams::kern { /* Advance our physical block. 
*/ ++pg_it; - pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); + pg_phys_addr = pg_it->GetAddress(); pg_pages = pg_it->GetNumPages(); } @@ -1285,7 +1280,7 @@ namespace ams::kern { const size_t cur_pages = cur_size / PageSize; R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); - R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_addr), cur_pages)); + R_TRY(pg.AddBlock(cur_addr, cur_pages)); cur_addr = next_entry.phys_addr; cur_size = next_entry.block_size; @@ -1304,7 +1299,7 @@ namespace ams::kern { /* add the last block. */ const size_t cur_pages = cur_size / PageSize; R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); - R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_addr), cur_pages)); + R_TRY(pg.AddBlock(cur_addr, cur_pages)); return ResultSuccess(); } @@ -1323,7 +1318,7 @@ namespace ams::kern { /* We're going to validate that the group we'd expect is the group we see. */ auto cur_it = pg.begin(); - KVirtualAddress cur_block_address = cur_it->GetAddress(); + KPhysicalAddress cur_block_address = cur_it->GetAddress(); size_t cur_block_pages = cur_it->GetNumPages(); auto UpdateCurrentIterator = [&]() ALWAYS_INLINE_LAMBDA { @@ -1367,7 +1362,7 @@ namespace ams::kern { return false; } - if (cur_block_address != GetHeapVirtualAddress(cur_addr) || cur_block_pages < cur_pages) { + if (cur_block_address != cur_addr || cur_block_pages < cur_pages) { return false; } @@ -1395,7 +1390,7 @@ namespace ams::kern { return false; } - return cur_block_address == GetHeapVirtualAddress(cur_addr) && cur_block_pages == (cur_size / PageSize); + return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); } Result KPageTableBase::GetContiguousMemoryRangeWithState(MemoryRange *out, KProcessAddress address, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) { @@ -1434,7 +1429,7 @@ namespace ams::kern { /* The memory is contiguous, so set the output range. */ *out = { - .address = GetLinearMappedVirtualAddress(phys_address), + .address = phys_address, .size = size, }; @@ -1533,7 +1528,7 @@ namespace ams::kern { /* Ensure cache coherency, if we're setting pages as executable. */ if (is_x) { for (const auto &block : pg) { - cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize()); + cpu::StoreDataCache(GetVoidPointer(GetHeapVirtualAddress(block.GetAddress())), block.GetSize()); } cpu::InvalidateEntireInstructionCache(); } @@ -1658,7 +1653,7 @@ namespace ams::kern { /* Clear all the newly allocated pages. */ for (const auto &it : pg) { - std::memset(GetVoidPointer(it.GetAddress()), m_heap_fill_value, it.GetSize()); + std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize()); } /* Map the pages. */ @@ -3579,16 +3574,16 @@ namespace ams::kern { R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached()); /* Ensure that we manage page references correctly. */ - KVirtualAddress start_partial_page = Null; - KVirtualAddress end_partial_page = Null; - KProcessAddress cur_mapped_addr = dst_addr; + KPhysicalAddress start_partial_page = Null; + KPhysicalAddress end_partial_page = Null; + KProcessAddress cur_mapped_addr = dst_addr; /* If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll free on scope exit. 
@@ -3579,16 +3574,16 @@ namespace ams::kern {
            R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
            /* Ensure that we manage page references correctly. */
-           KVirtualAddress start_partial_page = Null<KVirtualAddress>;
-           KVirtualAddress end_partial_page   = Null<KVirtualAddress>;
-           KProcessAddress cur_mapped_addr    = dst_addr;
+           KPhysicalAddress start_partial_page = Null<KPhysicalAddress>;
+           KPhysicalAddress end_partial_page   = Null<KPhysicalAddress>;
+           KProcessAddress  cur_mapped_addr    = dst_addr;
            /* If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll free on scope exit. */
            ON_SCOPE_EXIT {
-               if (start_partial_page != Null<KVirtualAddress>) {
+               if (start_partial_page != Null<KPhysicalAddress>) {
                    Kernel::GetMemoryManager().Close(start_partial_page, 1);
                }
-               if (end_partial_page != Null<KVirtualAddress>) {
+               if (end_partial_page != Null<KPhysicalAddress>) {
                    Kernel::GetMemoryManager().Close(end_partial_page, 1);
                }
            };
@@ -3603,13 +3598,13 @@ namespace ams::kern {
            /* Allocate the start page as needed. */
            if (aligned_src_start < mapping_src_start) {
                start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
-               R_UNLESS(start_partial_page != Null<KVirtualAddress>, svc::ResultOutOfMemory());
+               R_UNLESS(start_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
            }
            /* Allocate the end page as needed. */
            if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
                end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
-               R_UNLESS(end_partial_page != Null<KVirtualAddress>, svc::ResultOutOfMemory());
+               R_UNLESS(end_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
            }
            /* Get the implementation. */
@@ -3631,8 +3626,9 @@ namespace ams::kern {
                size_t tot_block_size = cur_block_size;
                /* Map the start page, if we have one. */
-               if (start_partial_page != Null<KVirtualAddress>) {
+               if (start_partial_page != Null<KPhysicalAddress>) {
                    /* Ensure the page holds correct data. */
+                   const KVirtualAddress start_partial_virt = GetHeapVirtualAddress(start_partial_page);
                    if (send) {
                        const size_t partial_offset = src_start - aligned_src_start;
                        size_t copy_size, clear_size;
@@ -3644,18 +3640,18 @@ namespace ams::kern {
                            clear_size = 0;
                        }
-                       std::memset(GetVoidPointer(start_partial_page), fill_val, partial_offset);
-                       std::memcpy(GetVoidPointer(start_partial_page + partial_offset), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr) + partial_offset), copy_size);
+                       std::memset(GetVoidPointer(start_partial_virt), fill_val, partial_offset);
+                       std::memcpy(GetVoidPointer(start_partial_virt + partial_offset), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr) + partial_offset), copy_size);
                        if (clear_size > 0) {
-                           std::memset(GetVoidPointer(start_partial_page + partial_offset + copy_size), fill_val, clear_size);
+                           std::memset(GetVoidPointer(start_partial_virt + partial_offset + copy_size), fill_val, clear_size);
                        }
                    } else {
-                       std::memset(GetVoidPointer(start_partial_page), fill_val, PageSize);
+                       std::memset(GetVoidPointer(start_partial_virt), fill_val, PageSize);
                    }
                    /* Map the page. */
                    const KPageProperties start_map_properties = { test_perm, false, false, DisableMergeAttribute_DisableHead };
-                   R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, GetHeapPhysicalAddress(start_partial_page), true, start_map_properties, OperationType_Map, false));
+                   R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true, start_map_properties, OperationType_Map, false));
                    /* Update tracking extents. */
                    cur_mapped_addr += PageSize;
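The partial IPC pages above are now physical allocations: each page is translated once into start_partial_virt/end_partial_virt for the memset/memcpy fill, and the physical address is then handed to Operate() for mapping. A condensed sketch of that translate-once pattern (PreparePartialPage is hypothetical; the helpers are the ones used in these hunks):

    /* Sketch only: fill a partial IPC page via its linear-mapping alias before mapping it. */
    void PreparePartialPage(KPhysicalAddress page, KPhysicalAddress src_block, size_t offset, size_t copy_size, u8 fill_val) {
        const KVirtualAddress virt = GetHeapVirtualAddress(page);
        std::memset(GetVoidPointer(virt), fill_val, offset);                                              /* head filler */
        std::memcpy(GetVoidPointer(virt + offset),
                    GetVoidPointer(GetHeapVirtualAddress(src_block) + offset), copy_size);                /* payload */
        std::memset(GetVoidPointer(virt + offset + copy_size), fill_val, PageSize - offset - copy_size);  /* tail filler */
    }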
@@ -3715,19 +3711,20 @@ namespace ams::kern {
                    }
                    /* Map the end page, if we have one. */
-                   if (end_partial_page != Null<KVirtualAddress>) {
+                   if (end_partial_page != Null<KPhysicalAddress>) {
                        /* Ensure the page holds correct data. */
+                       const KVirtualAddress end_partial_virt = GetHeapVirtualAddress(end_partial_page);
                        if (send) {
                            const size_t copy_size = src_end - mapping_src_end;
-                           std::memcpy(GetVoidPointer(end_partial_page), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr)), copy_size);
-                           std::memset(GetVoidPointer(end_partial_page + copy_size), fill_val, PageSize - copy_size);
+                           std::memcpy(GetVoidPointer(end_partial_virt), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr)), copy_size);
+                           std::memset(GetVoidPointer(end_partial_virt + copy_size), fill_val, PageSize - copy_size);
                        } else {
-                           std::memset(GetVoidPointer(end_partial_page), fill_val, PageSize);
+                           std::memset(GetVoidPointer(end_partial_virt), fill_val, PageSize);
                        }
                        /* Map the page. */
                        const KPageProperties map_properties = { test_perm, false, false, (cur_mapped_addr == dst_addr) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
-                       R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, GetHeapPhysicalAddress(end_partial_page), true, map_properties, OperationType_Map, false));
+                       R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true, map_properties, OperationType_Map, false));
                    }
                    /* Update memory blocks to reflect our changes */
@@ -4246,7 +4243,7 @@ namespace ams::kern {
            /* Iterate over the memory. */
            auto pg_it = pg.begin();
-           KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+           KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
            size_t pg_pages = pg_it->GetNumPages();
            auto it = m_memory_block_manager.FindIterator(cur_address);
@@ -4272,7 +4269,7 @@ namespace ams::kern {
                /* Advance our physical block. */
                ++pg_it;
-               pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+               pg_phys_addr = pg_it->GetAddress();
                pg_pages = pg_it->GetNumPages();
            }
@@ -4410,7 +4407,7 @@ namespace ams::kern {
                } else {
                    if (cur_valid) {
                        MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
-                       R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize));
+                       R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
                    }
                    /* Update tracking variables. */
@@ -4429,7 +4426,7 @@ namespace ams::kern {
                /* Add the last block. */
                if (cur_valid) {
                    MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
-                   R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), (size - tot_size) / PageSize));
+                   R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
                }
            }
            MESOSPHERE_ASSERT(pg.GetNumPages() == mapped_size / PageSize);
@@ -4457,7 +4454,7 @@ namespace ams::kern {
            /* Iterate over the memory we unmapped. */
            auto it = m_memory_block_manager.FindIterator(cur_address);
            auto pg_it = pg.begin();
-           KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+           KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
            size_t pg_pages = pg_it->GetNumPages();
            while (true) {
@@ -4479,7 +4476,7 @@ namespace ams::kern {
                /* Advance our physical block. */
                ++pg_it;
-               pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+               pg_phys_addr = pg_it->GetAddress();
                pg_pages = pg_it->GetNumPages();
            }
@@ -4567,7 +4564,7 @@ namespace ams::kern {
            /* Clear the new memory. */
            for (const auto &block : pg) {
-               std::memset(GetVoidPointer(block.GetAddress()), m_heap_fill_value, block.GetSize());
+               std::memset(GetVoidPointer(GetHeapVirtualAddress(block.GetAddress())), m_heap_fill_value, block.GetSize());
            }
            /* Map the new memory. */
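The remaining files follow the same rule: page-group blocks are physical, so CPU-side zeroing goes through KMemoryLayout::GetLinearVirtualAddress() before std::memset, while map/unmap operations keep the physical address. A small sketch of that zeroing pattern (ZeroPageGroup is hypothetical; the translation helper is the one used in the KSharedMemory hunk below):

    /* Sketch only: zero a physically-addressed page group through the linear mapping. */
    void ZeroPageGroup(const KPageGroup &pg) {
        for (const auto &block : pg) {
            std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress())), 0, block.GetSize());
        }
    }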
diff --git a/libraries/libmesosphere/source/kern_k_shared_memory.cpp b/libraries/libmesosphere/source/kern_k_shared_memory.cpp
index cc4118682..a80516588 100644
--- a/libraries/libmesosphere/source/kern_k_shared_memory.cpp
+++ b/libraries/libmesosphere/source/kern_k_shared_memory.cpp
@@ -51,7 +51,7 @@ namespace ams::kern {
        /* Clear all pages in the memory. */
        for (const auto &block : m_page_group) {
-           std::memset(GetVoidPointer(block.GetAddress()), 0, block.GetSize());
+           std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress())), 0, block.GetSize());
        }
        return ResultSuccess();
diff --git a/libraries/libmesosphere/source/svc/kern_svc_cache.cpp b/libraries/libmesosphere/source/svc/kern_svc_cache.cpp
index e5b6e3ae4..546d08a1e 100644
--- a/libraries/libmesosphere/source/svc/kern_svc_cache.cpp
+++ b/libraries/libmesosphere/source/svc/kern_svc_cache.cpp
@@ -43,7 +43,7 @@ namespace ams::kern::svc {
                ON_SCOPE_EXIT { contig_range.Close(); };
                /* Adjust to remain within range. */
-               KVirtualAddress operate_address = contig_range.address;
+               KVirtualAddress operate_address = KMemoryLayout::GetLinearVirtualAddress(contig_range.address);
                size_t operate_size = contig_range.size;
                if (cur_address < address) {
                    operate_address += (address - cur_address);