Mirror of https://github.com/Atmosphere-NX/Atmosphere, synced 2024-11-15 09:36:35 +00:00
kern: KMemoryManager/KPageGroup use physical addresses instead of virtual, now
parent f8fd072349
commit 1cf3b24c2d

17 changed files with 305 additions and 260 deletions
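The change is easiest to see at the call sites in the hunks below: KMemoryManager, KPageHeap, and KPageGroup now traffic in KPhysicalAddress, so callers stop converting physical addresses through the linear map just to open or close page references, and convert to a virtual address only when they actually need a CPU pointer. A minimal standalone sketch of that idea (illustrative types and a made-up linear-map offset, not Mesosphere code):

// Illustrative sketch only: models why a physical-address-based page manager
// removes the virtual<->physical round-trips visible in the hunks below.
#include <cstdint>
#include <cstdio>

using PhysAddr = std::uint64_t;
using VirtAddr = std::uint64_t;

// Assumed value purely for this sketch; the real linear-map offset differs.
constexpr std::uint64_t LinearMapOffset = 0xffff800000000000ull;

constexpr VirtAddr ToLinearVirtual(PhysAddr pa)  { return pa + LinearMapOffset; }
constexpr PhysAddr ToLinearPhysical(VirtAddr va) { return va - LinearMapOffset; }

// After the change, reference counting takes the physical address directly.
void ClosePages(PhysAddr pa, std::size_t num_pages) {
    std::printf("close %zu page(s) at physical %#llx\n",
                num_pages, static_cast<unsigned long long>(pa));
}

int main() {
    const PhysAddr pa = 0x80000000;
    ClosePages(pa, 4);                       // no GetHeapVirtualAddress() wrapper needed
    const VirtAddr va = ToLinearVirtual(pa); // convert only when a pointer is required
    std::printf("virtual alias for memory access: %#llx\n",
                static_cast<unsigned long long>(va));
    return 0;
}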
@@ -149,6 +149,7 @@ namespace ams::kern {
 static NOINLINE const KMemoryRegion &GetKernelTraceBufferRegion() { return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelTraceBuffer)); }

 static NOINLINE const KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) { return Dereference(FindLinear(address)); }
+static NOINLINE const KMemoryRegion &GetPhysicalLinearRegion(KPhysicalAddress address) { return Dereference(FindLinear(address)); }

 static NOINLINE const KMemoryRegion *GetPhysicalKernelTraceBufferRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer); }
 static NOINLINE const KMemoryRegion *GetPhysicalOnMemoryBootImageRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_OnMemoryBootImage); }
@@ -70,37 +70,37 @@ namespace ams::kern {
 public:
 Impl() : m_heap(), m_page_reference_counts(), m_management_region(), m_pool(), m_next(), m_prev() { /* ... */ }

-size_t Initialize(uintptr_t address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p);
+size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p);

-KVirtualAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
+KPhysicalAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
-void Free(KVirtualAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }
+void Free(KPhysicalAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }

 void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); }

 void InitializeOptimizedMemory() { std::memset(GetVoidPointer(m_management_region), 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); }

-void TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages);
+void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
-void TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages);
+void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);

-bool ProcessOptimizedAllocation(KVirtualAddress block, size_t num_pages, u8 fill_pattern);
+bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);

 constexpr Pool GetPool() const { return m_pool; }
 constexpr size_t GetSize() const { return m_heap.GetSize(); }
-constexpr KVirtualAddress GetEndAddress() const { return m_heap.GetEndAddress(); }
+constexpr KPhysicalAddress GetEndAddress() const { return m_heap.GetEndAddress(); }

 size_t GetFreeSize() const { return m_heap.GetFreeSize(); }

 void DumpFreeList() const { return m_heap.DumpFreeList(); }

-constexpr size_t GetPageOffset(KVirtualAddress address) const { return m_heap.GetPageOffset(address); }
+constexpr size_t GetPageOffset(KPhysicalAddress address) const { return m_heap.GetPageOffset(address); }
-constexpr size_t GetPageOffsetToEnd(KVirtualAddress address) const { return m_heap.GetPageOffsetToEnd(address); }
+constexpr size_t GetPageOffsetToEnd(KPhysicalAddress address) const { return m_heap.GetPageOffsetToEnd(address); }

 constexpr void SetNext(Impl *n) { m_next = n; }
 constexpr void SetPrev(Impl *n) { m_prev = n; }
 constexpr Impl *GetNext() const { return m_next; }
 constexpr Impl *GetPrev() const { return m_prev; }

-void OpenFirst(KVirtualAddress address, size_t num_pages) {
+void OpenFirst(KPhysicalAddress address, size_t num_pages) {
 size_t index = this->GetPageOffset(address);
 const size_t end = index + num_pages;
 while (index < end) {

@@ -111,7 +111,7 @@ namespace ams::kern {
 }
 }

-void Open(KVirtualAddress address, size_t num_pages) {
+void Open(KPhysicalAddress address, size_t num_pages) {
 size_t index = this->GetPageOffset(address);
 const size_t end = index + num_pages;
 while (index < end) {

@@ -122,7 +122,7 @@ namespace ams::kern {
 }
 }

-void Close(KVirtualAddress address, size_t num_pages) {
+void Close(KPhysicalAddress address, size_t num_pages) {
 size_t index = this->GetPageOffset(address);
 const size_t end = index + num_pages;

@@ -164,12 +164,12 @@ namespace ams::kern {
 u64 m_optimized_process_ids[Pool_Count];
 bool m_has_optimized_process[Pool_Count];
 private:
-Impl &GetManager(KVirtualAddress address) {
+Impl &GetManager(KPhysicalAddress address) {
-return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
+return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()];
 }

-const Impl &GetManager(KVirtualAddress address) const {
+const Impl &GetManager(KPhysicalAddress address) const {
-return m_managers[KMemoryLayout::GetVirtualLinearRegion(address).GetAttributes()];
+return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()];
 }

 constexpr Impl *GetFirstManager(Pool pool, Direction dir) {

@@ -197,15 +197,15 @@ namespace ams::kern {
 NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool);
 NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);

-NOINLINE KVirtualAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+NOINLINE KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
 NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
 NOINLINE Result AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);

-Pool GetPool(KVirtualAddress address) const {
+Pool GetPool(KPhysicalAddress address) const {
 return this->GetManager(address).GetPool();
 }

-void Open(KVirtualAddress address, size_t num_pages) {
+void Open(KPhysicalAddress address, size_t num_pages) {
 /* Repeatedly open references until we've done so for all pages. */
 while (num_pages) {
 auto &manager = this->GetManager(address);

@@ -221,7 +221,7 @@ namespace ams::kern {
 }
 }

-void Close(KVirtualAddress address, size_t num_pages) {
+void Close(KPhysicalAddress address, size_t num_pages) {
 /* Repeatedly close references until we've done so for all pages. */
 while (num_pages) {
 auto &manager = this->GetManager(address);
@@ -22,78 +22,121 @@ namespace ams::kern {

 class KBlockInfoManager;

-class KBlockInfo : public util::IntrusiveListBaseNode<KBlockInfo> {
+class KPageGroup;
-private:
-KVirtualAddress m_address;
-size_t m_num_pages;
-public:
-constexpr KBlockInfo() : util::IntrusiveListBaseNode<KBlockInfo>(), m_address(), m_num_pages() { /* ... */ }

-constexpr void Initialize(KVirtualAddress addr, size_t np) {
+class KBlockInfo {
-m_address = addr;
+private:
+friend class KPageGroup;
+private:
+KBlockInfo *m_next{};
+u32 m_page_index{};
+u32 m_num_pages{};
+public:
+constexpr KBlockInfo() = default;

+constexpr ALWAYS_INLINE void Initialize(KPhysicalAddress addr, size_t np) {
+MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), PageSize));
+MESOSPHERE_ASSERT(static_cast<u32>(np) == np);

+m_page_index = GetInteger(addr) / PageSize;
 m_num_pages = np;
 }

-constexpr KVirtualAddress GetAddress() const { return m_address; }
+constexpr ALWAYS_INLINE KPhysicalAddress GetAddress() const { return m_page_index * PageSize; }
-constexpr size_t GetNumPages() const { return m_num_pages; }
+constexpr ALWAYS_INLINE size_t GetNumPages() const { return m_num_pages; }
-constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; }
+constexpr ALWAYS_INLINE size_t GetSize() const { return this->GetNumPages() * PageSize; }
-constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
+constexpr ALWAYS_INLINE KPhysicalAddress GetEndAddress() const { return (m_page_index + m_num_pages) * PageSize; }
-constexpr KVirtualAddress GetLastAddress() const { return this->GetEndAddress() - 1; }
+constexpr ALWAYS_INLINE KPhysicalAddress GetLastAddress() const { return this->GetEndAddress() - 1; }

-constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const {
+constexpr ALWAYS_INLINE KBlockInfo *GetNext() const { return m_next; }
-return m_address == rhs.m_address && m_num_pages == rhs.m_num_pages;
+constexpr ALWAYS_INLINE bool IsEquivalentTo(const KBlockInfo &rhs) const {
+return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages;
 }

-constexpr bool operator==(const KBlockInfo &rhs) const {
+constexpr ALWAYS_INLINE bool operator==(const KBlockInfo &rhs) const {
 return this->IsEquivalentTo(rhs);
 }

-constexpr bool operator!=(const KBlockInfo &rhs) const {
+constexpr ALWAYS_INLINE bool operator!=(const KBlockInfo &rhs) const {
 return !(*this == rhs);
 }

-constexpr bool IsStrictlyBefore(KVirtualAddress addr) const {
+constexpr ALWAYS_INLINE bool IsStrictlyBefore(KPhysicalAddress addr) const {
-const KVirtualAddress end = this->GetEndAddress();
+const KPhysicalAddress end = this->GetEndAddress();

-if (m_address != Null<KVirtualAddress> && end == Null<KVirtualAddress>) {
+if (m_page_index != 0 && end == Null<KPhysicalAddress>) {
 return false;
 }

 return end < addr;
 }

-constexpr bool operator<(KVirtualAddress addr) const {
+constexpr ALWAYS_INLINE bool operator<(KPhysicalAddress addr) const {
 return this->IsStrictlyBefore(addr);
 }

-constexpr bool TryConcatenate(KVirtualAddress addr, size_t np) {
+constexpr ALWAYS_INLINE bool TryConcatenate(KPhysicalAddress addr, size_t np) {
-if (addr != Null<KVirtualAddress> && addr == this->GetEndAddress()) {
+if (addr != Null<KPhysicalAddress> && addr == this->GetEndAddress()) {
 m_num_pages += np;
 return true;
 }
 return false;
 }
+private:
+constexpr ALWAYS_INLINE void SetNext(KBlockInfo *next) {
+m_next = next;
+}
 };
+static_assert(sizeof(KBlockInfo) <= 0x10);

 class KPageGroup {
 public:
-using BlockInfoList = util::IntrusiveListBaseTraits<KBlockInfo>::ListType;
+class Iterator {
-using iterator = BlockInfoList::const_iterator;
+public:
+using iterator_category = std::forward_iterator_tag;
+using value_type = const KBlockInfo;
+using difference_type = std::ptrdiff_t;
+using pointer = value_type *;
+using reference = value_type &;
 private:
-BlockInfoList m_block_list;
+pointer m_node;
+public:
+constexpr explicit ALWAYS_INLINE Iterator(pointer n) : m_node(n) { /* ... */ }

+constexpr ALWAYS_INLINE bool operator==(const Iterator &rhs) const { return m_node == rhs.m_node; }
+constexpr ALWAYS_INLINE bool operator!=(const Iterator &rhs) const { return !(*this == rhs); }

+constexpr ALWAYS_INLINE pointer operator->() const { return m_node; }
+constexpr ALWAYS_INLINE reference operator*() const { return *m_node; }

+constexpr ALWAYS_INLINE Iterator &operator++() {
+m_node = m_node->GetNext();
+return *this;
+}

+constexpr ALWAYS_INLINE Iterator operator++(int) {
+const Iterator it{*this};
+++(*this);
+return it;
+}
+};
+private:
+KBlockInfo *m_first_block;
+KBlockInfo *m_last_block;
 KBlockInfoManager *m_manager;
 public:
-explicit KPageGroup(KBlockInfoManager *m) : m_block_list(), m_manager(m) { /* ... */ }
+explicit KPageGroup(KBlockInfoManager *m) : m_first_block(), m_last_block(), m_manager(m) { /* ... */ }
 ~KPageGroup() { this->Finalize(); }

 void CloseAndReset();
 void Finalize();

-iterator begin() const { return m_block_list.begin(); }
+ALWAYS_INLINE Iterator begin() const { return Iterator{m_first_block}; }
-iterator end() const { return m_block_list.end(); }
+ALWAYS_INLINE Iterator end() const { return Iterator{nullptr}; }
-bool empty() const { return m_block_list.empty(); }
+ALWAYS_INLINE bool empty() const { return m_first_block == nullptr; }

-Result AddBlock(KVirtualAddress addr, size_t num_pages);
+Result AddBlock(KPhysicalAddress addr, size_t num_pages);
 void Open() const;
 void Close() const;

@@ -101,11 +144,11 @@ namespace ams::kern {

 bool IsEquivalentTo(const KPageGroup &rhs) const;

-bool operator==(const KPageGroup &rhs) const {
+ALWAYS_INLINE bool operator==(const KPageGroup &rhs) const {
 return this->IsEquivalentTo(rhs);
 }

-bool operator!=(const KPageGroup &rhs) const {
+ALWAYS_INLINE bool operator!=(const KPageGroup &rhs) const {
 return !(*this == rhs);
 }
 };
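The rewritten KBlockInfo above drops the intrusive list node and stores a 32-bit page index plus a 32-bit page count, which is what the new static_assert(sizeof(KBlockInfo) <= 0x10) checks; KPageGroup then chains the entries through a singly linked list walked by its forward Iterator. A standalone model of that encoding (illustrative types, not the kernel's):

// Standalone model (not the kernel's types) of the new KBlockInfo encoding:
// a 32-bit page index plus a 32-bit page count replaces a full address and an
// intrusive list node, keeping each entry within 16 bytes.
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t PageSize = 0x1000;

struct BlockInfo {
    BlockInfo    *next       = nullptr;  // singly linked, like the new KPageGroup chain
    std::uint32_t page_index = 0;        // physical address / PageSize
    std::uint32_t num_pages  = 0;

    std::uint64_t Address() const { return static_cast<std::uint64_t>(page_index) * PageSize; }
    std::uint64_t Size()    const { return static_cast<std::uint64_t>(num_pages) * PageSize; }
};
static_assert(sizeof(BlockInfo) <= 0x10);

int main() {
    // Two blocks walked the way the new forward iterator does (via GetNext()).
    BlockInfo second{nullptr, 0x80004, 2};
    BlockInfo first{&second, 0x80000, 4};
    for (const BlockInfo *it = &first; it != nullptr; it = it->next) {
        std::printf("block at %#llx, %#llx bytes\n",
                    static_cast<unsigned long long>(it->Address()),
                    static_cast<unsigned long long>(it->Size()));
    }
    return 0;
}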
@@ -54,7 +54,7 @@ namespace ams::kern {
 class Block {
 private:
 KPageBitmap m_bitmap;
-KVirtualAddress m_heap_address;
+KPhysicalAddress m_heap_address;
 uintptr_t m_end_offset;
 size_t m_block_shift;
 size_t m_next_block_shift;

@@ -68,13 +68,13 @@ namespace ams::kern {
 constexpr size_t GetNumFreeBlocks() const { return m_bitmap.GetNumBits(); }
 constexpr size_t GetNumFreePages() const { return this->GetNumFreeBlocks() * this->GetNumPages(); }

-u64 *Initialize(KVirtualAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
+u64 *Initialize(KPhysicalAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
 /* Set shifts. */
 m_block_shift = bs;
 m_next_block_shift = nbs;

 /* Align up the address. */
-KVirtualAddress end = addr + size;
+KPhysicalAddress end = addr + size;
 const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift) : (u64(1) << m_block_shift);
 addr = util::AlignDown(GetInteger(addr), align);
 end = util::AlignUp(GetInteger(end), align);

@@ -84,7 +84,7 @@ namespace ams::kern {
 return m_bitmap.Initialize(bit_storage, m_end_offset);
 }

-KVirtualAddress PushBlock(KVirtualAddress address) {
+KPhysicalAddress PushBlock(KPhysicalAddress address) {
 /* Set the bit for the free block. */
 size_t offset = (address - m_heap_address) >> this->GetShift();
 m_bitmap.SetBit(offset);

@@ -99,14 +99,14 @@ namespace ams::kern {
 }

 /* We couldn't coalesce, or we're already as big as possible. */
-return Null<KVirtualAddress>;
+return Null<KPhysicalAddress>;
 }

-KVirtualAddress PopBlock(bool random) {
+KPhysicalAddress PopBlock(bool random) {
 /* Find a free block. */
 ssize_t soffset = m_bitmap.FindFreeBlock(random);
 if (soffset < 0) {
-return Null<KVirtualAddress>;
+return Null<KPhysicalAddress>;
 }
 const size_t offset = static_cast<size_t>(soffset);

@@ -123,27 +123,27 @@ namespace ams::kern {
 }
 };
 private:
-KVirtualAddress m_heap_address;
+KPhysicalAddress m_heap_address;
 size_t m_heap_size;
 size_t m_initial_used_size;
 size_t m_num_blocks;
 Block m_blocks[NumMemoryBlockPageShifts];
 private:
-void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
+void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
 size_t GetNumFreePages() const;

-void FreeBlock(KVirtualAddress block, s32 index);
+void FreeBlock(KPhysicalAddress block, s32 index);
 public:
 KPageHeap() : m_heap_address(), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks() { /* ... */ }

-constexpr KVirtualAddress GetAddress() const { return m_heap_address; }
+constexpr KPhysicalAddress GetAddress() const { return m_heap_address; }
 constexpr size_t GetSize() const { return m_heap_size; }
-constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
+constexpr KPhysicalAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
-constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; }
+constexpr size_t GetPageOffset(KPhysicalAddress block) const { return (block - this->GetAddress()) / PageSize; }
-constexpr size_t GetPageOffsetToEnd(KVirtualAddress block) const { return (this->GetEndAddress() - block) / PageSize; }
+constexpr size_t GetPageOffsetToEnd(KPhysicalAddress block) const { return (this->GetEndAddress() - block) / PageSize; }

-void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size) {
+void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size) {
-return Initialize(heap_address, heap_size, management_address, management_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
+return this->Initialize(heap_address, heap_size, management_address, management_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
 }

 size_t GetFreeSize() const { return this->GetNumFreePages() * PageSize; }

@@ -158,8 +158,8 @@ namespace ams::kern {
 m_initial_used_size = m_heap_size - free_size - reserved_size;
 }

-KVirtualAddress AllocateBlock(s32 index, bool random);
+KPhysicalAddress AllocateBlock(s32 index, bool random);
-void Free(KVirtualAddress addr, size_t num_pages);
+void Free(KPhysicalAddress addr, size_t num_pages);
 private:
 static size_t CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
 public:
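Block::Initialize in the hunk above aligns the heap extents to the (next) block size before sizing its bitmap. A small sketch of that AlignDown/AlignUp arithmetic for power-of-two alignments, with illustrative values:

// Sketch of the alignment arithmetic used when sizing a block's bitmap.
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t AlignDown(std::uint64_t value, std::uint64_t align) {
    return value & ~(align - 1);
}
constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return AlignDown(value + align - 1, align);
}

int main() {
    constexpr std::uint64_t align = std::uint64_t(1) << 21;  // e.g. a 2 MiB next-block size
    constexpr std::uint64_t addr  = 0x80001000;              // illustrative heap extents
    constexpr std::uint64_t end   = 0x80403000;
    std::printf("aligned range: %#llx..%#llx\n",
                static_cast<unsigned long long>(AlignDown(addr, align)),
                static_cast<unsigned long long>(AlignUp(end, align)));
    return 0;
}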
@@ -57,7 +57,7 @@ namespace ams::kern {
 using TraversalContext = KPageTableImpl::TraversalContext;

 struct MemoryRange {
-KVirtualAddress address;
+KPhysicalAddress address;
 size_t size;

 void Close();

@@ -178,7 +178,6 @@ namespace ams::kern {
 KResourceLimit *m_resource_limit{};
 const KMemoryRegion *m_cached_physical_linear_region{};
 const KMemoryRegion *m_cached_physical_heap_region{};
-const KMemoryRegion *m_cached_virtual_heap_region{};
 MemoryFillValue m_heap_fill_value{};
 MemoryFillValue m_ipc_fill_value{};
 MemoryFillValue m_stack_fill_value{};

@@ -257,18 +256,6 @@ namespace ams::kern {
 return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
 }

-ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
-MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-
-return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr);
-}
-
-ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
-MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-
-return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr, size);
-}
-
 ALWAYS_INLINE bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
 return (m_address_space_start <= addr) && (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
 }
@@ -247,7 +247,7 @@ namespace ams::kern::arch::arm64 {
 cur_entry.block_size += next_entry.block_size;
 } else {
 if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
-mm.Close(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize);
+mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
 }

 /* Update tracking variables. */

@@ -265,7 +265,7 @@ namespace ams::kern::arch::arm64 {

 /* Handle the last block. */
 if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
-mm.Close(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize);
+mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
 }
 }

@@ -696,11 +696,10 @@ namespace ams::kern::arch::arm64 {

 /* Close the blocks. */
 if (!force && IsHeapPhysicalAddress(next_entry.phys_addr)) {
-const KVirtualAddress block_virt_addr = GetHeapVirtualAddress(next_entry.phys_addr);
 const size_t block_num_pages = next_entry.block_size / PageSize;
-if (R_FAILED(pages_to_close.AddBlock(block_virt_addr, block_num_pages))) {
+if (R_FAILED(pages_to_close.AddBlock(next_entry.phys_addr, block_num_pages))) {
 this->NoteUpdated();
-Kernel::GetMemoryManager().Close(block_virt_addr, block_num_pages);
+Kernel::GetMemoryManager().Close(next_entry.phys_addr, block_num_pages);
 pages_to_close.CloseAndReset();
 }
 }

@@ -792,7 +791,7 @@ namespace ams::kern::arch::arm64 {

 /* Open references to the pages, if we should. */
 if (IsHeapPhysicalAddress(orig_phys_addr)) {
-Kernel::GetMemoryManager().Open(GetHeapVirtualAddress(orig_phys_addr), num_pages);
+Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
 }

 return ResultSuccess();

@@ -815,7 +814,7 @@ namespace ams::kern::arch::arm64 {

 if (num_pages < ContiguousPageSize / PageSize) {
 for (const auto &block : pg) {
-const KPhysicalAddress block_phys_addr = GetLinearMappedPhysicalAddress(block.GetAddress());
+const KPhysicalAddress block_phys_addr = block.GetAddress();
 const size_t cur_pages = block.GetNumPages();
 R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));

@@ -827,7 +826,7 @@ namespace ams::kern::arch::arm64 {
 AlignedMemoryBlock virt_block(GetInteger(virt_addr), num_pages, L1BlockSize);
 for (const auto &block : pg) {
 /* Create a block representing this physical group, synchronize its alignment to our virtual block. */
-const KPhysicalAddress block_phys_addr = GetLinearMappedPhysicalAddress(block.GetAddress());
+const KPhysicalAddress block_phys_addr = block.GetAddress();
 size_t cur_pages = block.GetNumPages();

 AlignedMemoryBlock phys_block(GetInteger(block_phys_addr), cur_pages, virt_block.GetAlignment());
@@ -1051,7 +1051,7 @@ namespace ams::kern::board::nintendo::nx {
 SmmuSynchronizationBarrier();

 /* Open references to the pages. */
-mm.Open(GetHeapVirtualAddress(phys_addr), DeviceLargePageSize / PageSize);
+mm.Open(phys_addr, DeviceLargePageSize / PageSize);

 /* Advance. */
 phys_addr += DeviceLargePageSize;

@@ -1112,7 +1112,7 @@ namespace ams::kern::board::nintendo::nx {
 SmmuSynchronizationBarrier();

 /* Open references to the pages. */
-mm.Open(GetHeapVirtualAddress(phys_addr), (map_count * DevicePageSize) / PageSize);
+mm.Open(phys_addr, (map_count * DevicePageSize) / PageSize);

 /* Advance. */
 phys_addr += map_count * DevicePageSize;

@@ -1151,7 +1151,7 @@ namespace ams::kern::board::nintendo::nx {

 /* Map the device page. */
 size_t mapped_size = 0;
-R_TRY(this->MapDevicePage(std::addressof(mapped_size), num_pt, max_pt, GetHeapPhysicalAddress(contig_range.address), contig_range.size, cur_addr, device_perm));
+R_TRY(this->MapDevicePage(std::addressof(mapped_size), num_pt, max_pt, contig_range.address, contig_range.size, cur_addr, device_perm));

 /* Advance. */
 cur_addr += contig_range.size;

@@ -1245,7 +1245,7 @@ namespace ams::kern::board::nintendo::nx {
 contig_count = contig_phys_addr != Null<KPhysicalAddress> ? 1 : 0;
 } else if (phys_addr == Null<KPhysicalAddress> || phys_addr != (contig_phys_addr + (contig_count * DevicePageSize))) {
 /* If we're no longer contiguous, close the range we've been building. */
-mm.Close(GetHeapVirtualAddress(contig_phys_addr), (contig_count * DevicePageSize) / PageSize);
+mm.Close(contig_phys_addr, (contig_count * DevicePageSize) / PageSize);

 contig_phys_addr = phys_addr;
 contig_count = contig_phys_addr != Null<KPhysicalAddress> ? 1 : 0;

@@ -1255,7 +1255,7 @@ namespace ams::kern::board::nintendo::nx {
 }

 if (contig_count > 0) {
-mm.Close(GetHeapVirtualAddress(contig_phys_addr), (contig_count * DevicePageSize) / PageSize);
+mm.Close(contig_phys_addr, (contig_count * DevicePageSize) / PageSize);
 }
 }

@@ -1294,7 +1294,7 @@ namespace ams::kern::board::nintendo::nx {
 SmmuSynchronizationBarrier();

 /* Close references. */
-mm.Close(GetHeapVirtualAddress(phys_addr), DeviceLargePageSize / PageSize);
+mm.Close(phys_addr, DeviceLargePageSize / PageSize);

 /* Advance. */
 address += DeviceLargePageSize;

@@ -1320,7 +1320,7 @@ namespace ams::kern::board::nintendo::nx {
 /* Walk the directory. */
 KProcessAddress cur_process_address = process_address;
 size_t remaining_size = size;
-KPhysicalAddress cur_phys_address = GetHeapPhysicalAddress(contig_range.address);
+KPhysicalAddress cur_phys_address = contig_range.address;
 size_t remaining_in_range = contig_range.size;
 bool first = true;
 u32 first_attr = 0;

@@ -1367,7 +1367,7 @@ namespace ams::kern::board::nintendo::nx {
 }
 range_open = true;

-cur_phys_address = GetHeapPhysicalAddress(contig_range.address);
+cur_phys_address = contig_range.address;
 remaining_in_range = contig_range.size;
 }

@@ -1410,7 +1410,7 @@ namespace ams::kern::board::nintendo::nx {
 }
 range_open = true;

-cur_phys_address = GetHeapPhysicalAddress(contig_range.address);
+cur_phys_address = contig_range.address;
 remaining_in_range = contig_range.size;
 }

@@ -511,8 +511,10 @@ namespace ams::kern::board::nintendo::nx {
 MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, SecureAppletMemorySize));

 constexpr auto SecureAppletAllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
-g_secure_applet_memory_address = Kernel::GetMemoryManager().AllocateAndOpenContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption);
+const KPhysicalAddress secure_applet_memory_phys_addr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption);
-MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address != Null<KVirtualAddress>);
+MESOSPHERE_ABORT_UNLESS(secure_applet_memory_phys_addr != Null<KPhysicalAddress>);

+g_secure_applet_memory_address = KMemoryLayout::GetLinearVirtualAddress(secure_applet_memory_phys_addr);
 }

 /* Initialize KTrace. */

@@ -690,9 +692,7 @@ namespace ams::kern::board::nintendo::nx {
 MESOSPHERE_ASSERT(it != page_groups[i].end());
 MESOSPHERE_ASSERT(it->GetNumPages() == 1);

-KPhysicalAddress phys_addr = page_table.GetHeapPhysicalAddress(it->GetAddress());
+args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
-
-args->r[reg_id] = GetInteger(phys_addr) | (GetInteger(virt_addr) & (PageSize - 1));
 } else {
 /* If we couldn't map, we should clear the address. */
 args->r[reg_id] = 0;

@@ -729,25 +729,21 @@ namespace ams::kern::board::nintendo::nx {

 /* Allocate the memory. */
 const size_t num_pages = size / PageSize;
-const KVirtualAddress vaddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
+const KPhysicalAddress paddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
-R_UNLESS(vaddr != Null<KVirtualAddress>, svc::ResultOutOfMemory());
+R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());

 /* Ensure we don't leak references to the memory on error. */
-auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(vaddr, num_pages); };
+auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(paddr, num_pages); };

 /* If the memory isn't already secure, set it as secure. */
 if (pool != KMemoryManager::Pool_System) {
-/* Get the physical address. */
-const KPhysicalAddress paddr = KPageTable::GetHeapPhysicalAddress(vaddr);
-MESOSPHERE_ABORT_UNLESS(paddr != Null<KPhysicalAddress>);
-
 /* Set the secure region. */
 R_UNLESS(SetSecureRegion(paddr, size), svc::ResultOutOfMemory());
 }

 /* We succeeded. */
 mem_guard.Cancel();
-*out = vaddr;
+*out = KPageTable::GetHeapVirtualAddress(paddr);
 return ResultSuccess();
 }

@@ -779,7 +775,7 @@ namespace ams::kern::board::nintendo::nx {
 }

 /* Close the secure region's pages. */
-Kernel::GetMemoryManager().Close(address, size / PageSize);
+Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize);
 }

 }
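The secure-memory allocation hunk above keeps its error handling through an allocate / guard / cancel pattern: the scope guard closes the freshly opened pages on any early return, and is cancelled once the secure region is set. A minimal stand-in for that pattern (SCOPE_GUARD is Atmosphere's macro; this sketch is simplified and uses made-up stand-in calls):

// Sketch of the allocate / guard / cancel pattern used in the hunk above.
#include <cstdio>
#include <utility>

template<typename F>
class ScopeGuard {
    public:
        explicit ScopeGuard(F f) : m_f(std::move(f)) {}
        ~ScopeGuard() { if (m_active) { m_f(); } }
        void Cancel() { m_active = false; }
    private:
        F m_f;
        bool m_active = true;
};

bool SetSecureRegion() { return true; }  // stand-in for the real call

int main() {
    const unsigned long long paddr = 0x80000000ull;  // stand-in for the allocated pages
    ScopeGuard guard([&] { std::printf("error path: closing pages at %#llx\n", paddr); });

    if (!SetSecureRegion()) {
        return 1;        // the guard's destructor would release the pages here
    }

    guard.Cancel();      // success: keep the allocation open, as mem_guard.Cancel() does
    std::printf("secure region set for %#llx\n", paddr);
    return 0;
}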
@@ -165,11 +165,11 @@ namespace ams::kern::init {

 /* Allocate memory for the slab. */
 constexpr auto AllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
-const KVirtualAddress slab_address = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
+const KPhysicalAddress slab_address = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
-MESOSPHERE_ABORT_UNLESS(slab_address != Null<KVirtualAddress>);
+MESOSPHERE_ABORT_UNLESS(slab_address != Null<KPhysicalAddress>);

 /* Initialize the slabheap. */
-KPageBuffer::InitializeSlabHeap(GetVoidPointer(slab_address), slab_size);
+KPageBuffer::InitializeSlabHeap(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(slab_address)), slab_size);
 }

 void InitializeSlabHeaps() {
@@ -101,7 +101,7 @@ namespace ams::kern {
 /* If we crossed a page boundary, free the pages we're done using. */
 if (KVirtualAddress aligned_current = util::AlignDown(GetInteger(current), PageSize); aligned_current != data) {
 const size_t freed_size = data - aligned_current;
-Kernel::GetMemoryManager().Close(aligned_current, freed_size / PageSize);
+Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(aligned_current), freed_size / PageSize);
 Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, freed_size);
 }

@@ -114,7 +114,7 @@ namespace ams::kern {
 const size_t binary_pages = binary_size / PageSize;

 /* Get the pool for both the current (compressed) image, and the decompressed process. */
-const auto src_pool = Kernel::GetMemoryManager().GetPool(data);
+const auto src_pool = Kernel::GetMemoryManager().GetPool(KMemoryLayout::GetLinearPhysicalAddress(data));
 const auto dst_pool = reader.UsesSecureMemory() ? secure_pool : unsafe_pool;

 /* Determine the process size, and how much memory isn't already reserved. */

@@ -140,7 +140,7 @@ namespace ams::kern {
 /* Add the previously reserved pages. */
 if (src_pool == dst_pool && binary_pages != 0) {
 /* NOTE: Nintendo does not check the result of this operation. */
-pg.AddBlock(data, binary_pages);
+pg.AddBlock(KMemoryLayout::GetLinearPhysicalAddress(data), binary_pages);
 }

 /* Add the previously unreserved pages. */

@@ -176,7 +176,7 @@ namespace ams::kern {
 } else {
 if (src_pool != dst_pool) {
 std::memcpy(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(data), aligned_size);
-Kernel::GetMemoryManager().Close(data, aligned_size / PageSize);
+Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(data), aligned_size / PageSize);
 }
 }

@@ -218,7 +218,7 @@ namespace ams::kern {

 const size_t cur_pages = std::min(block_remaining, work_remaining);
 const size_t cur_size = cur_pages * PageSize;
-std::memcpy(GetVoidPointer(block_address), GetVoidPointer(work_address), cur_size);
+std::memcpy(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_address)), GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(work_address)), cur_size);

 block_address += cur_size;
 work_address += cur_size;

@@ -268,7 +268,7 @@ namespace ams::kern {
 {
 const size_t remaining_size = util::AlignUp(GetInteger(g_initial_process_binary_address) + g_initial_process_binary_header.size, PageSize) - util::AlignDown(GetInteger(current), PageSize);
 const size_t remaining_pages = remaining_size / PageSize;
-Kernel::GetMemoryManager().Close(util::AlignDown(GetInteger(current), PageSize), remaining_pages);
+Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(util::AlignDown(GetInteger(current), PageSize)), remaining_pages);
 Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, remaining_size);
 }
 }

@@ -312,7 +312,7 @@ namespace ams::kern {

 /* The initial process binary is potentially over-allocated, so free any extra pages. */
 if (total_size < InitialProcessBinarySizeMax) {
-Kernel::GetMemoryManager().Close(g_initial_process_binary_address + total_size, (InitialProcessBinarySizeMax - total_size) / PageSize);
+Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(g_initial_process_binary_address + total_size), (InitialProcessBinarySizeMax - total_size) / PageSize);
 }

 return total_size;
@@ -35,8 +35,9 @@ namespace ams::kern {
 /* Clear the memory. */
 for (const auto &block : GetReference(m_page_group)) {
 /* Clear and store cache. */
-std::memset(GetVoidPointer(block.GetAddress()), 0xFF, block.GetSize());
+void * const block_address = GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress()));
-cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize());
+std::memset(block_address, 0xFF, block.GetSize());
+cpu::StoreDataCache(block_address, block.GetSize());
 }

 /* Set remaining tracking members. */
@@ -20,13 +20,13 @@ namespace ams::kern {
 namespace {

 constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
-if ((type | KMemoryRegionType_VirtualDramApplicationPool) == type) {
+if ((type | KMemoryRegionType_DramApplicationPool) == type) {
 return KMemoryManager::Pool_Application;
-} else if ((type | KMemoryRegionType_VirtualDramAppletPool) == type) {
+} else if ((type | KMemoryRegionType_DramAppletPool) == type) {
 return KMemoryManager::Pool_Applet;
-} else if ((type | KMemoryRegionType_VirtualDramSystemPool) == type) {
+} else if ((type | KMemoryRegionType_DramSystemPool) == type) {
 return KMemoryManager::Pool_System;
-} else if ((type | KMemoryRegionType_VirtualDramSystemNonSecurePool) == type) {
+} else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
 return KMemoryManager::Pool_SystemNonSecure;
 } else {
 MESOSPHERE_PANIC("InvalidMemoryRegionType for conversion to Pool");
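The pool selection above relies on the bitmask idiom (type | flags) == type, i.e. "type already contains every bit of flags"; the region type values themselves are unchanged by the commit, only their Virtual-prefixed variants are swapped for the physical ones. A tiny sketch of the idiom, with made-up bit values:

// Sketch of the (type | flags) == type check; bit values here are illustrative only.
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t Attr_Dram        = 1u << 0;
constexpr std::uint32_t Attr_UserPool    = 1u << 1;
constexpr std::uint32_t Attr_Application = 1u << 2;

constexpr bool HasAllBits(std::uint32_t type, std::uint32_t flags) {
    return (type | flags) == type;  // equivalent to (type & flags) == flags
}

int main() {
    constexpr std::uint32_t region_type = Attr_Dram | Attr_UserPool | Attr_Application;
    std::printf("application pool? %s\n",
                HasAllBits(region_type, Attr_Dram | Attr_Application) ? "yes" : "no");
    return 0;
}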
@@ -37,19 +37,21 @@ namespace ams::kern {

 void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
 /* Clear the management region to zero. */

 const KVirtualAddress management_region_end = management_region + management_region_size;
 std::memset(GetVoidPointer(management_region), 0, management_region_size);

+/* Reset our manager count. */
+m_num_managers = 0;

 /* Traverse the virtual memory layout tree, initializing each manager as appropriate. */
 while (m_num_managers != MaxManagerCount) {
 /* Locate the region that should initialize the current manager. */
-uintptr_t region_address = 0;
+KPhysicalAddress region_address = Null<KPhysicalAddress>;
 size_t region_size = 0;
 Pool region_pool = Pool_Count;
-for (const auto &it : KMemoryLayout::GetVirtualMemoryRegionTree()) {
+for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
 /* We only care about regions that we need to create managers for. */
-if (!it.IsDerivedFrom(KMemoryRegionType_VirtualDramUserPool)) {
+if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
 continue;
 }

@ -58,21 +60,24 @@ namespace ams::kern {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const KPhysicalAddress cur_start = it.GetAddress();
|
||||||
|
const KPhysicalAddress cur_end = it.GetEndAddress();
|
||||||
|
|
||||||
/* Validate the region. */
|
/* Validate the region. */
|
||||||
MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0);
|
MESOSPHERE_ABORT_UNLESS(cur_end != Null<KPhysicalAddress>);
|
||||||
MESOSPHERE_ASSERT(it.GetAddress() != Null<decltype(it.GetAddress())>);
|
MESOSPHERE_ASSERT(cur_start != Null<KPhysicalAddress>);
|
||||||
MESOSPHERE_ASSERT(it.GetSize() > 0);
|
MESOSPHERE_ASSERT(it.GetSize() > 0);
|
||||||
|
|
||||||
/* Update the region's extents. */
|
/* Update the region's extents. */
|
||||||
if (region_address == 0) {
|
if (region_address == Null<KPhysicalAddress>) {
|
||||||
region_address = it.GetAddress();
|
region_address = cur_start;
|
||||||
region_size = it.GetSize();
|
region_size = it.GetSize();
|
||||||
region_pool = GetPoolFromMemoryRegionType(it.GetType());
|
region_pool = GetPoolFromMemoryRegionType(it.GetType());
|
||||||
} else {
|
} else {
|
||||||
MESOSPHERE_ASSERT(it.GetAddress() == region_address + region_size);
|
MESOSPHERE_ASSERT(cur_start == region_address + region_size);
|
||||||
|
|
||||||
/* Update the size. */
|
/* Update the size. */
|
||||||
region_size = it.GetEndAddress() - region_address;
|
region_size = cur_end - region_address;
|
||||||
MESOSPHERE_ABORT_UNLESS(GetPoolFromMemoryRegionType(it.GetType()) == region_pool);
|
MESOSPHERE_ABORT_UNLESS(GetPoolFromMemoryRegionType(it.GetType()) == region_pool);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -102,18 +107,22 @@ namespace ams::kern {

        /* Free each region to its corresponding heap. */
        size_t reserved_sizes[MaxManagerCount] = {};
-       const uintptr_t ini_start = GetInteger(GetInitialProcessBinaryAddress());
+       const KPhysicalAddress ini_start = KMemoryLayout::GetLinearPhysicalAddress(GetInitialProcessBinaryAddress());
-       const uintptr_t ini_end = ini_start + InitialProcessBinarySizeMax;
+       const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
-       const uintptr_t ini_last = ini_end - 1;
+       const KPhysicalAddress ini_last = ini_end - 1;
-       for (const auto &it : KMemoryLayout::GetVirtualMemoryRegionTree()) {
+       for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
-           if (it.IsDerivedFrom(KMemoryRegionType_VirtualDramUserPool)) {
+           if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
                /* Get the manager for the region. */
                auto &manager = m_managers[it.GetAttributes()];

-               if (it.GetAddress() <= ini_start && ini_last <= it.GetLastAddress()) {
+               const KPhysicalAddress cur_start = it.GetAddress();
+               const KPhysicalAddress cur_last = it.GetLastAddress();
+               const KPhysicalAddress cur_end = it.GetEndAddress();
+
+               if (cur_start <= ini_start && ini_last <= cur_last) {
                    /* Free memory before the ini to the heap. */
-                   if (it.GetAddress() != ini_start) {
+                   if (cur_start != ini_start) {
-                       manager.Free(it.GetAddress(), (ini_start - it.GetAddress()) / PageSize);
+                       manager.Free(cur_start, (ini_start - cur_start) / PageSize);
                    }

                    /* Open/reserve the ini memory. */
@@ -121,21 +130,21 @@ namespace ams::kern {
                    reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;

                    /* Free memory after the ini to the heap. */
-                   if (ini_last != it.GetLastAddress()) {
+                   if (ini_last != cur_last) {
-                       MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0);
+                       MESOSPHERE_ABORT_UNLESS(cur_end != Null<KPhysicalAddress>);
-                       manager.Free(ini_end, it.GetEndAddress() - ini_end);
+                       manager.Free(ini_end, cur_end - ini_end);
                    }
                } else {
                    /* Ensure there's no partial overlap with the ini image. */
-                   if (it.GetAddress() <= ini_last) {
+                   if (cur_start <= ini_last) {
-                       MESOSPHERE_ABORT_UNLESS(it.GetLastAddress() < ini_start);
+                       MESOSPHERE_ABORT_UNLESS(cur_last < ini_start);
                    } else {
                        /* Otherwise, check the region for general validity. */
-                       MESOSPHERE_ABORT_UNLESS(it.GetEndAddress() != 0);
+                       MESOSPHERE_ABORT_UNLESS(cur_end != Null<KPhysicalAddress>);
                    }

                    /* Free the memory to the heap. */
-                   manager.Free(it.GetAddress(), it.GetSize() / PageSize);
+                   manager.Free(cur_start, it.GetSize() / PageSize);
                }
            }
        }
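Note: the two hunks above free each DRAM user-pool region to its manager while carving out the initial process binary: memory before and after the ini image goes to the heap, and the ini range itself is only reserved. A simplified sketch of that carve-out, with a hypothetical free callback standing in for manager.Free and half-open ranges instead of the kernel's last-address comparisons:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>

constexpr uint64_t PageSize = 0x1000;

// Free [region_start, region_end) to the heap, skipping the reserved
// [ini_start, ini_end) range if it lies inside the region. Sketch only.
void FreeRegionExceptIni(uint64_t region_start, uint64_t region_end,
                         uint64_t ini_start, uint64_t ini_end,
                         const std::function<void(uint64_t, size_t)> &free_pages) {
    if (region_start <= ini_start && ini_end <= region_end) {
        // Free memory before the ini image.
        if (region_start != ini_start) {
            free_pages(region_start, (ini_start - region_start) / PageSize);
        }
        // The ini range itself stays reserved; free memory after it.
        if (ini_end != region_end) {
            free_pages(ini_end, (region_end - ini_end) / PageSize);
        }
    } else {
        // Only full containment or no overlap at all is allowed.
        assert(region_end <= ini_start || ini_end <= region_start);
        free_pages(region_start, (region_end - region_start) / PageSize);
    }
}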
@@ -176,10 +185,10 @@ namespace ams::kern {
        }


-   KVirtualAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
+   KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
        /* Early return if we're allocating no pages. */
        if (num_pages == 0) {
-           return Null<KVirtualAddress>;
+           return Null<KPhysicalAddress>;
        }

        /* Lock the pool that we're allocating from. */
@@ -191,17 +200,17 @@ namespace ams::kern {

        /* Loop, trying to iterate from each block. */
        Impl *chosen_manager = nullptr;
-       KVirtualAddress allocated_block = Null<KVirtualAddress>;
+       KPhysicalAddress allocated_block = Null<KPhysicalAddress>;
        for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) {
            allocated_block = chosen_manager->AllocateBlock(heap_index, true);
-           if (allocated_block != Null<KVirtualAddress>) {
+           if (allocated_block != Null<KPhysicalAddress>) {
                break;
            }
        }

        /* If we failed to allocate, quit now. */
-       if (allocated_block == Null<KVirtualAddress>) {
+       if (allocated_block == Null<KPhysicalAddress>) {
-           return Null<KVirtualAddress>;
+           return Null<KPhysicalAddress>;
        }

        /* If we allocated more than we need, free some. */
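Note: AllocateAndOpenContinuous walks the pool's managers in the configured direction and takes the first block any manager can supply, now returning a physical address. A minimal sketch of that search loop over hypothetical manager objects (not the real Impl type):

#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical manager with a buddy-style AllocateBlock.
struct Manager {
    std::vector<uint64_t> free_blocks;
    std::optional<uint64_t> AllocateBlock() {
        if (free_blocks.empty()) { return std::nullopt; }
        const uint64_t block = free_blocks.back();
        free_blocks.pop_back();
        return block;
    }
};

// Try each manager in order until one can satisfy the request,
// mirroring the GetFirstManager/GetNextManager walk above.
std::optional<uint64_t> AllocateFromManagers(std::vector<Manager> &managers) {
    for (auto &manager : managers) {
        if (auto block = manager.AllocateBlock()) {
            return block;
        }
    }
    return std::nullopt;
}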
@@ -242,8 +251,8 @@ namespace ams::kern {
        for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
            while (num_pages >= pages_per_alloc) {
                /* Allocate a block. */
-               KVirtualAddress allocated_block = cur_manager->AllocateBlock(index, random);
+               KPhysicalAddress allocated_block = cur_manager->AllocateBlock(index, random);
-               if (allocated_block == Null<KVirtualAddress>) {
+               if (allocated_block == Null<KPhysicalAddress>) {
                    break;
                }

@@ -288,7 +297,7 @@ namespace ams::kern {

        /* Open the first reference to the pages. */
        for (const auto &block : *out) {
-           KVirtualAddress cur_address = block.GetAddress();
+           KPhysicalAddress cur_address = block.GetAddress();
            size_t remaining_pages = block.GetNumPages();
            while (remaining_pages > 0) {
                /* Get the manager for the current address. */
@@ -332,7 +341,7 @@ namespace ams::kern {

        /* Open the first reference to the pages. */
        for (const auto &block : *out) {
-           KVirtualAddress cur_address = block.GetAddress();
+           KPhysicalAddress cur_address = block.GetAddress();
            size_t remaining_pages = block.GetNumPages();
            while (remaining_pages > 0) {
                /* Get the manager for the current address. */
@@ -354,7 +363,7 @@ namespace ams::kern {
        /* Iterate over the allocated blocks. */
        for (const auto &block : *out) {
            /* Get the block extents. */
-           const KVirtualAddress block_address = block.GetAddress();
+           const KPhysicalAddress block_address = block.GetAddress();
            const size_t block_pages = block.GetNumPages();

            /* If it has no pages, we don't need to do anything. */
@@ -365,7 +374,7 @@ namespace ams::kern {
                /* Fill all the pages that we need to fill. */
                bool any_new = false;
                {
-                   KVirtualAddress cur_address = block_address;
+                   KPhysicalAddress cur_address = block_address;
                    size_t remaining_pages = block_pages;
                    while (remaining_pages > 0) {
                        /* Get the manager for the current address. */
@@ -384,7 +393,7 @@ namespace ams::kern {
                /* If there are new pages, update tracking for the allocation. */
                if (any_new) {
                    /* Update tracking for the allocation. */
-                   KVirtualAddress cur_address = block_address;
+                   KPhysicalAddress cur_address = block_address;
                    size_t remaining_pages = block_pages;
                    while (remaining_pages > 0) {
                        /* Get the manager for the current address. */
@@ -406,14 +415,14 @@ namespace ams::kern {
        } else {
            /* Set all the allocated memory. */
            for (const auto &block : *out) {
-               std::memset(GetVoidPointer(block.GetAddress()), fill_pattern, block.GetSize());
+               std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress())), fill_pattern, block.GetSize());
            }
        }

        return ResultSuccess();
    }

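Note: because the manager now hands out physical addresses, every place that touches page contents first converts through KMemoryLayout::GetLinearVirtualAddress, as in the memset above. Assuming the linear map is a constant offset between physical DRAM and its kernel mapping (how a linear map is normally implemented; the real kernel derives the offset from the memory layout, and the constants below are hypothetical), the conversion is just an offset add or subtract:

#include <cstdint>

// Hypothetical linear-map bases; the real values come from the memory layout.
constexpr uint64_t PhysicalLinearBase = 0x80000000;
constexpr uint64_t VirtualLinearBase  = 0xFFFFFF8000000000;

constexpr uint64_t GetLinearVirtualAddress(uint64_t phys) {
    return phys - PhysicalLinearBase + VirtualLinearBase;
}

constexpr uint64_t GetLinearPhysicalAddress(uint64_t virt) {
    return virt - VirtualLinearBase + PhysicalLinearBase;
}

// Round-trip sanity check.
static_assert(GetLinearPhysicalAddress(GetLinearVirtualAddress(0x81234000)) == 0x81234000);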
-   size_t KMemoryManager::Impl::Initialize(uintptr_t address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p) {
+   size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p) {
        /* Calculate management sizes. */
        const size_t ref_count_size = (size / PageSize) * sizeof(u16);
        const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
@@ -436,7 +445,7 @@ namespace ams::kern {
        return total_management_size;
    }

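Note: Impl::Initialize sizes its management data from the managed range: a u16 reference count per page plus the optimized-process map returned by CalculateOptimizedProcessOverheadSize. A sketch of that arithmetic, assuming the optimize map is one bit per page rounded up to 64-bit words (an assumption; the real helper may differ, and the real total also includes the page heap's own management bitmaps):

#include <cstddef>
#include <cstdint>

constexpr size_t PageSize = 0x1000;

// One bit per page, padded to whole u64 words (assumed layout).
constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
    const size_t num_pages = region_size / PageSize;
    const size_t num_words = (num_pages + 63) / 64;
    return num_words * sizeof(uint64_t);
}

constexpr size_t CalculateManagementOverheadSize(size_t region_size) {
    const size_t ref_count_size    = (region_size / PageSize) * sizeof(uint16_t);
    const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(region_size);
    return ref_count_size + optimize_map_size;
}

// 4 MiB of managed memory: 1024 ref counts (2 KiB) plus a 128-byte bitmap.
static_assert(CalculateManagementOverheadSize(4 * 1024 * 1024) == 2048 + 128);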
-   void KMemoryManager::Impl::TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages) {
+   void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
        /* Get the range we're tracking. */
        size_t offset = this->GetPageOffset(block);
        const size_t last = offset + num_pages - 1;
@@ -451,7 +460,7 @@ namespace ams::kern {
        }
    }

-   void KMemoryManager::Impl::TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages) {
+   void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
        /* Get the range we're tracking. */
        size_t offset = this->GetPageOffset(block);
        const size_t last = offset + num_pages - 1;
@@ -466,7 +475,7 @@ namespace ams::kern {
        }
    }

-   bool KMemoryManager::Impl::ProcessOptimizedAllocation(KVirtualAddress block, size_t num_pages, u8 fill_pattern) {
+   bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern) {
        /* We want to return whether any pages were newly allocated. */
        bool any_new = false;

@@ -483,7 +492,7 @@ namespace ams::kern {
                any_new = true;

                /* Fill the page. */
-               std::memset(GetVoidPointer(m_heap.GetAddress() + offset * PageSize), fill_pattern, PageSize);
+               std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(m_heap.GetAddress()) + offset * PageSize), fill_pattern, PageSize);
            }

            offset++;

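Note: the Track*/ProcessOptimizedAllocation routines operate on a per-page map in the management region: tracking marks the pages of an allocation, and processing scans an allocation, fills pages that were not already marked, and reports whether anything new was touched. A stand-alone sketch of the general approach with hypothetical simplified types (std::vector in place of the kernel's raw management region; the real bookkeeping split between Track and Process differs in detail):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr size_t PageSize = 0x1000;

struct OptimizedTracker {
    std::vector<uint64_t> bits;   // One bit per page in the managed heap.
    std::vector<uint8_t>  heap;   // Stand-in for the linearly mapped heap memory.

    bool TestAndSet(size_t page) {
        uint64_t &word = bits[page / 64];
        const uint64_t mask = uint64_t(1) << (page % 64);
        const bool was_set = (word & mask) != 0;
        word |= mask;
        return was_set;
    }

    // Fill any page in [first, first + num_pages) that was not previously
    // tracked, and report whether any page was newly allocated.
    bool ProcessAllocation(size_t first, size_t num_pages, uint8_t fill_pattern) {
        bool any_new = false;
        for (size_t page = first; page < first + num_pages; ++page) {
            if (!this->TestAndSet(page)) {
                any_new = true;
                std::memset(heap.data() + page * PageSize, fill_pattern, PageSize);
            }
        }
        return any_new;
    }
};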
@@ -18,24 +18,30 @@
namespace ams::kern {

    void KPageGroup::Finalize() {
-       auto it = m_block_list.begin();
+       KBlockInfo *cur = m_first_block;
-       while (it != m_block_list.end()) {
+       while (cur != nullptr) {
-           KBlockInfo *info = std::addressof(*it);
+           KBlockInfo *next = cur->GetNext();
-           it = m_block_list.erase(it);
+           m_manager->Free(cur);
-           m_manager->Free(info);
+           cur = next;
        }

+       m_first_block = nullptr;
+       m_last_block = nullptr;
    }

    void KPageGroup::CloseAndReset() {
        auto &mm = Kernel::GetMemoryManager();

-       auto it = m_block_list.begin();
+       KBlockInfo *cur = m_first_block;
-       while (it != m_block_list.end()) {
+       while (cur != nullptr) {
-           KBlockInfo *info = std::addressof(*it);
+           KBlockInfo *next = cur->GetNext();
-           it = m_block_list.erase(it);
+           mm.Close(cur->GetAddress(), cur->GetNumPages());
-           mm.Close(info->GetAddress(), info->GetNumPages());
+           m_manager->Free(cur);
-           m_manager->Free(info);
+           cur = next;
        }

+       m_first_block = nullptr;
+       m_last_block = nullptr;
    }

    size_t KPageGroup::GetNumPages() const {
@@ -48,7 +54,7 @@ namespace ams::kern {
        return num_pages;
    }

-   Result KPageGroup::AddBlock(KVirtualAddress addr, size_t num_pages) {
+   Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
        /* Succeed immediately if we're adding no pages. */
        R_SUCCEED_IF(num_pages == 0);

@@ -56,9 +62,8 @@ namespace ams::kern {
        MESOSPHERE_ASSERT(addr < addr + num_pages * PageSize);

        /* Try to just append to the last block. */
-       if (!m_block_list.empty()) {
+       if (m_last_block != nullptr) {
-           auto it = --(m_block_list.end());
+           R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
-           R_SUCCEED_IF(it->TryConcatenate(addr, num_pages));
        }

        /* Allocate a new block. */
@@ -67,7 +72,14 @@ namespace ams::kern {

        /* Initialize the block. */
        new_block->Initialize(addr, num_pages);
-       m_block_list.push_back(*new_block);
+
+       /* Add the block to our list. */
+       if (m_last_block != nullptr) {
+           m_last_block->SetNext(new_block);
+       } else {
+           m_first_block = new_block;
+       }
+       m_last_block = new_block;

        return ResultSuccess();
    }
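Note: alongside the address-type change, this commit drops KPageGroup's intrusive m_block_list in favour of explicit m_first_block/m_last_block pointers with a per-KBlockInfo next link, and AddBlock first tries TryConcatenate on the last block so adjacent runs merge. A reduced sketch of that structure with hypothetical simplified types:

#include <cstddef>
#include <cstdint>

constexpr size_t PageSize = 0x1000;

// Simplified stand-in for KBlockInfo: an (address, num_pages) run plus a next link.
struct BlockInfo {
    uint64_t   address   = 0;
    size_t     num_pages = 0;
    BlockInfo *next      = nullptr;

    // Extend this block in place if the new run starts exactly at its end.
    bool TryConcatenate(uint64_t addr, size_t pages) {
        if (addr == address + num_pages * PageSize) {
            num_pages += pages;
            return true;
        }
        return false;
    }
};

struct PageGroup {
    BlockInfo *first = nullptr;
    BlockInfo *last  = nullptr;

    // Append to the singly linked list, updating both end pointers.
    void AddBlock(BlockInfo *new_block) {
        if (last != nullptr) {
            last->next = new_block;
        } else {
            first = new_block;
        }
        last = new_block;
    }
};

Keeping only head/tail pointers is enough here because the group is only ever appended to and walked front to back; no random removal is needed once the intrusive list is gone.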
@@ -89,10 +101,10 @@ namespace ams::kern {
        }

    bool KPageGroup::IsEquivalentTo(const KPageGroup &rhs) const {
-       auto lit = m_block_list.cbegin();
+       auto lit = this->begin();
-       auto rit = rhs.m_block_list.cbegin();
+       auto rit = rhs.begin();
-       auto lend = m_block_list.cend();
+       auto lend = this->end();
-       auto rend = rhs.m_block_list.cend();
+       auto rend = rhs.end();

        while (lit != lend && rit != rend) {
            if (*lit != *rit) {
@@ -17,7 +17,7 @@

namespace ams::kern {

-   void KPageHeap::Initialize(KVirtualAddress address, size_t size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts) {
+   void KPageHeap::Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts) {
        /* Check our assumptions. */
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
@@ -51,11 +51,11 @@ namespace ams::kern {
        return num_free;
    }

-   KVirtualAddress KPageHeap::AllocateBlock(s32 index, bool random) {
+   KPhysicalAddress KPageHeap::AllocateBlock(s32 index, bool random) {
        const size_t needed_size = m_blocks[index].GetSize();

        for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
-           if (const KVirtualAddress addr = m_blocks[i].PopBlock(random); addr != Null<KVirtualAddress>) {
+           if (const KPhysicalAddress addr = m_blocks[i].PopBlock(random); addr != Null<KPhysicalAddress>) {
                if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
                    this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
                }
@@ -63,16 +63,16 @@ namespace ams::kern {
            }
        }

-       return Null<KVirtualAddress>;
+       return Null<KPhysicalAddress>;
    }

-   void KPageHeap::FreeBlock(KVirtualAddress block, s32 index) {
+   void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
        do {
            block = m_blocks[index++].PushBlock(block);
-       } while (block != Null<KVirtualAddress>);
+       } while (block != Null<KPhysicalAddress>);
    }

-   void KPageHeap::Free(KVirtualAddress addr, size_t num_pages) {
+   void KPageHeap::Free(KPhysicalAddress addr, size_t num_pages) {
        /* Freeing no pages is a no-op. */
        if (num_pages == 0) {
            return;
@@ -80,16 +80,16 @@ namespace ams::kern {

        /* Find the largest block size that we can free, and free as many as possible. */
        s32 big_index = static_cast<s32>(m_num_blocks) - 1;
-       const KVirtualAddress start = addr;
+       const KPhysicalAddress start = addr;
-       const KVirtualAddress end = addr + num_pages * PageSize;
+       const KPhysicalAddress end = addr + num_pages * PageSize;
-       KVirtualAddress before_start = start;
+       KPhysicalAddress before_start = start;
-       KVirtualAddress before_end = start;
+       KPhysicalAddress before_end = start;
-       KVirtualAddress after_start = end;
+       KPhysicalAddress after_start = end;
-       KVirtualAddress after_end = end;
+       KPhysicalAddress after_end = end;
        while (big_index >= 0) {
            const size_t block_size = m_blocks[big_index].GetSize();
-           const KVirtualAddress big_start = util::AlignUp(GetInteger(start), block_size);
+           const KPhysicalAddress big_start = util::AlignUp(GetInteger(start), block_size);
-           const KVirtualAddress big_end = util::AlignDown(GetInteger(end), block_size);
+           const KPhysicalAddress big_end = util::AlignDown(GetInteger(end), block_size);
            if (big_start < big_end) {
                /* Free as many big blocks as we can. */
                for (auto block = big_start; block < big_end; block += block_size) {
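Note: KPageHeap::Free carves the freed range into the largest naturally aligned blocks it can and then retries smaller block sizes on the unaligned head and tail, which is what the before_/after_ bookkeeping above supports. A compact sketch of the alignment arithmetic for one block size, using plain integers and a hypothetical free callback:

#include <cstdint>
#include <functional>

// Both helpers assume align is a power of two.
constexpr uint64_t AlignUp(uint64_t value, uint64_t align)   { return (value + align - 1) & ~(align - 1); }
constexpr uint64_t AlignDown(uint64_t value, uint64_t align) { return value & ~(align - 1); }

// Free every naturally aligned block of block_size inside [start, end).
// Returns true and the freed [big_start, big_end) range if at least one
// aligned block fits; otherwise returns false and frees nothing.
inline bool FreeAlignedBlocks(uint64_t start, uint64_t end, uint64_t block_size,
                              const std::function<void(uint64_t)> &free_block,
                              uint64_t *out_big_start, uint64_t *out_big_end) {
    const uint64_t big_start = AlignUp(start, block_size);
    const uint64_t big_end   = AlignDown(end, block_size);
    if (big_start >= big_end) {
        return false;
    }
    for (uint64_t block = big_start; block < big_end; block += block_size) {
        free_block(block);
    }
    *out_big_start = big_start;
    *out_big_end   = big_end;
    return true;
}

The caller would then repeat the same procedure with progressively smaller block sizes on [start, big_start) and [big_end, end).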
@@ -116,7 +116,6 @@ namespace ams::kern {

        m_cached_physical_linear_region = nullptr;
        m_cached_physical_heap_region = nullptr;
-       m_cached_virtual_heap_region = nullptr;

        /* Initialize our implementation. */
        m_impl.InitializeForKernel(table, start, end);
@@ -1145,7 +1144,7 @@ namespace ams::kern {

        /* Clear all pages. */
        for (const auto &it : pg) {
-           std::memset(GetVoidPointer(it.GetAddress()), m_heap_fill_value, it.GetSize());
+           std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize());
        }

        /* Map the pages. */
@@ -1171,13 +1170,9 @@ namespace ams::kern {

        /* Iterate, mapping all pages in the group. */
        for (const auto &block : pg) {
-           /* We only allow mapping pages in the heap, and we require we're mapping non-empty blocks. */
-           MESOSPHERE_ABORT_UNLESS(block.GetAddress() < block.GetLastAddress());
-           MESOSPHERE_ABORT_UNLESS(IsHeapVirtualAddress(block.GetAddress(), block.GetSize()));
-
            /* Map and advance. */
            const KPageProperties cur_properties = (cur_address == start_address) ? properties : KPageProperties{ properties.perm, properties.io, properties.uncached, DisableMergeAttribute_None };
-           R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), GetHeapPhysicalAddress(block.GetAddress()), true, cur_properties, OperationType_Map, reuse_ll));
+           R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true, cur_properties, OperationType_Map, reuse_ll));
            cur_address += block.GetSize();
        }

@@ -1198,7 +1193,7 @@ namespace ams::kern {
        auto pg_it = pg.begin();
        MESOSPHERE_ABORT_UNLESS(pg_it != pg.end());

-       KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+       KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
        size_t pg_pages = pg_it->GetNumPages();

        auto it = m_memory_block_manager.FindIterator(start_address);
@@ -1228,7 +1223,7 @@ namespace ams::kern {

            /* Advance our physical block. */
            ++pg_it;
-           pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+           pg_phys_addr = pg_it->GetAddress();
            pg_pages = pg_it->GetNumPages();
        }

@@ -1285,7 +1280,7 @@ namespace ams::kern {
            const size_t cur_pages = cur_size / PageSize;

            R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
-           R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_addr), cur_pages));
+           R_TRY(pg.AddBlock(cur_addr, cur_pages));

            cur_addr = next_entry.phys_addr;
            cur_size = next_entry.block_size;
@@ -1304,7 +1299,7 @@ namespace ams::kern {
        /* add the last block. */
        const size_t cur_pages = cur_size / PageSize;
        R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
-       R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_addr), cur_pages));
+       R_TRY(pg.AddBlock(cur_addr, cur_pages));

        return ResultSuccess();
    }
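Note: the page-group construction above walks page table entries and appends each physically contiguous run straight to the group, relying on AddBlock (via TryConcatenate) to merge adjacent runs. A small sketch of accumulating contiguous (address, size) runs from such a walk, with a plain vector standing in for the page group:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Run { uint64_t address; uint64_t size; };

// Append a run, merging it into the previous one when it is contiguous,
// similar to what AddBlock/TryConcatenate do for KBlockInfo entries.
inline void AppendRun(std::vector<Run> &runs, uint64_t address, uint64_t size) {
    if (!runs.empty() && runs.back().address + runs.back().size == address) {
        runs.back().size += size;
    } else {
        runs.push_back({ address, size });
    }
}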
@@ -1323,7 +1318,7 @@ namespace ams::kern {

        /* We're going to validate that the group we'd expect is the group we see. */
        auto cur_it = pg.begin();
-       KVirtualAddress cur_block_address = cur_it->GetAddress();
+       KPhysicalAddress cur_block_address = cur_it->GetAddress();
        size_t cur_block_pages = cur_it->GetNumPages();

        auto UpdateCurrentIterator = [&]() ALWAYS_INLINE_LAMBDA {
@@ -1367,7 +1362,7 @@ namespace ams::kern {
                return false;
            }

-           if (cur_block_address != GetHeapVirtualAddress(cur_addr) || cur_block_pages < cur_pages) {
+           if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
                return false;
            }

@@ -1395,7 +1390,7 @@ namespace ams::kern {
            return false;
        }

-       return cur_block_address == GetHeapVirtualAddress(cur_addr) && cur_block_pages == (cur_size / PageSize);
+       return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
    }

    Result KPageTableBase::GetContiguousMemoryRangeWithState(MemoryRange *out, KProcessAddress address, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
@@ -1434,7 +1429,7 @@ namespace ams::kern {

        /* The memory is contiguous, so set the output range. */
        *out = {
-           .address = GetLinearMappedVirtualAddress(phys_address),
+           .address = phys_address,
            .size = size,
        };

@@ -1533,7 +1528,7 @@ namespace ams::kern {
        /* Ensure cache coherency, if we're setting pages as executable. */
        if (is_x) {
            for (const auto &block : pg) {
-               cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize());
+               cpu::StoreDataCache(GetVoidPointer(GetHeapVirtualAddress(block.GetAddress())), block.GetSize());
            }
            cpu::InvalidateEntireInstructionCache();
        }
@@ -1658,7 +1653,7 @@ namespace ams::kern {

        /* Clear all the newly allocated pages. */
        for (const auto &it : pg) {
-           std::memset(GetVoidPointer(it.GetAddress()), m_heap_fill_value, it.GetSize());
+           std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize());
        }

        /* Map the pages. */
||||||
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
|
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
|
||||||
|
|
||||||
/* Ensure that we manage page references correctly. */
|
/* Ensure that we manage page references correctly. */
|
||||||
KVirtualAddress start_partial_page = Null<KVirtualAddress>;
|
KPhysicalAddress start_partial_page = Null<KPhysicalAddress>;
|
||||||
KVirtualAddress end_partial_page = Null<KVirtualAddress>;
|
KPhysicalAddress end_partial_page = Null<KPhysicalAddress>;
|
||||||
KProcessAddress cur_mapped_addr = dst_addr;
|
KProcessAddress cur_mapped_addr = dst_addr;
|
||||||
|
|
||||||
/* If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll free on scope exit. */
|
/* If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll free on scope exit. */
|
||||||
ON_SCOPE_EXIT {
|
ON_SCOPE_EXIT {
|
||||||
if (start_partial_page != Null<KVirtualAddress>) {
|
if (start_partial_page != Null<KPhysicalAddress>) {
|
||||||
Kernel::GetMemoryManager().Close(start_partial_page, 1);
|
Kernel::GetMemoryManager().Close(start_partial_page, 1);
|
||||||
}
|
}
|
||||||
if (end_partial_page != Null<KVirtualAddress>) {
|
if (end_partial_page != Null<KPhysicalAddress>) {
|
||||||
Kernel::GetMemoryManager().Close(end_partial_page, 1);
|
Kernel::GetMemoryManager().Close(end_partial_page, 1);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -3603,13 +3598,13 @@ namespace ams::kern {
|
||||||
/* Allocate the start page as needed. */
|
/* Allocate the start page as needed. */
|
||||||
if (aligned_src_start < mapping_src_start) {
|
if (aligned_src_start < mapping_src_start) {
|
||||||
start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
|
start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
|
||||||
R_UNLESS(start_partial_page != Null<KVirtualAddress>, svc::ResultOutOfMemory());
|
R_UNLESS(start_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Allocate the end page as needed. */
|
/* Allocate the end page as needed. */
|
||||||
if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
|
if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
|
||||||
end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
|
end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
|
||||||
R_UNLESS(end_partial_page != Null<KVirtualAddress>, svc::ResultOutOfMemory());
|
R_UNLESS(end_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Get the implementation. */
|
/* Get the implementation. */
|
||||||
|
@ -3631,8 +3626,9 @@ namespace ams::kern {
|
||||||
size_t tot_block_size = cur_block_size;
|
size_t tot_block_size = cur_block_size;
|
||||||
|
|
||||||
/* Map the start page, if we have one. */
|
/* Map the start page, if we have one. */
|
||||||
if (start_partial_page != Null<KVirtualAddress>) {
|
if (start_partial_page != Null<KPhysicalAddress>) {
|
||||||
/* Ensure the page holds correct data. */
|
/* Ensure the page holds correct data. */
|
||||||
|
const KVirtualAddress start_partial_virt = GetHeapVirtualAddress(start_partial_page);
|
||||||
if (send) {
|
if (send) {
|
||||||
const size_t partial_offset = src_start - aligned_src_start;
|
const size_t partial_offset = src_start - aligned_src_start;
|
||||||
size_t copy_size, clear_size;
|
size_t copy_size, clear_size;
|
||||||
|
@ -3644,18 +3640,18 @@ namespace ams::kern {
|
||||||
clear_size = 0;
|
clear_size = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::memset(GetVoidPointer(start_partial_page), fill_val, partial_offset);
|
std::memset(GetVoidPointer(start_partial_virt), fill_val, partial_offset);
|
||||||
std::memcpy(GetVoidPointer(start_partial_page + partial_offset), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr) + partial_offset), copy_size);
|
std::memcpy(GetVoidPointer(start_partial_virt + partial_offset), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr) + partial_offset), copy_size);
|
||||||
if (clear_size > 0) {
|
if (clear_size > 0) {
|
||||||
std::memset(GetVoidPointer(start_partial_page + partial_offset + copy_size), fill_val, clear_size);
|
std::memset(GetVoidPointer(start_partial_virt + partial_offset + copy_size), fill_val, clear_size);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
std::memset(GetVoidPointer(start_partial_page), fill_val, PageSize);
|
std::memset(GetVoidPointer(start_partial_virt), fill_val, PageSize);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Map the page. */
|
/* Map the page. */
|
||||||
const KPageProperties start_map_properties = { test_perm, false, false, DisableMergeAttribute_DisableHead };
|
const KPageProperties start_map_properties = { test_perm, false, false, DisableMergeAttribute_DisableHead };
|
||||||
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, GetHeapPhysicalAddress(start_partial_page), true, start_map_properties, OperationType_Map, false));
|
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true, start_map_properties, OperationType_Map, false));
|
||||||
|
|
||||||
/* Update tracking extents. */
|
/* Update tracking extents. */
|
||||||
cur_mapped_addr += PageSize;
|
cur_mapped_addr += PageSize;
|
||||||
|
@ -3715,19 +3711,20 @@ namespace ams::kern {
|
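Note: the start-partial-page logic above builds a fresh page whose middle comes from the source and whose head and tail are filled, now writing through the page's linear virtual mapping before the physical page is handed to Operate. A stand-alone sketch of assembling such a partial page into a plain buffer:

#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr size_t PageSize = 0x1000;

// Build a partial page: fill [0, offset) and [offset + copy_size, PageSize)
// with fill_val, and copy copy_size bytes of payload in between.
// Precondition: offset + copy_size <= PageSize.
inline void BuildPartialPage(uint8_t *page, size_t offset, const uint8_t *payload,
                             size_t copy_size, uint8_t fill_val) {
    std::memset(page, fill_val, offset);
    std::memcpy(page + offset, payload, copy_size);
    std::memset(page + offset + copy_size, fill_val, PageSize - offset - copy_size);
}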
||||||
}
|
}
|
||||||
|
|
||||||
/* Map the end page, if we have one. */
|
/* Map the end page, if we have one. */
|
||||||
if (end_partial_page != Null<KVirtualAddress>) {
|
if (end_partial_page != Null<KPhysicalAddress>) {
|
||||||
/* Ensure the page holds correct data. */
|
/* Ensure the page holds correct data. */
|
||||||
|
const KVirtualAddress end_partial_virt = GetHeapVirtualAddress(end_partial_page);
|
||||||
if (send) {
|
if (send) {
|
||||||
const size_t copy_size = src_end - mapping_src_end;
|
const size_t copy_size = src_end - mapping_src_end;
|
||||||
std::memcpy(GetVoidPointer(end_partial_page), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr)), copy_size);
|
std::memcpy(GetVoidPointer(end_partial_virt), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr)), copy_size);
|
||||||
std::memset(GetVoidPointer(end_partial_page + copy_size), fill_val, PageSize - copy_size);
|
std::memset(GetVoidPointer(end_partial_virt + copy_size), fill_val, PageSize - copy_size);
|
||||||
} else {
|
} else {
|
||||||
std::memset(GetVoidPointer(end_partial_page), fill_val, PageSize);
|
std::memset(GetVoidPointer(end_partial_virt), fill_val, PageSize);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Map the page. */
|
/* Map the page. */
|
||||||
const KPageProperties map_properties = { test_perm, false, false, (cur_mapped_addr == dst_addr) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
|
const KPageProperties map_properties = { test_perm, false, false, (cur_mapped_addr == dst_addr) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
|
||||||
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, GetHeapPhysicalAddress(end_partial_page), true, map_properties, OperationType_Map, false));
|
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true, map_properties, OperationType_Map, false));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Update memory blocks to reflect our changes */
|
/* Update memory blocks to reflect our changes */
|
||||||
|
@ -4246,7 +4243,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
/* Iterate over the memory. */
|
/* Iterate over the memory. */
|
||||||
auto pg_it = pg.begin();
|
auto pg_it = pg.begin();
|
||||||
KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
|
KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
|
||||||
size_t pg_pages = pg_it->GetNumPages();
|
size_t pg_pages = pg_it->GetNumPages();
|
||||||
|
|
||||||
auto it = m_memory_block_manager.FindIterator(cur_address);
|
auto it = m_memory_block_manager.FindIterator(cur_address);
|
||||||
|
@ -4272,7 +4269,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
/* Advance our physical block. */
|
/* Advance our physical block. */
|
||||||
++pg_it;
|
++pg_it;
|
||||||
pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
|
pg_phys_addr = pg_it->GetAddress();
|
||||||
pg_pages = pg_it->GetNumPages();
|
pg_pages = pg_it->GetNumPages();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4410,7 +4407,7 @@ namespace ams::kern {
|
||||||
} else {
|
} else {
|
||||||
if (cur_valid) {
|
if (cur_valid) {
|
||||||
MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
|
MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
|
||||||
R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize));
|
R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Update tracking variables. */
|
/* Update tracking variables. */
|
||||||
|
@ -4429,7 +4426,7 @@ namespace ams::kern {
|
||||||
/* Add the last block. */
|
/* Add the last block. */
|
||||||
if (cur_valid) {
|
if (cur_valid) {
|
||||||
MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
|
MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
|
||||||
R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), (size - tot_size) / PageSize));
|
R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
MESOSPHERE_ASSERT(pg.GetNumPages() == mapped_size / PageSize);
|
MESOSPHERE_ASSERT(pg.GetNumPages() == mapped_size / PageSize);
|
||||||
|
@ -4457,7 +4454,7 @@ namespace ams::kern {
|
||||||
/* Iterate over the memory we unmapped. */
|
/* Iterate over the memory we unmapped. */
|
||||||
auto it = m_memory_block_manager.FindIterator(cur_address);
|
auto it = m_memory_block_manager.FindIterator(cur_address);
|
||||||
auto pg_it = pg.begin();
|
auto pg_it = pg.begin();
|
||||||
KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
|
KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
|
||||||
size_t pg_pages = pg_it->GetNumPages();
|
size_t pg_pages = pg_it->GetNumPages();
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
|
@ -4479,7 +4476,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
/* Advance our physical block. */
|
/* Advance our physical block. */
|
||||||
++pg_it;
|
++pg_it;
|
||||||
pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
|
pg_phys_addr = pg_it->GetAddress();
|
||||||
pg_pages = pg_it->GetNumPages();
|
pg_pages = pg_it->GetNumPages();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4567,7 +4564,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
/* Clear the new memory. */
|
/* Clear the new memory. */
|
||||||
for (const auto &block : pg) {
|
for (const auto &block : pg) {
|
||||||
std::memset(GetVoidPointer(block.GetAddress()), m_heap_fill_value, block.GetSize());
|
std::memset(GetVoidPointer(GetHeapVirtualAddress(block.GetAddress())), m_heap_fill_value, block.GetSize());
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Map the new memory. */
|
/* Map the new memory. */
|
||||||
|
|
|
@ -51,7 +51,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
/* Clear all pages in the memory. */
|
/* Clear all pages in the memory. */
|
||||||
for (const auto &block : m_page_group) {
|
for (const auto &block : m_page_group) {
|
||||||
std::memset(GetVoidPointer(block.GetAddress()), 0, block.GetSize());
|
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress())), 0, block.GetSize());
|
||||||
}
|
}
|
||||||
|
|
||||||
return ResultSuccess();
|
return ResultSuccess();
|
||||||
|
|
|
@ -43,7 +43,7 @@ namespace ams::kern::svc {
|
||||||
ON_SCOPE_EXIT { contig_range.Close(); };
|
ON_SCOPE_EXIT { contig_range.Close(); };
|
||||||
|
|
||||||
/* Adjust to remain within range. */
|
/* Adjust to remain within range. */
|
||||||
KVirtualAddress operate_address = contig_range.address;
|
KVirtualAddress operate_address = KMemoryLayout::GetLinearVirtualAddress(contig_range.address);
|
||||||
size_t operate_size = contig_range.size;
|
size_t operate_size = contig_range.size;
|
||||||
if (cur_address < address) {
|
if (cur_address < address) {
|
||||||
operate_address += (address - cur_address);
|
operate_address += (address - cur_address);
|
||||||
|
|
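Note: with MemoryRange now carrying a physical address, the svc path above converts it to its linear virtual mapping and then clips the operate window so it stays inside the caller's [address, address + size) range. A minimal sketch of that clamping, extrapolated from the adjustment shown above and hedged accordingly:

#include <cstddef>
#include <cstdint>

struct OperateRange { uint64_t address; size_t size; };

// Clip a contiguous range starting at cur_address so that only the part
// overlapping [request_address, request_address + request_size) is operated on.
// Precondition: the two ranges overlap.
inline OperateRange ClampToRequest(uint64_t range_address, size_t range_size,
                                   uint64_t cur_address, uint64_t request_address,
                                   size_t request_size) {
    uint64_t operate_address = range_address;
    size_t   operate_size    = range_size;
    if (cur_address < request_address) {
        operate_address += (request_address - cur_address);
        operate_size    -= (request_address - cur_address);
    }
    const uint64_t request_end = request_address + request_size;
    const uint64_t cur_end     = cur_address + range_size;
    if (request_end < cur_end) {
        operate_size -= (cur_end - request_end);
    }
    return { operate_address, operate_size };
}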