commit 92521eed2a (parent cbc73f4407)

    kern: implement through kip decompression

12 changed files with 427 additions and 42 deletions
@@ -157,6 +157,8 @@ namespace ams::kern::arch::arm64::cpu {
     void FlushEntireDataCacheSharedForInit();
     void FlushEntireDataCacheLocalForInit();

+    void FlushEntireDataCache();
+
     Result InvalidateDataCache(void *addr, size_t size);
     Result StoreDataCache(const void *addr, size_t size);
     Result FlushDataCache(const void *addr, size_t size);
@@ -44,7 +44,6 @@ namespace ams::kern::arch::arm64 {

                 BlockType_Count,
             };
-
             static_assert(L3BlockSize == PageSize);
             static constexpr size_t ContiguousPageSize = L3ContiguousBlockSize;

@@ -79,6 +78,16 @@ namespace ams::kern::arch::arm64 {
                         MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
                 }
             }
+
+            static constexpr size_t GetSmallerAlignment(size_t alignment) {
+                MESOSPHERE_ASSERT(alignment > L3BlockSize);
+                return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) - 1));
+            }
+
+            static constexpr size_t GetLargerAlignment(size_t alignment) {
+                MESOSPHERE_ASSERT(alignment < L1BlockSize);
+                return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
+            }
         protected:
             virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
             virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
@@ -164,7 +173,25 @@ namespace ams::kern::arch::arm64 {
             Result Finalize();
         private:
             Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
-            Result Unmap(KProcessAddress virt_addr, size_t num_pages, KPageGroup *pg, PageLinkedList *page_list, bool force, bool reuse_ll);
+            Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);
+
+            Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
+                switch (page_size) {
+                    case L1BlockSize:
+#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+                    case L2TegraSmmuBlockSize:
+#endif
+                    case L2BlockSize:
+                    case L3BlockSize:
+                        break;
+                    case L2ContiguousBlockSize:
+                    case L3ContiguousBlockSize:
+                        entry_template.SetContiguous(true);
+                        break;
+                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+                }
+
+                return this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll);
+            }

             Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);

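The two alignment helpers walk a ladder of supported mapping granularities one BlockType step at a time, and the inlined `Map` overload sets the ARMv8 "contiguous" hint bit on the entry template for the two contiguous-run granularities before delegating to the plain mapper. A minimal host-side sketch of the ladder idea, using illustrative 4KB-granule sizes rather than Atmosphere's actual constants:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Illustrative 4KB-granule AArch64 granularities, smallest to largest.
    // (An L3 contiguous run is 16 adjacent L3 entries; an L2 block is 2MB.)
    constexpr size_t kBlockSizes[] = {
        0x1000,      // L3 page (4KB)
        0x10000,     // L3 contiguous run (64KB)
        0x200000,    // L2 block (2MB)
        0x4000000,   // L2 contiguous run (64MB)
        0x40000000,  // L1 block (1GB)
    };
    constexpr size_t kNumBlockTypes = sizeof(kBlockSizes) / sizeof(kBlockSizes[0]);

    constexpr size_t GetBlockType(size_t alignment) {
        size_t type = 0;
        while (kBlockSizes[type] != alignment) { ++type; }
        return type;
    }

    constexpr size_t GetSmallerAlignment(size_t a) { return kBlockSizes[GetBlockType(a) - 1]; }
    constexpr size_t GetLargerAlignment(size_t a)  { return kBlockSizes[GetBlockType(a) + 1]; }

    int main() {
        static_assert(GetSmallerAlignment(0x200000) == 0x10000, "one step down from 2MB");
        static_assert(GetLargerAlignment(0x10000) == 0x200000, "one step up from 64KB");
        std::printf("2MB steps down to %zx, 64KB steps up to %zx\n",
                    GetSmallerAlignment(0x200000), GetLargerAlignment(0x10000));
    }

The asserts at both ends of the kernel's helpers mirror this: stepping below the smallest page or above the largest block is a programming error, not a runtime condition.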
@@ -35,6 +35,18 @@ namespace ams::kern::arch::arm64 {
                 return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
             }

+            Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
+                return this->page_table.UnmapPages(address, num_pages, state);
+            }
+
+            Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+                return this->page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
+            }
+
+            Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
+                return this->page_table.UnmapPageGroup(address, pg, state);
+            }
+
             bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
                 return this->page_table.GetPhysicalAddress(out, address);
             }
@@ -127,6 +127,7 @@ namespace ams::kern {
             }

             Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const;
+            Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const;
     };

 }
@@ -180,7 +180,7 @@ namespace ams::kern {
             }

             constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const {
-                return this->GetAddress() <= address && address < this->GetLastAddress();
+                return this->GetAddress() <= address && address <= this->GetLastAddress();
             }

             constexpr ALWAYS_INLINE bool IsDerivedFrom(u32 type) const {
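The one-character change from `<` to `<=` is the actual fix here: `GetLastAddress()` is the inclusive last byte of the region (end minus one, the same convention the new `KBlockInfo::GetLastAddress()` below uses), so an exclusive comparison silently rejected the final byte of every region. A tiny sketch of the convention, with illustrative types:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Region {
        uintptr_t address;
        size_t    size;
        // One-past-the-end address: the first byte NOT in the region.
        uintptr_t GetEndAddress()  const { return address + size; }
        // Last byte that IS in the region; inclusive bounds need <=.
        uintptr_t GetLastAddress() const { return GetEndAddress() - 1; }
        bool Contains(uintptr_t a) const { return address <= a && a <= GetLastAddress(); }
    };

    int main() {
        Region r{0x1000, 0x1000};
        assert(r.Contains(0x1FFF));   // last byte: wrongly rejected with '<'
        assert(!r.Contains(0x2000));  // one-past-the-end stays excluded
    }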
@@ -231,6 +231,7 @@ namespace ams::kern {
         };
     private:
         using TreeType = util::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
+    public:
         using value_type = TreeType::value_type;
         using size_type = TreeType::size_type;
         using difference_type = TreeType::difference_type;

@@ -276,7 +277,7 @@ namespace ams::kern {
             MESOSPHERE_INIT_ABORT();
         }

-        DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) {
+        DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const {
             DerivedRegionExtents extents;

             MESOSPHERE_INIT_ABORT_UNLESS(extents.first_region == nullptr);
@@ -479,12 +480,24 @@ namespace ams::kern {
                 return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelStack);
             }

+            static NOINLINE KMemoryRegion &GetTempRegion() {
+                return *GetVirtualMemoryRegionTree().FindFirstRegionByType(KMemoryRegionType_KernelTemp);
+            }
+
             static NOINLINE KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) {
                 return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address));
             }

-            static NOINLINE bool IsHeapPhysicalAddress(KMemoryRegion **out, KPhysicalAddress address) {
-                if (auto it = GetPhysicalLinearMemoryRegionTree().FindContainingRegion(GetInteger(address)); it != GetPhysicalLinearMemoryRegionTree().end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
+            static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, const KMemoryRegion *hint = nullptr) {
+                auto &tree = GetPhysicalLinearMemoryRegionTree();
+                KMemoryRegionTree::const_iterator it = tree.end();
+                if (hint != nullptr) {
+                    it = tree.iterator_to(*hint);
+                }
+                if (it == tree.end() || !it->Contains(GetInteger(address))) {
+                    it = tree.FindContainingRegion(GetInteger(address));
+                }
+                if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
                     if (out) {
                         *out = std::addressof(*it);
                     }
@@ -493,6 +506,72 @@ namespace ams::kern {
                 return false;
             }

+            static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, size_t size, const KMemoryRegion *hint = nullptr) {
+                auto &tree = GetPhysicalLinearMemoryRegionTree();
+                KMemoryRegionTree::const_iterator it = tree.end();
+                if (hint != nullptr) {
+                    it = tree.iterator_to(*hint);
+                }
+                if (it == tree.end() || !it->Contains(GetInteger(address))) {
+                    it = tree.FindContainingRegion(GetInteger(address));
+                }
+                if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel)) {
+                    const uintptr_t last_address = GetInteger(address) + size - 1;
+                    do {
+                        if (last_address <= it->GetLastAddress()) {
+                            if (out) {
+                                *out = std::addressof(*it);
+                            }
+                            return true;
+                        }
+                        it++;
+                    } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_DramNonKernel));
+                }
+                return false;
+            }
+
+            static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, const KMemoryRegion *hint = nullptr) {
+                auto &tree = GetVirtualLinearMemoryRegionTree();
+                KMemoryRegionTree::const_iterator it = tree.end();
+                if (hint != nullptr) {
+                    it = tree.iterator_to(*hint);
+                }
+                if (it == tree.end() || !it->Contains(GetInteger(address))) {
+                    it = tree.FindContainingRegion(GetInteger(address));
+                }
+                if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) {
+                    if (out) {
+                        *out = std::addressof(*it);
+                    }
+                    return true;
+                }
+                return false;
+            }
+
+            static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, size_t size, const KMemoryRegion *hint = nullptr) {
+                auto &tree = GetVirtualLinearMemoryRegionTree();
+                KMemoryRegionTree::const_iterator it = tree.end();
+                if (hint != nullptr) {
+                    it = tree.iterator_to(*hint);
+                }
+                if (it == tree.end() || !it->Contains(GetInteger(address))) {
+                    it = tree.FindContainingRegion(GetInteger(address));
+                }
+                if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool)) {
+                    const uintptr_t last_address = GetInteger(address) + size - 1;
+                    do {
+                        if (last_address <= it->GetLastAddress()) {
+                            if (out) {
+                                *out = std::addressof(*it);
+                            }
+                            return true;
+                        }
+                        it++;
+                    } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionType_VirtualDramManagedPool));
+                }
+                return false;
+            }
+
             static NOINLINE std::tuple<size_t, size_t> GetTotalAndKernelMemorySizes() {
                 size_t total_size = 0, kernel_size = 0;
                 for (auto it = GetPhysicalMemoryRegionTree().cbegin(); it != GetPhysicalMemoryRegionTree().cend(); it++) {
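The new overloads all share one lookup pattern: try the caller's cached region first, fall back to a containing-region search in the red-black tree, and for ranged queries walk forward while consecutive regions stay in the right pool. A minimal sketch of the hint-first idea, with a plain std::map standing in for the intrusive tree (the names and map-based tree are illustrative, not Atmosphere's):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <map>

    struct Region { uintptr_t start; size_t size; bool is_heap; };

    // Key = region start address; a stand-in for the intrusive red-black tree.
    using RegionTree = std::map<uintptr_t, Region>;

    bool Contains(const Region &r, uintptr_t a) {
        return r.start <= a && a <= r.start + r.size - 1;
    }

    // Hint-first lookup: check the cached region before searching the tree.
    const Region *FindHeapRegion(const RegionTree &tree, uintptr_t addr, const Region *hint) {
        if (hint != nullptr && Contains(*hint, addr)) {
            return hint->is_heap ? hint : nullptr;  // cache hit: no tree walk
        }
        auto it = tree.upper_bound(addr);           // first region starting past addr
        if (it == tree.begin()) { return nullptr; }
        --it;                                       // candidate containing region
        if (Contains(it->second, addr) && it->second.is_heap) { return &it->second; }
        return nullptr;
    }

    int main() {
        RegionTree tree;
        tree[0x80000000] = {0x80000000, 0x10000000, true};
        tree[0x90000000] = {0x90000000, 0x10000000, false};

        const Region *cache = nullptr;
        cache = FindHeapRegion(tree, 0x80001000, cache);  // tree search fills the cache
        cache = FindHeapRegion(tree, 0x80002000, cache);  // served from the hint
        std::printf("heap region at %lx\n", static_cast<unsigned long>(cache->start));
    }

Since page-table code tends to touch the same region many times in a row (sequential page mappings), the hint turns most lookups into a single bounds check.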
@@ -38,6 +38,7 @@ namespace ams::kern {
             constexpr size_t GetNumPages() const { return this->num_pages; }
             constexpr size_t GetSize() const { return this->GetNumPages() * PageSize; }
             constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
+            constexpr KVirtualAddress GetLastAddress() const { return this->GetEndAddress() - 1; }

             constexpr bool IsEquivalentTo(const KBlockInfo &rhs) const {
                 return this->address == rhs.address && this->num_pages == rhs.num_pages;
@@ -124,9 +124,9 @@ namespace ams::kern {
             bool enable_aslr;
             KMemoryBlockSlabManager *memory_block_slab_manager;
             KBlockInfoManager *block_info_manager;
-            KMemoryRegion *cached_physical_linear_region;
-            KMemoryRegion *cached_physical_heap_region;
-            KMemoryRegion *cached_virtual_managed_pool_dram_region;
+            const KMemoryRegion *cached_physical_linear_region;
+            const KMemoryRegion *cached_physical_heap_region;
+            const KMemoryRegion *cached_virtual_heap_region;
             MemoryFillValue heap_fill_value;
             MemoryFillValue ipc_fill_value;
             MemoryFillValue stack_fill_value;

@@ -137,7 +137,7 @@ namespace ams::kern {
                   kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(),
                   max_heap_size(), max_physical_memory_size(), general_lock(), map_physical_memory_lock(), impl(), memory_block_manager(),
                   allocate_option(), address_space_size(), is_kernel(), enable_aslr(), memory_block_slab_manager(), block_info_manager(),
-                  cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_managed_pool_dram_region(),
+                  cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
                   heap_fill_value(), ipc_fill_value(), stack_fill_value()
             {
                 /* ... */
@@ -172,10 +172,27 @@ namespace ams::kern {
             bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); }

             bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
-                if (this->cached_physical_heap_region && this->cached_physical_heap_region->Contains(GetInteger(phys_addr))) {
-                    return true;
-                }
-                return KMemoryLayout::IsHeapPhysicalAddress(&this->cached_physical_heap_region, phys_addr);
+                MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+                return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, this->cached_physical_heap_region);
+            }
+
+            bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+                MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+                return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, size, this->cached_physical_heap_region);
+            }
+
+            bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
+                MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+                return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, this->cached_virtual_heap_region);
+            }
+
+            bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
+                MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+                return KMemoryLayout::IsHeapVirtualAddress(std::addressof(this->cached_virtual_heap_region), virt_addr, size, this->cached_virtual_heap_region);
             }

             bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
@@ -193,6 +210,7 @@ namespace ams::kern {

             Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const;
             Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties);
+            Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);

             NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
         public:

@@ -203,6 +221,10 @@ namespace ams::kern {
             Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
                 return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm);
             }
+
+            Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
+            Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
+            Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state);
         public:
             static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress addr) {
                 return KMemoryLayout::GetLinearVirtualAddress(addr);
@@ -333,6 +333,10 @@ namespace ams::kern::arch::arm64::cpu {
             return PerformCacheOperationBySetWayLocal<true>(FlushDataCacheLineBySetWayImpl);
         }

+        void FlushEntireDataCache() {
+            return PerformCacheOperationBySetWayShared<false>(FlushDataCacheLineBySetWayImpl);
+        }
+
         Result InvalidateDataCache(void *addr, size_t size) {
             KScopedCoreMigrationDisable dm;
             const uintptr_t start = reinterpret_cast<uintptr_t>(addr);
@@ -58,7 +58,7 @@ namespace ams::kern::arch::arm64 {
         }

         if (operation == OperationType_Unmap) {
-            MESOSPHERE_TODO("operation == OperationType_Unmap");
+            return this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll);
         } else {
             auto entry_template = this->GetEntryTemplate(properties);

@@ -175,7 +175,7 @@ namespace ams::kern::arch::arm64 {
         return ResultSuccess();
     }

-    Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, KPageGroup *pg, PageLinkedList *page_list, bool force, bool reuse_ll) {
+    Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) {
         MESOSPHERE_TODO_IMPLEMENT();
     }
@@ -188,13 +188,57 @@ namespace ams::kern::arch::arm64 {

         size_t remaining_pages = num_pages;

+        /* Map the pages, using a guard to ensure we don't leak. */
+        {
+            auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, nullptr, page_list, true, true)); };
+
             if (num_pages < ContiguousPageSize / PageSize) {
-                auto guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, nullptr, page_list, true, true)); };
-                R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll));
-                guard.Cancel();
+                R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, L3BlockSize, page_list, reuse_ll));
+                remaining_pages -= num_pages;
+                virt_addr += num_pages * PageSize;
+                phys_addr += num_pages * PageSize;
             } else {
-                MESOSPHERE_TODO("Contiguous mapping");
-                (void)remaining_pages;
+                /* Map the fractional part of the pages. */
+                size_t alignment;
+                for (alignment = ContiguousPageSize; (virt_addr & (alignment - 1)) == (phys_addr & (alignment - 1)); alignment = GetLargerAlignment(alignment)) {
+                    /* Check if this would be our last map. */
+                    const size_t pages_to_map = (alignment - (virt_addr & (alignment - 1))) & (alignment - 1);
+                    if (pages_to_map + (alignment / PageSize) > remaining_pages) {
+                        break;
+                    }
+
+                    /* Map pages, if we should. */
+                    if (pages_to_map > 0) {
+                        R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, GetSmallerAlignment(alignment), page_list, reuse_ll));
+                        remaining_pages -= pages_to_map;
+                        virt_addr += pages_to_map * PageSize;
+                        phys_addr += pages_to_map * PageSize;
+                    }
+
+                    /* Don't go further than L1 block. */
+                    if (alignment == L1BlockSize) {
+                        break;
+                    }
+                }
+
+                while (remaining_pages > 0) {
+                    /* Select the next smallest alignment. */
+                    alignment = GetSmallerAlignment(alignment);
+                    MESOSPHERE_ASSERT((virt_addr & (alignment - 1)) == 0);
+                    MESOSPHERE_ASSERT((phys_addr & (alignment - 1)) == 0);
+
+                    /* Map pages, if we should. */
+                    const size_t pages_to_map = util::AlignDown(remaining_pages, alignment / PageSize);
+                    if (pages_to_map > 0) {
+                        R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, alignment, page_list, reuse_ll));
+                        remaining_pages -= pages_to_map;
+                        virt_addr += pages_to_map * PageSize;
+                        phys_addr += pages_to_map * PageSize;
+                    }
+                }
             }
+
+            map_guard.Cancel();
         }

         /* Perform what coalescing we can. */
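The contiguous-mapping path is an alignment ramp: starting from the 64KB contiguous granule, it maps just enough of the head to reach the next larger block boundary (possible only while the virtual and physical addresses are misaligned by the same amount), then drains the remainder back down through successively smaller block sizes. A host-side simulation of the splitting decisions, reusing the illustrative block-size ladder from the earlier sketch and making the byte-to-page conversion explicit (this models the logic; it is not Atmosphere's code):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t kPageSize = 0x1000;
    constexpr size_t kBlockSizes[] = {0x1000, 0x10000, 0x200000, 0x4000000, 0x40000000};
    constexpr size_t kNumBlocks = sizeof(kBlockSizes) / sizeof(kBlockSizes[0]);

    // Print the (pages, block size) chunks a range would be mapped with.
    void SimulateMapContiguous(uintptr_t virt, uintptr_t phys, size_t num_pages) {
        size_t idx = 1;  // start at the 64KB contiguous granule
        // Ramp up: map the misaligned head with the next smaller block size,
        // as long as virt and phys share the same misalignment.
        while ((virt & (kBlockSizes[idx] - 1)) == (phys & (kBlockSizes[idx] - 1))) {
            const size_t align      = kBlockSizes[idx];
            const size_t head_bytes = (align - (virt & (align - 1))) & (align - 1);
            const size_t head_pages = head_bytes / kPageSize;  // bytes -> pages
            // Stop if mapping this head plus one full block of this size
            // would already exceed what remains.
            if (head_pages + align / kPageSize > num_pages) { break; }
            if (head_pages > 0) {
                std::printf("map %5zu pages with %9zx-byte blocks\n", head_pages, kBlockSizes[idx - 1]);
                num_pages -= head_pages; virt += head_bytes; phys += head_bytes;
            }
            if (idx == kNumBlocks - 1) { break; }  // don't step past the largest block
            ++idx;
        }
        // Ramp down: drain what remains with successively smaller blocks.
        while (num_pages > 0) {
            --idx;  // next smaller alignment; virt/phys are aligned to it by now
            const size_t align = kBlockSizes[idx];
            const size_t chunk = num_pages - (num_pages % (align / kPageSize));
            if (chunk > 0) {
                std::printf("map %5zu pages with %9zx-byte blocks\n", chunk, align);
                num_pages -= chunk; virt += chunk * kPageSize; phys += chunk * kPageSize;
            }
        }
    }

    int main() {
        // 0x5F0 pages (~6MB) starting 0x10000 past a 2MB boundary; phys is
        // misaligned by the same amount, so larger blocks become reachable.
        SimulateMapContiguous(0x40010000, 0x80010000, 0x5F0);
    }

Tracing the example: 496 pages are mapped with 64KB runs to reach the 2MB boundary, and the remaining 1024 pages are mapped as 2MB blocks, so the TLB holds a handful of entries instead of 1520.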
@@ -52,21 +52,34 @@ namespace ams::kern {
             /* Parse process parameters and reserve memory. */
             ams::svc::CreateProcessParameter params;
             MESOSPHERE_R_ABORT_UNLESS(reader.MakeCreateProcessParameter(std::addressof(params), true));
-            MESOSPHERE_TODO("Reserve memory");
+            MESOSPHERE_LOG("Reserving %zx for process %zu\n", params.code_num_pages * PageSize, i);
+            MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, params.code_num_pages * PageSize));

-            /* Create the process, and ensure we don't leak pages. */
+            /* Create the process. */
+            KProcess *new_process = nullptr;
             {
+                /* Declare page group to use for process memory. */
+                KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager()));
+
                 /* Allocate memory for the process. */
-                MESOSPHERE_TODO("Allocate memory for the process");
+                auto &mm = Kernel::GetMemoryManager();
+                const auto pool = static_cast<KMemoryManager::Pool>(reader.UsesSecureMemory() ? KMemoryManager::Pool_System : KSystemControl::GetInitialProcessBinaryPool());
+                MESOSPHERE_R_ABORT_UNLESS(mm.Allocate(std::addressof(pg), params.code_num_pages, KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront)));

-                /* Map the process's memory into the temporary region. */
-                MESOSPHERE_TODO("Map the process's page group");
+                {
+                    /* Ensure that we do not leak pages. */
+                    KScopedPageGroup spg(pg);

-                /* Load the process. */
-                MESOSPHERE_TODO("Load the process");
+                    /* Map the process's memory into the temporary region. */
+                    const auto &temp_region = KMemoryLayout::GetTempRegion();
+                    KProcessAddress temp_address = Null<KProcessAddress>;
+                    MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));

-                /* Unmap the temporary mapping. */
-                MESOSPHERE_TODO("Unmap the process's page group");
+                    /* Load the process. */
+                    MESOSPHERE_R_ABORT_UNLESS(reader.Load(temp_address, params));

-                /* Create a KProcess object. */
-                MESOSPHERE_TODO("Create a KProcess");
+                    /* Unmap the temporary mapping. */
+                    MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, pg, KMemoryState_Kernel));
+
+                    /* Create a KProcess object. */
+                    MESOSPHERE_TODO("Create a KProcess");

@@ -74,6 +87,7 @@ namespace ams::kern {
                     /* Initialize the process. */
                     MESOSPHERE_TODO("Initialize the process");
                 }
+            }

             /* Set the process's memory permissions. */
             MESOSPHERE_TODO("Set process's memory permissions");

@@ -82,7 +96,7 @@ namespace ams::kern {
             MESOSPHERE_TODO("Register the process");

             /* Save the process info. */
-            infos[i].process    = /* TODO */ nullptr;
+            infos[i].process    = new_process;
             infos[i].stack_size = reader.GetStackSize();
             infos[i].priority   = reader.GetPriority();
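The `KScopedPageGroup` in this block is what keeps the freshly allocated pages from leaking if anything between allocation and process creation aborts: it references the group's pages on construction and dereferences them on destruction unless the close is cancelled once ownership has safely moved. A minimal sketch of that RAII shape, with hypothetical `PageGroup`/`ScopedPageGroup` types standing in for the kernel's classes:

    #include <cstdio>

    // Stand-in for a page group: Open() takes a reference on every page in
    // the group, Close() drops it (freeing pages whose refcount hits zero).
    struct PageGroup {
        void Open()  { std::puts("pages referenced"); }
        void Close() { std::puts("pages released"); }
    };

    // RAII wrapper: pages are released on every exit path unless the caller
    // cancels the close after ownership has safely moved elsewhere.
    class ScopedPageGroup {
    public:
        explicit ScopedPageGroup(PageGroup &pg) : pg_(&pg) { pg_->Open(); }
        ~ScopedPageGroup() { if (pg_ != nullptr) { pg_->Close(); } }
        void CancelClose() { pg_ = nullptr; }  // keep the reference alive
    private:
        PageGroup *pg_;
    };

    int main() {
        PageGroup pg;
        {
            ScopedPageGroup spg(pg);
            // ... map, load, unmap; any early return or abort here still
            // runs ~ScopedPageGroup and releases the pages ...
        }  // prints "pages released"
    }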
@@ -17,6 +17,63 @@

 namespace ams::kern {

+    namespace {
+
+        struct BlzSegmentFlags {
+            using Offset = util::BitPack16::Field<0, 12, u32>;
+            using Size   = util::BitPack16::Field<Offset::Next, 4, u32>;
+        };
+
+        NOINLINE void BlzUncompress(void *_end) {
+            /* Parse the footer, endian agnostic. */
+            static_assert(sizeof(u32) == 4);
+            static_assert(sizeof(u16) == 2);
+            static_assert(sizeof(u8) == 1);
+
+            u8 *end = static_cast<u8 *>(_end);
+            const u32 total_size      = (end[-12] << 0) | (end[-11] << 8) | (end[-10] << 16) | (end[- 9] << 24);
+            const u32 footer_size     = (end[- 8] << 0) | (end[- 7] << 8) | (end[- 6] << 16) | (end[- 5] << 24);
+            const u32 additional_size = (end[- 4] << 0) | (end[- 3] << 8) | (end[- 2] << 16) | (end[- 1] << 24);
+
+            /* Prepare to decompress. */
+            u8 *cmp_start = end - total_size;
+            u32 cmp_ofs = total_size - footer_size;
+            u32 out_ofs = total_size + additional_size;
+
+            /* Decompress. */
+            while (out_ofs) {
+                u8 control = cmp_start[--cmp_ofs];
+
+                /* Each bit in the control byte is a flag indicating compressed or not compressed. */
+                for (size_t i = 0; i < 8 && out_ofs; ++i, control <<= 1) {
+                    if (control & 0x80) {
+                        /* NOTE: Nintendo does not check if it's possible to decompress. */
+                        /* As such, we will leave the following as a debug assertion, and not a release assertion. */
+                        MESOSPHERE_ASSERT(cmp_ofs >= sizeof(u16));
+                        cmp_ofs -= sizeof(u16);
+
+                        /* Extract segment bounds. */
+                        const util::BitPack16 seg_flags{static_cast<u16>((cmp_start[cmp_ofs] << 0) | (cmp_start[cmp_ofs + 1] << 8))};
+                        const u32 seg_ofs  = seg_flags.Get<BlzSegmentFlags::Offset>() + 3;
+                        const u32 seg_size = std::min(seg_flags.Get<BlzSegmentFlags::Size>(), out_ofs) + 3;
+
+                        /* Copy the data. */
+                        out_ofs -= seg_size;
+                        for (size_t j = 0; j < seg_size; j++) {
+                            cmp_start[out_ofs + j] = cmp_start[out_ofs + seg_ofs + j];
+                        }
+                    } else {
+                        /* NOTE: Nintendo does not check if it's possible to copy. */
+                        /* As such, we will leave the following as a debug assertion, and not a release assertion. */
+                        MESOSPHERE_ASSERT(cmp_ofs >= sizeof(u8));
+                        cmp_start[--out_ofs] = cmp_start[--cmp_ofs];
+                    }
+                }
+            }
+        }
+
+    }
+
     Result KInitialProcessReader::MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const {
         /* Get and validate addresses/sizes. */
         const uintptr_t rx_address = this->kip_header->GetRxAddress();
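BLZ is a backwards LZ77: decompression runs from the end of the buffer toward the start, in place, so the expanded image can overwrite compressed bytes it has already consumed. Everything `BlzUncompress` needs sits in a 12-byte footer at the end of the blob. A small host-side sketch of just the footer parse, mirroring the endian-agnostic byte reads above (the helper names are illustrative):

    #include <cstdint>
    #include <cstdio>

    struct BlzFooter {
        uint32_t total_size;       // compressed size, footer included
        uint32_t footer_size;      // bytes at the tail that are metadata, not data
        uint32_t additional_size;  // how much larger the uncompressed image is
    };

    // Read a little-endian u32 byte by byte, so the parse works on any host.
    static uint32_t ReadLe32(const uint8_t *p) {
        return static_cast<uint32_t>(p[0]) | (p[1] << 8) | (p[2] << 16) |
               (static_cast<uint32_t>(p[3]) << 24);
    }

    // 'end' points one past the last byte of the compressed blob.
    static BlzFooter ParseBlzFooter(const uint8_t *end) {
        return BlzFooter{ReadLe32(end - 12), ReadLe32(end - 8), ReadLe32(end - 4)};
    }

    int main() {
        // Crafted footer only (no payload): total 0x100, footer 0xC, grows by 0x40.
        uint8_t blob[12] = {0x00, 0x01, 0, 0, 0x0C, 0, 0, 0, 0x40, 0, 0, 0};
        const BlzFooter f = ParseBlzFooter(blob + sizeof(blob));
        // The decompressor reads within [end - total_size, end - footer_size)
        // and expands the data to total_size + additional_size bytes.
        std::printf("uncompressed size = 0x%x\n",
                    static_cast<unsigned>(f.total_size + f.additional_size));
    }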
@@ -56,7 +113,7 @@ namespace ams::kern {

         /* Set fields in parameter. */
         out->code_address   = map_start + start_address;
-        out->code_num_pages = util::AlignUp(end_address - start_address, PageSize);
+        out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize;
         out->program_id     = this->kip_header->GetProgramId();
         out->version        = this->kip_header->GetVersion();
         out->flags          = 0;
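The fix matters because `code_num_pages` is a page count, not a byte count: aligning the segment span up to a page boundary yields bytes, and only the division converts to pages. Worked through with a hypothetical 4KB page size:

    // Hypothetical values, PageSize = 0x1000 (4KB):
    //   end_address - start_address        = 0x12345  (bytes of code)
    //   AlignUp(0x12345, 0x1000)           = 0x13000  (bytes, page-rounded)
    //   AlignUp(0x12345, 0x1000) / 0x1000  = 0x13     (pages)
    // Without the division, every later "code_num_pages * PageSize"
    // computation would overstate the reservation by a factor of PageSize.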
@@ -85,4 +142,49 @@ namespace ams::kern {
         return ResultSuccess();
     }

+    Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const {
+        /* Clear memory at the address. */
+        std::memset(GetVoidPointer(address), 0, params.code_num_pages);
+
+        /* Prepare to layout the data. */
+        const KProcessAddress rx_address = address + this->kip_header->GetRxAddress();
+        const KProcessAddress ro_address = address + this->kip_header->GetRoAddress();
+        const KProcessAddress rw_address = address + this->kip_header->GetRwAddress();
+        const u8 *rx_binary = reinterpret_cast<const u8 *>(this->kip_header + 1);
+        const u8 *ro_binary = rx_binary + this->kip_header->GetRxCompressedSize();
+        const u8 *rw_binary = ro_binary + this->kip_header->GetRoCompressedSize();
+
+        /* Copy text. */
+        if (util::AlignUp(this->kip_header->GetRxSize(), PageSize)) {
+            std::memcpy(GetVoidPointer(rx_address), rx_binary, this->kip_header->GetRxCompressedSize());
+            if (this->kip_header->IsRxCompressed()) {
+                BlzUncompress(GetVoidPointer(rx_address + this->kip_header->GetRxCompressedSize()));
+            }
+        }
+
+        /* Copy rodata. */
+        if (util::AlignUp(this->kip_header->GetRoSize(), PageSize)) {
+            std::memcpy(GetVoidPointer(ro_address), ro_binary, this->kip_header->GetRoCompressedSize());
+            if (this->kip_header->IsRoCompressed()) {
+                BlzUncompress(GetVoidPointer(ro_address + this->kip_header->GetRoCompressedSize()));
+            }
+        }
+
+        /* Copy rwdata. */
+        if (util::AlignUp(this->kip_header->GetRwSize(), PageSize)) {
+            std::memcpy(GetVoidPointer(rw_address), rw_binary, this->kip_header->GetRwCompressedSize());
+            if (this->kip_header->IsRwCompressed()) {
+                BlzUncompress(GetVoidPointer(rw_address + this->kip_header->GetRwCompressedSize()));
+            }
+        }
+
+        /* Flush caches. */
+        /* NOTE: official kernel does an entire cache flush by set/way here, which is incorrect as other cores are online. */
+        /* We will simply flush by virtual address, since that's what ARM says is correct to do. */
+        MESOSPHERE_R_ABORT_UNLESS(cpu::FlushDataCache(GetVoidPointer(address), params.code_num_pages * PageSize));
+        cpu::InvalidateEntireInstructionCache();
+
+        return ResultSuccess();
+    }
+
 }
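Note how each segment is staged for in-place expansion: the compressed blob is copied to the start of the segment's destination, and `BlzUncompress` receives a pointer just past that copy, from which it reads the footer and expands backwards until the segment spans the full uncompressed size. A sketch of the layout arithmetic with hypothetical sizes:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical .text segment of a KIP.
        const uintptr_t rx_address      = 0x100000;  // destination of the segment
        const uint32_t  compressed_size = 0x8000;    // bytes stored in the KIP
        const uint32_t  additional_size = 0x3000;    // growth recorded in the footer

        // memcpy places the compressed blob at the destination start...
        const uintptr_t blob_end = rx_address + compressed_size;
        // ...and the decompressor is handed the end of that blob.
        std::printf("BlzUncompress(%#lx)\n", static_cast<unsigned long>(blob_end));

        // It expands backwards, so the first uncompressed byte lands at
        // rx_address and the last just before blob_end + additional_size.
        std::printf("uncompressed: [%#lx, %#lx)\n",
                    static_cast<unsigned long>(rx_address),
                    static_cast<unsigned long>(blob_end + additional_size));
    }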
@@ -52,7 +52,7 @@ namespace ams::kern {

         this->cached_physical_linear_region = nullptr;
         this->cached_physical_heap_region   = nullptr;
-        this->cached_virtual_managed_pool_dram_region = nullptr;
+        this->cached_virtual_heap_region    = nullptr;

         /* Initialize our implementation. */
         this->impl.InitializeForKernel(table, start, end);
@@ -285,6 +285,8 @@ namespace ams::kern {
     }

     Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
         /* Create a page group to hold the pages we allocate. */
         KPageGroup pg(this->block_info_manager);

@@ -303,6 +305,38 @@ namespace ams::kern {
         return this->Operate(page_list, address, num_pages, std::addressof(pg), properties, OperationType_MapGroup, false);
     }

+    Result KPageTableBase::MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        /* Note the current address, so that we can iterate. */
+        const KProcessAddress start_address = address;
+        KProcessAddress cur_address = address;
+
+        /* Ensure that we clean up on failure. */
+        auto mapping_guard = SCOPE_GUARD {
+            MESOSPHERE_ABORT_UNLESS(!reuse_ll);
+            if (cur_address != start_address) {
+                const KPageProperties unmap_properties = {};
+                MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, start_address, (cur_address - start_address) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
+            }
+        };
+
+        /* Iterate, mapping all pages in the group. */
+        for (const auto &block : pg) {
+            /* We only allow mapping pages in the heap, and we require we're mapping non-empty blocks. */
+            MESOSPHERE_ABORT_UNLESS(block.GetAddress() < block.GetLastAddress());
+            MESOSPHERE_ABORT_UNLESS(IsHeapVirtualAddress(block.GetAddress(), block.GetSize()));
+
+            /* Map and advance. */
+            R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), GetHeapPhysicalAddress(block.GetAddress()), true, properties, OperationType_Map, reuse_ll));
+            cur_address += block.GetSize();
+        }
+
+        /* We succeeded! */
+        mapping_guard.Cancel();
+        return ResultSuccess();
+    }
+
     Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
         MESOSPHERE_ASSERT(util::IsAligned(alignment, PageSize) && alignment >= PageSize);

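`MapPageGroupImpl` relies on the scope-guard idiom for partial-failure rollback: if mapping any block fails, the guard unmaps exactly the already-mapped prefix `[start_address, cur_address)`, and on success `Cancel()` disarms it. Atmosphere's `SCOPE_GUARD` macro comes from its support libraries; the sketch below is a generic stand-alone equivalent of the idiom:

    #include <cstdio>
    #include <utility>

    // Minimal scope guard: runs F on scope exit unless cancelled.
    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F f) : f_(std::move(f)), active_(true) {}
        ~ScopeGuard() { if (active_) { f_(); } }
        void Cancel() { active_ = false; }
        ScopeGuard(const ScopeGuard &) = delete;
        ScopeGuard &operator=(const ScopeGuard &) = delete;
    private:
        F f_;
        bool active_;
    };

    bool MapBlock(int i) { return i < 2; }   // pretend the third block fails
    void UnmapPrefix(int n) { std::printf("rolled back %d blocks\n", n); }

    bool MapAllBlocks() {
        int mapped = 0;
        ScopeGuard guard([&] { UnmapPrefix(mapped); });  // rollback on any early exit
        for (int i = 0; i < 4; ++i) {
            if (!MapBlock(i)) { return false; }          // guard unmaps the prefix
            ++mapped;
        }
        guard.Cancel();                                  // success: keep the mappings
        return true;
    }

    int main() { std::printf("ok=%d\n", MapAllBlocks()); }

Because the rollback state (`mapped` here, `cur_address` in the kernel) is captured by reference, the guard always undoes exactly as much work as was actually done.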
@@ -318,7 +352,7 @@ namespace ams::kern {
         R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
         MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), alignment));
         MESOSPHERE_ASSERT(this->Contains(addr, num_pages * PageSize, state));
-        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_All, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
+        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));

         /* Create an update allocator. */
         KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
@@ -342,4 +376,47 @@ namespace ams::kern {
         *out_addr = addr;
         return ResultSuccess();
     }

+    Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
+        MESOSPHERE_TODO_IMPLEMENT();
+    }
+
+    Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+        /* Ensure this is a valid map request. */
+        const size_t num_pages = pg.GetNumPages();
+        R_UNLESS(this->Contains(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
+        R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory());
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Find a random address to map at. */
+        KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, 0, this->GetNumGuardPages());
+        R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
+        MESOSPHERE_ASSERT(this->Contains(addr, num_pages * PageSize, state));
+        MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Perform mapping operation. */
+        const KPageProperties properties = { perm, state == KMemoryState_Io, false, false };
+        R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
+
+        /* Update the blocks. */
+        this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None);
+
+        /* We successfully mapped the pages. */
+        *out_addr = addr;
+        return ResultSuccess();
+    }
+
+    Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
+        MESOSPHERE_TODO_IMPLEMENT();
+    }
+
 }