Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-11-15 09:36:35 +00:00)

kern: eliminate use of KMemoryInfo, shuffle KMemoryBlock fields

commit e3ee4cb544 (parent 93d4656d0b)
4 changed files with 196 additions and 259 deletions
@@ -200,7 +200,8 @@ namespace ams::kern {
KMemoryBlockDisableMergeAttribute_DeviceLeft = (1u << 1),
KMemoryBlockDisableMergeAttribute_IpcLeft = (1u << 2),
KMemoryBlockDisableMergeAttribute_Locked = (1u << 3),
KMemoryBlockDisableMergeAttribute_DeviceRight = (1u << 4),
/* ... */
KMemoryBlockDisableMergeAttribute_DeviceRight = (1u << 5),

KMemoryBlockDisableMergeAttribute_AllLeft = KMemoryBlockDisableMergeAttribute_Normal | KMemoryBlockDisableMergeAttribute_DeviceLeft | KMemoryBlockDisableMergeAttribute_IpcLeft | KMemoryBlockDisableMergeAttribute_Locked,
KMemoryBlockDisableMergeAttribute_AllRight = KMemoryBlockDisableMergeAttribute_DeviceRight,

@@ -288,18 +289,18 @@ namespace ams::kern {

class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
private:
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
KProcessAddress m_address;
size_t m_num_pages;
KMemoryState m_memory_state;
u16 m_ipc_lock_count;
u16 m_device_use_count;
u16 m_ipc_disable_merge_count;
KMemoryPermission m_permission;
KMemoryPermission m_original_permission;
KMemoryAttribute m_attribute;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
KProcessAddress m_address;
u32 m_num_pages;
KMemoryState m_memory_state;
u16 m_ipc_lock_count;
u16 m_ipc_disable_merge_count;
u16 m_device_use_count;
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
public:
static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) {
if (lhs.GetAddress() < rhs.GetAddress()) {

@@ -343,6 +344,10 @@ namespace ams::kern {
return m_ipc_disable_merge_count;
}

constexpr u16 GetDeviceUseCount() const {
return m_device_use_count;
}

constexpr KMemoryPermission GetPermission() const {
return m_permission;
}

@@ -374,7 +379,7 @@ namespace ams::kern {
public:
explicit KMemoryBlock() { /* ... */ }

constexpr KMemoryBlock(util::ConstantInitializeTag, KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr)
constexpr KMemoryBlock(util::ConstantInitializeTag, KProcessAddress addr, u32 np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr)
: util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(util::ConstantInitialize), m_device_disable_merge_left_count(),
m_device_disable_merge_right_count(), m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p), m_original_permission(KMemoryPermission_None),

@@ -383,7 +388,7 @@ namespace ams::kern {
/* ... */
}

constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) {
constexpr void Initialize(KProcessAddress addr, u32 np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) {
MESOSPHERE_ASSERT_THIS();
m_device_disable_merge_left_count = 0;
m_device_disable_merge_right_count = 0;
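Note on the field shuffle above: the new ordering puts the wide members first, narrows m_num_pages from size_t to u32, and groups all the u16 counters together. A minimal standalone sketch of why member ordering and the narrower count matter for struct size; the stand-in type widths below are assumptions for illustration, not taken from the kernel headers:

/* --- Illustrative sketch (not part of this commit); type widths are assumed. --- */
#include <cstdint>
#include <cstdio>

using Address = uint64_t;   /* stand-in for KProcessAddress */
using State   = uint32_t;   /* stand-in for KMemoryState */
using Small   = uint8_t;    /* stand-in for the byte-sized permission/attribute enums */

/* Roughly the old ordering: wide members interleaved with u16 counters leave padding holes. */
struct OldOrder {
    uint16_t device_disable_merge_left_count;
    uint16_t device_disable_merge_right_count;
    Address  address;
    size_t   num_pages;
    State    memory_state;
    uint16_t ipc_lock_count;
    uint16_t device_use_count;
    uint16_t ipc_disable_merge_count;
    Small    permission, original_permission, attribute, disable_merge_attribute;
};

/* Roughly the new ordering: num_pages narrowed to u32 and the u16 counters packed together. */
struct NewOrder {
    Address  address;
    uint32_t num_pages;
    State    memory_state;
    uint16_t ipc_lock_count;
    uint16_t ipc_disable_merge_count;
    uint16_t device_use_count;
    uint16_t device_disable_merge_left_count;
    uint16_t device_disable_merge_right_count;
    Small    permission, original_permission, attribute, disable_merge_attribute;
};

int main() {
    /* On a typical LP64 target this prints 40 vs 32 bytes. */
    std::printf("old: %zu, new: %zu\n", sizeof(OldOrder), sizeof(NewOrder));
    return 0;
}
/* --- end sketch --- */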
@@ -318,7 +318,7 @@ namespace ams::kern {
R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr));
}

Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryState(KMemoryBlockManager::const_iterator it, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
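The overload set above replaces the const KMemoryInfo & parameter with a KMemoryBlockManager::const_iterator, so callers hand the checker a position in the block tree rather than a copied snapshot. A self-contained sketch of the same calling convention, with std::map standing in for the intrusive red-black tree (names here are illustrative, not the kernel's):

/* --- Illustrative sketch (not part of this commit) --- */
#include <cstdint>
#include <map>

struct Block { uint32_t state; uint32_t permission; uint32_t attribute; };
using BlockTree = std::map<uint64_t, Block>;   /* key: block base address */

/* Old style: the caller copies the block's fields into a snapshot and passes that around. */
bool CheckSnapshot(const Block &info, uint32_t state_mask, uint32_t state) {
    return (info.state & state_mask) == state;
}

/* New style: pass the iterator and read the fields in place, with no temporary copy. */
bool CheckIterator(BlockTree::const_iterator it, uint32_t state_mask, uint32_t state) {
    return (it->second.state & state_mask) == state;
}

int main() {
    BlockTree tree;
    tree[0x1000] = Block{0x5, 0x3, 0x0};

    const auto it = tree.find(0x1000);
    const Block snapshot = it->second;            /* the extra copy the old style paid for */
    const bool a = CheckSnapshot(snapshot, 0x1, 0x1);
    const bool b = CheckIterator(it, 0x1, 0x1);   /* same answer, no snapshot */
    return (a && b) ? 0 : 1;
}
/* --- end sketch --- */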
@@ -54,11 +54,11 @@ namespace ams::kern {
return "Unknown ";
}

constexpr const char *GetMemoryPermissionString(const KMemoryInfo &info) {
if (info.m_state == KMemoryState_Free) {
constexpr const char *GetMemoryPermissionString(const KMemoryBlock &block) {
if (block.GetState() == KMemoryState_Free) {
return " ";
} else {
switch (info.m_permission) {
switch (block.GetPermission()) {
case KMemoryPermission_UserReadExecute:
return "r-x";
case KMemoryPermission_UserRead:

@@ -71,19 +71,19 @@ namespace ams::kern {
}
}

void DumpMemoryInfo(const KMemoryInfo &info) {
const char *state = GetMemoryStateName(info.m_state);
const char *perm = GetMemoryPermissionString(info);
const uintptr_t start = info.GetAddress();
const uintptr_t end = info.GetLastAddress();
const size_t kb = info.GetSize() / 1_KB;
void DumpMemoryBlock(const KMemoryBlock &block) {
const char *state = GetMemoryStateName(block.GetState());
const char *perm = GetMemoryPermissionString(block);
const uintptr_t start = GetInteger(block.GetAddress());
const uintptr_t end = GetInteger(block.GetLastAddress());
const size_t kb = block.GetSize() / 1_KB;

const char l = (info.m_attribute & KMemoryAttribute_Locked) ? 'L' : '-';
const char i = (info.m_attribute & KMemoryAttribute_IpcLocked) ? 'I' : '-';
const char d = (info.m_attribute & KMemoryAttribute_DeviceShared) ? 'D' : '-';
const char u = (info.m_attribute & KMemoryAttribute_Uncached) ? 'U' : '-';
const char l = (block.GetAttribute() & KMemoryAttribute_Locked) ? 'L' : '-';
const char i = (block.GetAttribute() & KMemoryAttribute_IpcLocked) ? 'I' : '-';
const char d = (block.GetAttribute() & KMemoryAttribute_DeviceShared) ? 'D' : '-';
const char u = (block.GetAttribute() & KMemoryAttribute_Uncached) ? 'U' : '-';

MESOSPHERE_LOG("0x%10lx - 0x%10lx (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, info.m_ipc_lock_count, info.m_device_use_count);
MESOSPHERE_LOG("0x%10lx - 0x%10lx (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, block.GetIpcLockCount(), block.GetDeviceUseCount());
}

}

@@ -123,15 +123,14 @@ namespace ams::kern {
const KProcessAddress region_end = region_start + region_num_pages * PageSize;
const KProcessAddress region_last = region_end - 1;
for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); it++) {
const KMemoryInfo info = it->GetMemoryInfo();
if (region_last < info.GetAddress()) {
if (region_last < it->GetAddress()) {
break;
}
if (info.m_state != KMemoryState_Free) {
if (it->GetState() != KMemoryState_Free) {
continue;
}

KProcessAddress area = (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress();
KProcessAddress area = (it->GetAddress() <= GetInteger(region_start)) ? region_start : it->GetAddress();
area += guard_pages * PageSize;

const KProcessAddress offset_area = util::AlignDown(GetInteger(area), alignment) + offset;

@@ -140,7 +139,7 @@ namespace ams::kern {
const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize;
const KProcessAddress area_last = area_end - 1;

if (info.GetAddress() <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= info.GetLastAddress()) {
if (GetInteger(it->GetAddress()) <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= GetInteger(it->GetLastAddress())) {
return area;
}
}

@@ -171,7 +170,7 @@ namespace ams::kern {
it = prev;
}

if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
if (address + num_pages * PageSize < it->GetEndAddress()) {
break;
}
}

@@ -189,43 +188,39 @@ namespace ams::kern {

while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(state, perm, attr)) {
/* If we already have the right properties, just advance. */
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
if (cur_address + remaining_size < it->GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize;
cur_address = it->GetEndAddress();
}
} else {
/* If we need to, create a new block before and insert it. */
if (cur_info.GetAddress() != GetInteger(cur_address)) {
if (it->GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();

it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;

cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
cur_address = it->GetAddress();
}

/* If we need to, create a new block after and insert it. */
if (cur_info.GetSize() > remaining_size) {
if (it->GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();

it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);

cur_info = it->GetMemoryInfo();
}

/* Update block state. */
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
}
it++;
}
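KMemoryBlockManager::Update() above (and the UpdateIfMatch/UpdateLock/UpdateAttribute loops in the following hunks, which share the same shape) now splits, updates, and advances through the iterator itself instead of refreshing a cur_info copy after every Split and insert. A much-reduced sketch of that split/update/advance walk over half-open ranges, using std::map in place of the intrusive tree (an illustration only, not the kernel algorithm verbatim):

/* --- Illustrative sketch (not part of this commit) --- */
#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>

struct Range { uint64_t size; int state; };
using RangeMap = std::map<uint64_t, Range>;   /* key: range base address */

/* Set `state` on [address, address + size), splitting the ranges that straddle either end. */
void Update(RangeMap &m, uint64_t address, uint64_t size, int state) {
    uint64_t cur = address;
    const uint64_t last = address + size;
    auto it = std::prev(m.upper_bound(cur));               /* range containing cur */

    while (cur < last) {
        /* Split in front of cur if this range starts before it. */
        if (it->first != cur) {
            const uint64_t head = cur - it->first;
            const Range tail{it->second.size - head, it->second.state};
            it->second.size = head;
            it = m.emplace(cur, tail).first;
        }
        /* Split after the requested end if this range runs past it. */
        if (it->first + it->second.size > last) {
            const uint64_t keep = last - it->first;
            const Range tail{it->second.size - keep, it->second.state};
            it->second.size = keep;
            m.emplace(last, tail);
        }
        /* Update in place, then advance off this range; no copied snapshot to refresh. */
        it->second.state = state;
        cur = it->first + it->second.size;
        ++it;
    }
}

int main() {
    RangeMap m{{0x0000, {0x10000, 0}}};                     /* one range covering everything */
    Update(m, 0x2000, 0x3000, 1);
    for (const auto &[base, r] : m) {
        std::printf("0x%05llx +0x%05llx state=%d\n",
                    (unsigned long long)base, (unsigned long long)r.size, r.state);
    }
    return 0;
}
/* --- end sketch --- */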
@@ -245,42 +240,38 @@ namespace ams::kern {

while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(test_state, test_perm, test_attr) && !it->HasProperties(state, perm, attr)) {
/* If we need to, create a new block before and insert it. */
if (cur_info.GetAddress() != GetInteger(cur_address)) {
if (it->GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();

it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;

cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
cur_address = it->GetAddress();
}

/* If we need to, create a new block after and insert it. */
if (cur_info.GetSize() > remaining_size) {
if (it->GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();

it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);

cur_info = it->GetMemoryInfo();
}

/* Update block state. */
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
} else {
/* If we already have the right properties, just advance. */
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
if (cur_address + remaining_size < it->GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize;
cur_address = it->GetEndAddress();
}
}
it++;

@@ -302,34 +293,30 @@ namespace ams::kern {

while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();

/* If we need to, create a new block before and insert it. */
if (cur_info.m_address != GetInteger(cur_address)) {
if (it->GetAddress() != cur_address) {
KMemoryBlock *new_block = allocator->Allocate();

it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;

cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
cur_address = it->GetAddress();
}

if (cur_info.GetSize() > remaining_size) {
if (it->GetSize() > remaining_size) {
/* If we need to, create a new block after and insert it. */
KMemoryBlock *new_block = allocator->Allocate();

it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);

cur_info = it->GetMemoryInfo();
}

/* Call the locked update function. */
(std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address, cur_info.GetEndAddress() == end_address);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
(std::addressof(*it)->*lock_func)(perm, it->GetAddress() == address, it->GetEndAddress() == end_address);
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
it++;
}

@@ -347,43 +334,39 @@ namespace ams::kern {

while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();

if ((it->GetAttribute() & mask) != attr) {
/* If we need to, create a new block before and insert it. */
if (cur_info.GetAddress() != GetInteger(cur_address)) {
if (it->GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();

it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;

cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
cur_address = it->GetAddress();
}

/* If we need to, create a new block after and insert it. */
if (cur_info.GetSize() > remaining_size) {
if (it->GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();

it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);

cur_info = it->GetMemoryInfo();
}

/* Update block state. */
it->UpdateAttribute(mask, attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
} else {
/* If we already have the right attributes, just advance. */
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
if (cur_address + remaining_size < it->GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize;
cur_address = it->GetEndAddress();
}
}
it++;

@@ -401,8 +384,6 @@ namespace ams::kern {
auto it = m_memory_block_tree.cbegin();
auto prev = it++;
while (it != m_memory_block_tree.cend()) {
const KMemoryInfo prev_info = prev->GetMemoryInfo();
const KMemoryInfo cur_info = it->GetMemoryInfo();

/* Sequential blocks which can be merged should be merged. */
if (prev->CanMergeWith(*it)) {

@@ -410,17 +391,17 @@ namespace ams::kern {
}

/* Sequential blocks should be sequential. */
if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
if (prev->GetEndAddress() != it->GetAddress()) {
return false;
}

/* If the block is ipc locked, it must have a count. */
if ((cur_info.m_attribute & KMemoryAttribute_IpcLocked) != 0 && cur_info.m_ipc_lock_count == 0) {
if ((it->GetAttribute() & KMemoryAttribute_IpcLocked) != 0 && it->GetIpcLockCount() == 0) {
return false;
}

/* If the block is device shared, it must have a count. */
if ((cur_info.m_attribute & KMemoryAttribute_DeviceShared) != 0 && cur_info.m_device_use_count == 0) {
if ((it->GetAttribute() & KMemoryAttribute_DeviceShared) != 0 && it->GetDeviceUseCount() == 0) {
return false;
}

@@ -430,14 +411,13 @@ namespace ams::kern {

/* Our loop will miss checking the last block, potentially, so check it. */
if (prev != m_memory_block_tree.cend()) {
const KMemoryInfo prev_info = prev->GetMemoryInfo();
/* If the block is ipc locked, it must have a count. */
if ((prev_info.m_attribute & KMemoryAttribute_IpcLocked) != 0 && prev_info.m_ipc_lock_count == 0) {
if ((prev->GetAttribute() & KMemoryAttribute_IpcLocked) != 0 && prev->GetIpcLockCount() == 0) {
return false;
}

/* If the block is device shared, it must have a count. */
if ((prev_info.m_attribute & KMemoryAttribute_DeviceShared) != 0 && prev_info.m_device_use_count == 0) {
if ((prev->GetAttribute() & KMemoryAttribute_DeviceShared) != 0 && prev->GetDeviceUseCount() == 0) {
return false;
}
}

@@ -450,7 +430,7 @@ namespace ams::kern {
void KMemoryBlockManager::DumpBlocks() const {
/* Dump each block. */
for (const auto &block : m_memory_block_tree) {
DumpMemoryInfo(block.GetMemoryInfo());
DumpMemoryBlock(block);
}
}
}
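The validation hunks above rewrite the tree-consistency checks to read attributes and counts through the block accessors instead of prev_info/cur_info copies. A toy version of just the "sequential blocks should be sequential" invariant over the same stand-in map (illustration only):

/* --- Illustrative sketch (not part of this commit) --- */
#include <cstdint>
#include <map>

struct Range { uint64_t size; };
using RangeMap = std::map<uint64_t, Range>;   /* key: range base address */

/* Every range must end exactly where its successor begins: no gaps, no overlap. */
bool CheckSequential(const RangeMap &m) {
    if (m.empty()) {
        return true;
    }
    auto it = m.begin();
    auto prev = it++;
    while (it != m.end()) {
        if (prev->first + prev->second.size != it->first) {
            return false;
        }
        prev = it++;
    }
    return true;
}

int main() {
    const RangeMap good{{0x0000, {0x2000}}, {0x2000, {0x3000}}};
    const RangeMap bad {{0x0000, {0x1000}}, {0x2000, {0x3000}}};   /* gap at 0x1000 */
    return (CheckSequential(good) && !CheckSequential(bad)) ? 0 : 1;
}
/* --- end sketch --- */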
@@ -664,11 +664,11 @@ namespace ams::kern {
}
}

Result KPageTableBase::CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
Result KPageTableBase::CheckMemoryState(KMemoryBlockManager::const_iterator it, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
/* Validate the states match expectation. */
R_UNLESS((info.m_state & state_mask) == state, svc::ResultInvalidCurrentMemory());
R_UNLESS((info.m_permission & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((info.m_attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetState() & state_mask) == state, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetPermission() & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetAttribute() & attr_mask) == attr, svc::ResultInvalidCurrentMemory());

R_SUCCEED();
}

@@ -679,28 +679,26 @@ namespace ams::kern {
/* Get information about the first block. */
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();

/* If the start address isn't aligned, we need a block. */
const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) ? 1 : 0;

while (true) {
/* Validate against the provided masks. */
R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
R_TRY(this->CheckMemoryState(it, state_mask, state, perm_mask, perm, attr_mask, attr));

/* Break once we're done. */
if (last_addr <= info.GetLastAddress()) {
if (last_addr <= it->GetLastAddress()) {
break;
}

/* Advance our iterator. */
it++;
MESOSPHERE_ASSERT(it != m_memory_block_manager.cend());
info = it->GetMemoryInfo();
}

/* If the end address isn't aligned, we need a block. */
const size_t blocks_for_end_align = (util::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
const size_t blocks_for_end_align = (util::AlignUp(GetInteger(addr) + size, PageSize) != it->GetEndAddress()) ? 1 : 0;

if (out_blocks_needed != nullptr) {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
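The hunk above also reports out_blocks_needed for a later update: one block if the queried start is not already a block boundary, one if the end is not. A tiny standalone illustration of that counting rule over half-open ranges (a simplification; the page-alignment handling in the kernel version is omitted here):

/* --- Illustrative sketch (not part of this commit) --- */
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>

struct Range { uint64_t size; };
using RangeMap = std::map<uint64_t, Range>;   /* key: range base address */

/* Blocks an update of [address, address + size) would add: one split where the start
   falls inside a range, and one where the end does. */
size_t BlocksNeeded(const RangeMap &m, uint64_t address, uint64_t size) {
    const uint64_t last = address + size - 1;
    const auto first_it = std::prev(m.upper_bound(address));
    const auto last_it  = std::prev(m.upper_bound(last));

    const size_t for_start = (first_it->first != address) ? 1 : 0;
    const size_t for_end   = (last_it->first + last_it->second.size != last + 1) ? 1 : 0;
    return for_start + for_end;
}

int main() {
    const RangeMap m{{0x0000, {0x2000}}, {0x2000, {0x3000}}, {0x5000, {0xB000}}};
    const size_t a = BlocksNeeded(m, 0x2000, 0x3000);   /* exactly covers a range: 0 */
    const size_t b = BlocksNeeded(m, 0x2800, 0x4000);   /* cuts into two ranges: 2 */
    return (a == 0 && b == 2) ? 0 : 1;
}
/* --- end sketch --- */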
@@ -712,31 +710,27 @@ namespace ams::kern {
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

/* Get information about the first block. */
KMemoryInfo info = it->GetMemoryInfo();

/* Validate all blocks in the range have correct state. */
const KMemoryState first_state = info.m_state;
const KMemoryPermission first_perm = info.m_permission;
const KMemoryAttribute first_attr = info.m_attribute;
const KMemoryState first_state = it->GetState();
const KMemoryPermission first_perm = it->GetPermission();
const KMemoryAttribute first_attr = it->GetAttribute();
while (true) {
/* Validate the current block. */
R_UNLESS(info.m_state == first_state, svc::ResultInvalidCurrentMemory());
R_UNLESS(info.m_permission == first_perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory());
R_UNLESS(it->GetState() == first_state, svc::ResultInvalidCurrentMemory());
R_UNLESS(it->GetPermission() == first_perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetAttribute() | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory());

/* Validate against the provided masks. */
R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
R_TRY(this->CheckMemoryState(it, state_mask, state, perm_mask, perm, attr_mask, attr));

/* Break once we're done. */
if (last_addr <= info.GetLastAddress()) {
if (last_addr <= it->GetLastAddress()) {
break;
}

/* Advance our iterator. */
it++;
MESOSPHERE_ASSERT(it != m_memory_block_manager.cend());
info = it->GetMemoryInfo();
}

/* Write output state. */

@@ -752,7 +746,7 @@ namespace ams::kern {

/* If the end address isn't aligned, we need a block. */
if (out_blocks_needed != nullptr) {
const size_t blocks_for_end_align = (util::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) ? 1 : 0;
const size_t blocks_for_end_align = (util::AlignDown(GetInteger(last_addr), PageSize) + PageSize != it->GetEndAddress()) ? 1 : 0;
*out_blocks_needed = blocks_for_end_align;
}

@@ -1176,17 +1170,14 @@ namespace ams::kern {
{
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
while (true) {
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();

/* Check if the memory has code flag. */
if ((info.GetState() & KMemoryState_FlagCode) != 0) {
if ((it->GetState() & KMemoryState_FlagCode) != 0) {
any_code_pages = true;
break;
}

/* Check if we're done. */
if (dst_address + size - 1 <= info.GetLastAddress()) {
if (dst_address + size - 1 <= it->GetLastAddress()) {
break;
}

@@ -1355,14 +1346,13 @@ namespace ams::kern {
const size_t random_offset = KSystemControl::GenerateRandomRange(0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * alignment;
const KProcessAddress candidate = util::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;

KMemoryInfo info;
ams::svc::PageInfo page_info;
MESOSPHERE_R_ABORT_UNLESS(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), candidate));
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(candidate);
MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end());

if (info.m_state != KMemoryState_Free) { continue; }
if (it->GetState() != KMemoryState_Free) { continue; }
if (!(region_start <= candidate)) { continue; }
if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; }
if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= info.GetLastAddress())) { continue; }
if (!(it->GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; }
if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= it->GetLastAddress())) { continue; }
if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= region_start + region_num_pages * PageSize - 1)) { continue; }

address = candidate;

@@ -1393,10 +1383,8 @@ namespace ams::kern {
/* Iterate, counting blocks with the desired state. */
size_t total_size = 0;
for (KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(m_address_space_start); it != m_memory_block_manager.end(); ++it) {
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
if (info.GetState() == state) {
total_size += info.GetSize();
if (it->GetState() == state) {
total_size += it->GetSize();
}
}

@@ -1488,17 +1476,14 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();

/* Determine the range to map. */
KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address));
const KProcessAddress map_end_address = std::min(info.GetEndAddress(), GetInteger(end_address));
KProcessAddress map_address = std::max(GetInteger(it->GetAddress()), GetInteger(start_address));
const KProcessAddress map_end_address = std::min(GetInteger(it->GetEndAddress()), GetInteger(end_address));
MESOSPHERE_ABORT_UNLESS(map_end_address != map_address);

/* Determine if we should disable head merge. */
const bool disable_head_merge = info.GetAddress() >= GetInteger(start_address) && (info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Normal) != 0;
const KPageProperties map_properties = { info.GetPermission(), false, false, disable_head_merge ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
const bool disable_head_merge = it->GetAddress() >= GetInteger(start_address) && (it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Normal) != 0;
const KPageProperties map_properties = { it->GetPermission(), false, false, disable_head_merge ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };

/* While we have pages to map, map them. */
size_t map_pages = (map_end_address - map_address) / PageSize;

@@ -1527,7 +1512,7 @@ namespace ams::kern {
}

/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
break;
}

@@ -2032,19 +2017,18 @@ namespace ams::kern {
address = util::AlignDown(GetInteger(address), PageSize);

/* Verify that we can query the address. */
KMemoryInfo info;
ams::svc::PageInfo page_info;
R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address));
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
R_UNLESS(it != m_memory_block_manager.end(), svc::ResultInvalidCurrentMemory());

/* Check the memory state. */
R_TRY(this->CheckMemoryState(info, KMemoryState_FlagCanQueryPhysical, KMemoryState_FlagCanQueryPhysical, KMemoryPermission_UserReadExecute, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
R_TRY(this->CheckMemoryState(it, KMemoryState_FlagCanQueryPhysical, KMemoryState_FlagCanQueryPhysical, KMemoryPermission_UserReadExecute, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));

/* Prepare to traverse. */
KPhysicalAddress phys_addr;
size_t phys_size;

KProcessAddress virt_addr = info.GetAddress();
KProcessAddress end_addr = info.GetEndAddress();
KProcessAddress virt_addr = it->GetAddress();
KProcessAddress end_addr = it->GetEndAddress();

/* Perform traversal. */
{

@@ -3854,27 +3838,25 @@ namespace ams::kern {
/* Iterate, mapping as needed. */
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
while (true) {
const KMemoryInfo info = it->GetMemoryInfo();

/* Validate the current block. */
R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, test_attr_mask, KMemoryAttribute_None));
R_TRY(this->CheckMemoryState(it, test_state, test_state, test_perm, test_perm, test_attr_mask, KMemoryAttribute_None));

if (mapping_src_start < mapping_src_end && GetInteger(mapping_src_start) < info.GetEndAddress() && info.GetAddress() < GetInteger(mapping_src_end)) {
const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) ? info.GetAddress() : GetInteger(mapping_src_start);
const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() : GetInteger(mapping_src_end);
if (mapping_src_start < mapping_src_end && GetInteger(mapping_src_start) < GetInteger(it->GetEndAddress()) && GetInteger(it->GetAddress()) < GetInteger(mapping_src_end)) {
const auto cur_start = it->GetAddress() >= GetInteger(mapping_src_start) ? GetInteger(it->GetAddress()) : GetInteger(mapping_src_start);
const auto cur_end = mapping_src_last >= GetInteger(it->GetLastAddress()) ? GetInteger(it->GetEndAddress()) : GetInteger(mapping_src_end);
const size_t cur_size = cur_end - cur_start;

if (info.GetAddress() < GetInteger(mapping_src_start)) {
if (GetInteger(it->GetAddress()) < GetInteger(mapping_src_start)) {
++blocks_needed;
}
if (mapping_src_last < info.GetLastAddress()) {
if (mapping_src_last < GetInteger(it->GetLastAddress())) {
++blocks_needed;
}

/* Set the permissions on the block, if we need to. */
if ((info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != src_perm) {
const DisableMergeAttribute head_body_attr = (GetInteger(mapping_src_start) >= info.GetAddress()) ? DisableMergeAttribute_DisableHeadAndBody : DisableMergeAttribute_None;
const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end)) ? DisableMergeAttribute_DisableTail : DisableMergeAttribute_None;
if ((it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != src_perm) {
const DisableMergeAttribute head_body_attr = (GetInteger(mapping_src_start) >= GetInteger(it->GetAddress())) ? DisableMergeAttribute_DisableHeadAndBody : DisableMergeAttribute_None;
const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end)) ? DisableMergeAttribute_DisableTail : DisableMergeAttribute_None;
const KPageProperties properties = { src_perm, false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}

@@ -3884,7 +3866,7 @@ namespace ams::kern {
}

/* If the block is at the end, we're done. */
if (aligned_src_last <= info.GetLastAddress()) {
if (aligned_src_last <= GetInteger(it->GetLastAddress())) {
break;
}

@@ -4248,56 +4230,50 @@ namespace ams::kern {
const auto mapped_last = mapped_end - 1;

/* Get current and next iterators. */
KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = start_it;
KMemoryBlockManager::const_iterator cur_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = cur_it;
++next_it;

/* Get the current block info. */
KMemoryInfo cur_info = start_it->GetMemoryInfo();

/* Create tracking variables. */
KProcessAddress cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
bool first = cur_info.GetIpcDisableMergeCount() == 1 && (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
KProcessAddress cur_address = cur_it->GetAddress();
size_t cur_size = cur_it->GetSize();
bool cur_perm_eq = cur_it->GetPermission() == cur_it->GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_it->GetIpcLockCount() == 1;
bool first = cur_it->GetIpcDisableMergeCount() == 1 && (cur_it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;

while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
/* Check that we have a next block. */
MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());

/* Get the next info. */
const KMemoryInfo next_info = next_it->GetMemoryInfo();

/* Check if we can consolidate the next block's permission set with the current one. */
const bool next_perm_eq = next_info.GetPermission() == next_info.GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
const bool next_perm_eq = next_it->GetPermission() == next_it->GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_it->GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_it->GetOriginalPermission() == next_it->GetOriginalPermission()) {
/* We can consolidate the reprotection for the current and next block into a single call. */
cur_size += next_info.GetSize();
cur_size += next_it->GetSize();
} else {
/* We have to operate on the current block. */
if ((cur_needs_set_perm || first) && !cur_perm_eq) {
const KPageProperties properties = { cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
const KPageProperties properties = { cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
}

/* Advance. */
cur_address = next_info.GetAddress();
cur_size = next_info.GetSize();
cur_address = next_it->GetAddress();
cur_size = next_it->GetSize();
first = false;
}

/* Advance. */
cur_info = next_info;
cur_perm_eq = next_perm_eq;
cur_needs_set_perm = next_needs_set_perm;
++next_it;

cur_it = next_it++;
}

/* Process the last block. */
if ((first || cur_needs_set_perm) && !cur_perm_eq) {
const KPageProperties properties = { cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
const KPageProperties properties = { cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
}
}
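The IPC reprotection loop above now carries cur_it/next_it instead of cur_info/next_info snapshots and advances with cur_it = next_it++. A compact sketch of that coalescing pattern, where adjacent ranges with the same permission are merged into one Apply() call (std::map again stands in for the intrusive tree; Apply is a hypothetical callback):

/* --- Illustrative sketch (not part of this commit) --- */
#include <cstdint>
#include <cstdio>
#include <map>

struct Range { uint64_t size; int perm; };
using RangeMap = std::map<uint64_t, Range>;   /* key: range base address */

/* Walk the tree with a current and a next iterator, coalescing neighbours with equal
   permissions so that Apply() runs once per run instead of once per range. */
template <typename F>
void ApplyCoalesced(const RangeMap &m, F &&Apply) {
    auto cur_it  = m.begin();
    auto next_it = cur_it;
    ++next_it;

    uint64_t run_base = cur_it->first;
    uint64_t run_size = cur_it->second.size;

    while (next_it != m.end()) {
        if (next_it->second.perm == cur_it->second.perm) {
            run_size += next_it->second.size;          /* extend the pending run */
        } else {
            Apply(run_base, run_size, cur_it->second.perm);
            run_base = next_it->first;                 /* start a new run */
            run_size = next_it->second.size;
        }
        cur_it = next_it++;                            /* advance both iterators */
    }
    Apply(run_base, run_size, cur_it->second.perm);    /* flush the final run */
}

int main() {
    const RangeMap m{
        { 0x0000, { 0x1000, 3 } },
        { 0x1000, { 0x2000, 3 } },   /* same perm as the previous range: coalesced */
        { 0x3000, { 0x1000, 1 } },
    };
    ApplyCoalesced(m, [](uint64_t base, uint64_t size, int perm) {
        std::printf("apply 0x%04llx +0x%04llx perm=%d\n",
                    (unsigned long long)base, (unsigned long long)size, perm);
    });
    return 0;
}
/* --- end sketch --- */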
@@ -4306,41 +4282,37 @@ namespace ams::kern {
/* Iterate, reprotecting as needed. */
{
/* Get current and next iterators. */
KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = start_it;
KMemoryBlockManager::const_iterator cur_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = cur_it;
++next_it;

/* Validate the current block. */
KMemoryInfo cur_info = start_it->GetMemoryInfo();
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(cur_it, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));

/* Create tracking variables. */
KProcessAddress cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
bool first = cur_info.GetIpcDisableMergeCount() == 1 && (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
KProcessAddress cur_address = cur_it->GetAddress();
size_t cur_size = cur_it->GetSize();
bool cur_perm_eq = cur_it->GetPermission() == cur_it->GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_it->GetIpcLockCount() == 1;
bool first = cur_it->GetIpcDisableMergeCount() == 1 && (cur_it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;

while ((cur_address + cur_size - 1) < mapping_last) {
/* Check that we have a next block. */
MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());

/* Get the next info. */
const KMemoryInfo next_info = next_it->GetMemoryInfo();

/* Validate the next block. */
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(next_info, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(next_it, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));

/* Check if we can consolidate the next block's permission set with the current one. */
const bool next_perm_eq = next_info.GetPermission() == next_info.GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
const bool next_perm_eq = next_it->GetPermission() == next_it->GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_it->GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_it->GetOriginalPermission() == next_it->GetOriginalPermission()) {
/* We can consolidate the reprotection for the current and next block into a single call. */
cur_size += next_info.GetSize();
cur_size += next_it->GetSize();
} else {
/* We have to operate on the current block. */
if ((cur_needs_set_perm || first) && !cur_perm_eq) {
const KPageProperties properties = { cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None };
const KPageProperties properties = { cur_needs_set_perm ? cur_it->GetOriginalPermission() : cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}

@@ -4348,24 +4320,24 @@ namespace ams::kern {
mapped_size += cur_size;

/* Advance. */
cur_address = next_info.GetAddress();
cur_size = next_info.GetSize();
cur_address = next_it->GetAddress();
cur_size = next_it->GetSize();
first = false;
}

/* Advance. */
cur_info = next_info;
cur_perm_eq = next_perm_eq;
cur_needs_set_perm = next_needs_set_perm;
++next_it;

cur_it = next_it++;
}

/* Process the last block. */
const auto lock_count = cur_info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
const auto lock_count = cur_it->GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
const DisableMergeAttribute head_body_attr = first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
const DisableMergeAttribute tail_attr = lock_count == 1 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
const KPageProperties properties = { cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
const KPageProperties properties = { cur_needs_set_perm ? cur_it->GetOriginalPermission() : cur_it->GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}
}

@@ -4398,38 +4370,36 @@ namespace ams::kern {
/* Iterate over blocks, fixing permissions. */
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
while (true) {
const KMemoryInfo info = it->GetMemoryInfo();

const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) ? info.GetAddress() : GetInteger(src_map_start);
const auto cur_end = src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
const auto cur_start = it->GetAddress() >= GetInteger(src_map_start) ? it->GetAddress() : GetInteger(src_map_start);
const auto cur_end = src_map_last <= it->GetLastAddress() ? src_map_end : it->GetEndAddress();

/* If we can, fix the protections on the block. */
if ((info.GetIpcLockCount() == 0 && (info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) ||
(info.GetIpcLockCount() != 0 && (info.GetOriginalPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm))
if ((it->GetIpcLockCount() == 0 && (it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) ||
(it->GetIpcLockCount() != 0 && (it->GetOriginalPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm))
{
/* Check if we actually need to fix the protections on the block. */
if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) || (info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) {
const bool start_nc = (info.GetAddress() == GetInteger(src_map_start)) ? ((info.GetDisableMergeAttribute() & (KMemoryBlockDisableMergeAttribute_Locked | KMemoryBlockDisableMergeAttribute_IpcLeft)) == 0) : info.GetAddress() <= GetInteger(src_map_start);
if (cur_end == src_map_end || it->GetAddress() <= GetInteger(src_map_start) || (it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) {
const bool start_nc = (it->GetAddress() == GetInteger(src_map_start)) ? ((it->GetDisableMergeAttribute() & (KMemoryBlockDisableMergeAttribute_Locked | KMemoryBlockDisableMergeAttribute_IpcLeft)) == 0) : it->GetAddress() <= GetInteger(src_map_start);

const DisableMergeAttribute head_body_attr = start_nc ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
DisableMergeAttribute tail_attr;
if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) {
if (cur_end == src_map_end && it->GetEndAddress() == src_map_end) {
auto next_it = it;
++next_it;

const auto lock_count = info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
const auto lock_count = it->GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
tail_attr = lock_count == 0 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
} else {
tail_attr = DisableMergeAttribute_None;
}

const KPageProperties properties = { info.GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
const KPageProperties properties = { it->GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
}
}

/* If we're past the end of the region, we're done. */
if (src_map_last <= info.GetLastAddress()) {
if (src_map_last <= it->GetLastAddress()) {
break;
}

@@ -4468,24 +4438,21 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();

/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (info.GetState() != KMemoryState_Free) {
if (last_address <= it->GetLastAddress()) {
if (it->GetState() != KMemoryState_Free) {
mapped_size += (last_address + 1 - cur_address);
}
break;
}

/* Track the memory if it's mapped. */
if (info.GetState() != KMemoryState_Free) {
mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
if (it->GetState() != KMemoryState_Free) {
mapped_size += it->GetEndAddress() - cur_address;
}

/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}

@@ -4527,21 +4494,18 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();

const bool is_free = info.GetState() == KMemoryState_Free;
const bool is_free = it->GetState() == KMemoryState_Free;
if (is_free) {
if (info.GetAddress() < GetInteger(address)) {
if (it->GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
if (last_address < it->GetLastAddress()) {
++num_allocator_blocks;
}
}

/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
if (!is_free) {
checked_mapped_size += (last_address + 1 - cur_address);
}

@@ -4550,11 +4514,11 @@ namespace ams::kern {

/* Track the memory if it's mapped. */
if (!is_free) {
checked_mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
checked_mapped_size += it->GetEndAddress() - cur_address;
}

/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}

@@ -4594,26 +4558,23 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();

/* If the memory state is free, we mapped it and need to unmap it. */
if (info.GetState() == KMemoryState_Free) {
if (it->GetState() == KMemoryState_Free) {
/* Determine the range to unmap. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_unmap_address + 1 - cur_address) / PageSize;
const size_t cur_pages = std::min(it->GetEndAddress() - cur_address, last_unmap_address + 1 - cur_address) / PageSize;

/* Unmap. */
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
}

/* Check if we're done. */
if (last_unmap_address <= info.GetLastAddress()) {
if (last_unmap_address <= it->GetLastAddress()) {
break;
}

/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}
}

@@ -4632,14 +4593,11 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();

/* If it's unmapped, we need to map it. */
if (info.GetState() == KMemoryState_Free) {
if (it->GetState() == KMemoryState_Free) {
/* Determine the range to map. */
const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, cur_address == this->GetAliasRegionStart() ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;
size_t map_pages = std::min(it->GetEndAddress() - cur_address, last_address + 1 - cur_address) / PageSize;

/* While we have pages to map, map them. */
{

@@ -4680,12 +4638,12 @@ namespace ams::kern {
}

/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
break;
}

/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}

@@ -4737,26 +4695,23 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();

/* Verify the memory's state. */
const bool is_normal = info.GetState() == KMemoryState_Normal && info.GetAttribute() == 0;
const bool is_free = info.GetState() == KMemoryState_Free;
const bool is_normal = it->GetState() == KMemoryState_Normal && it->GetAttribute() == 0;
const bool is_free = it->GetState() == KMemoryState_Free;
R_UNLESS(is_normal || is_free, svc::ResultInvalidCurrentMemory());

if (is_normal) {
R_UNLESS(info.GetAttribute() == KMemoryAttribute_None, svc::ResultInvalidCurrentMemory());
R_UNLESS(it->GetAttribute() == KMemoryAttribute_None, svc::ResultInvalidCurrentMemory());

if (map_start_address == Null<KProcessAddress>) {
map_start_address = cur_address;
}
map_last_address = (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
map_last_address = (last_address >= it->GetLastAddress()) ? it->GetLastAddress() : last_address;

if (info.GetAddress() < GetInteger(address)) {
if (it->GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
if (last_address < it->GetLastAddress()) {
++num_allocator_blocks;
}

@@ -4764,12 +4719,12 @@ namespace ams::kern {
}

/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
break;
}

/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}

@@ -4802,26 +4757,23 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());

/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();

/* If the memory state is normal, we need to unmap it. */
if (info.GetState() == KMemoryState_Normal) {
if (it->GetState() == KMemoryState_Normal) {
/* Determine the range to unmap. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;
const size_t cur_pages = std::min(it->GetEndAddress() - cur_address, last_address + 1 - cur_address) / PageSize;

/* Unmap. */
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
}

/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
break;
}

/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}