svc/kern/dd: remove MapDeviceAddressSpace()

Michael Scire 2021-09-18 11:28:39 -07:00
parent a33576e674
commit 30514c0e2c
15 changed files with 53 additions and 238 deletions


@@ -164,8 +164,8 @@ namespace ams::kern::arch::arm64 {
return m_page_table.UnlockForDeviceAddressSpace(address, size);
}
Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
return m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
return m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size);
}
Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {


@@ -69,16 +69,16 @@ namespace ams::kern::board::nintendo::nx {
Result Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size);
Result Detach(ams::svc::DeviceName device_name);
Result Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool refresh_mappings);
Result Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned);
Result Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address);
void Unmap(KDeviceVirtualAddress device_address, size_t size) {
return this->UnmapImpl(device_address, size, false);
}
private:
Result MapDevicePage(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm);
Result MapDevicePage(KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm);
Result MapImpl(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned);
Result MapImpl(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned);
void UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force);
bool IsFree(KDeviceVirtualAddress address, u64 size) const;


@@ -41,18 +41,17 @@ namespace ams::kern {
Result Attach(ams::svc::DeviceName device_name);
Result Detach(ams::svc::DeviceName device_name);
Result Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) {
return this->Map(out_mapped_size, page_table, process_address, size, device_address, device_perm, false, refresh_mappings);
Result MapByForce(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm) {
return this->Map(page_table, process_address, size, device_address, device_perm, false);
}
Result MapAligned(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm) {
size_t dummy;
return this->Map(std::addressof(dummy), page_table, process_address, size, device_address, device_perm, true, false);
return this->Map(page_table, process_address, size, device_address, device_perm, true);
}
Result Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address);
private:
Result Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool refresh_mappings);
Result Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned);
public:
static void Initialize();
};


@@ -375,7 +375,7 @@ namespace ams::kern {
Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size);
Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size);
Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size);
Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned);
Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange *out, KProcessAddress address, size_t size);


@@ -1006,10 +1006,7 @@ namespace ams::kern::board::nintendo::nx {
return true;
}
Result KDevicePageTable::MapDevicePage(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm) {
/* Clear the output size. */
*out_mapped_size = 0;
Result KDevicePageTable::MapDevicePage(KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm) {
/* Ensure that the physical address is valid. */
R_UNLESS(IsValidPhysicalAddress(static_cast<u64>(GetInteger(phys_addr)) + size - 1), svc::ResultInvalidCurrentMemory());
MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0);
@@ -1056,11 +1053,8 @@ namespace ams::kern::board::nintendo::nx {
/* Advance. */
phys_addr += DeviceLargePageSize;
address += DeviceLargePageSize;
*out_mapped_size += DeviceLargePageSize;
remaining -= DeviceLargePageSize;
continue;
} else if (num_pt == max_pt) {
break;
} else {
/* Make an l1 table. */
const KVirtualAddress table_vaddr = ptm.Allocate();
@@ -1076,9 +1070,6 @@ namespace ams::kern::board::nintendo::nx {
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();
/* Increment the page table count. */
++num_pt;
}
}
@@ -1117,7 +1108,6 @@ namespace ams::kern::board::nintendo::nx {
/* Advance. */
phys_addr += map_count * DevicePageSize;
address += map_count * DevicePageSize;
*out_mapped_size += map_count * DevicePageSize;
remaining -= map_count * DevicePageSize;
}
}
@@ -1125,14 +1115,7 @@ namespace ams::kern::board::nintendo::nx {
return ResultSuccess();
}
Result KDevicePageTable::MapImpl(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
/* Clear the output size. */
*out_mapped_size = 0;
/* Get the size, and validate the address. */
MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
Result KDevicePageTable::MapImpl(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
/* Ensure that the region we're mapping to is free. */
R_UNLESS(this->IsFree(device_address, size), svc::ResultInvalidCurrentMemory());
@@ -1141,31 +1124,28 @@ namespace ams::kern::board::nintendo::nx {
/* Iterate, mapping device pages. */
KDeviceVirtualAddress cur_addr = device_address;
while (true) {
size_t mapped_size = 0;
while (mapped_size < size) {
/* Map the next contiguous range. */
size_t cur_size;
{
/* Get the current contiguous range. */
KPageTableBase::MemoryRange contig_range = {};
R_TRY(page_table->OpenMemoryRangeForMapDeviceAddressSpace(std::addressof(contig_range), process_address + *out_mapped_size, size - *out_mapped_size, ConvertToKMemoryPermission(device_perm), is_aligned));
R_TRY(page_table->OpenMemoryRangeForMapDeviceAddressSpace(std::addressof(contig_range), process_address + mapped_size, size - mapped_size, ConvertToKMemoryPermission(device_perm), is_aligned));
/* Ensure we close the range when we're done. */
ON_SCOPE_EXIT { contig_range.Close(); };
/* Get the current size. */
cur_size = contig_range.size;
/* Map the device page. */
size_t mapped_size = 0;
R_TRY(this->MapDevicePage(std::addressof(mapped_size), num_pt, max_pt, contig_range.address, contig_range.size, cur_addr, device_perm));
R_TRY(this->MapDevicePage(contig_range.address, contig_range.size, cur_addr, device_perm));
}
/* Advance. */
cur_addr += contig_range.size;
*out_mapped_size += mapped_size;
/* If we didn't map as much as we wanted, break. */
if (mapped_size < contig_range.size) {
break;
}
/* Similarly, if we're done, break. */
if (*out_mapped_size >= size) {
break;
}
cur_addr += cur_size;
mapped_size += cur_size;
}
/* We're done, so cancel our guard. */
@@ -1437,13 +1417,13 @@ namespace ams::kern::board::nintendo::nx {
return true;
}
Result KDevicePageTable::Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool refresh_mappings) {
/* Clear the output size. */
*out_mapped_size = 0;
Result KDevicePageTable::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
/* Validate address/size. */
MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* Map the pages. */
s32 num_pt = 0;
return this->MapImpl(out_mapped_size, num_pt, refresh_mappings ? 1 : std::numeric_limits<s32>::max(), page_table, process_address, size, device_address, device_perm, is_aligned);
return this->MapImpl(page_table, process_address, size, device_address, device_perm, is_aligned);
}
Result KDevicePageTable::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) {

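For readers following the MapImpl rework above: the loop now maps one contiguous range per iteration and only succeeds once the entire request is covered, with no partial-map size reported back to the caller. Below is a minimal, self-contained sketch of that loop shape; the helpers, the 64 KiB chunk size, and the always-succeeding map are stand-in assumptions, not the kernel's APIs.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

namespace sketch {

    /* Stand-in for OpenMemoryRangeForMapDeviceAddressSpace: pretend the locked */
    /* process range is backed by contiguous 64 KiB chunks.                     */
    constexpr size_t ContiguousChunkSize = 64 * 1024;

    size_t OpenNextContiguousRange(uint64_t process_address, size_t remaining) {
        const size_t offset = static_cast<size_t>(process_address % ContiguousChunkSize);
        return std::min(remaining, ContiguousChunkSize - offset);
    }

    /* Stand-in for MapDevicePage: always succeeds here. */
    bool MapDevicePages(uint64_t source, size_t size, uint64_t device_address) {
        std::printf("map %zu bytes: 0x%llx -> device 0x%llx\n", size,
                    static_cast<unsigned long long>(source),
                    static_cast<unsigned long long>(device_address));
        return true;
    }

    bool MapWholeRange(uint64_t process_address, uint64_t device_address, size_t size) {
        size_t mapped_size = 0;
        while (mapped_size < size) {
            /* Find the next contiguous piece of the (already locked) process range. */
            const size_t cur_size = OpenNextContiguousRange(process_address + mapped_size, size - mapped_size);

            /* Map it; any failure fails the whole call, there is no partial result. */
            if (!MapDevicePages(process_address + mapped_size, cur_size, device_address + mapped_size)) {
                return false;
            }

            /* Advance by exactly what was mapped. */
            mapped_size += cur_size;
        }
        return true;
    }

}

int main() {
    return sketch::MapWholeRange(0x11000, 0x80000000ull, 0x30000) ? 0 : 1;
}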

@@ -64,7 +64,7 @@ namespace ams::kern {
return m_table.Detach(device_name);
}
Result KDeviceAddressSpace::Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool refresh_mappings) {
Result KDeviceAddressSpace::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
/* Check that the address falls within the space. */
R_UNLESS((m_space_address <= device_address && device_address + size - 1 <= m_space_address + m_space_size - 1), svc::ResultInvalidCurrentMemory());
@@ -82,22 +82,18 @@ namespace ams::kern {
/* Map the pages. */
{
/* Clear the output size to zero on failure. */
auto mapped_size_guard = SCOPE_GUARD { *out_mapped_size = 0; };
/* Perform the mapping. */
R_TRY(m_table.Map(out_mapped_size, page_table, process_address, size, device_address, device_perm, is_aligned, refresh_mappings));
R_TRY(m_table.Map(page_table, process_address, size, device_address, device_perm, is_aligned));
/* Ensure that we unmap the pages if we fail to update the protections. */
/* NOTE: Nintendo does not check the result of this unmap call. */
auto map_guard = SCOPE_GUARD { m_table.Unmap(device_address, *out_mapped_size); };
auto map_guard = SCOPE_GUARD { m_table.Unmap(device_address, size); };
/* Update the protections in accordance with how much we mapped. */
R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size, *out_mapped_size));
R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size));
/* We succeeded, so cancel our guards. */
/* We succeeded, so cancel our guard. */
map_guard.Cancel();
mapped_size_guard.Cancel();
}
/* We succeeded, so we don't need to unlock our pages. */
@@ -120,7 +116,7 @@ namespace ams::kern {
/* If we fail to unmap, we want to do a partial unlock. */
{
auto unlock_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size, size)); };
auto unlock_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size)); };
/* Unmap. */
R_TRY(m_table.Unmap(page_table, process_address, size, device_address));

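The Map and Unmap paths above lean on SCOPE_GUARD to undo the device mapping (or force a partial unlock) if a later step fails, and they cancel the guard on success. The following is a small self-contained illustration of that idiom with a generic guard type; it is not Atmosphère's SCOPE_GUARD/ON_SCOPE_EXIT implementation, and the UpdateProtections/UndoDeviceMapping stand-ins are assumptions.

#include <cstdio>
#include <utility>

template <typename F>
class ScopeGuard {
    public:
        explicit ScopeGuard(F f) : m_f(std::move(f)), m_active(true) {}
        ~ScopeGuard() { if (m_active) { m_f(); } }
        void Cancel() { m_active = false; }
        ScopeGuard(const ScopeGuard &) = delete;
        ScopeGuard &operator=(const ScopeGuard &) = delete;
    private:
        F m_f;
        bool m_active;
};

/* Stand-ins for the real steps. */
bool UpdateProtections() { return true; }
void UndoDeviceMapping() { std::puts("unmapped on failure"); }

bool MapAndUpdate() {
    /* Assume the device mapping just succeeded; undo it if the next step fails. */
    ScopeGuard map_guard{[] { UndoDeviceMapping(); }};

    if (!UpdateProtections()) {
        return false;   /* the guard's destructor unmaps here */
    }

    /* Success: keep the mapping. */
    map_guard.Cancel();
    return true;
}

int main() {
    return MapAndUpdate() ? 0 : 1;
}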

@@ -2702,7 +2702,7 @@ namespace ams::kern {
return ResultSuccess();
}
Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
/* Lightly validate the range before doing anything else. */
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
@@ -2710,71 +2710,21 @@ namespace ams::kern {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Determine useful extents. */
const KProcessAddress mapped_end_address = address + mapped_size;
const size_t unmapped_size = size - mapped_size;
/* Check memory state. */
size_t allocator_num_blocks = 0, unmapped_allocator_num_blocks = 0;
if (unmapped_size) {
if (m_enable_device_address_space_merge) {
size_t allocator_num_blocks = 0;
R_TRY(this->CheckMemoryStateContiguous(std::addressof(allocator_num_blocks),
address, size,
KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
}
R_TRY(this->CheckMemoryStateContiguous(std::addressof(unmapped_allocator_num_blocks),
mapped_end_address, unmapped_size,
KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
} else {
R_TRY(this->CheckMemoryStateContiguous(std::addressof(allocator_num_blocks),
address, size,
KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
}
/* Create an update allocator for the region. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, allocator_num_blocks);
R_TRY(allocator_result);
/* Create an update allocator for the unmapped region. */
Result unmapped_allocator_result;
KMemoryBlockManagerUpdateAllocator unmapped_allocator(std::addressof(unmapped_allocator_result), m_memory_block_slab_manager, unmapped_allocator_num_blocks);
R_TRY(unmapped_allocator_result);
/* Determine parameters for the update lock call. */
KMemoryBlockManagerUpdateAllocator *lock_allocator;
KProcessAddress lock_address;
size_t lock_num_pages;
KMemoryBlockManager::MemoryBlockLockFunction lock_func;
if (unmapped_size) {
/* If device address space merge is enabled, update tracking appropriately. */
if (m_enable_device_address_space_merge) {
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareLeft, KMemoryPermission_None);
}
lock_allocator = std::addressof(unmapped_allocator);
lock_address = mapped_end_address;
lock_num_pages = unmapped_size / PageSize;
lock_func = &KMemoryBlock::UnshareToDeviceRight;
} else {
lock_allocator = std::addressof(allocator);
lock_address = address;
lock_num_pages = num_pages;
if (m_enable_device_address_space_merge) {
lock_func = &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare;
} else {
lock_func = &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight;
}
}
/* Update the memory blocks. */
m_memory_block_manager.UpdateLock(lock_allocator, lock_address, lock_num_pages, lock_func, KMemoryPermission_None);
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight, KMemoryPermission_None);
return ResultSuccess();
}
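The simplified UnlockForDeviceAddressSpacePartialMap above now picks the block update function with a ternary over pointers to member functions and hands it to UpdateLock. Here is a tiny self-contained example of that C++ idiom; Block, the two update functions, and this UpdateLock signature are stand-ins, not kernel types.

#include <cstdio>

struct Block {
    void UpdateForUnshare()      { std::puts("full unshare update");       }
    void UpdateForUnshareRight() { std::puts("right-edge unshare update"); }
};

using UpdateFunction = void (Block::*)();

/* Apply the selected update function to every block in the range. */
void UpdateLock(Block *blocks, int count, UpdateFunction func) {
    for (int i = 0; i < count; ++i) {
        (blocks[i].*func)();
    }
}

int main() {
    Block blocks[2] = {};
    const bool enable_merge = true;
    UpdateLock(blocks, 2, enable_merge ? &Block::UpdateForUnshare : &Block::UpdateForUnshareRight);
    return 0;
}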


@@ -80,7 +80,7 @@ namespace ams::kern::svc {
}
}
Result MapDeviceAddressSpace(size_t *out_mapped_size, ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) {
Result MapDeviceAddressSpaceByForce(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
/* Validate input. */
R_UNLESS(util::IsAligned(process_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(device_address, PageSize), svc::ResultInvalidAddress());
@@ -104,7 +104,7 @@ namespace ams::kern::svc {
R_UNLESS(page_table.Contains(process_address, size), svc::ResultInvalidCurrentMemory());
/* Map. */
return das->Map(out_mapped_size, std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm, refresh_mappings);
return das->MapByForce(std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm);
}
Result MapDeviceAddressSpaceAligned(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
@@ -177,19 +177,13 @@ namespace ams::kern::svc {
}
Result MapDeviceAddressSpaceByForce64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
size_t dummy_map_size;
return MapDeviceAddressSpace(std::addressof(dummy_map_size), das_handle, process_handle, process_address, size, device_address, device_perm, false);
return MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, device_perm);
}
Result MapDeviceAddressSpaceAligned64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
return MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm);
}
Result MapDeviceAddressSpace64(ams::svc::Size *out_mapped_size, ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
static_assert(sizeof(*out_mapped_size) == sizeof(size_t));
return MapDeviceAddressSpace(reinterpret_cast<size_t *>(out_mapped_size), das_handle, process_handle, process_address, size, device_address, device_perm, true);
}
Result UnmapDeviceAddressSpace64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) {
return UnmapDeviceAddressSpace(das_handle, process_handle, process_address, size, device_address);
}
@@ -209,19 +203,13 @@ namespace ams::kern::svc {
}
Result MapDeviceAddressSpaceByForce64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
size_t dummy_map_size;
return MapDeviceAddressSpace(std::addressof(dummy_map_size), das_handle, process_handle, process_address, size, device_address, device_perm, false);
return MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, device_perm);
}
Result MapDeviceAddressSpaceAligned64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
return MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm);
}
Result MapDeviceAddressSpace64From32(ams::svc::Size *out_mapped_size, ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
static_assert(sizeof(*out_mapped_size) == sizeof(size_t));
return MapDeviceAddressSpace(reinterpret_cast<size_t *>(out_mapped_size), das_handle, process_handle, process_address, size, device_address, device_perm, true);
}
Result UnmapDeviceAddressSpace64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) {
return UnmapDeviceAddressSpace(das_handle, process_handle, process_address, size, device_address);
}
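As the 64 and 64From32 wrappers above show, each SVC keeps thin ABI entry points that forward unchanged to one shared implementation; removing an SVC therefore means deleting that implementation plus both wrappers. A minimal self-contained sketch of the forwarding pattern follows; all names and types here are stand-ins, not the kernel's dispatch machinery.

#include <cstdint>
#include <cstdio>

using Result = int;
constexpr Result ResultSuccess = 0;

/* The shared implementation, written once. */
Result MapByForceImpl(uint32_t das_handle, uint32_t process_handle, uint64_t process_address, uint64_t size, uint64_t device_address) {
    std::printf("map-by-force: %llu bytes at 0x%llx -> device 0x%llx\n",
                static_cast<unsigned long long>(size),
                static_cast<unsigned long long>(process_address),
                static_cast<unsigned long long>(device_address));
    return ResultSuccess;
}

/* Thin ABI wrappers for 64-bit and 32-bit-on-64 callers, both forwarding as-is. */
Result MapByForce64(uint32_t das_handle, uint32_t process_handle, uint64_t process_address, uint64_t size, uint64_t device_address) {
    return MapByForceImpl(das_handle, process_handle, process_address, size, device_address);
}

Result MapByForce64From32(uint32_t das_handle, uint32_t process_handle, uint64_t process_address, uint64_t size, uint64_t device_address) {
    return MapByForceImpl(das_handle, process_handle, process_address, size, device_address);
}

int main() {
    return MapByForce64(1, 2, 0x1000, 0x2000, 0x8000);
}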


@@ -33,15 +33,6 @@ namespace ams::dd {
Result MapDeviceAddressSpaceNotAligned(DeviceAddressSpaceType *das, ProcessHandle process_handle, u64 process_address, size_t size, DeviceVirtualAddress device_address, MemoryPermission device_perm);
void UnmapDeviceAddressSpace(DeviceAddressSpaceType *das, ProcessHandle process_handle, u64 process_address, size_t size, DeviceVirtualAddress device_address);
void InitializeDeviceAddressSpaceMapInfo(DeviceAddressSpaceMapInfo *info, DeviceAddressSpaceType *das, ProcessHandle process_handle, u64 process_address, size_t size, DeviceVirtualAddress device_address, MemoryPermission device_perm);
Result MapNextDeviceAddressSpaceRegion(size_t *out_mapped_size, DeviceAddressSpaceMapInfo *info);
void UnmapDeviceAddressSpaceRegion(DeviceAddressSpaceMapInfo *info);
u64 GetMappedProcessAddress(DeviceAddressSpaceMapInfo *info);
DeviceVirtualAddress GetMappedDeviceVirtualAddress(DeviceAddressSpaceMapInfo *info);
size_t GetMappedSize(DeviceAddressSpaceMapInfo *info);
Result AttachDeviceAddressSpace(DeviceAddressSpaceType *das, DeviceName device_name);
void DetachDeviceAddressSpace(DeviceAddressSpaceType *das, DeviceName device_name);


@@ -34,16 +34,4 @@ namespace ams::dd {
};
static_assert(std::is_trivial<DeviceAddressSpaceType>::value);
struct DeviceAddressSpaceMapInfo {
size_t last_mapped_size;
size_t size;
u64 process_address;
DeviceVirtualAddress device_start_address;
DeviceVirtualAddress device_end_address;
ProcessHandle process_handle;
MemoryPermission device_permission;
DeviceAddressSpaceType *device_address_space;
};
static_assert(std::is_trivial<DeviceAddressSpaceMapInfo>::value);
}


@@ -354,10 +354,6 @@
return ::svcMapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, static_cast<u32>(device_perm));
}
ALWAYS_INLINE Result MapDeviceAddressSpace(::ams::svc::Size *out_mapped_size, ::ams::svc::Handle das_handle, ::ams::svc::Handle process_handle, uint64_t process_address, ::ams::svc::Size size, uint64_t device_address, ::ams::svc::MemoryPermission device_perm) {
return ::svcMapDeviceAddressSpace(reinterpret_cast<u64 *>(out_mapped_size), das_handle, process_handle, process_address, size, device_address, static_cast<u32>(device_perm));
}
ALWAYS_INLINE Result UnmapDeviceAddressSpace(::ams::svc::Handle das_handle, ::ams::svc::Handle process_handle, uint64_t process_address, ::ams::svc::Size size, uint64_t device_address) {
return ::svcUnmapDeviceAddressSpace(das_handle, process_handle, process_address, size, device_address);
}


@@ -115,64 +115,6 @@ namespace ams::dd {
return impl::DeviceAddressSpaceImpl::Unmap(das->device_handle, process_handle, process_address, size, device_address);
}
void InitializeDeviceAddressSpaceMapInfo(DeviceAddressSpaceMapInfo *info, DeviceAddressSpaceType *das, ProcessHandle process_handle, u64 process_address, size_t size, DeviceVirtualAddress device_address, MemoryPermission device_perm) {
/* Check pre-conditions. */
AMS_ASSERT(das->state == DeviceAddressSpaceType::State_Initialized);
AMS_ASSERT(util::IsAligned(process_address, os::MemoryPageSize));
AMS_ASSERT(util::IsAligned(device_address, os::MemoryPageSize));
AMS_ASSERT(util::IsAligned(size, os::MemoryPageSize));
AMS_ASSERT(process_address + size > process_address);
AMS_ASSERT(device_address + size > device_address);
AMS_ASSERT(size > 0);
info->last_mapped_size = 0;
info->process_address = process_address;
info->size = size;
info->device_start_address = device_address;
info->device_end_address = device_address + size;
info->process_handle = process_handle;
info->device_permission = device_perm;
info->device_address_space = das;
}
Result MapNextDeviceAddressSpaceRegion(size_t *out_mapped_size, DeviceAddressSpaceMapInfo *info) {
/* Check pre-conditions. */
AMS_ASSERT(info->last_mapped_size == 0);
size_t mapped_size = 0;
if (info->device_start_address < info->device_end_address) {
R_TRY(impl::DeviceAddressSpaceImpl::MapPartially(std::addressof(mapped_size), info->device_address_space->device_handle, info->process_handle, info->process_address, info->size, info->device_start_address, info->device_permission));
}
info->last_mapped_size = mapped_size;
*out_mapped_size = mapped_size;
return ResultSuccess();
}
void UnmapDeviceAddressSpaceRegion(DeviceAddressSpaceMapInfo *info) {
/* Check pre-conditions. */
const size_t last_mapped_size = info->last_mapped_size;
AMS_ASSERT(last_mapped_size > 0);
impl::DeviceAddressSpaceImpl::Unmap(info->device_address_space->device_handle, info->process_handle, info->process_address, last_mapped_size, info->device_start_address);
info->last_mapped_size = 0;
info->process_address += last_mapped_size;
info->device_start_address += last_mapped_size;
}
u64 GetMappedProcessAddress(DeviceAddressSpaceMapInfo *info) {
return info->process_address;
}
DeviceVirtualAddress GetMappedDeviceVirtualAddress(DeviceAddressSpaceMapInfo *info) {
return info->device_start_address;
}
size_t GetMappedSize(DeviceAddressSpaceMapInfo *info) {
return info->last_mapped_size;
}
Result AttachDeviceAddressSpace(DeviceAddressSpaceType *das, DeviceName device_name) {
/* Check pre-conditions. */
AMS_ASSERT(das->state == DeviceAddressSpaceType::State_Initialized);

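With DeviceAddressSpaceMapInfo and the MapNextDeviceAddressSpaceRegion/UnmapDeviceAddressSpaceRegion helpers removed above, a client that previously walked the range in partial pieces can map the whole range with a single call to the surviving dd API. A hedged sketch follows: it assumes the usual <stratosphere.hpp> umbrella include, the example namespace and wrapper are illustrative only, and the handle, address, and permission arguments are forwarded with whatever types the dd declarations shown earlier use.

#include <stratosphere.hpp>

namespace example {

    /* Map the whole process range into the device address space, use it, then unmap it. */
    ams::Result MapUseUnmap(ams::dd::DeviceAddressSpaceType *das, auto process_handle,
                            u64 process_address, size_t size, auto device_address, auto device_perm) {
        /* One call covers the full range; there is no partial-map loop any more. */
        R_TRY(ams::dd::MapDeviceAddressSpaceNotAligned(das, process_handle, process_address, size, device_address, device_perm));

        /* ... program the device / perform DMA against the mapping here ... */

        /* Tear the mapping down with the same extents. */
        ams::dd::UnmapDeviceAddressSpace(das, process_handle, process_address, size, device_address);

        return ams::ResultSuccess();
    }

}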

@@ -69,19 +69,6 @@ namespace ams::dd::impl {
return ResultSuccess();
}
Result DeviceAddressSpaceImplByHorizon::MapPartially(size_t *out_mapped_size, DeviceAddressSpaceHandle handle, ProcessHandle process_handle, u64 process_address, size_t process_size, DeviceVirtualAddress device_address, dd::MemoryPermission device_perm) {
ams::svc::Size mapped_size = 0;
R_TRY_CATCH(svc::MapDeviceAddressSpace(std::addressof(mapped_size), svc::Handle(handle), svc::Handle(process_handle), process_address, process_size, device_address, static_cast<svc::MemoryPermission>(device_perm))) {
R_CONVERT(svc::ResultInvalidHandle, dd::ResultInvalidHandle())
R_CONVERT(svc::ResultOutOfMemory, dd::ResultOutOfMemory())
R_CONVERT(svc::ResultOutOfResource, dd::ResultOutOfResource())
R_CONVERT(svc::ResultInvalidCurrentMemory, dd::ResultInvalidMemoryState())
} R_END_TRY_CATCH_WITH_ABORT_UNLESS;
*out_mapped_size = mapped_size;
return ResultSuccess();
}
void DeviceAddressSpaceImplByHorizon::Unmap(DeviceAddressSpaceHandle handle, ProcessHandle process_handle, u64 process_address, size_t process_size, DeviceVirtualAddress device_address) {
R_ABORT_UNLESS(svc::UnmapDeviceAddressSpace(svc::Handle(handle), svc::Handle(process_handle), process_address, process_size, device_address));
}


@@ -25,7 +25,6 @@ namespace ams::dd::impl {
static Result MapAligned(DeviceAddressSpaceHandle handle, ProcessHandle process_handle, u64 process_address, size_t process_size, DeviceVirtualAddress device_address, dd::MemoryPermission device_perm);
static Result MapNotAligned(DeviceAddressSpaceHandle handle, ProcessHandle process_handle, u64 process_address, size_t process_size, DeviceVirtualAddress device_address, dd::MemoryPermission device_perm);
static Result MapPartially(size_t *out_mapped_size, DeviceAddressSpaceHandle handle, ProcessHandle process_handle, u64 process_address, size_t process_size, DeviceVirtualAddress device_address, dd::MemoryPermission device_perm);
static void Unmap(DeviceAddressSpaceHandle handle, ProcessHandle process_handle, u64 process_address, size_t process_size, DeviceVirtualAddress device_address);
static Result Attach(DeviceAddressSpaceType *das, DeviceName device_name);


@@ -118,7 +118,6 @@
HANDLER(0x58, Result, DetachDeviceAddressSpace, INPUT(::ams::svc::DeviceName, device_name), INPUT(::ams::svc::Handle, das_handle)) \
HANDLER(0x59, Result, MapDeviceAddressSpaceByForce, INPUT(::ams::svc::Handle, das_handle), INPUT(::ams::svc::Handle, process_handle), INPUT(uint64_t, process_address), INPUT(::ams::svc::Size, size), INPUT(uint64_t, device_address), INPUT(::ams::svc::MemoryPermission, device_perm)) \
HANDLER(0x5A, Result, MapDeviceAddressSpaceAligned, INPUT(::ams::svc::Handle, das_handle), INPUT(::ams::svc::Handle, process_handle), INPUT(uint64_t, process_address), INPUT(::ams::svc::Size, size), INPUT(uint64_t, device_address), INPUT(::ams::svc::MemoryPermission, device_perm)) \
HANDLER(0x5B, Result, MapDeviceAddressSpace, OUTPUT(::ams::svc::Size, out_mapped_size), INPUT(::ams::svc::Handle, das_handle), INPUT(::ams::svc::Handle, process_handle), INPUT(uint64_t, process_address), INPUT(::ams::svc::Size, size), INPUT(uint64_t, device_address), INPUT(::ams::svc::MemoryPermission, device_perm)) \
HANDLER(0x5C, Result, UnmapDeviceAddressSpace, INPUT(::ams::svc::Handle, das_handle), INPUT(::ams::svc::Handle, process_handle), INPUT(uint64_t, process_address), INPUT(::ams::svc::Size, size), INPUT(uint64_t, device_address)) \
HANDLER(0x5D, Result, InvalidateProcessDataCache, INPUT(::ams::svc::Handle, process_handle), INPUT(uint64_t, address), INPUT(uint64_t, size)) \
HANDLER(0x5E, Result, StoreProcessDataCache, INPUT(::ams::svc::Handle, process_handle), INPUT(uint64_t, address), INPUT(uint64_t, size)) \