Mirror of https://github.com/Atmosphere-NX/Atmosphere, synced 2024-11-13 00:26:35 +00:00
Integrate new result macros. (#1780)
* result: try out some experimental shenanigans
* result: sketch out some more shenanigans
* result: see what it looks like to convert kernel to use result conds instead of guards
* make rest of kernel use experimental new macro-ing
This commit is contained in:
parent 375ba615be
commit 96f95b9f95
109 changed files with 1355 additions and 1380 deletions
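For readers skimming the diff below: the commit replaces bare return statements in Result-returning kernel code with the new result macros (R_SUCCEED for success, R_RETURN to forward a callee's Result, R_THROW for an explicit failure), and swaps cancelable SCOPE_GUARDs for ON_RESULT_FAILURE / ON_RESULT_FAILURE_2 scopes whose cleanup runs only when the enclosing function exits with a failing Result. The sketch below shows roughly how the simpler macros behave at a call site. It is an illustration under stated assumptions: the Result struct, the numeric error value, the macro bodies, and the DoWork/Caller helpers are invented for demonstration and are not Atmosphere's actual definitions, and the ON_RESULT_FAILURE machinery is intentionally omitted because it is more involved.

/* Illustrative sketch only (not Atmosphere's real definitions). The Result type,  */
/* the error value, and the macro bodies below are simplified assumptions made for */
/* demonstration; the real macros live in Atmosphere's result headers and also     */
/* cooperate with ON_RESULT_FAILURE cleanup scopes, which are not reproduced here. */

#include <cstdio>

struct Result {
    int value;
    constexpr bool IsSuccess() const { return value == 0; }
    constexpr bool IsFailure() const { return !this->IsSuccess(); }
};

constexpr Result ResultSuccess()        { return Result{0};   }
constexpr Result ResultNotImplemented() { return Result{177}; } /* hypothetical error value */

/* Return success from a Result-returning function. */
#define R_SUCCEED() return ResultSuccess()

/* Return whatever Result the expression produced, success or failure. */
#define R_RETURN(expr) return (expr)

/* Return an explicit failure result. */
#define R_THROW(res) return (res)

/* Propagate a failure to the caller, continue on success. */
#define R_TRY(expr)                            \
    do {                                       \
        const Result tmp_result_ = (expr);     \
        if (tmp_result_.IsFailure()) {         \
            R_RETURN(tmp_result_);             \
        }                                      \
    } while (false)

Result DoWork(bool implemented) {
    if (!implemented) {
        R_THROW(ResultNotImplemented());
    }
    R_SUCCEED();
}

Result Caller() {
    R_TRY(DoWork(true));     /* success: execution continues         */
    R_RETURN(DoWork(false)); /* forwards the callee's failure result */
}

int main() {
    std::printf("Caller() -> %d\n", Caller().value);
    return 0;
}

Funneling every exit path through a macro is presumably what lets the failure scopes hook result propagation, which is why the later KPageTable::InitializeForProcess hunks can drop the explicit table_guard.Cancel() / asid_guard.Cancel() calls.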
@@ -191,7 +191,7 @@ namespace ams::kern::arch::arm64 {
 Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
 switch (page_size) {
 case L1BlockSize:
-return this->MapL1Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll);
+R_RETURN(this->MapL1Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
 case L2ContiguousBlockSize:
 entry_template.SetContiguous(true);
 [[fallthrough]];
@@ -199,12 +199,12 @@ namespace ams::kern::arch::arm64 {
 case L2TegraSmmuBlockSize:
 #endif
 case L2BlockSize:
-return this->MapL2Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll);
+R_RETURN(this->MapL2Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
 case L3ContiguousBlockSize:
 entry_template.SetContiguous(true);
 [[fallthrough]];
 case L3BlockSize:
-return this->MapL3Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll);
+R_RETURN(this->MapL3Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
 MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
 }
 }
@@ -29,7 +29,7 @@ namespace ams::kern::arch::arm64 {
 }

 Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager, KResourceLimit *resource_limit) {
-return m_page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager, resource_limit);
+R_RETURN(m_page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager, resource_limit));
 }

 void Finalize() { m_page_table.Finalize(); }
@ -39,231 +39,231 @@ namespace ams::kern::arch::arm64 {
|
|||
}
|
||||
|
||||
Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
|
||||
return m_page_table.SetMemoryPermission(addr, size, perm);
|
||||
R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm));
|
||||
}
|
||||
|
||||
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
|
||||
return m_page_table.SetProcessMemoryPermission(addr, size, perm);
|
||||
R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm));
|
||||
}
|
||||
|
||||
Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
|
||||
return m_page_table.SetMemoryAttribute(addr, size, mask, attr);
|
||||
R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr));
|
||||
}
|
||||
|
||||
Result SetHeapSize(KProcessAddress *out, size_t size) {
|
||||
return m_page_table.SetHeapSize(out, size);
|
||||
R_RETURN(m_page_table.SetHeapSize(out, size));
|
||||
}
|
||||
|
||||
Result SetMaxHeapSize(size_t size) {
|
||||
return m_page_table.SetMaxHeapSize(size);
|
||||
R_RETURN(m_page_table.SetMaxHeapSize(size));
|
||||
}
|
||||
|
||||
Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
|
||||
return m_page_table.QueryInfo(out_info, out_page_info, addr);
|
||||
R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr));
|
||||
}
|
||||
|
||||
Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
|
||||
return m_page_table.QueryPhysicalAddress(out, address);
|
||||
R_RETURN(m_page_table.QueryPhysicalAddress(out, address));
|
||||
}
|
||||
|
||||
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
|
||||
return m_page_table.QueryStaticMapping(out, address, size);
|
||||
R_RETURN(m_page_table.QueryStaticMapping(out, address, size));
|
||||
}
|
||||
|
||||
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
|
||||
return m_page_table.QueryIoMapping(out, address, size);
|
||||
R_RETURN(m_page_table.QueryIoMapping(out, address, size));
|
||||
}
|
||||
|
||||
Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
return m_page_table.MapMemory(dst_address, src_address, size);
|
||||
R_RETURN(m_page_table.MapMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
return m_page_table.UnmapMemory(dst_address, src_address, size);
|
||||
R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
return m_page_table.MapCodeMemory(dst_address, src_address, size);
|
||||
R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
return m_page_table.UnmapCodeMemory(dst_address, src_address, size);
|
||||
R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
|
||||
return m_page_table.MapIo(phys_addr, size, perm);
|
||||
R_RETURN(m_page_table.MapIo(phys_addr, size, perm));
|
||||
}
|
||||
|
||||
Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission perm) {
|
||||
return m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm);
|
||||
R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm));
|
||||
}
|
||||
|
||||
Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size) {
|
||||
return m_page_table.UnmapIoRegion(dst_address, phys_addr, size);
|
||||
R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size));
|
||||
}
|
||||
|
||||
Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
|
||||
return m_page_table.MapStatic(phys_addr, size, perm);
|
||||
R_RETURN(m_page_table.MapStatic(phys_addr, size, perm));
|
||||
}
|
||||
|
||||
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
|
||||
return m_page_table.MapRegion(region_type, perm);
|
||||
R_RETURN(m_page_table.MapRegion(region_type, perm));
|
||||
}
|
||||
|
||||
Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
|
||||
return m_page_table.MapPageGroup(addr, pg, state, perm);
|
||||
R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm));
|
||||
}
|
||||
|
||||
Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
|
||||
return m_page_table.UnmapPageGroup(address, pg, state);
|
||||
R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
|
||||
}
|
||||
|
||||
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
|
||||
return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
|
||||
R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm));
|
||||
}
|
||||
|
||||
Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
return m_page_table.MapPages(out_addr, num_pages, state, perm);
|
||||
R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm));
|
||||
}
|
||||
|
||||
Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
return m_page_table.MapPages(address, num_pages, state, perm);
|
||||
R_RETURN(m_page_table.MapPages(address, num_pages, state, perm));
|
||||
}
|
||||
|
||||
Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
|
||||
return m_page_table.UnmapPages(addr, num_pages, state);
|
||||
R_RETURN(m_page_table.UnmapPages(addr, num_pages, state));
|
||||
}
|
||||
|
||||
Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
|
||||
return m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr);
|
||||
R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr));
|
||||
}
|
||||
|
||||
Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
|
||||
return m_page_table.InvalidateProcessDataCache(address, size);
|
||||
R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
|
||||
}
|
||||
|
||||
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
|
||||
return m_page_table.ReadDebugMemory(buffer, address, size);
|
||||
R_RETURN(m_page_table.ReadDebugMemory(buffer, address, size));
|
||||
}
|
||||
|
||||
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size) {
|
||||
return m_page_table.ReadDebugIoMemory(buffer, address, size);
|
||||
R_RETURN(m_page_table.ReadDebugIoMemory(buffer, address, size));
|
||||
}
|
||||
|
||||
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
|
||||
return m_page_table.WriteDebugMemory(address, buffer, size);
|
||||
R_RETURN(m_page_table.WriteDebugMemory(address, buffer, size));
|
||||
}
|
||||
|
||||
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size) {
|
||||
return m_page_table.WriteDebugIoMemory(address, buffer, size);
|
||||
R_RETURN(m_page_table.WriteDebugIoMemory(address, buffer, size));
|
||||
}
|
||||
|
||||
Result LockForMapDeviceAddressSpace(KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
|
||||
return m_page_table.LockForMapDeviceAddressSpace(address, size, perm, is_aligned);
|
||||
R_RETURN(m_page_table.LockForMapDeviceAddressSpace(address, size, perm, is_aligned));
|
||||
}
|
||||
|
||||
Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size) {
|
||||
return m_page_table.LockForUnmapDeviceAddressSpace(address, size);
|
||||
R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size));
|
||||
}
|
||||
|
||||
Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
|
||||
return m_page_table.UnlockForDeviceAddressSpace(address, size);
|
||||
R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size));
|
||||
}
|
||||
|
||||
Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
|
||||
return m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size);
|
||||
R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size));
|
||||
}
|
||||
|
||||
Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
|
||||
return m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm, is_aligned);
|
||||
R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm, is_aligned));
|
||||
}
|
||||
|
||||
Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size) {
|
||||
return m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size);
|
||||
R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size));
|
||||
}
|
||||
|
||||
Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
|
||||
return m_page_table.LockForIpcUserBuffer(out, address, size);
|
||||
R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size));
|
||||
}
|
||||
|
||||
Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
|
||||
return m_page_table.UnlockForIpcUserBuffer(address, size);
|
||||
R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size));
|
||||
}
|
||||
|
||||
Result LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) {
|
||||
return m_page_table.LockForTransferMemory(out, address, size, perm);
|
||||
R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm));
|
||||
}
|
||||
|
||||
Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
|
||||
return m_page_table.UnlockForTransferMemory(address, size, pg);
|
||||
R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg));
|
||||
}
|
||||
|
||||
Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) {
|
||||
return m_page_table.LockForCodeMemory(out, address, size);
|
||||
R_RETURN(m_page_table.LockForCodeMemory(out, address, size));
|
||||
}
|
||||
|
||||
Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
|
||||
return m_page_table.UnlockForCodeMemory(address, size, pg);
|
||||
R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg));
|
||||
}
|
||||
|
||||
Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size) {
|
||||
return m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size);
|
||||
R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size));
|
||||
}
|
||||
|
||||
Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
return m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr));
|
||||
}
|
||||
|
||||
Result CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
return m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr));
|
||||
}
|
||||
|
||||
Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
|
||||
return m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
|
||||
R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr));
|
||||
}
|
||||
|
||||
Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
|
||||
return m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
|
||||
R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr));
|
||||
}
|
||||
|
||||
Result CopyMemoryFromHeapToHeap(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
return m_page_table.CopyMemoryFromHeapToHeap(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
R_RETURN(m_page_table.CopyMemoryFromHeapToHeap(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr));
|
||||
}
|
||||
|
||||
Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
return m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr));
|
||||
}
|
||||
|
||||
Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KProcessPageTable &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
|
||||
return m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, test_perm, dst_state, send);
|
||||
R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, test_perm, dst_state, send));
|
||||
}
|
||||
|
||||
Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
|
||||
return m_page_table.CleanupForIpcServer(address, size, dst_state);
|
||||
R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state));
|
||||
}
|
||||
|
||||
Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
|
||||
return m_page_table.CleanupForIpcClient(address, size, dst_state);
|
||||
R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state));
|
||||
}
|
||||
|
||||
Result MapPhysicalMemory(KProcessAddress address, size_t size) {
|
||||
return m_page_table.MapPhysicalMemory(address, size);
|
||||
R_RETURN(m_page_table.MapPhysicalMemory(address, size));
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
|
||||
return m_page_table.UnmapPhysicalMemory(address, size);
|
||||
R_RETURN(m_page_table.UnmapPhysicalMemory(address, size));
|
||||
}
|
||||
|
||||
Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
|
||||
return m_page_table.MapPhysicalMemoryUnsafe(address, size);
|
||||
R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size));
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
|
||||
return m_page_table.UnmapPhysicalMemoryUnsafe(address, size);
|
||||
R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size));
|
||||
}
|
||||
|
||||
Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KProcessPageTable &src_page_table, KProcessAddress src_address) {
|
||||
return m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table, src_address);
|
||||
R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table, src_address));
|
||||
}
|
||||
|
||||
void DumpMemoryBlocks() const {
|
||||
|
|
|
@ -42,19 +42,19 @@ namespace ams::kern::arch::arm64 {
|
|||
}
|
||||
|
||||
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
|
||||
R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm));
|
||||
}
|
||||
|
||||
Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
|
||||
return m_page_table.UnmapPages(address, num_pages, state);
|
||||
R_RETURN(m_page_table.UnmapPages(address, num_pages, state));
|
||||
}
|
||||
|
||||
Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
return m_page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
|
||||
R_RETURN(m_page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm));
|
||||
}
|
||||
|
||||
Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
|
||||
return m_page_table.UnmapPageGroup(address, pg, state);
|
||||
R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
|
||||
}
|
||||
|
||||
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
|
||||
|
|
|
@ -29,29 +29,29 @@ namespace ams::kern::board::generic {
|
|||
|
||||
Result ALWAYS_INLINE Initialize(u64 space_address, u64 space_size) {
|
||||
MESOSPHERE_UNUSED(space_address, space_size);
|
||||
return ams::kern::svc::ResultNotImplemented();
|
||||
R_THROW(ams::kern::svc::ResultNotImplemented());
|
||||
}
|
||||
|
||||
void ALWAYS_INLINE Finalize() { /* ... */ }
|
||||
|
||||
Result ALWAYS_INLINE Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size) {
|
||||
MESOSPHERE_UNUSED(device_name, space_address, space_size);
|
||||
return ams::kern::svc::ResultNotImplemented();
|
||||
R_THROW(ams::kern::svc::ResultNotImplemented());
|
||||
}
|
||||
|
||||
Result ALWAYS_INLINE Detach(ams::svc::DeviceName device_name) {
|
||||
MESOSPHERE_UNUSED(device_name);
|
||||
return ams::kern::svc::ResultNotImplemented();
|
||||
R_THROW(ams::kern::svc::ResultNotImplemented());
|
||||
}
|
||||
|
||||
Result ALWAYS_INLINE Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
|
||||
MESOSPHERE_UNUSED(page_table, process_address, size, device_address, device_perm, is_aligned);
|
||||
return ams::kern::svc::ResultNotImplemented();
|
||||
R_THROW(ams::kern::svc::ResultNotImplemented());
|
||||
}
|
||||
|
||||
Result ALWAYS_INLINE Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) {
|
||||
MESOSPHERE_UNUSED(page_table, process_address, size, device_address);
|
||||
return ams::kern::svc::ResultNotImplemented();
|
||||
R_THROW(ams::kern::svc::ResultNotImplemented());
|
||||
}
|
||||
|
||||
void ALWAYS_INLINE Unmap(KDeviceVirtualAddress device_address, size_t size) {
|
||||
|
|
|
@@ -30,11 +30,11 @@ namespace ams::kern {
 Result SignalToAddress(uintptr_t addr, ams::svc::SignalType type, s32 value, s32 count) {
 switch (type) {
 case ams::svc::SignalType_Signal:
-return this->Signal(addr, count);
+R_RETURN(this->Signal(addr, count));
 case ams::svc::SignalType_SignalAndIncrementIfEqual:
-return this->SignalAndIncrementIfEqual(addr, value, count);
+R_RETURN(this->SignalAndIncrementIfEqual(addr, value, count));
 case ams::svc::SignalType_SignalAndModifyByWaitingCountIfEqual:
-return this->SignalAndModifyByWaitingCountIfEqual(addr, value, count);
+R_RETURN(this->SignalAndModifyByWaitingCountIfEqual(addr, value, count));
 MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
 }
 }
@@ -42,11 +42,11 @@ namespace ams::kern {
 Result WaitForAddress(uintptr_t addr, ams::svc::ArbitrationType type, s32 value, s64 timeout) {
 switch (type) {
 case ams::svc::ArbitrationType_WaitIfLessThan:
-return this->WaitIfLessThan(addr, value, false, timeout);
+R_RETURN(this->WaitIfLessThan(addr, value, false, timeout));
 case ams::svc::ArbitrationType_DecrementAndWaitIfLessThan:
-return this->WaitIfLessThan(addr, value, true, timeout);
+R_RETURN(this->WaitIfLessThan(addr, value, true, timeout));
 case ams::svc::ArbitrationType_WaitIfEqual:
-return this->WaitIfEqual(addr, value, timeout);
+R_RETURN(this->WaitIfEqual(addr, value, timeout));
 MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
 }
 }
@ -42,11 +42,11 @@ namespace ams::kern {
|
|||
Result Detach(ams::svc::DeviceName device_name);
|
||||
|
||||
Result MapByForce(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm) {
|
||||
return this->Map(page_table, process_address, size, device_address, device_perm, false);
|
||||
R_RETURN(this->Map(page_table, process_address, size, device_address, device_perm, false));
|
||||
}
|
||||
|
||||
Result MapAligned(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm) {
|
||||
return this->Map(page_table, process_address, size, device_address, device_perm, true);
|
||||
R_RETURN(this->Map(page_table, process_address, size, device_address, device_perm, true));
|
||||
}
|
||||
|
||||
Result Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address);
|
||||
|
|
|
@ -72,7 +72,7 @@ namespace ams::kern {
|
|||
m_page_bitmap.SetBit(i);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
constexpr KVirtualAddress GetAddress() const { return m_address; }
|
||||
|
|
|
@ -93,7 +93,7 @@ namespace ams::kern {
|
|||
m_free_head_index = i;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE size_t GetTableSize() const { return m_table_size; }
|
||||
|
|
|
@ -46,7 +46,7 @@ namespace ams::kern {
|
|||
/* Try to perform a reset, succeeding unconditionally. */
|
||||
this->Reset();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
bool IsInitialized() const { return m_is_initialized; }
|
||||
|
|
|
@ -62,7 +62,7 @@ namespace ams::kern {
|
|||
bool IsServerClosed() const { return m_state != State::Normal; }
|
||||
bool IsClientClosed() const { return m_state != State::Normal; }
|
||||
|
||||
Result OnRequest(KThread *request_thread) { return m_server.OnRequest(request_thread); }
|
||||
Result OnRequest(KThread *request_thread) { R_RETURN(m_server.OnRequest(request_thread)); }
|
||||
|
||||
KLightClientSession &GetClientSession() { return m_client; }
|
||||
KLightServerSession &GetServerSession() { return m_server; }
|
||||
|
|
|
@ -41,7 +41,7 @@ namespace ams::kern {
|
|||
R_UNLESS(m_blocks[m_index + i] != nullptr, svc::ResultOutOfResource());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
public:
|
||||
KMemoryBlockManagerUpdateAllocator(Result *out_result, KMemoryBlockSlabManager *sm, size_t num_blocks = MaxBlocks) : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
|
||||
|
|
|
@ -284,16 +284,16 @@ namespace ams::kern {
|
|||
|
||||
Result CheckMemoryStateContiguous(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
|
||||
Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
|
||||
return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr);
|
||||
R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr));
|
||||
}
|
||||
|
||||
Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
|
||||
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
|
||||
Result CheckMemoryState(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
|
||||
return this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
|
||||
R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
|
||||
}
|
||||
Result CheckMemoryState(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
|
||||
return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
|
||||
R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
|
||||
}
|
||||
|
||||
Result LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr);
|
||||
|
@ -351,8 +351,8 @@ namespace ams::kern {
|
|||
Result SetMaxHeapSize(size_t size);
|
||||
Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const;
|
||||
Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const;
|
||||
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { return this->QueryMappingImpl(out, address, size, KMemoryState_Static); }
|
||||
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { return this->QueryMappingImpl(out, address, size, KMemoryState_Io); }
|
||||
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, KMemoryState_Static)); }
|
||||
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, KMemoryState_Io)); }
|
||||
Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
|
||||
Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
|
||||
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
|
||||
|
@ -364,15 +364,15 @@ namespace ams::kern {
|
|||
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
|
||||
|
||||
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm);
|
||||
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm));
|
||||
}
|
||||
|
||||
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
|
||||
return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm);
|
||||
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm));
|
||||
}
|
||||
|
||||
Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
return this->MapPages(out_addr, num_pages, PageSize, Null<KPhysicalAddress>, false, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm);
|
||||
R_RETURN(this->MapPages(out_addr, num_pages, PageSize, Null<KPhysicalAddress>, false, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm));
|
||||
}
|
||||
|
||||
Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm);
|
||||
|
|
|
@ -356,15 +356,15 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
|
||||
return m_cond_var.Wait(address, cv_key, tag, ns);
|
||||
R_RETURN(m_cond_var.Wait(address, cv_key, tag, ns));
|
||||
}
|
||||
|
||||
Result SignalAddressArbiter(uintptr_t address, ams::svc::SignalType signal_type, s32 value, s32 count) {
|
||||
return m_address_arbiter.SignalToAddress(address, signal_type, value, count);
|
||||
R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
|
||||
}
|
||||
|
||||
Result WaitAddressArbiter(uintptr_t address, ams::svc::ArbitrationType arb_type, s32 value, s64 timeout) {
|
||||
return m_address_arbiter.WaitForAddress(address, arb_type, value, timeout);
|
||||
R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
|
||||
}
|
||||
|
||||
Result GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count);
|
||||
|
@ -415,7 +415,7 @@ namespace ams::kern {
|
|||
|
||||
/* We succeeded, so note that we did. */
|
||||
m_is_handle_table_initialized = true;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void FinalizeHandleTable() {
|
||||
|
|
|
@ -44,7 +44,7 @@ namespace ams::kern {
|
|||
/* Try to perform a reset, succeeding unconditionally. */
|
||||
this->Reset();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
virtual bool IsSignaled() const override;
|
||||
|
|
|
@ -75,7 +75,7 @@ namespace ams::kern {
|
|||
bool IsServerClosed() const { return this->GetState() != State::Normal; }
|
||||
bool IsClientClosed() const { return this->GetState() != State::Normal; }
|
||||
|
||||
Result OnRequest(KSessionRequest *request) { return m_server.OnRequest(request); }
|
||||
Result OnRequest(KSessionRequest *request) { R_RETURN(m_server.OnRequest(request)); }
|
||||
|
||||
KClientSession &GetClientSession() { return m_client; }
|
||||
KServerSession &GetServerSession() { return m_server; }
|
||||
|
|
|
@ -190,15 +190,15 @@ namespace ams::kern {
|
|||
constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return m_mappings.GetExchangeCount(); }
|
||||
|
||||
ALWAYS_INLINE Result PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
|
||||
return m_mappings.PushSend(client, server, size, state);
|
||||
R_RETURN(m_mappings.PushSend(client, server, size, state));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
|
||||
return m_mappings.PushReceive(client, server, size, state);
|
||||
R_RETURN(m_mappings.PushReceive(client, server, size, state));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
|
||||
return m_mappings.PushExchange(client, server, size, state);
|
||||
R_RETURN(m_mappings.PushExchange(client, server, size, state));
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE KProcessAddress GetSendClientAddress(size_t i) const { return m_mappings.GetSendClientAddress(i); }
|
||||
|
|
|
@ -256,15 +256,15 @@ namespace ams::kern {
|
|||
static Result InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type);
|
||||
public:
|
||||
static Result InitializeKernelThread(KThread *thread, KThreadFunction func, uintptr_t arg, s32 prio, s32 virt_core) {
|
||||
return InitializeThread(thread, func, arg, Null<KProcessAddress>, prio, virt_core, nullptr, ThreadType_Kernel);
|
||||
R_RETURN(InitializeThread(thread, func, arg, Null<KProcessAddress>, prio, virt_core, nullptr, ThreadType_Kernel));
|
||||
}
|
||||
|
||||
static Result InitializeHighPriorityThread(KThread *thread, KThreadFunction func, uintptr_t arg) {
|
||||
return InitializeThread(thread, func, arg, Null<KProcessAddress>, 0, GetCurrentCoreId(), nullptr, ThreadType_HighPriority);
|
||||
R_RETURN(InitializeThread(thread, func, arg, Null<KProcessAddress>, 0, GetCurrentCoreId(), nullptr, ThreadType_HighPriority));
|
||||
}
|
||||
|
||||
static Result InitializeUserThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner) {
|
||||
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, ThreadType_User);
|
||||
R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, ThreadType_User));
|
||||
}
|
||||
|
||||
static void ResumeThreadsSuspendedForInit();
|
||||
|
|
|
@ -72,7 +72,7 @@ namespace ams::kern {
|
|||
|
||||
R_UNLESS(size >= m_current_size, svc::ResultLimitReached());
|
||||
m_limit_size = size;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
|
@ -41,15 +41,15 @@ namespace ams::kern {
|
|||
static_assert(std::derived_from<KPageTable, KPageTableBase>);
|
||||
|
||||
ALWAYS_INLINE Result KPageTableBase::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
|
||||
return static_cast<KPageTable *>(this)->OperateImpl(page_list, virt_addr, num_pages, phys_addr, is_pa_valid, properties, operation, reuse_ll);
|
||||
R_RETURN(static_cast<KPageTable *>(this)->OperateImpl(page_list, virt_addr, num_pages, phys_addr, is_pa_valid, properties, operation, reuse_ll));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result KPageTableBase::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) {
|
||||
return static_cast<KPageTable *>(this)->OperateImpl(page_list, virt_addr, num_pages, page_group, properties, operation, reuse_ll);
|
||||
R_RETURN(static_cast<KPageTable *>(this)->OperateImpl(page_list, virt_addr, num_pages, page_group, properties, operation, reuse_ll));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void KPageTableBase::FinalizeUpdate(PageLinkedList *page_list) {
|
||||
return static_cast<KPageTable *>(this)->FinalizeUpdateImpl(page_list);
|
||||
static_cast<KPageTable *>(this)->FinalizeUpdateImpl(page_list);
|
||||
}
|
||||
|
||||
}
|
|
@ -56,12 +56,12 @@ namespace ams::kern::svc {
|
|||
public:
|
||||
static ALWAYS_INLINE Result CopyFromUserspace(void *dst, const void *src, size_t size) {
|
||||
R_UNLESS(UserspaceAccess::CopyMemoryFromUser(dst, src, size), svc::ResultInvalidPointer());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE Result CopyToUserspace(void *dst, const void *src, size_t size) {
|
||||
R_UNLESS(UserspaceAccess::CopyMemoryToUser(dst, src, size), svc::ResultInvalidPointer());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -72,12 +72,12 @@ namespace ams::kern::svc {
|
|||
public:
|
||||
static ALWAYS_INLINE Result CopyFromUserspace(void *dst, const void *src, size_t size) {
|
||||
R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(dst, src, size), svc::ResultInvalidPointer());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE Result CopyToUserspace(void *dst, const void *src, size_t size) {
|
||||
R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(dst, src, size), svc::ResultInvalidPointer());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -88,12 +88,12 @@ namespace ams::kern::svc {
|
|||
public:
|
||||
static ALWAYS_INLINE Result CopyFromUserspace(void *dst, const void *src, size_t size) {
|
||||
R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned64Bit(dst, src, size), svc::ResultInvalidPointer());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
static ALWAYS_INLINE Result CopyToUserspace(void *dst, const void *src, size_t size) {
|
||||
R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned64Bit(dst, src, size), svc::ResultInvalidPointer());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -111,21 +111,21 @@ namespace ams::kern::svc {
|
|||
CT *m_ptr;
|
||||
private:
|
||||
ALWAYS_INLINE Result CopyToImpl(void *p, size_t size) const {
|
||||
return Traits::CopyFromUserspace(p, m_ptr, size);
|
||||
R_RETURN(Traits::CopyFromUserspace(p, m_ptr, size));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result CopyFromImpl(const void *p, size_t size) const {
|
||||
return Traits::CopyToUserspace(m_ptr, p, size);
|
||||
R_RETURN(Traits::CopyToUserspace(m_ptr, p, size));
|
||||
}
|
||||
protected:
|
||||
ALWAYS_INLINE Result CopyTo(T *p) const { return this->CopyToImpl(p, sizeof(*p)); }
|
||||
ALWAYS_INLINE Result CopyFrom(const T *p) const { return this->CopyFromImpl(p, sizeof(*p)); }
|
||||
ALWAYS_INLINE Result CopyTo(T *p) const { R_RETURN(this->CopyToImpl(p, sizeof(*p))); }
|
||||
ALWAYS_INLINE Result CopyFrom(const T *p) const { R_RETURN(this->CopyFromImpl(p, sizeof(*p))); }
|
||||
|
||||
ALWAYS_INLINE Result CopyArrayElementTo(T *p, size_t index) const { return Traits::CopyFromUserspace(p, m_ptr + index, sizeof(*p)); }
|
||||
ALWAYS_INLINE Result CopyArrayElementFrom(const T *p, size_t index) const { return Traits::CopyToUserspace(m_ptr + index, p, sizeof(*p)); }
|
||||
ALWAYS_INLINE Result CopyArrayElementTo(T *p, size_t index) const { R_RETURN(Traits::CopyFromUserspace(p, m_ptr + index, sizeof(*p))); }
|
||||
ALWAYS_INLINE Result CopyArrayElementFrom(const T *p, size_t index) const { R_RETURN(Traits::CopyToUserspace(m_ptr + index, p, sizeof(*p))); }
|
||||
|
||||
ALWAYS_INLINE Result CopyArrayTo(T *arr, size_t count) const { return this->CopyToImpl(arr, sizeof(*arr) * count); }
|
||||
ALWAYS_INLINE Result CopyArrayFrom(const T *arr, size_t count) const { return this->CopyFromImpl(arr, sizeof(*arr) * count); }
|
||||
ALWAYS_INLINE Result CopyArrayTo(T *arr, size_t count) const { R_RETURN(this->CopyToImpl(arr, sizeof(*arr) * count)); }
|
||||
ALWAYS_INLINE Result CopyArrayFrom(const T *arr, size_t count) const { R_RETURN(this->CopyFromImpl(arr, sizeof(*arr) * count)); }
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsNull() const { return m_ptr == nullptr; }
|
||||
|
||||
|
@ -145,11 +145,11 @@ namespace ams::kern::svc {
|
|||
ALWAYS_INLINE Result CopyStringTo(char *dst, size_t size) const {
|
||||
static_assert(sizeof(char) == 1);
|
||||
R_UNLESS(UserspaceAccess::CopyStringFromUser(dst, m_ptr, size) > 0, svc::ResultInvalidPointer());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result CopyArrayElementTo(char *dst, size_t index) const {
|
||||
return Traits::CopyFromUserspace(dst, m_ptr + index, sizeof(*dst));
|
||||
R_RETURN(Traits::CopyFromUserspace(dst, m_ptr + index, sizeof(*dst)));
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsNull() const { return m_ptr == nullptr; }
|
||||
|
|
|
@ -304,7 +304,7 @@ namespace ams::kern::arch::arm64::cpu {
|
|||
MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
|
||||
R_UNLESS(UserspaceAccess::InvalidateDataCache(start, end), svc::ResultInvalidCurrentMemory());
|
||||
DataSynchronizationBarrier();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result StoreDataCacheRange(uintptr_t start, uintptr_t end) {
|
||||
|
@ -312,7 +312,7 @@ namespace ams::kern::arch::arm64::cpu {
|
|||
MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
|
||||
R_UNLESS(UserspaceAccess::StoreDataCache(start, end), svc::ResultInvalidCurrentMemory());
|
||||
DataSynchronizationBarrier();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result FlushDataCacheRange(uintptr_t start, uintptr_t end) {
|
||||
|
@ -320,7 +320,7 @@ namespace ams::kern::arch::arm64::cpu {
|
|||
MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
|
||||
R_UNLESS(UserspaceAccess::FlushDataCache(start, end), svc::ResultInvalidCurrentMemory());
|
||||
DataSynchronizationBarrier();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result InvalidateInstructionCacheRange(uintptr_t start, uintptr_t end) {
|
||||
|
@ -328,7 +328,7 @@ namespace ams::kern::arch::arm64::cpu {
|
|||
MESOSPHERE_ASSERT(util::IsAligned(end, InstructionCacheLineSize));
|
||||
R_UNLESS(UserspaceAccess::InvalidateInstructionCache(start, end), svc::ResultInvalidCurrentMemory());
|
||||
EnsureInstructionConsistency();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void InvalidateEntireInstructionCacheLocalImpl() {
|
||||
|
@ -440,7 +440,7 @@ namespace ams::kern::arch::arm64::cpu {
|
|||
R_TRY(InvalidateDataCacheRange(aligned_start, aligned_end));
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result StoreDataCache(const void *addr, size_t size) {
|
||||
|
@ -448,7 +448,7 @@ namespace ams::kern::arch::arm64::cpu {
|
|||
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
|
||||
const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);
|
||||
|
||||
return StoreDataCacheRange(start, end);
|
||||
R_RETURN(StoreDataCacheRange(start, end));
|
||||
}
|
||||
|
||||
Result FlushDataCache(const void *addr, size_t size) {
|
||||
|
@ -456,7 +456,7 @@ namespace ams::kern::arch::arm64::cpu {
|
|||
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
|
||||
const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);
|
||||
|
||||
return FlushDataCacheRange(start, end);
|
||||
R_RETURN(FlushDataCacheRange(start, end));
|
||||
}
|
||||
|
||||
Result InvalidateInstructionCache(void *addr, size_t size) {
|
||||
|
@ -469,7 +469,7 @@ namespace ams::kern::arch::arm64::cpu {
|
|||
/* Request the interrupt helper to perform an instruction memory barrier. */
|
||||
g_cache_operation_handler.RequestOperation(KCacheHelperInterruptHandler::Operation::InstructionMemoryBarrier);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void InvalidateEntireInstructionCache() {
|
||||
|
|
|
@ -131,7 +131,7 @@ namespace ams::kern::arch::arm64 {
|
|||
}
|
||||
|
||||
/* Get the FPU context. */
|
||||
return this->GetFpuContext(out, thread, context_flags);
|
||||
R_RETURN(this->GetFpuContext(out, thread, context_flags));
|
||||
}
|
||||
|
||||
Result KDebug::SetThreadContextImpl(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) {
|
||||
|
@ -180,7 +180,7 @@ namespace ams::kern::arch::arm64 {
|
|||
}
|
||||
|
||||
/* Set the FPU context. */
|
||||
return this->SetFpuContext(ctx, thread, context_flags);
|
||||
R_RETURN(this->SetFpuContext(ctx, thread, context_flags));
|
||||
}
|
||||
|
||||
Result KDebug::GetFpuContext(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags) {
|
||||
|
@ -218,7 +218,7 @@ namespace ams::kern::arch::arm64 {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebug::SetFpuContext(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) {
|
||||
|
@ -243,11 +243,11 @@ namespace ams::kern::arch::arm64 {
|
|||
t_ctx->SetFpuRegisters(ctx.v, this->Is64Bit());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebug::BreakIfAttached(ams::svc::BreakReason break_reason, uintptr_t address, size_t size) {
|
||||
return KDebugBase::OnDebugEvent(ams::svc::DebugEvent_Exception, ams::svc::DebugException_UserBreak, GetProgramCounter(GetCurrentThread()), break_reason, address, size);
|
||||
R_RETURN(KDebugBase::OnDebugEvent(ams::svc::DebugEvent_Exception, ams::svc::DebugException_UserBreak, GetProgramCounter(GetCurrentThread()), break_reason, address, size));
|
||||
}
|
||||
|
||||
#define MESOSPHERE_SET_HW_BREAK_POINT(ID, FLAGS, VALUE) \
|
||||
|
@ -384,10 +384,10 @@ namespace ams::kern::arch::arm64 {
|
|||
}
|
||||
} else {
|
||||
/* Invalid name. */
|
||||
return svc::ResultInvalidEnumValue();
|
||||
R_THROW(svc::ResultInvalidEnumValue());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
#undef MESOSPHERE_SET_HW_WATCH_POINT
|
||||
|
|
|
@ -215,12 +215,12 @@ namespace ams::kern::arch::arm64 {
|
|||
if (KInterruptController::IsGlobal(irq)) {
|
||||
KScopedInterruptDisable di;
|
||||
KScopedSpinLock lk(this->GetGlobalInterruptLock());
|
||||
return this->BindGlobal(handler, irq, core_id, priority, manual_clear, level);
|
||||
R_RETURN(this->BindGlobal(handler, irq, core_id, priority, manual_clear, level));
|
||||
} else {
|
||||
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
|
||||
|
||||
KScopedInterruptDisable di;
|
||||
return this->BindLocal(handler, irq, priority, manual_clear);
|
||||
R_RETURN(this->BindLocal(handler, irq, priority, manual_clear));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -234,12 +234,12 @@ namespace ams::kern::arch::arm64 {
|
|||
KScopedInterruptDisable di;
|
||||
|
||||
KScopedSpinLock lk(this->GetGlobalInterruptLock());
|
||||
return this->UnbindGlobal(irq);
|
||||
R_RETURN(this->UnbindGlobal(irq));
|
||||
} else {
|
||||
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
|
||||
|
||||
KScopedInterruptDisable di;
|
||||
return this->UnbindLocal(irq);
|
||||
R_RETURN(this->UnbindLocal(irq));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -252,12 +252,12 @@ namespace ams::kern::arch::arm64 {
|
|||
if (KInterruptController::IsGlobal(irq)) {
|
||||
KScopedInterruptDisable di;
|
||||
KScopedSpinLock lk(this->GetGlobalInterruptLock());
|
||||
return this->ClearGlobal(irq);
|
||||
R_RETURN(this->ClearGlobal(irq));
|
||||
} else {
|
||||
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
|
||||
|
||||
KScopedInterruptDisable di;
|
||||
return this->ClearLocal(irq);
|
||||
R_RETURN(this->ClearLocal(irq));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -288,7 +288,7 @@ namespace ams::kern::arch::arm64 {
|
|||
m_interrupt_controller.SetPriorityLevel(irq, priority);
|
||||
m_interrupt_controller.Enable(irq);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KInterruptManager::BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear) {
|
||||
|
@ -311,7 +311,7 @@ namespace ams::kern::arch::arm64 {
|
|||
m_interrupt_controller.SetPriorityLevel(irq, priority);
|
||||
m_interrupt_controller.Enable(irq);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KInterruptManager::UnbindGlobal(s32 irq) {
|
||||
|
@ -323,7 +323,7 @@ namespace ams::kern::arch::arm64 {
|
|||
|
||||
GetGlobalInterruptEntry(irq).handler = nullptr;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KInterruptManager::UnbindLocal(s32 irq) {
|
||||
|
@ -335,7 +335,7 @@ namespace ams::kern::arch::arm64 {
|
|||
|
||||
entry.handler = nullptr;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KInterruptManager::ClearGlobal(s32 irq) {
|
||||
|
@ -350,7 +350,7 @@ namespace ams::kern::arch::arm64 {
|
|||
/* Clear and enable. */
|
||||
entry.needs_clear = false;
|
||||
m_interrupt_controller.Enable(irq);
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KInterruptManager::ClearLocal(s32 irq) {
|
||||
|
@ -365,7 +365,7 @@ namespace ams::kern::arch::arm64 {
|
|||
/* Clear and set priority. */
|
||||
entry.needs_clear = false;
|
||||
m_interrupt_controller.SetPriorityLevel(irq, entry.priority);
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -178,7 +178,7 @@ namespace ams::kern::arch::arm64 {
|
|||
/* Initialize the base page table. */
|
||||
MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager, KResourceLimit *resource_limit) {
|
||||
|
@@ -187,7 +187,7 @@ namespace ams::kern::arch::arm64 {

 /* Get an ASID */
 m_asid = g_asid_manager.Reserve();
-auto asid_guard = SCOPE_GUARD { g_asid_manager.Release(m_asid); };
+ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };

 /* Set our manager. */
 m_manager = pt_manager;
@@ -196,7 +196,7 @@ namespace ams::kern::arch::arm64 {
 const KVirtualAddress new_table = m_manager->Allocate();
 R_UNLESS(new_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
 m_ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), m_asid);
-auto table_guard = SCOPE_GUARD { m_manager->Free(new_table); };
+ON_RESULT_FAILURE_2 { m_manager->Free(new_table); };

 /* Initialize our base table. */
 const size_t as_width = GetAddressSpaceWidth(as_type);
@@ -204,13 +204,9 @@ namespace ams::kern::arch::arm64 {
 const KProcessAddress as_end = (1ul << as_width);
 R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, mem_block_slab_manager, block_info_manager, resource_limit));

-/* We succeeded! */
-table_guard.Cancel();
-asid_guard.Cancel();
-
 /* Note that we've updated the table (since we created it). */
 this->NoteUpdated();
-return ResultSuccess();
+R_SUCCEED();
 }

 Result KPageTable::Finalize() {
@ -316,7 +312,7 @@ namespace ams::kern::arch::arm64 {
|
|||
/* Release our asid. */
|
||||
g_asid_manager.Release(m_asid);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTable::OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
|
||||
|
@@ -334,17 +330,17 @@ namespace ams::kern::arch::arm64 {
}

if (operation == OperationType_Unmap) {
return this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll);
R_RETURN(this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll));
} else {
auto entry_template = this->GetEntryTemplate(properties);

switch (operation) {
case OperationType_Map:
return this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll);
R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
case OperationType_ChangePermissions:
return this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, page_list, reuse_ll);
R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, page_list, reuse_ll));
case OperationType_ChangePermissionsAndRefresh:
return this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, page_list, reuse_ll);
R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}

@@ -361,7 +357,7 @@ namespace ams::kern::arch::arm64 {
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_MapGroup:
return this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll);
R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}

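Throughout the diff, return expr; on a Result-returning call becomes R_RETURN(expr), a bare success becomes R_SUCCEED(), and an error return becomes R_THROW(err), so every exit from a Result function goes through one macro funnel. The shape below is only a plausible minimal reading of that funnel, using a simplified stand-in Result type; the real macros additionally interact with the ON_RESULT_FAILURE handlers and are defined in Atmosphere's result headers, which this commit does not show:

    #include <cstdio>

    struct Result {
        int value;
        bool IsFailure() const { return value != 0; }
    };
    constexpr Result ResultSuccess() { return Result { 0 }; }

    /* Route every function exit through one place, so hooks (failure handlers,
       tracing, assertions) only need to attach to a single return path. */
    #define R_RETURN(expr)                            \
        {                                             \
            const Result r_return_value_ = (expr);    \
            return r_return_value_;                   \
        }

    #define R_SUCCEED()   R_RETURN(ResultSuccess())
    #define R_THROW(expr) R_RETURN(expr)

    Result MapSomething(bool ok) {
        if (!ok) {
            R_THROW(Result { 1 });
        }
        R_SUCCEED();
    }

    Result Operate(int operation) {
        switch (operation) {
            case 0:  R_RETURN(MapSomething(true));
            case 1:  R_RETURN(MapSomething(false));
            default: R_THROW(Result { 2 });
        }
    }

    int main() {
        std::printf("%d %d %d\n", Operate(0).value, Operate(1).value, Operate(2).value);
        return 0;
    }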
@ -388,7 +384,7 @@ namespace ams::kern::arch::arm64 {
|
|||
phys_addr += L1BlockSize;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTable::MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
|
||||
|
@ -447,7 +443,7 @@ namespace ams::kern::arch::arm64 {
|
|||
this->GetPageTableManager().Open(l2_virt, l2_open_count);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTable::MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
|
||||
|
@ -503,7 +499,8 @@ namespace ams::kern::arch::arm64 {
|
|||
} else if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
|
||||
this->GetPageTableManager().Open(l2_virt, l2_open_count);
|
||||
}
|
||||
return svc::ResultOutOfResource();
|
||||
|
||||
R_THROW(svc::ResultOutOfResource());
|
||||
}
|
||||
|
||||
/* Set the entry. */
|
||||
|
@ -551,7 +548,7 @@ namespace ams::kern::arch::arm64 {
|
|||
this->GetPageTableManager().Open(l3_virt, l3_open_count);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) {
|
||||
|
@ -563,13 +560,13 @@ namespace ams::kern::arch::arm64 {
|
|||
if (!force) {
|
||||
const size_t size = num_pages * PageSize;
|
||||
R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
|
||||
ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
|
||||
|
||||
if (num_pages > 1) {
|
||||
const auto end_page = virt_addr + size;
|
||||
const auto last_page = end_page - PageSize;
|
||||
|
||||
auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
|
||||
R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
|
||||
merge_guard.Cancel();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -717,7 +714,7 @@ namespace ams::kern::arch::arm64 {
|
|||
this->NoteUpdated();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
|
||||
|
@ -731,7 +728,7 @@ namespace ams::kern::arch::arm64 {
|
|||
|
||||
/* Map the pages, using a guard to ensure we don't leak. */
|
||||
{
|
||||
auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
|
||||
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
|
||||
|
||||
if (num_pages < ContiguousPageSize / PageSize) {
|
||||
R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));
|
||||
|
@ -778,9 +775,6 @@ namespace ams::kern::arch::arm64 {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* We successfully mapped, so cancel our guard. */
|
||||
map_guard.Cancel();
|
||||
}
|
||||
|
||||
/* Perform what coalescing we can. */
|
||||
|
@ -794,7 +788,7 @@ namespace ams::kern::arch::arm64 {
|
|||
Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
|
||||
|
@ -810,7 +804,7 @@ namespace ams::kern::arch::arm64 {
|
|||
|
||||
/* Map the pages, using a guard to ensure we don't leak. */
|
||||
{
|
||||
auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
|
||||
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
|
||||
|
||||
if (num_pages < ContiguousPageSize / PageSize) {
|
||||
for (const auto &block : pg) {
|
||||
|
@ -875,9 +869,6 @@ namespace ams::kern::arch::arm64 {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* We successfully mapped, so cancel our guard. */
|
||||
map_guard.Cancel();
|
||||
}
|
||||
MESOSPHERE_ASSERT(mapped_pages == num_pages);
|
||||
|
||||
|
@ -889,7 +880,7 @@ namespace ams::kern::arch::arm64 {
|
|||
|
||||
/* We succeeded! We want to persist the reference to the pages. */
|
||||
spg.CancelClose();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
bool KPageTable::MergePages(KProcessAddress virt_addr, PageLinkedList *page_list) {
|
||||
|
@@ -1184,18 +1175,17 @@ namespace ams::kern::arch::arm64 {
}

/* We're done! */
return ResultSuccess();
R_SUCCEED();
}

Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

/* Try to separate pages, re-merging if we fail. */
auto guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
R_TRY(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
guard.Cancel();
/* If we fail while separating, re-merge. */
ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };

return ResultSuccess();
/* Try to separate pages. */
R_RETURN(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
}

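In the new SeparatePages, the re-merge handler is registered before the fallible call, and the call itself becomes the tail R_RETURN; there is no separate success return left to guard. The snippet below is a stand-in demonstration (toy types, not the kernel's) of why that ordering works in C++: a hook object declared before the call is destroyed after the returned Result has been produced, so it can still observe whether the function is failing:

    #include <cstdio>

    struct Result {
        int value;
        bool IsFailure() const { return value != 0; }
    };

    /* Destructor fires after the enclosing function's return value is computed,
       so it sees the final Result even though it was declared first. */
    struct MergeOnFailure {
        const Result &result;
        ~MergeOnFailure() { if (result.IsFailure()) { std::puts("re-merge pages"); } }
    };

    Result SeparatePagesImpl(bool ok) { return Result { ok ? 0 : 1 }; }

    Result SeparatePages(bool ok) {
        Result result { 0 };                 /* slot observed by the hook */
        MergeOnFailure hook { result };      /* like ON_RESULT_FAILURE { MergePages(...); } */
        result = SeparatePagesImpl(ok);      /* like R_RETURN(SeparatePagesImpl(...)) */
        return result;
    }

    int main() {
        SeparatePages(true);    /* no output */
        SeparatePages(false);   /* prints "re-merge pages" */
        return 0;
    }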
Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll) {

@ -1208,9 +1198,9 @@ namespace ams::kern::arch::arm64 {
|
|||
const auto end_page = virt_addr + size;
|
||||
const auto last_page = end_page - PageSize;
|
||||
|
||||
auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
|
||||
ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
|
||||
|
||||
R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
|
||||
merge_guard.Cancel();
|
||||
}
|
||||
|
||||
/* ===================================================== */
|
||||
|
@ -1426,7 +1416,7 @@ namespace ams::kern::arch::arm64 {
|
|||
this->MergePages(virt_addr + (num_pages - 1) * PageSize, page_list);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KPageTable::FinalizeUpdateImpl(PageLinkedList *page_list) {

@@ -150,12 +150,12 @@ namespace ams::kern::arch::arm64 {
/* Lock the context, if we're a main thread. */
m_locked = is_main;

return ResultSuccess();
R_SUCCEED();
}

Result KThreadContext::Finalize() {
/* This doesn't actually do anything. */
return ResultSuccess();
R_SUCCEED();
}

void KThreadContext::SetArguments(uintptr_t arg0, uintptr_t arg1) {

@ -334,7 +334,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
for (size_t i = 0; i < num_reserved; i++) {
|
||||
this->ReleaseImpl(out[i]);
|
||||
}
|
||||
return svc::ResultOutOfResource();
|
||||
R_THROW(svc::ResultOutOfResource());
|
||||
}
|
||||
|
||||
void Release(u8 asid) {
|
||||
|
@ -788,7 +788,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
}
|
||||
|
||||
/* Ensure that we clean up the tables on failure. */
|
||||
auto table_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
for (size_t i = start_index; i <= end_index; ++i) {
|
||||
if (m_tables[i] != Null<KVirtualAddress> && ptm.Close(m_tables[i], 1)) {
|
||||
ptm.Free(m_tables[i]);
|
||||
|
@ -834,8 +834,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
}
|
||||
|
||||
/* We succeeded. */
|
||||
table_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KDevicePageTable::Finalize() {
|
||||
|
@ -915,14 +914,15 @@ namespace ams::kern::board::nintendo::nx {
|
|||
if (ReadMcRegister(reg_offset) != new_val) {
|
||||
WriteMcRegister(reg_offset, old_val);
|
||||
SmmuSynchronizationBarrier();
|
||||
return svc::ResultNotFound();
|
||||
|
||||
R_THROW(svc::ResultNotFound());
|
||||
}
|
||||
}
|
||||
|
||||
/* Mark the device as attached. */
|
||||
m_attached_device |= (1ul << device_name);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDevicePageTable::Detach(ams::svc::DeviceName device_name) {
|
||||
|
@ -962,7 +962,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
/* Mark the device as detached. */
|
||||
m_attached_device &= ~(1ul << device_name);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
bool KDevicePageTable::IsFree(KDeviceVirtualAddress address, u64 size) const {
|
||||
|
@ -1112,7 +1112,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDevicePageTable::MapImpl(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
|
||||
|
@ -1120,7 +1120,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
R_UNLESS(this->IsFree(device_address, size), svc::ResultInvalidCurrentMemory());
|
||||
|
||||
/* Ensure that if we fail, we unmap anything we mapped. */
|
||||
auto unmap_guard = SCOPE_GUARD { this->UnmapImpl(device_address, size, false); };
|
||||
ON_RESULT_FAILURE { this->UnmapImpl(device_address, size, false); };
|
||||
|
||||
/* Iterate, mapping device pages. */
|
||||
KDeviceVirtualAddress cur_addr = device_address;
|
||||
|
@ -1148,10 +1148,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
mapped_size += cur_size;
|
||||
}
|
||||
|
||||
/* We're done, so cancel our guard. */
|
||||
unmap_guard.Cancel();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KDevicePageTable::UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force) {
|
||||
|
@ -1423,7 +1420,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
|
||||
|
||||
/* Map the pages. */
|
||||
return this->MapImpl(page_table, process_address, size, device_address, device_perm, is_aligned);
|
||||
R_RETURN(this->MapImpl(page_table, process_address, size, device_address, device_perm, is_aligned));
|
||||
}
|
||||
|
||||
Result KDevicePageTable::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) {
|
||||
|
@ -1437,7 +1434,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
/* Unmap the pages. */
|
||||
this->UnmapImpl(device_address, size, false);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -256,7 +256,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
g_secure_applet_memory_used = true;
|
||||
*out = g_secure_applet_memory_address;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void FreeSecureMemoryForApplet(KVirtualAddress address, size_t size) {
|
||||
|
@ -475,7 +475,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
R_UNLESS(AMS_LIKELY(util::IsAligned(address, sizeof(u32))), svc::ResultInvalidAddress());
|
||||
R_UNLESS(AMS_LIKELY(IsRegisterAccessibleToUser(address)), svc::ResultInvalidAddress());
|
||||
R_UNLESS(AMS_LIKELY(smc::ReadWriteRegister(out, address, mask, value)), svc::ResultInvalidAddress());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
/* Randomness. */
|
||||
|
@ -622,7 +622,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
Result KSystemControl::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
|
||||
/* Applet secure memory is handled separately. */
|
||||
if (pool == KMemoryManager::Pool_Applet) {
|
||||
return AllocateSecureMemoryForApplet(out, size);
|
||||
R_RETURN(AllocateSecureMemoryForApplet(out, size));
|
||||
}
|
||||
|
||||
/* Ensure the size is aligned. */
|
||||
|
@ -635,7 +635,7 @@ namespace ams::kern::board::nintendo::nx {
|
|||
R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
|
||||
|
||||
/* Ensure we don't leak references to the memory on error. */
|
||||
auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(paddr, num_pages); };
|
||||
ON_RESULT_FAILURE { Kernel::GetMemoryManager().Close(paddr, num_pages); };
|
||||
|
||||
/* If the memory isn't already secure, set it as secure. */
|
||||
if (pool != KMemoryManager::Pool_System) {
|
||||
|
@ -644,9 +644,8 @@ namespace ams::kern::board::nintendo::nx {
|
|||
}
|
||||
|
||||
/* We succeeded. */
|
||||
mem_guard.Cancel();
|
||||
*out = KPageTable::GetHeapVirtualAddress(paddr);
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KSystemControl::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {
|
||||
|
|
|
@ -392,7 +392,7 @@ namespace ams::kern::board::nintendo::nx::lps {
|
|||
/* Instruct BPMP to enable suspend-to-sc7. */
|
||||
R_UNLESS(BpmpEnableSuspend(TEGRA_BPMP_PM_SC7, 0) == 0, svc::ResultInvalidState());
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry) {
|
||||
|
|
|
@ -55,7 +55,7 @@ namespace ams::kern {
|
|||
Result PutUserString(ams::kern::svc::KUserPointer<const char *> user_str, size_t len) {
|
||||
/* Only print if the implementation is initialized. */
|
||||
if (!g_initialized_impl) {
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
#if defined(MESOSPHERE_DEBUG_LOG_USE_SEMIHOSTING)
|
||||
|
@ -78,7 +78,7 @@ namespace ams::kern {
|
|||
KDebugLogImpl::Flush();
|
||||
#endif
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -173,7 +173,7 @@ namespace ams::kern {
|
|||
MESOSPHERE_UNUSED(user_str, len);
|
||||
#endif
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KDebugLog::Save() {
|
||||
|
|
|
@ -82,7 +82,7 @@ namespace ams::kern {
|
|||
++num_waiters;
|
||||
}
|
||||
}
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KAddressArbiter::SignalAndIncrementIfEqual(uintptr_t addr, s32 value, s32 count) {
|
||||
|
@ -109,7 +109,7 @@ namespace ams::kern {
|
|||
++num_waiters;
|
||||
}
|
||||
}
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uintptr_t addr, s32 value, s32 count) {
|
||||
|
@ -171,7 +171,7 @@ namespace ams::kern {
|
|||
++num_waiters;
|
||||
}
|
||||
}
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KAddressArbiter::WaitIfLessThan(uintptr_t addr, s32 value, bool decrement, s64 timeout) {
|
||||
|
@ -186,7 +186,7 @@ namespace ams::kern {
|
|||
/* Check that the thread isn't terminating. */
|
||||
if (cur_thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultTerminationRequested();
|
||||
R_THROW(svc::ResultTerminationRequested());
|
||||
}
|
||||
|
||||
/* Read the value from userspace. */
|
||||
|
@@ -200,19 +200,19 @@ namespace ams::kern {

if (!succeeded) {
slp.CancelSleep();
return svc::ResultInvalidCurrentMemory();
R_THROW(svc::ResultInvalidCurrentMemory());
}

/* Check that the value is less than the specified one. */
if (user_value >= value) {
slp.CancelSleep();
return svc::ResultInvalidState();
R_THROW(svc::ResultInvalidState());
}

/* Check that the timeout is non-zero. */
if (timeout == 0) {
slp.CancelSleep();
return svc::ResultTimedOut();
R_THROW(svc::ResultTimedOut());
}

/* Set the arbiter. */

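In these early-out checks the error returns become R_THROW, but the slp.CancelSleep() calls stay explicit: the sleep must be cancelled on exactly these pre-wait branches, not on every failure of the function, so it is not a candidate for ON_RESULT_FAILURE. A stand-in sketch of that validation-ladder shape (toy types only, not the kernel's KThread or scoped scheduler-lock-and-sleep helper):

    #include <cstdio>

    struct Result { int value; };

    /* Stand-in for the scheduler-lock-and-sleep helper: arming happens up front,
       and branches that decide not to wait must cancel explicitly. */
    struct SleepSetup {
        bool will_sleep = true;
        void CancelSleep() { will_sleep = false; }
    };

    Result WaitIfLessThan(int user_value, int value, long timeout) {
        SleepSetup slp;

        /* Validation ladder: each rejected branch cancels the pending sleep and
           returns its own error result (R_THROW in the kernel code above). */
        if (user_value >= value) {
            slp.CancelSleep();
            return Result { 1 };     /* like R_THROW(svc::ResultInvalidState()) */
        }
        if (timeout == 0) {
            slp.CancelSleep();
            return Result { 2 };     /* like R_THROW(svc::ResultTimedOut()) */
        }

        /* ...otherwise the thread would go to sleep and wait here... */
        return Result { 0 };         /* like R_RETURN(cur_thread->GetWaitResult()) */
    }

    int main() {
        std::printf("%d\n", WaitIfLessThan(3, 5, 0).value);   /* prints 2 */
        return 0;
    }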
@ -225,7 +225,7 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* Get the wait result. */
|
||||
return cur_thread->GetWaitResult();
|
||||
R_RETURN(cur_thread->GetWaitResult());
|
||||
}
|
||||
|
||||
Result KAddressArbiter::WaitIfEqual(uintptr_t addr, s32 value, s64 timeout) {
|
||||
|
@ -240,26 +240,26 @@ namespace ams::kern {
|
|||
/* Check that the thread isn't terminating. */
|
||||
if (cur_thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultTerminationRequested();
|
||||
R_THROW(svc::ResultTerminationRequested());
|
||||
}
|
||||
|
||||
/* Read the value from userspace. */
|
||||
s32 user_value;
|
||||
if (!ReadFromUser(std::addressof(user_value), addr)) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultInvalidCurrentMemory();
|
||||
R_THROW(svc::ResultInvalidCurrentMemory());
|
||||
}
|
||||
|
||||
/* Check that the value is equal. */
|
||||
if (value != user_value) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultInvalidState();
|
||||
R_THROW(svc::ResultInvalidState());
|
||||
}
|
||||
|
||||
/* Check that the timeout is non-zero. */
|
||||
if (timeout == 0) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultTimedOut();
|
||||
R_THROW(svc::ResultTimedOut());
|
||||
}
|
||||
|
||||
/* Set the arbiter. */
|
||||
|
@ -272,7 +272,7 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* Get the wait result. */
|
||||
return cur_thread->GetWaitResult();
|
||||
R_RETURN(cur_thread->GetWaitResult());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -38,7 +38,7 @@ namespace ams::kern {
|
|||
m_intended_kernel_version.Set<KernelVersion::MinorVersion>(ams::svc::SupportedKernelMinorVersion);
|
||||
|
||||
/* Parse the capabilities array. */
|
||||
return this->SetCapabilities(caps, num_caps, page_table);
|
||||
R_RETURN(this->SetCapabilities(caps, num_caps, page_table));
|
||||
}
|
||||
|
||||
Result KCapabilities::Initialize(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table) {
|
||||
|
@ -55,7 +55,7 @@ namespace ams::kern {
|
|||
m_priority_mask = 0;
|
||||
|
||||
/* Parse the user capabilities array. */
|
||||
return this->SetCapabilities(user_caps, num_caps, page_table);
|
||||
R_RETURN(this->SetCapabilities(user_caps, num_caps, page_table));
|
||||
}
|
||||
|
||||
Result KCapabilities::SetCorePriorityCapability(const util::BitPack32 cap) {
|
||||
|
@ -93,7 +93,7 @@ namespace ams::kern {
|
|||
/* Processes must not have access to kernel thread priorities. */
|
||||
R_UNLESS((m_priority_mask & 0xF) == 0, svc::ResultInvalidArgument());
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCapabilities::SetSyscallMaskCapability(const util::BitPack32 cap, u32 &set_svc) {
|
||||
|
@ -113,7 +113,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCapabilities::MapRange(const util::BitPack32 cap, const util::BitPack32 size_cap, KProcessPageTable *page_table) {
|
||||
|
@ -136,9 +136,9 @@ namespace ams::kern {
|
|||
/* Do the mapping. */
|
||||
const KMemoryPermission perm = cap.Get<MapRange::ReadOnly>() ? KMemoryPermission_UserRead : KMemoryPermission_UserReadWrite;
|
||||
if (size_cap.Get<MapRangeSize::Normal>()) {
|
||||
return page_table->MapStatic(phys_addr, size, perm);
|
||||
R_RETURN(page_table->MapStatic(phys_addr, size, perm));
|
||||
} else {
|
||||
return page_table->MapIo(phys_addr, size, perm);
|
||||
R_RETURN(page_table->MapIo(phys_addr, size, perm));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -153,7 +153,7 @@ namespace ams::kern {
|
|||
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, svc::ResultInvalidAddress());
|
||||
|
||||
/* Do the mapping. */
|
||||
return page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite);
|
||||
R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
|
||||
}
|
||||
|
||||
Result KCapabilities::MapRegion(const util::BitPack32 cap, KProcessPageTable *page_table) {
|
||||
|
@ -181,11 +181,11 @@ namespace ams::kern {
|
|||
R_TRY(page_table->MapRegion(MemoryRegions[static_cast<u32>(type)], perm));
|
||||
break;
|
||||
default:
|
||||
return svc::ResultNotFound();
|
||||
R_THROW(svc::ResultNotFound());
|
||||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCapabilities::SetInterruptPairCapability(const util::BitPack32 cap) {
|
||||
|
@ -199,7 +199,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCapabilities::SetProgramTypeCapability(const util::BitPack32 cap) {
|
||||
|
@ -207,7 +207,7 @@ namespace ams::kern {
|
|||
R_UNLESS(cap.Get<ProgramType::Reserved>() == 0, svc::ResultReservedUsed());
|
||||
|
||||
m_program_type = cap.Get<ProgramType::Type>();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCapabilities::SetKernelVersionCapability(const util::BitPack32 cap) {
|
||||
|
@ -218,7 +218,7 @@ namespace ams::kern {
|
|||
m_intended_kernel_version = cap;
|
||||
R_UNLESS(m_intended_kernel_version.Get<KernelVersion::MajorVersion>() != 0, svc::ResultInvalidArgument());
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCapabilities::SetHandleTableCapability(const util::BitPack32 cap) {
|
||||
|
@ -226,7 +226,7 @@ namespace ams::kern {
|
|||
R_UNLESS(cap.Get<HandleTable::Reserved>() == 0, svc::ResultReservedUsed());
|
||||
|
||||
m_handle_table_size = cap.Get<HandleTable::Size>();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCapabilities::SetDebugFlagsCapability(const util::BitPack32 cap) {
|
||||
|
@ -235,7 +235,7 @@ namespace ams::kern {
|
|||
|
||||
m_debug_capabilities.Set<DebugFlags::AllowDebug>(cap.Get<DebugFlags::AllowDebug>());
|
||||
m_debug_capabilities.Set<DebugFlags::ForceDebug>(cap.Get<DebugFlags::ForceDebug>());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCapabilities::SetCapability(const util::BitPack32 cap, u32 &set_flags, u32 &set_svc, KProcessPageTable *page_table) {
|
||||
|
@@ -253,16 +253,16 @@ namespace ams::kern {

/* Process the capability. */
switch (type) {
case CapabilityType::CorePriority: return this->SetCorePriorityCapability(cap);
case CapabilityType::SyscallMask: return this->SetSyscallMaskCapability(cap, set_svc);
case CapabilityType::MapIoPage: return this->MapIoPage(cap, page_table);
case CapabilityType::MapRegion: return this->MapRegion(cap, page_table);
case CapabilityType::InterruptPair: return this->SetInterruptPairCapability(cap);
case CapabilityType::ProgramType: return this->SetProgramTypeCapability(cap);
case CapabilityType::KernelVersion: return this->SetKernelVersionCapability(cap);
case CapabilityType::HandleTable: return this->SetHandleTableCapability(cap);
case CapabilityType::DebugFlags: return this->SetDebugFlagsCapability(cap);
default: return svc::ResultInvalidArgument();
case CapabilityType::CorePriority: R_RETURN(this->SetCorePriorityCapability(cap));
case CapabilityType::SyscallMask: R_RETURN(this->SetSyscallMaskCapability(cap, set_svc));
case CapabilityType::MapIoPage: R_RETURN(this->MapIoPage(cap, page_table));
case CapabilityType::MapRegion: R_RETURN(this->MapRegion(cap, page_table));
case CapabilityType::InterruptPair: R_RETURN(this->SetInterruptPairCapability(cap));
case CapabilityType::ProgramType: R_RETURN(this->SetProgramTypeCapability(cap));
case CapabilityType::KernelVersion: R_RETURN(this->SetKernelVersionCapability(cap));
case CapabilityType::HandleTable: R_RETURN(this->SetHandleTableCapability(cap));
case CapabilityType::DebugFlags: R_RETURN(this->SetDebugFlagsCapability(cap));
default: R_THROW(svc::ResultInvalidArgument());
}
}

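Each case forwards its setter's Result unchanged through R_RETURN, and the default case reports its own error with R_THROW, so a bad capability word surfaces as whichever setter or default error produced it. A reduced, self-contained dispatch in the same spirit, with hypothetical capability kinds and a simplified Result type:

    #include <cstdio>

    struct Result {
        int value;
        bool IsFailure() const { return value != 0; }
    };
    constexpr Result ResultSuccess()         { return Result { 0 }; }
    constexpr Result ResultInvalidArgument() { return Result { 0xE401 }; }

    enum class CapabilityType { CorePriority, HandleTable, Unknown };

    Result SetCorePriorityCapability(unsigned cap) { return (cap & 0xF) == 0 ? ResultSuccess() : ResultInvalidArgument(); }
    Result SetHandleTableCapability(unsigned cap)  { return cap < 1024 ? ResultSuccess() : ResultInvalidArgument(); }

    /* Dispatch returns each setter's result unchanged; unknown types are their own error. */
    Result SetCapability(CapabilityType type, unsigned cap) {
        switch (type) {
            case CapabilityType::CorePriority: return SetCorePriorityCapability(cap);  /* R_RETURN(...) */
            case CapabilityType::HandleTable:  return SetHandleTableCapability(cap);   /* R_RETURN(...) */
            default:                           return ResultInvalidArgument();         /* R_THROW(...)  */
        }
    }

    int main() {
        std::printf("%d\n", SetCapability(CapabilityType::HandleTable, 64).value);    /* prints 0 */
        std::printf("%d\n", SetCapability(CapabilityType::Unknown, 0).IsFailure());   /* prints 1 */
        return 0;
    }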
@ -286,7 +286,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCapabilities::SetCapabilities(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table) {
|
||||
|
@ -317,7 +317,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -77,19 +77,15 @@ namespace ams::kern {
/* Try to allocate a session from unused slab memory. */
session = KSession::CreateFromUnusedSlabMemory();
R_UNLESS(session != nullptr, svc::ResultLimitReached());
ON_RESULT_FAILURE { session->Close(); };

/* Ensure that if we fail to allocate our session requests, we close the session we created. */
auto session_guard = SCOPE_GUARD { session->Close(); };
{
/* We want to add two KSessionRequests to the heap, to prevent request exhaustion. */
for (size_t i = 0; i < 2; ++i) {
KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
R_UNLESS(request != nullptr, svc::ResultLimitReached());
/* We want to add two KSessionRequests to the heap, to prevent request exhaustion. */
for (size_t i = 0; i < 2; ++i) {
KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
R_UNLESS(request != nullptr, svc::ResultLimitReached());

request->Close();
}
request->Close();
}
session_guard.Cancel();

/* We successfully allocated a session, so add the object we allocated to the resource limit. */
Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);

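Here the cleanup is hoisted: ON_RESULT_FAILURE { session->Close(); } is registered immediately after the allocation's R_UNLESS and covers every later R_UNLESS/R_TRY in CreateSession, so the extra block and session_guard.Cancel() disappear. Later in the same function a second handler closes the client and server halves, and the handlers presumably unwind in reverse registration order, as stacked scope-exit objects do. A toy demonstration of that stacking, with stand-in types only:

    #include <cstdio>
    #include <functional>
    #include <utility>

    struct Result {
        int value;
        bool IsFailure() const { return value != 0; }
    };

    /* Failure hooks behave like local objects: destroyed (and thus run) in reverse
       order of registration when the function exits with a failing result. */
    class OnResultFailure {
        public:
            OnResultFailure(const Result &r, std::function<void()> f) : m_result(r), m_func(std::move(f)) {}
            ~OnResultFailure() { if (m_result.IsFailure()) { m_func(); } }
        private:
            const Result &m_result;
            std::function<void()> m_func;
    };

    Result CreateSessionLike(bool fail_late) {
        Result result { 0 };

        std::puts("allocate session");
        OnResultFailure close_session(result, [] { std::puts("close session"); });

        std::puts("register session");
        OnResultFailure close_halves(result, [] { std::puts("close client/server halves"); });

        if (fail_late) {
            result = Result { 1 };   /* e.g. enqueueing the server session failed */
        }
        return result;               /* handlers fire only on the failing run */
    }

    int main() {
        CreateSessionLike(false);    /* only the allocate/register lines */
        CreateSessionLike(true);     /* then "close client/server halves", "close session" */
        return 0;
    }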
@ -99,8 +95,9 @@ namespace ams::kern {
|
|||
R_UNLESS(session != nullptr, svc::ResultOutOfResource());
|
||||
|
||||
/* Update the session counts. */
|
||||
auto count_guard = SCOPE_GUARD { session->Close(); };
|
||||
{
|
||||
ON_RESULT_FAILURE { session->Close(); };
|
||||
|
||||
/* Atomically increment the number of sessions. */
|
||||
s32 new_sessions;
|
||||
{
|
||||
|
@ -123,7 +120,6 @@ namespace ams::kern {
|
|||
} while (!m_peak_sessions.CompareExchangeWeak<std::memory_order_relaxed>(peak, new_sessions));
|
||||
}
|
||||
}
|
||||
count_guard.Cancel();
|
||||
|
||||
/* Initialize the session. */
|
||||
session->Initialize(this, m_parent->GetName());
|
||||
|
@ -133,7 +129,7 @@ namespace ams::kern {
|
|||
|
||||
/* Register the session. */
|
||||
KSession::Register(session);
|
||||
auto session_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
session->GetClientSession().Close();
|
||||
session->GetServerSession().Close();
|
||||
};
|
||||
|
@ -142,9 +138,8 @@ namespace ams::kern {
|
|||
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));
|
||||
|
||||
/* We succeeded, so set the output. */
|
||||
session_guard.Cancel();
|
||||
*out = std::addressof(session->GetClientSession());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KClientPort::CreateLightSession(KLightClientSession **out) {
|
||||
|
@ -175,8 +170,9 @@ namespace ams::kern {
|
|||
R_UNLESS(session != nullptr, svc::ResultOutOfResource());
|
||||
|
||||
/* Update the session counts. */
|
||||
auto count_guard = SCOPE_GUARD { session->Close(); };
|
||||
{
|
||||
ON_RESULT_FAILURE { session->Close(); };
|
||||
|
||||
/* Atomically increment the number of sessions. */
|
||||
s32 new_sessions;
|
||||
{
|
||||
|
@ -199,7 +195,6 @@ namespace ams::kern {
|
|||
} while (!m_peak_sessions.CompareExchangeWeak<std::memory_order_relaxed>(peak, new_sessions));
|
||||
}
|
||||
}
|
||||
count_guard.Cancel();
|
||||
|
||||
/* Initialize the session. */
|
||||
session->Initialize(this, m_parent->GetName());
|
||||
|
@ -209,7 +204,7 @@ namespace ams::kern {
|
|||
|
||||
/* Register the session. */
|
||||
KLightSession::Register(session);
|
||||
auto session_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
session->GetClientSession().Close();
|
||||
session->GetServerSession().Close();
|
||||
};
|
||||
|
@ -218,9 +213,8 @@ namespace ams::kern {
|
|||
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));
|
||||
|
||||
/* We succeeded, so set the output. */
|
||||
session_guard.Cancel();
|
||||
*out = std::addressof(session->GetClientSession());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -40,7 +40,7 @@ namespace ams::kern {
|
|||
request->Initialize(nullptr, address, size);
|
||||
|
||||
/* Send the request. */
|
||||
return m_parent->OnRequest(request);
|
||||
R_RETURN(m_parent->OnRequest(request));
|
||||
}
|
||||
|
||||
Result KClientSession::SendAsyncRequest(KEvent *event, uintptr_t address, size_t size) {
|
||||
|
@ -55,7 +55,7 @@ namespace ams::kern {
|
|||
request->Initialize(event, address, size);
|
||||
|
||||
/* Send the request. */
|
||||
return m_parent->OnRequest(request);
|
||||
R_RETURN(m_parent->OnRequest(request));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -49,7 +49,7 @@ namespace ams::kern {
|
|||
|
||||
/* We succeeded. */
|
||||
pg_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KCodeMemory::Finalize() {
|
||||
|
@ -87,7 +87,7 @@ namespace ams::kern {
|
|||
/* Mark ourselves as mapped. */
|
||||
m_is_mapped = true;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCodeMemory::Unmap(KProcessAddress address, size_t size) {
|
||||
|
@ -106,7 +106,7 @@ namespace ams::kern {
|
|||
MESOSPHERE_ASSERT(m_is_mapped);
|
||||
m_is_mapped = false;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCodeMemory::MapToOwner(KProcessAddress address, size_t size, ams::svc::MemoryPermission perm) {
|
||||
|
@ -135,7 +135,7 @@ namespace ams::kern {
|
|||
/* Mark ourselves as mapped. */
|
||||
m_is_owner_mapped = true;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KCodeMemory::UnmapFromOwner(KProcessAddress address, size_t size) {
|
||||
|
@ -154,7 +154,7 @@ namespace ams::kern {
|
|||
MESOSPHERE_ASSERT(m_is_owner_mapped);
|
||||
m_is_owner_mapped = false;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -98,12 +98,12 @@ namespace ams::kern {
|
|||
|
||||
/* Signal the next owner thread. */
|
||||
next_owner_thread->EndWait(result);
|
||||
return result;
|
||||
R_RETURN(result);
|
||||
} else {
|
||||
/* Just write the value to userspace. */
|
||||
R_UNLESS(WriteToUser(addr, std::addressof(next_value)), svc::ResultInvalidCurrentMemory());
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -143,7 +143,7 @@ namespace ams::kern {
|
|||
owner_thread->Close();
|
||||
|
||||
/* Get the wait result. */
|
||||
return cur_thread->GetWaitResult();
|
||||
R_RETURN(cur_thread->GetWaitResult());
|
||||
}
|
||||
|
||||
void KConditionVariable::SignalImpl(KThread *thread) {
|
||||
|
@ -224,7 +224,7 @@ namespace ams::kern {
|
|||
/* Check that the thread isn't terminating. */
|
||||
if (cur_thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultTerminationRequested();
|
||||
R_THROW(svc::ResultTerminationRequested());
|
||||
}
|
||||
|
||||
/* Update the value and process for the next owner. */
|
||||
|
@ -256,7 +256,7 @@ namespace ams::kern {
|
|||
/* Write the value to userspace. */
|
||||
if (!WriteToUser(addr, std::addressof(next_value))) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultInvalidCurrentMemory();
|
||||
R_THROW(svc::ResultInvalidCurrentMemory());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -273,7 +273,7 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* Get the wait result. */
|
||||
return cur_thread->GetWaitResult();
|
||||
R_RETURN(cur_thread->GetWaitResult());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -69,7 +69,7 @@ namespace ams::kern {
|
|||
/* Write output. */
|
||||
*out_memory_info = info.GetSvcMemoryInfo();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::ReadMemory(KProcessAddress buffer, KProcessAddress address, size_t size) {
|
||||
|
@ -132,7 +132,7 @@ namespace ams::kern {
|
|||
remaining -= cur_size;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::WriteMemory(KProcessAddress buffer, KProcessAddress address, size_t size) {
|
||||
|
@ -195,7 +195,7 @@ namespace ams::kern {
|
|||
remaining -= cur_size;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::GetRunningThreadInfo(ams::svc::LastThreadContext *out_context, u64 *out_thread_id) {
|
||||
|
@ -233,7 +233,7 @@ namespace ams::kern {
|
|||
*out_thread_id = thread->GetId();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::Attach(KProcess *target) {
|
||||
|
@ -265,10 +265,10 @@ namespace ams::kern {
|
|||
case KProcess::State_CreatedAttached:
|
||||
case KProcess::State_RunningAttached:
|
||||
case KProcess::State_DebugBreak:
|
||||
return svc::ResultBusy();
|
||||
R_THROW(svc::ResultBusy());
|
||||
case KProcess::State_Terminating:
|
||||
case KProcess::State_Terminated:
|
||||
return svc::ResultProcessTerminated();
|
||||
R_THROW(svc::ResultProcessTerminated());
|
||||
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
|
||||
}
|
||||
|
||||
|
@ -313,7 +313,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::BreakProcess() {
|
||||
|
@ -385,7 +385,7 @@ namespace ams::kern {
|
|||
/* Set the process as breaked. */
|
||||
target->SetDebugBreak();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::TerminateProcess() {
|
||||
|
@ -467,7 +467,7 @@ namespace ams::kern {
|
|||
/* Terminate the process. */
|
||||
target->Terminate();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::GetThreadContext(ams::svc::ThreadContext *out, u64 thread_id, u32 context_flags) {
|
||||
|
@ -529,7 +529,7 @@ namespace ams::kern {
|
|||
|
||||
/* Get the thread context. */
|
||||
static_assert(std::derived_from<KDebug, KDebugBase>);
|
||||
return static_cast<KDebug *>(this)->GetThreadContextImpl(out, thread, context_flags);
|
||||
R_RETURN(static_cast<KDebug *>(this)->GetThreadContextImpl(out, thread, context_flags));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -621,7 +621,7 @@ namespace ams::kern {
|
|||
|
||||
/* Set the thread context. */
|
||||
static_assert(std::derived_from<KDebug, KDebugBase>);
|
||||
return static_cast<KDebug *>(this)->SetThreadContextImpl(ctx, thread, context_flags);
|
||||
R_RETURN(static_cast<KDebug *>(this)->SetThreadContextImpl(ctx, thread, context_flags));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -722,7 +722,7 @@ namespace ams::kern {
|
|||
target->SetAttached();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
KEventInfo *KDebugBase::CreateDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4, u64 cur_thread_id) {
|
||||
|
@ -975,15 +975,15 @@ namespace ams::kern {
|
|||
break;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::GetDebugEventInfo(ams::svc::lp64::DebugEventInfo *out) {
|
||||
return this->GetDebugEventInfoImpl(out);
|
||||
R_RETURN(this->GetDebugEventInfoImpl(out));
|
||||
}
|
||||
|
||||
Result KDebugBase::GetDebugEventInfo(ams::svc::ilp32::DebugEventInfo *out) {
|
||||
return this->GetDebugEventInfoImpl(out);
|
||||
R_RETURN(this->GetDebugEventInfoImpl(out));
|
||||
}
|
||||
|
||||
void KDebugBase::Finalize() {
|
||||
|
@ -1091,7 +1091,7 @@ namespace ams::kern {
|
|||
/* If the event is an exception and we don't have exception events enabled, we can't handle the event. */
|
||||
if (event == ams::svc::DebugEvent_Exception && (debug->m_continue_flags & ams::svc::ContinueFlag_EnableExceptionEvent) == 0) {
|
||||
GetCurrentThread().SetDebugExceptionResult(ResultSuccess());
|
||||
return svc::ResultNotHandled();
|
||||
R_THROW(svc::ResultNotHandled());
|
||||
}
|
||||
|
||||
/* If the current thread is suspended, retry. */
|
||||
|
@ -1134,21 +1134,21 @@ namespace ams::kern {
|
|||
/* Get the debug object. */
|
||||
if (KDebugBase *debug = GetDebugObject(process); debug != nullptr) {
|
||||
/* If we have one, check the debug exception. */
|
||||
return GetCurrentThread().GetDebugExceptionResult();
|
||||
R_RETURN(GetCurrentThread().GetDebugExceptionResult());
|
||||
} else {
|
||||
/* We don't have a debug object, so stop processing the exception. */
|
||||
return svc::ResultStopProcessingException();
|
||||
R_THROW(svc::ResultStopProcessingException());
|
||||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::OnDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) {
|
||||
if (KProcess *process = GetCurrentProcessPointer(); process != nullptr && process->IsAttachedToDebugger()) {
|
||||
return ProcessDebugEvent(event, param0, param1, param2, param3, param4);
|
||||
R_RETURN(ProcessDebugEvent(event, param0, param1, param2, param3, param4));
|
||||
}
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::OnExitProcess(KProcess *process) {
|
||||
|
@ -1166,7 +1166,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::OnTerminateProcess(KProcess *process) {
|
||||
|
@ -1184,7 +1184,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KDebugBase::OnExitThread(KThread *thread) {
|
||||
|
@ -1196,7 +1196,7 @@ namespace ams::kern {
|
|||
R_TRY(OnDebugEvent(ams::svc::DebugEvent_ExitThread, thread->GetId(), thread->IsTerminationRequested() ? ams::svc::ThreadExitReason_TerminateThread : ams::svc::ThreadExitReason_ExitThread));
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ namespace ams::kern {
|
|||
m_space_size = size;
|
||||
m_is_initialized = true;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KDeviceAddressSpace::Finalize() {
|
||||
|
@ -50,7 +50,7 @@ namespace ams::kern {
|
|||
KScopedLightLock lk(m_lock);
|
||||
|
||||
/* Attach. */
|
||||
return m_table.Attach(device_name, m_space_address, m_space_size);
|
||||
R_RETURN(m_table.Attach(device_name, m_space_address, m_space_size));
|
||||
}
|
||||
|
||||
Result KDeviceAddressSpace::Detach(ams::svc::DeviceName device_name) {
|
||||
|
@ -58,7 +58,7 @@ namespace ams::kern {
|
|||
KScopedLightLock lk(m_lock);
|
||||
|
||||
/* Detach. */
|
||||
return m_table.Detach(device_name);
|
||||
R_RETURN(m_table.Detach(device_name));
|
||||
}
|
||||
|
||||
Result KDeviceAddressSpace::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
|
||||
|
@ -75,7 +75,7 @@ namespace ams::kern {
|
|||
R_TRY(page_table->LockForMapDeviceAddressSpace(process_address, size, ConvertToKMemoryPermission(device_perm), is_aligned));
|
||||
|
||||
/* Ensure that if we fail, we don't keep unmapped pages locked. */
|
||||
auto unlock_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size)); };
|
||||
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size)); };
|
||||
|
||||
/* Map the pages. */
|
||||
{
@@ -84,18 +84,14 @@ namespace ams::kern {

/* Ensure that we unmap the pages if we fail to update the protections. */
/* NOTE: Nintendo does not check the result of this unmap call. */
auto map_guard = SCOPE_GUARD { m_table.Unmap(device_address, size); };
ON_RESULT_FAILURE { m_table.Unmap(device_address, size); };

/* Update the protections in accordance with how much we mapped. */
R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size));

/* We succeeded, so cancel our guard. */
map_guard.Cancel();
}

/* We succeeded, so we don't need to unlock our pages. */
unlock_guard.Cancel();
return ResultSuccess();
/* We succeeded. */
R_SUCCEED();
}

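Two flavors of failure-path cleanup appear in this file: the unmap above is fire-and-forget (the pre-existing note that Nintendo does not check its result is kept), while the page-unlock handler a few lines earlier wraps its call in MESOSPHERE_R_ABORT_UNLESS, turning a failed cleanup into a kernel abort. A stand-in sketch of the two choices, with a toy result type and a hypothetical cleanup in place of m_table.Unmap / page_table->UnlockForDeviceAddressSpace:

    #include <cstdio>
    #include <cstdlib>

    struct Result {
        int value;
        bool IsFailure() const { return value != 0; }
    };

    /* A cleanup step that can itself fail. */
    Result UndoMapping(bool undo_ok) { return Result { undo_ok ? 0 : 1 }; }

    /* Option 1: fire-and-forget; the cleanup's result is deliberately discarded. */
    void CleanupIgnoringResult(bool undo_ok) {
        static_cast<void>(UndoMapping(undo_ok));
    }

    /* Option 2: abort-on-failure; a cleanup that cannot be allowed to fail. */
    void CleanupOrAbort(bool undo_ok) {
        if (UndoMapping(undo_ok).IsFailure()) {
            std::fputs("unrecoverable cleanup failure\n", stderr);
            std::abort();
        }
    }

    int main() {
        CleanupIgnoringResult(false);   /* silently ignored */
        CleanupOrAbort(true);           /* fine */
        /* CleanupOrAbort(false) would abort, like MESOSPHERE_R_ABORT_UNLESS. */
        return 0;
    }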
||||
Result KDeviceAddressSpace::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address) {
|
||||
|
@ -111,20 +107,19 @@ namespace ams::kern {
|
|||
/* Lock the pages. */
|
||||
R_TRY(page_table->LockForUnmapDeviceAddressSpace(process_address, size));
|
||||
|
||||
/* If we fail to unmap, we want to do a partial unlock. */
|
||||
/* Unmap the pages. */
|
||||
{
|
||||
auto unlock_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size)); };
|
||||
/* If we fail to unmap, we want to do a partial unlock. */
|
||||
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size)); };
|
||||
|
||||
/* Unmap. */
|
||||
/* Perform the unmap. */
|
||||
R_TRY(m_table.Unmap(page_table, process_address, size, device_address));
|
||||
|
||||
unlock_guard.Cancel();
|
||||
}
|
||||
|
||||
/* Unlock the pages. */
|
||||
MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -43,7 +43,7 @@ namespace ams::kern {

R_SUCCEED_IF(m_readable_event_destroyed);

return m_readable_event.Signal();
R_RETURN(m_readable_event.Signal());
}

Result KEvent::Clear() {

@@ -51,7 +51,7 @@ namespace ams::kern {

R_SUCCEED_IF(m_readable_event_destroyed);

return m_readable_event.Clear();
R_RETURN(m_readable_event.Clear());
}

void KEvent::PostDestroy(uintptr_t arg) {

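KEvent already used R_SUCCEED_IF as an early success exit; the change only converts the tail calls to R_RETURN, so both exits of Signal and Clear now go through the macro layer. Below is a plausible minimal reading of R_SUCCEED_IF on top of the toy funnel sketched earlier; it is an assumption for illustration, not Atmosphere's actual definition:

    #include <cstdio>

    struct Result {
        int value;
        bool IsFailure() const { return value != 0; }
    };
    constexpr Result ResultSuccess() { return Result { 0 }; }

    #define R_RETURN(expr)                            \
        {                                             \
            const Result r_return_value_ = (expr);    \
            return r_return_value_;                   \
        }
    #define R_SUCCEED() R_RETURN(ResultSuccess())

    /* Early-out with success when a condition already holds. */
    #define R_SUCCEED_IF(cond)                        \
        {                                             \
            if (cond) {                               \
                R_SUCCEED();                          \
            }                                         \
        }

    Result SignalBackingEvent() { return Result { 0 }; }

    Result Signal(bool readable_event_destroyed) {
        R_SUCCEED_IF(readable_event_destroyed);   /* nothing left to signal */
        R_RETURN(SignalBackingEvent());           /* otherwise forward the result */
    }

    int main() {
        std::printf("%d\n", Signal(true).value);   /* prints 0 */
        return 0;
    }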
@ -36,7 +36,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
bool KHandleTable::Remove(ams::svc::Handle handle) {
|
||||
|
@ -95,7 +95,7 @@ namespace ams::kern {
|
|||
*out_handle = EncodeHandle(index, linear_id);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KHandleTable::Reserve(ams::svc::Handle *out_handle) {
|
||||
|
@ -107,7 +107,7 @@ namespace ams::kern {
|
|||
R_UNLESS(m_count < m_table_size, svc::ResultOutOfHandles());
|
||||
|
||||
*out_handle = EncodeHandle(this->AllocateEntry(), this->AllocateLinearId());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KHandleTable::Unreserve(ams::svc::Handle handle) {
|
||||
|
|
|
@ -143,7 +143,7 @@ namespace ams::kern {
|
|||
/* All initial processes should disable device address space merge. */
|
||||
out->flags |= ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter ¶ms, KProcessAddress src) const {
|
||||
|
@ -187,7 +187,7 @@ namespace ams::kern {
|
|||
cpu::FlushEntireDataCache();
|
||||
cpu::InvalidateEntireInstructionCache();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KInitialProcessReader::SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter ¶ms) const {
|
||||
|
@ -215,7 +215,7 @@ namespace ams::kern {
|
|||
R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(end - start, PageSize), ams::svc::MemoryPermission_ReadWrite));
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -45,7 +45,7 @@ namespace ams::kern {
|
|||
|
||||
/* Mark initialized. */
|
||||
m_is_initialized = true;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KInterruptEvent::Finalize() {
|
||||
|
@ -69,7 +69,7 @@ namespace ams::kern {
|
|||
/* Clear the interrupt. */
|
||||
Kernel::GetInterruptManager().ClearInterrupt(m_interrupt_id, m_core_id);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KInterruptEventTask::Register(s32 interrupt_id, s32 core_id, bool level, KInterruptEvent *event) {
|
||||
|
@ -91,7 +91,7 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* Ensure that the task is cleaned up if anything goes wrong. */
|
||||
auto task_guard = SCOPE_GUARD { if (allocated) { KInterruptEventTask::Free(task); } };
|
||||
ON_RESULT_FAILURE { if (allocated) { KInterruptEventTask::Free(task); } };
|
||||
|
||||
/* Register/bind the interrupt task. */
|
||||
{
|
||||
|
@ -110,9 +110,7 @@ namespace ams::kern {
|
|||
g_interrupt_event_task_table[interrupt_id] = task;
|
||||
}
|
||||
|
||||
/* We successfully registered, so we don't need to free the task. */
|
||||
task_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KInterruptEventTask::Unregister(s32 interrupt_id, s32 core_id) {
|
||||
|
|
|
@ -80,7 +80,7 @@ namespace ams::kern {
|
|||
m_pool_type = pool_type;
|
||||
m_is_initialized = true;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KIoPool::Finalize() {
|
||||
|
@ -113,7 +113,7 @@ namespace ams::kern {
|
|||
/* Add the region to our pool. */
|
||||
m_io_region_list.push_back(*new_region);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KIoPool::RemoveIoRegion(KIoRegion *region) {
|
||||
|
|
|
@ -37,7 +37,7 @@ namespace ams::kern {
|
|||
/* Mark ourselves as initialized. */
|
||||
m_is_initialized = true;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KIoRegion::Finalize() {
|
||||
|
@ -72,7 +72,7 @@ namespace ams::kern {
|
|||
/* Note that we're mapped. */
|
||||
m_is_mapped = true;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KIoRegion::Unmap(KProcessAddress address, size_t size) {
|
||||
|
@ -93,7 +93,7 @@ namespace ams::kern {
|
|||
/* Note that we're unmapped. */
|
||||
m_is_mapped = false;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -37,7 +37,7 @@ namespace ams::kern {
|
|||
cur_thread->SetLightSessionData(data);
|
||||
|
||||
/* Send the request. */
|
||||
return m_parent->OnRequest(cur_thread);
|
||||
R_RETURN(m_parent->OnRequest(cur_thread));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -120,7 +120,7 @@ namespace ams::kern {
|
|||
/* NOTE: Nintendo returns GetCurrentThread().GetWaitResult() here. */
|
||||
/* This is technically incorrect, although it doesn't cause problems in practice */
|
||||
/* because this is only ever called with request_thread = GetCurrentThreadPointer(). */
|
||||
return request_thread->GetWaitResult();
|
||||
R_RETURN(request_thread->GetWaitResult());
|
||||
}
|
||||
|
||||
Result KLightServerSession::ReplyAndReceive(u32 *data) {
|
||||
|
@ -193,7 +193,7 @@ namespace ams::kern {
|
|||
std::memcpy(GetCurrentThread().GetLightSessionData(), m_current_request->GetLightSessionData(), KLightSession::DataSize);
|
||||
|
||||
/* We successfully received. */
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
/* We need to wait for a request to come in. */
|
||||
|
@ -201,7 +201,7 @@ namespace ams::kern {
|
|||
/* Check if we were cancelled. */
|
||||
if (GetCurrentThread().IsWaitCancelled()) {
|
||||
GetCurrentThread().ClearWaitCancelled();
|
||||
return svc::ResultCancelled();
|
||||
R_THROW(svc::ResultCancelled());
|
||||
}
|
||||
|
||||
/* Mark ourselves as cancellable. */
|
||||
|
|
|
@ -102,7 +102,7 @@ namespace ams::kern {
|
|||
start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
|
||||
m_memory_block_tree.insert(*start_block);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager *slab_manager) {
|
||||
|
|
|
@ -171,7 +171,7 @@ namespace ams::kern {
|
|||
manager->InitializeOptimizedMemory();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
|
||||
|
@ -236,7 +236,7 @@ namespace ams::kern {
|
|||
R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
|
||||
|
||||
/* Ensure that we don't leave anything un-freed. */
|
||||
auto group_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
for (const auto &it : *out) {
|
||||
auto &manager = this->GetManager(it.GetAddress());
|
||||
const size_t num_pages = std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
|
||||
|
@ -256,12 +256,11 @@ namespace ams::kern {
|
|||
break;
|
||||
}
|
||||
|
||||
/* Safely add it to our group. */
|
||||
{
|
||||
auto block_guard = SCOPE_GUARD { cur_manager->Free(allocated_block, pages_per_alloc); };
|
||||
R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
|
||||
block_guard.Cancel();
|
||||
}
|
||||
/* Ensure we don't leak the block if we fail. */
|
||||
ON_RESULT_FAILURE { cur_manager->Free(allocated_block, pages_per_alloc); };
|
||||
|
||||
/* Add the block to our group. */
|
||||
R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
|
||||
|
||||
/* Maintain the optimized memory bitmap, if we should. */
|
||||
if (unoptimized) {
|
||||
|
@ -277,8 +276,7 @@ namespace ams::kern {
|
|||
R_UNLESS(num_pages == 0, svc::ResultOutOfMemory());
|
||||
|
||||
/* We succeeded! */
|
||||
group_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
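Inside the allocation loop, the cancelable block_guard and its extra scope become a per-iteration ON_RESULT_FAILURE that frees the freshly allocated block if adding it to the group fails, while the group-level cleanup registered at the top of the function still covers blocks that were already added. The loop below is a stand-in demonstration of that per-iteration pattern with toy types, not the kernel's KPageGroup or KMemoryManager:

    #include <cstdio>
    #include <vector>

    struct Result {
        int value;
        bool IsFailure() const { return value != 0; }
    };

    struct FreeBlockOnFailure {
        const Result &result;
        int block;
        ~FreeBlockOnFailure() { if (result.IsFailure()) { std::printf("free block %d\n", block); } }
    };

    /* Fails once we try to add the third block, to exercise the failure path. */
    Result AddBlock(std::vector<int> &group, int block) {
        if (group.size() >= 2) { return Result { 1 }; }
        group.push_back(block);
        return Result { 0 };
    }

    Result AllocateGroup(std::vector<int> &group) {
        Result result { 0 };
        for (int block = 0; block < 4; ++block) {
            /* Per-iteration handler: only the block that failed to be added is
               freed here; blocks already in the group are the caller's to clean up. */
            FreeBlockOnFailure free_on_failure { result, block };
            result = AddBlock(group, block);
            if (result.IsFailure()) {
                return result;
            }
        }
        return result;
    }

    int main() {
        std::vector<int> group;
        AllocateGroup(group);   /* prints "free block 2" */
        return 0;
    }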
Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option) {
|
||||
|
@ -313,7 +311,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern) {
|
||||
|
@ -419,7 +417,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p) {
|
||||
|
|
|
@ -55,14 +55,14 @@ namespace ams::kern {
|
|||
KScopedAutoObject existing_object = FindImpl(name);
|
||||
if (existing_object.IsNull()) {
|
||||
g_object_list.push_back(*new_name);
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
}
|
||||
|
||||
/* The object already exists, which is an error condition. Perform cleanup. */
|
||||
obj->Close();
|
||||
KObjectName::Free(new_name);
|
||||
return svc::ResultInvalidState();
|
||||
R_THROW(svc::ResultInvalidState());
|
||||
}
|
||||
|
||||
Result KObjectName::Delete(KAutoObject *obj, const char *compare_name) {
|
||||
|
@ -76,12 +76,12 @@ namespace ams::kern {
|
|||
obj->Close();
|
||||
g_object_list.erase(g_object_list.iterator_to(name));
|
||||
KObjectName::Free(std::addressof(name));
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
}
|
||||
|
||||
/* We didn't find the object in the list. */
|
||||
return svc::ResultNotFound();
|
||||
R_THROW(svc::ResultNotFound());
|
||||
}
|
||||
|
||||
KScopedAutoObject<KAutoObject> KObjectName::Find(const char *name) {
|
||||
|
|
|
@ -81,7 +81,7 @@ namespace ams::kern {
|
|||
}
|
||||
m_last_block = new_block;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KPageGroup::Open() const {
|
||||
|
|
|
@@ -121,9 +121,7 @@ namespace ams::kern {
m_impl.InitializeForKernel(table, start, end);

/* Initialize our memory block manager. */
return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager);

return ResultSuccess();
R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
}

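As the removed lines show, InitializeForKernel (and InitializeForProcess just below) previously returned m_memory_block_manager.Initialize(...) directly, leaving the trailing return ResultSuccess() unreachable; the new form keeps only the tail call, wrapped in R_RETURN so the block manager's result is exactly what the caller sees. A tiny stand-in illustration of that tail-propagation shape, with toy types and hypothetical names:

    #include <cstdio>

    struct Result {
        int value;
        bool IsFailure() const { return value != 0; }
    };
    constexpr Result ResultSuccess() { return Result { 0 }; }

    Result InitializeBlockManager(bool ok) { return Result { ok ? 0 : 7 }; }

    /* The last fallible step's result is returned directly; there is no trailing
       "return ResultSuccess()" left to sit unreachable or mask a failure. */
    Result InitializeTable(bool ok) {
        /* ...set up the implementation... */
        return InitializeBlockManager(ok);   /* like R_RETURN(m_memory_block_manager.Initialize(...)) */
    }

    int main() {
        std::printf("%d %d\n", InitializeTable(true).value, InitializeTable(false).value);   /* 0 7 */
        return 0;
    }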
||||
Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KResourceLimit *resource_limit) {
|
||||
|
@ -132,8 +130,6 @@ namespace ams::kern {
|
|||
MESOSPHERE_ABORT_UNLESS(code_address < code_address + code_size);
|
||||
MESOSPHERE_ABORT_UNLESS(code_address + code_size - 1 <= end - 1);
|
||||
|
||||
/* Declare variables to hold our region sizes. */
|
||||
|
||||
/* Define helpers. */
|
||||
auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
|
||||
return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
|
||||
|
@ -331,9 +327,7 @@ namespace ams::kern {
|
|||
m_impl.InitializeForProcess(table, GetInteger(start), GetInteger(end));
|
||||
|
||||
/* Initialize our memory block manager. */
|
||||
return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager);
|
||||
|
||||
return ResultSuccess();
|
||||
R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
|
||||
}
|
||||
|
||||
|
||||
|
@ -472,7 +466,7 @@ namespace ams::kern {
|
|||
R_UNLESS((info.m_permission & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
|
||||
R_UNLESS((info.m_attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory());
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CheckMemoryStateContiguous(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
|
||||
|
@ -508,7 +502,7 @@ namespace ams::kern {
|
|||
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
|
||||
|
@ -562,7 +556,7 @@ namespace ams::kern {
|
|||
if (out_blocks_needed != nullptr) {
|
||||
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
|
||||
}
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr) {
|
||||
|
@ -625,7 +619,7 @@ namespace ams::kern {
|
|||
out_pg->Open();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr, const KPageGroup *pg) {
|
||||
|
@ -673,7 +667,7 @@ namespace ams::kern {
|
|||
/* Apply the memory block updates. */
|
||||
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const {
|
||||
|
@ -686,7 +680,7 @@ namespace ams::kern {
|
|||
|
||||
*out_info = block->GetMemoryInfo();
|
||||
out_page->flags = 0;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, KMemoryState state) const {
|
||||
|
@ -726,7 +720,7 @@ namespace ams::kern {
|
|||
if (R_SUCCEEDED(this->CheckMemoryState(mapped_address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) {
|
||||
/* It is! */
|
||||
*out = mapped_address;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -756,7 +750,7 @@ namespace ams::kern {
|
|||
|
||||
/* We found the region. */
|
||||
*out = mapped_address;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
|
@ -803,7 +797,7 @@ namespace ams::kern {
|
|||
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
|
||||
|
||||
/* Ensure that we unprotect the source pages on failure. */
|
||||
auto unprot_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
const KPageProperties unprotect_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableHeadBodyTail };
|
||||
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, unprotect_properties, OperationType_ChangePermissions, true));
|
||||
};
|
||||
|
@ -812,15 +806,12 @@ namespace ams::kern {
|
|||
const KPageProperties dst_map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_DisableHead };
|
||||
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, false));
|
||||
|
||||
/* We successfully mapped the alias pages, so we don't need to unprotect the src pages on failure. */
|
||||
unprot_guard.Cancel();
|
||||
|
||||
/* Apply the memory block updates. */
|
||||
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_src_perm, new_src_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);
|
||||
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_Stack, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
|
@ -869,7 +860,7 @@ namespace ams::kern {
|
|||
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));
|
||||
|
||||
/* Ensure that we re-map the aliased pages on failure. */
|
||||
auto remap_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
|
||||
};
|
||||
|
||||
|
@ -877,15 +868,12 @@ namespace ams::kern {
|
|||
const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
|
||||
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
|
||||
|
||||
/* We successfully changed the permissions for the source pages, so we don't need to re-map the dst pages on failure. */
|
||||
remap_guard.Cancel();
|
||||
|
||||
/* Apply the memory block updates. */
|
||||
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
|
||||
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
|
@ -935,7 +923,7 @@ namespace ams::kern {
|
|||
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
|
||||
|
||||
/* Ensure that we unprotect the source pages on failure. */
|
||||
auto unprot_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
const KPageProperties unprotect_properties = { src_perm, false, false, DisableMergeAttribute_EnableHeadBodyTail };
|
||||
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, unprotect_properties, OperationType_ChangePermissions, true));
|
||||
};
|
||||
|
@ -944,15 +932,12 @@ namespace ams::kern {
|
|||
const KPageProperties dst_properties = { new_perm, false, false, DisableMergeAttribute_DisableHead };
|
||||
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
|
||||
|
||||
/* We successfully mapped the alias pages, so we don't need to unprotect the src pages on failure. */
|
||||
unprot_guard.Cancel();
|
||||
|
||||
/* Apply the memory block updates. */
|
||||
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);
|
||||
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_AliasCode, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
|
@ -1034,7 +1019,7 @@ namespace ams::kern {
|
|||
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));
|
||||
|
||||
/* Ensure that we re-map the aliased pages on failure. */
|
||||
auto remap_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
|
||||
};
|
||||
|
||||
|
@ -1042,9 +1027,6 @@ namespace ams::kern {
|
|||
const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
|
||||
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
|
||||
|
||||
/* We successfully changed the permissions for the source pages, so we don't need to re-map the dst pages on failure. */
|
||||
remap_guard.Cancel();
|
||||
|
||||
/* Apply the memory block updates. */
|
||||
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
|
||||
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
|
||||
|
@ -1053,7 +1035,7 @@ namespace ams::kern {
|
|||
reprotected_pages = true;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
|
||||
|
@ -1149,7 +1131,7 @@ namespace ams::kern {
|
|||
|
||||
/* Map the pages. */
|
||||
const KPageProperties properties = { perm, false, false, DisableMergeAttribute_None };
|
||||
return this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false);
|
||||
R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false));
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll) {
|
||||
|
@ -1160,7 +1142,7 @@ namespace ams::kern {
|
|||
KProcessAddress cur_address = address;
|
||||
|
||||
/* Ensure that we clean up on failure. */
|
||||
auto mapping_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
MESOSPHERE_ABORT_UNLESS(!reuse_ll);
|
||||
if (cur_address != start_address) {
|
||||
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
|
||||
|
@ -1177,8 +1159,7 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* We succeeded! */
|
||||
mapping_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KPageTableBase::RemapPageGroup(PageLinkedList *page_list, KProcessAddress address, size_t size, const KPageGroup &pg) {
|
||||
|
@ -1301,7 +1282,7 @@ namespace ams::kern {
|
|||
R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
|
||||
R_TRY(pg.AddBlock(cur_addr, cur_pages));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
bool KPageTableBase::IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
|
||||
|
@ -1433,7 +1414,7 @@ namespace ams::kern {
|
|||
.size = size,
|
||||
};
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
|
||||
|
@ -1467,7 +1448,7 @@ namespace ams::kern {
|
|||
/* Update the blocks. */
|
||||
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
|
||||
|
@ -1533,7 +1514,7 @@ namespace ams::kern {
|
|||
cpu::InvalidateEntireInstructionCache();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
|
||||
|
@ -1573,7 +1554,7 @@ namespace ams::kern {
|
|||
/* Update the blocks. */
|
||||
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) {
|
||||
|
@ -1627,11 +1608,11 @@ namespace ams::kern {
|
|||
|
||||
/* Set the output. */
|
||||
*out = m_heap_region_start;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
} else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
|
||||
/* The size requested is exactly the current size. */
|
||||
*out = m_heap_region_start;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
} else {
|
||||
/* We have to allocate memory. Determine how much to allocate and where while the table is locked. */
|
||||
cur_address = m_current_heap_end;
|
||||
|
@ -1692,7 +1673,7 @@ namespace ams::kern {
|
|||
|
||||
/* Set the output. */
|
||||
*out = m_heap_region_start;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1705,7 +1686,7 @@ namespace ams::kern {
|
|||
|
||||
m_max_heap_size = size;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
|
||||
|
@ -1727,12 +1708,12 @@ namespace ams::kern {
|
|||
};
|
||||
out_page_info->flags = 0;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
/* Otherwise, lock the table and query. */
|
||||
KScopedLightLock lk(m_general_lock);
|
||||
return this->QueryInfoImpl(out_info, out_page_info, addr);
|
||||
R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
|
||||
}
|
||||
|
||||
Result KPageTableBase::QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
|
||||
|
@ -1808,7 +1789,7 @@ namespace ams::kern {
|
|||
out->physical_address = GetInteger(phys_addr);
|
||||
out->virtual_address = GetInteger(virt_addr);
|
||||
out->size = phys_size;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
|
||||
|
@ -1882,7 +1863,7 @@ namespace ams::kern {
|
|||
/* Set the output address. */
|
||||
*out = addr;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
|
||||
|
@ -1905,7 +1886,7 @@ namespace ams::kern {
|
|||
m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize, KMemoryState_Io, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
|
||||
|
||||
/* We successfully mapped the pages. */
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission svc_perm) {
|
||||
|
@ -1935,7 +1916,7 @@ namespace ams::kern {
|
|||
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Io, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
|
||||
|
||||
/* We successfully mapped the pages. */
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size) {
|
||||
|
@ -1986,7 +1967,7 @@ namespace ams::kern {
|
|||
/* Update the blocks. */
|
||||
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
|
||||
|
@ -2054,7 +2035,7 @@ namespace ams::kern {
|
|||
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState_Static, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
|
||||
|
||||
/* We successfully mapped the pages. */
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
|
||||
|
@ -2070,7 +2051,7 @@ namespace ams::kern {
|
|||
R_CONVERT(svc::ResultInvalidAddress, svc::ResultOutOfRange())
|
||||
} R_END_TRY_CATCH;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
|
@ -2111,7 +2092,7 @@ namespace ams::kern {
|
|||
|
||||
/* We successfully mapped the pages. */
|
||||
*out_addr = addr;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
|
@ -2140,7 +2121,7 @@ namespace ams::kern {
|
|||
/* Update the blocks. */
|
||||
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
|
||||
|
@ -2170,7 +2151,7 @@ namespace ams::kern {
|
|||
/* Update the blocks. */
|
||||
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
|
@ -2207,7 +2188,7 @@ namespace ams::kern {
|
|||
|
||||
/* We successfully mapped the pages. */
|
||||
*out_addr = addr;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
|
||||
|
@ -2241,7 +2222,7 @@ namespace ams::kern {
|
|||
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
|
||||
|
||||
/* We successfully mapped the pages. */
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
|
||||
|
@ -2277,7 +2258,7 @@ namespace ams::kern {
|
|||
/* Update the blocks. */
|
||||
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
|
||||
|
@ -2300,7 +2281,7 @@ namespace ams::kern {
|
|||
/* Open a new reference to the pages in the group. */
|
||||
out->Open();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) {
|
||||
|
@ -2367,7 +2348,7 @@ namespace ams::kern {
|
|||
cpu::InvalidateDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
|
||||
|
@ -2420,7 +2401,7 @@ namespace ams::kern {
|
|||
R_UNLESS(UserspaceAccess::CopyMemoryToUser(buffer, copy_src, cur_size), svc::ResultInvalidPointer());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
};
|
||||
|
||||
/* Iterate. */
|
||||
|
@ -2453,7 +2434,7 @@ namespace ams::kern {
|
|||
/* Perform copy for the last block. */
|
||||
R_TRY(PerformCopy());
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
|
||||
|
@ -2505,7 +2486,7 @@ namespace ams::kern {
|
|||
cpu::StoreDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
};
|
||||
|
||||
/* Iterate. */
|
||||
|
@ -2541,7 +2522,7 @@ namespace ams::kern {
|
|||
/* Invalidate the entire instruction cache, as this svc allows modifying executable pages. */
|
||||
cpu::InvalidateEntireInstructionCache();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size) {
|
||||
|
@ -2586,7 +2567,7 @@ namespace ams::kern {
|
|||
break;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size) {
|
||||
|
@ -2631,7 +2612,7 @@ namespace ams::kern {
|
|||
break;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size) {
|
||||
|
@ -2667,7 +2648,7 @@ namespace ams::kern {
|
|||
dst += cur_size;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size) {
|
||||
|
@ -2703,7 +2684,7 @@ namespace ams::kern {
|
|||
src += cur_size;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::LockForMapDeviceAddressSpace(KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
|
||||
|
@ -2727,7 +2708,7 @@ namespace ams::kern {
|
|||
/* Update the memory blocks. */
|
||||
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size) {
|
||||
|
@ -2755,7 +2736,7 @@ namespace ams::kern {
|
|||
const KMemoryBlockManager::MemoryBlockLockFunction lock_func = m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
|
||||
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
|
||||
|
@ -2782,7 +2763,7 @@ namespace ams::kern {
|
|||
/* Update the memory blocks. */
|
||||
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UnshareToDevice, KMemoryPermission_None);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
|
||||
|
@ -2809,7 +2790,7 @@ namespace ams::kern {
|
|||
/* Update the memory blocks. */
|
||||
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight, KMemoryPermission_None);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
|
||||
|
@ -2827,7 +2808,7 @@ namespace ams::kern {
|
|||
/* We got the range, so open it. */
|
||||
Kernel::GetMemoryManager().Open(out->address, out->size / PageSize);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange *out, KProcessAddress address, size_t size) {
|
||||
|
@ -2844,61 +2825,61 @@ namespace ams::kern {
|
|||
/* We got the range, so open it. */
|
||||
Kernel::GetMemoryManager().Open(out->address, out->size / PageSize);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
|
||||
return this->LockMemoryAndOpen(nullptr, out, address, size,
|
||||
R_RETURN(this->LockMemoryAndOpen(nullptr, out, address, size,
|
||||
KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer,
|
||||
KMemoryPermission_All, KMemoryPermission_UserReadWrite,
|
||||
KMemoryAttribute_All, KMemoryAttribute_None,
|
||||
static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
|
||||
KMemoryAttribute_Locked);
|
||||
KMemoryAttribute_Locked));
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
|
||||
return this->UnlockMemory(address, size,
|
||||
R_RETURN(this->UnlockMemory(address, size,
|
||||
KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer,
|
||||
KMemoryPermission_None, KMemoryPermission_None,
|
||||
KMemoryAttribute_All, KMemoryAttribute_Locked,
|
||||
KMemoryPermission_UserReadWrite,
|
||||
KMemoryAttribute_Locked, nullptr);
|
||||
KMemoryAttribute_Locked, nullptr));
|
||||
}
|
||||
|
||||
Result KPageTableBase::LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) {
|
||||
return this->LockMemoryAndOpen(out, nullptr, address, size,
|
||||
R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size,
|
||||
KMemoryState_FlagCanTransfer, KMemoryState_FlagCanTransfer,
|
||||
KMemoryPermission_All, KMemoryPermission_UserReadWrite,
|
||||
KMemoryAttribute_All, KMemoryAttribute_None,
|
||||
perm,
|
||||
KMemoryAttribute_Locked);
|
||||
KMemoryAttribute_Locked));
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
|
||||
return this->UnlockMemory(address, size,
|
||||
R_RETURN(this->UnlockMemory(address, size,
|
||||
KMemoryState_FlagCanTransfer, KMemoryState_FlagCanTransfer,
|
||||
KMemoryPermission_None, KMemoryPermission_None,
|
||||
KMemoryAttribute_All, KMemoryAttribute_Locked,
|
||||
KMemoryPermission_UserReadWrite,
|
||||
KMemoryAttribute_Locked, std::addressof(pg));
|
||||
KMemoryAttribute_Locked, std::addressof(pg)));
|
||||
}
|
||||
|
||||
Result KPageTableBase::LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) {
|
||||
return this->LockMemoryAndOpen(out, nullptr, address, size,
|
||||
R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size,
|
||||
KMemoryState_FlagCanCodeMemory, KMemoryState_FlagCanCodeMemory,
|
||||
KMemoryPermission_All, KMemoryPermission_UserReadWrite,
|
||||
KMemoryAttribute_All, KMemoryAttribute_None,
|
||||
static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
|
||||
KMemoryAttribute_Locked);
|
||||
KMemoryAttribute_Locked));
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
|
||||
return this->UnlockMemory(address, size,
|
||||
R_RETURN(this->UnlockMemory(address, size,
|
||||
KMemoryState_FlagCanCodeMemory, KMemoryState_FlagCanCodeMemory,
|
||||
KMemoryPermission_None, KMemoryPermission_None,
|
||||
KMemoryAttribute_All, KMemoryAttribute_Locked,
|
||||
KMemoryPermission_UserReadWrite,
|
||||
KMemoryAttribute_Locked, std::addressof(pg));
|
||||
KMemoryAttribute_Locked, std::addressof(pg)));
|
||||
}
|
||||
|
||||
Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange *out, KProcessAddress address, size_t size) {
|
||||
|
@ -2915,7 +2896,7 @@ namespace ams::kern {
|
|||
/* We got the range, so open it. */
|
||||
Kernel::GetMemoryManager().Open(out->address, out->size / PageSize);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
|
@ -2961,7 +2942,7 @@ namespace ams::kern {
|
|||
R_UNLESS(UserspaceAccess::CopyMemoryToUser(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size), svc::ResultInvalidCurrentMemory());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
};
|
||||
|
||||
/* Iterate. */
|
||||
|
@ -2995,7 +2976,7 @@ namespace ams::kern {
|
|||
R_TRY(PerformCopy());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
|
@ -3030,7 +3011,7 @@ namespace ams::kern {
|
|||
/* Copy the data. */
|
||||
std::memcpy(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
};
|
||||
|
||||
/* Iterate. */
|
||||
|
@ -3064,7 +3045,7 @@ namespace ams::kern {
|
|||
R_TRY(PerformCopy());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
|
||||
|
@ -3110,7 +3091,7 @@ namespace ams::kern {
|
|||
R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size), svc::ResultInvalidCurrentMemory());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
};
|
||||
|
||||
/* Iterate. */
|
||||
|
@ -3144,7 +3125,7 @@ namespace ams::kern {
|
|||
R_TRY(PerformCopy());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
|
||||
|
@ -3179,7 +3160,7 @@ namespace ams::kern {
|
|||
/* Copy the data. */
|
||||
std::memcpy(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
};
|
||||
|
||||
/* Iterate. */
|
||||
|
@ -3213,7 +3194,7 @@ namespace ams::kern {
|
|||
R_TRY(PerformCopy());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CopyMemoryFromHeapToHeap(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
|
@ -3330,7 +3311,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
|
@ -3449,7 +3430,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
#pragma GCC push_options
|
||||
|
@ -3492,12 +3473,12 @@ namespace ams::kern {
|
|||
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
|
||||
break;
|
||||
default:
|
||||
return svc::ResultInvalidCombination();
|
||||
R_THROW(svc::ResultInvalidCombination());
|
||||
}
|
||||
|
||||
/* Ensure that on failure, we roll back appropriately. */
|
||||
size_t mapped_size = 0;
|
||||
auto cleanup_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
if (mapped_size > 0) {
|
||||
this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, src_perm);
|
||||
}
|
||||
|
@ -3547,15 +3528,12 @@ namespace ams::kern {
|
|||
MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end());
|
||||
}
|
||||
|
||||
/* We succeeded, so no need to cleanup. */
|
||||
cleanup_guard.Cancel();
|
||||
|
||||
if (out_blocks_needed != nullptr) {
|
||||
MESOSPHERE_ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
|
||||
*out_blocks_needed = blocks_needed;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send) {
|
||||
|
@ -3621,7 +3599,7 @@ namespace ams::kern {
|
|||
}
|
||||
};
|
||||
|
||||
auto cleanup_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
if (cur_mapped_addr != dst_addr) {
|
||||
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
|
||||
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_addr, (cur_mapped_addr - dst_addr) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
|
||||
|
@ -3767,9 +3745,8 @@ namespace ams::kern {
|
|||
*out_addr = dst_addr + (src_start - aligned_src_start);
|
||||
|
||||
/* We succeeded. */
|
||||
cleanup_guard.Cancel();
|
||||
memory_reservation.Commit();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KPageTableBase &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
|
||||
|
@ -3798,7 +3775,7 @@ namespace ams::kern {
|
|||
|
||||
/* Ensure that we clean up appropriately if we fail after this. */
|
||||
const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped : KMemoryPermission_UserRead);
|
||||
auto cleanup_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
if (src_map_end > src_map_start) {
|
||||
src_page_table.CleanupForIpcClientOnServerSetupFailure(updater.GetPageList(), src_map_start, src_map_size, src_perm);
|
||||
}
|
||||
|
@ -3813,10 +3790,7 @@ namespace ams::kern {
|
|||
src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, (src_map_end - src_map_start) / PageSize, &KMemoryBlock::LockForIpc, src_perm);
|
||||
}
|
||||
|
||||
/* We succeeded, so cancel our cleanup guard. */
|
||||
cleanup_guard.Cancel();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
|
||||
|
@ -3857,7 +3831,7 @@ namespace ams::kern {
|
|||
const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
|
||||
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_size - mapping_size);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
|
||||
|
@ -3890,7 +3864,7 @@ namespace ams::kern {
|
|||
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
|
||||
break;
|
||||
default:
|
||||
return svc::ResultInvalidCombination();
|
||||
R_THROW(svc::ResultInvalidCombination());
|
||||
}
|
||||
|
||||
/* Lock the table. */
|
||||
|
@ -3902,7 +3876,7 @@ namespace ams::kern {
|
|||
|
||||
/* Ensure that on failure, we roll back appropriately. */
|
||||
size_t mapped_size = 0;
|
||||
auto unmap_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
if (mapped_size > 0) {
|
||||
/* Determine where the mapping ends. */
|
||||
const auto mapped_end = GetInteger(mapping_start) + mapped_size;
|
||||
|
@ -4040,10 +4014,7 @@ namespace ams::kern {
|
|||
/* Unlock the pages. */
|
||||
m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, KMemoryPermission_None);
|
||||
|
||||
/* We succeeded, so no need to unmap. */
|
||||
unmap_guard.Cancel();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList *page_list, KProcessAddress address, size_t size, KMemoryPermission prot_perm) {
|
||||
|
@ -4237,7 +4208,7 @@ namespace ams::kern {
|
|||
|
||||
/* Reset the current tracking address, and make sure we clean up on failure. */
|
||||
cur_address = address;
|
||||
auto unmap_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
if (cur_address > address) {
|
||||
const KProcessAddress last_unmap_address = cur_address - 1;
|
||||
|
||||
|
@ -4340,10 +4311,7 @@ namespace ams::kern {
|
|||
KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None,
|
||||
KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None);
|
||||
|
||||
/* Cancel our guard. */
|
||||
unmap_guard.Cancel();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4479,7 +4447,7 @@ namespace ams::kern {
|
|||
|
||||
/* Reset the current tracking address, and make sure we clean up on failure. */
|
||||
cur_address = address;
|
||||
auto remap_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
if (cur_address > address) {
|
||||
const KProcessAddress last_map_address = cur_address - 1;
|
||||
cur_address = address;
|
||||
|
@ -4574,8 +4542,7 @@ namespace ams::kern {
|
|||
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
|
||||
|
||||
/* We succeeded. */
|
||||
remap_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
@@ -4583,7 +4550,7 @@ namespace ams::kern {
R_UNLESS(Kernel::GetUnsafeMemory().TryReserve(size), svc::ResultLimitReached());

/* Ensure we release our reservation on failure. */
auto reserve_guard = SCOPE_GUARD { Kernel::GetUnsafeMemory().Release(size); };
ON_RESULT_FAILURE { Kernel::GetUnsafeMemory().Release(size); };

/* Create a page group for the new memory. */
KPageGroup pg(m_block_info_manager);

@@ -4628,8 +4595,7 @@ namespace ams::kern {
m_mapped_unsafe_physical_memory += size;

/* We succeeded. */
reserve_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
}

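The other recurring change in this diff, visible in the MapPhysicalMemoryUnsafe hunk above, replaces a SCOPE_GUARD that had to be cancelled on the success path with ON_RESULT_FAILURE, whose body runs only when the enclosing function is about to return a failing Result. The following is a rough standalone illustration of that idea, using a hand-rolled guard class and an int as a stand-in for Result; the real macro is implemented quite differently inside the kernel's result framework.

#include <cstdio>
#include <functional>
#include <utility>

/* Stand-in result type: 0 == success, anything else == failure. */
using FakeResult = int;

/* Runs its cleanup at scope exit only if the tracked result is a failure. */
class OnFailureGuard {
    public:
        OnFailureGuard(const FakeResult &result, std::function<void()> cleanup)
            : m_result(result), m_cleanup(std::move(cleanup)) { }
        ~OnFailureGuard() { if (m_result != 0) { m_cleanup(); } }
    private:
        const FakeResult &m_result;
        std::function<void()> m_cleanup;
};

FakeResult MapSomethingUnsafe(bool reserve_ok, bool map_ok) {
    FakeResult result = 0;

    if (!reserve_ok) { return -1; }        /* nothing reserved yet, nothing to undo */

    /* Release the reservation on failure; no Cancel() needed on the success path. */
    OnFailureGuard release_reservation(result, [] { std::puts("Release(size)"); });

    if (!map_ok) { return result = -2; }   /* failure: guard fires at scope exit */

    return result;                         /* success: guard stays quiet */
}

int main() {
    MapSomethingUnsafe(true, false);       /* prints Release(size) */
    MapSomethingUnsafe(true, true);        /* prints nothing */
}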
@ -4666,7 +4632,7 @@ namespace ams::kern {
|
|||
/* Update our mapped unsafe size. */
|
||||
m_mapped_unsafe_physical_memory -= size;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase &src_page_table, KProcessAddress src_address) {
|
||||
|
@ -4770,7 +4736,7 @@ namespace ams::kern {
|
|||
/* Apply the memory block update. */
|
||||
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}

@@ -59,7 +59,7 @@ namespace ams::kern {
R_UNLESS(m_state == State::Normal, svc::ResultPortClosed());

m_server.EnqueueSession(session);
return ResultSuccess();
R_SUCCEED();
}

Result KPort::EnqueueSession(KLightServerSession *session) {
@@ -68,7 +68,7 @@ namespace ams::kern {
R_UNLESS(m_state == State::Normal, svc::ResultPortClosed());

m_server.EnqueueSession(session);
return ResultSuccess();
R_SUCCEED();
}

}

@@ -79,12 +79,12 @@ namespace ams::kern {
/* Terminate and close the thread. */
ON_SCOPE_EXIT { cur_child->Close(); };

if (Result terminate_result = cur_child->Terminate(); svc::ResultTerminationRequested::Includes(terminate_result)) {
return terminate_result;
if (const Result terminate_result = cur_child->Terminate(); svc::ResultTerminationRequested::Includes(terminate_result)) {
R_THROW(terminate_result);
}
}

return ResultSuccess();
R_SUCCEED();
}

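In the TerminateChildren hunk just above, the if-with-initializer keeps terminate_result scoped to the check, the new version marks it const, and the early return of a failing result now goes through R_THROW. Below is a small standalone sketch of the same shape, with an int standing in for Result; names here are illustrative, not kernel API.

#include <cstdio>

/* 0 == success; a stand-in for Result and svc::ResultTerminationRequested. */
constexpr int TerminationRequested = 7;

int Terminate(bool request_termination) { return request_termination ? TerminationRequested : 0; }

int TerminateChildrenShape(bool request_termination) {
    /* The result only exists for the duration of the check, and is never mutated. */
    if (const int terminate_result = Terminate(request_termination); terminate_result == TerminationRequested) {
        return terminate_result;    /* in the kernel this is R_THROW(terminate_result) */
    }
    return 0;                       /* in the kernel this is R_SUCCEED() */
}

int main() {
    std::printf("%d %d\n", TerminateChildrenShape(true), TerminateChildrenShape(false));
}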
class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
@ -259,7 +259,7 @@ namespace ams::kern {
|
|||
/* We're initialized! */
|
||||
m_is_initialized = true;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KProcess::Initialize(const ams::svc::CreateProcessParameter ¶ms, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool, bool immortal) {
|
||||
|
@ -287,7 +287,7 @@ namespace ams::kern {
|
|||
auto *pt_manager = std::addressof(is_app ? Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager());
|
||||
R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager, res_limit));
|
||||
}
|
||||
auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };
|
||||
ON_RESULT_FAILURE { m_page_table.Finalize(); };
|
||||
|
||||
/* Ensure we can insert the code region. */
|
||||
R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
|
||||
|
@ -310,8 +310,7 @@ namespace ams::kern {
|
|||
m_resource_limit->Open();
|
||||
|
||||
/* We succeeded! */
|
||||
pt_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KProcess::Initialize(const ams::svc::CreateProcessParameter ¶ms, svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool) {
|
||||
|
@ -372,7 +371,7 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* Ensure we don't leak any secure memory we allocated. */
|
||||
auto sys_resource_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
if (m_system_resource_address != Null<KVirtualAddress>) {
|
||||
/* Check that we have no outstanding allocations. */
|
||||
MESOSPHERE_ABORT_UNLESS(m_memory_block_slab_manager.GetUsed() == 0);
|
||||
|
@ -397,7 +396,7 @@ namespace ams::kern {
|
|||
const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
|
||||
R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager, res_limit));
|
||||
}
|
||||
auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };
|
||||
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
|
||||
|
||||
/* Ensure we can insert the code region. */
|
||||
R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
|
||||
|
@ -424,12 +423,9 @@ namespace ams::kern {
|
|||
/* Open a reference to the resource limit. */
|
||||
m_resource_limit->Open();
|
||||
|
||||
/* We succeeded, so commit our memory reservation and cancel our guards. */
|
||||
sys_resource_guard.Cancel();
|
||||
pt_guard.Cancel();
|
||||
/* We succeeded, so commit our memory reservation. */
|
||||
memory_reservation.Commit();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KProcess::DoWorkerTaskImpl() {
|
||||
|
@ -457,7 +453,7 @@ namespace ams::kern {
|
|||
};
|
||||
|
||||
/* Terminate child threads other than the current one. */
|
||||
return TerminateChildren(this, GetCurrentThreadPointer());
|
||||
R_RETURN(TerminateChildren(this, GetCurrentThreadPointer()));
|
||||
}
|
||||
|
||||
void KProcess::FinishTermination() {
|
||||
|
@ -556,7 +552,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KProcess::AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
|
||||
|
@ -590,7 +586,7 @@ namespace ams::kern {
|
|||
shmem->Open();
|
||||
info->Open();
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KProcess::RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
|
||||
|
@ -665,14 +661,14 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
*out = tlr;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
}
|
||||
|
||||
/* Allocate a new page. */
|
||||
tlp = KThreadLocalPage::Allocate();
|
||||
R_UNLESS(tlp != nullptr, svc::ResultOutOfMemory());
|
||||
auto tlp_guard = SCOPE_GUARD { KThreadLocalPage::Free(tlp); };
|
||||
ON_RESULT_FAILURE { KThreadLocalPage::Free(tlp); };
|
||||
|
||||
/* Initialize the new page. */
|
||||
R_TRY(tlp->Initialize(this));
|
||||
|
@ -692,9 +688,8 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* We succeeded! */
|
||||
tlp_guard.Cancel();
|
||||
*out = tlr;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
|
||||
|
@ -742,7 +737,7 @@ namespace ams::kern {
|
|||
KThreadLocalPage::Free(page_to_free);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void *KProcess::GetThreadLocalRegionPointer(KProcessAddress addr) {
@@ -961,7 +956,7 @@ namespace ams::kern {
}

/* Ensure our stack is safe to clean up on exit. */
auto stack_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
if (m_main_thread_stack_size) {
MESOSPHERE_R_ABORT_UNLESS(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size, m_main_thread_stack_size / PageSize, KMemoryState_Stack));
m_main_thread_stack_size = 0;
@@ -973,7 +968,7 @@ namespace ams::kern {

/* Initialize our handle table. */
R_TRY(this->InitializeHandleTable(m_capabilities.GetHandleTableSize()));
auto ht_guard = SCOPE_GUARD { this->FinalizeHandleTable(); };
ON_RESULT_FAILURE_2 { this->FinalizeHandleTable(); };

/* Create a new thread for the process. */
KThread *main_thread = KThread::Create();
@@ -996,7 +991,7 @@ namespace ams::kern {

/* Update our state. */
this->ChangeState((state == State_Created) ? State_Running : State_RunningAttached);
auto state_guard = SCOPE_GUARD { this->ChangeState(state); };
ON_RESULT_FAILURE_2 { this->ChangeState(state); };

/* Run our thread. */
R_TRY(main_thread->Run());
@@ -1004,16 +999,13 @@ namespace ams::kern {
/* Open a reference to represent that we're running. */
this->Open();

/* We succeeded! Cancel our guards. */
state_guard.Cancel();
ht_guard.Cancel();
stack_guard.Cancel();
/* We succeeded! Commit our memory reservation. */
mem_reservation.Commit();

/* Note for debug that we're running a new process. */
MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", m_process_id, m_name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore());

return ResultSuccess();
R_SUCCEED();
}

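The KProcess::Run() hunks above show the payoff of the new macros: three separately cancelled guards (stack_guard, ht_guard, state_guard) collapse into failure-only handlers, so the success path no longer has to cancel each one before returning. Here is a standalone sketch of that structure, again using hand-rolled stand-ins rather than the real ON_RESULT_FAILURE / ON_RESULT_FAILURE_2 macros.

#include <cstdio>
#include <functional>
#include <utility>

using FakeResult = int;    /* 0 == success */

class OnFailureGuard {
    public:
        OnFailureGuard(const FakeResult &result, std::function<void()> cleanup)
            : m_result(result), m_cleanup(std::move(cleanup)) { }
        ~OnFailureGuard() { if (m_result != 0) { m_cleanup(); } }
    private:
        const FakeResult &m_result;
        std::function<void()> m_cleanup;
};

FakeResult RunShape(bool fail_late) {
    FakeResult result = 0;

    /* Cleanups are armed as each setup step succeeds; they run in reverse order of   */
    /* declaration, and only when we leave with a failing result. No Cancel() calls.  */
    OnFailureGuard unmap_stack(result, [] { std::puts("UnmapPages(stack)"); });
    OnFailureGuard finalize_handle_table(result, [] { std::puts("FinalizeHandleTable()"); });
    OnFailureGuard restore_state(result, [] { std::puts("ChangeState(previous)"); });

    if (fail_late) {
        return result = -1;    /* simulated failure after all guards are armed */
    }
    return result;             /* success: no cleanup runs */
}

int main() {
    RunShape(true);            /* prints the three cleanups, most recently armed first */
    RunShape(false);           /* prints nothing */
}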
Result KProcess::Reset() {
|
||||
|
@ -1029,7 +1021,7 @@ namespace ams::kern {
|
|||
|
||||
/* Clear signaled. */
|
||||
m_is_signaled = false;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KProcess::SetActivity(ams::svc::ProcessActivity activity) {
|
||||
|
@ -1071,7 +1063,7 @@ namespace ams::kern {
|
|||
this->SetSuspended(false);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KProcess::PinCurrentThread() {
|
||||
|
@ -1145,7 +1137,7 @@ namespace ams::kern {
|
|||
|
||||
/* We successfully iterated the list. */
|
||||
*out_num_threads = count;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
KProcess::State KProcess::SetDebugObject(void *debug_object) {
|
||||
|
@ -1343,7 +1335,7 @@ namespace ams::kern {
|
|||
|
||||
/* We successfully iterated the list. */
|
||||
*out_num_processes = count;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|

@@ -56,7 +56,7 @@ namespace ams::kern {
this->NotifyAvailable();
}

return ResultSuccess();
R_SUCCEED();
}

Result KReadableEvent::Reset() {
@@ -67,7 +67,7 @@ namespace ams::kern {
R_UNLESS(m_is_signaled, svc::ResultInvalidState());

m_is_signaled = false;
return ResultSuccess();
R_SUCCEED();
}

}

@@ -108,7 +108,7 @@ namespace ams::kern {
m_limit_values[which] = value;
m_peak_values[which] = m_current_values[which];

return ResultSuccess();
R_SUCCEED();
}

void KResourceLimit::Add(ams::svc::LimitableResource which, s64 value) {

@@ -230,7 +230,7 @@ namespace ams::kern {
}
}

return result;
R_RETURN(result);
}

ALWAYS_INLINE Result ProcessReceiveMessagePointerDescriptors(int &offset, int &pointer_key, KProcessPageTable &dst_page_table, KProcessPageTable &src_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ReceiveList &dst_recv_list, bool dst_user) {
|
||||
|
@ -278,7 +278,7 @@ namespace ams::kern {
|
|||
/* Set the output descriptor. */
|
||||
dst_msg.Set(cur_offset, ipc::MessageBuffer::PointerDescriptor(reinterpret_cast<void *>(recv_pointer), recv_size, src_desc.GetIndex()));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE Result GetMapAliasMemoryState(KMemoryState &out, ipc::MessageBuffer::MapAliasDescriptor::Attribute attr) {
@@ -286,10 +286,10 @@ namespace ams::kern {
case ipc::MessageBuffer::MapAliasDescriptor::Attribute_Ipc: out = KMemoryState_Ipc; break;
case ipc::MessageBuffer::MapAliasDescriptor::Attribute_NonSecureIpc: out = KMemoryState_NonSecureIpc; break;
case ipc::MessageBuffer::MapAliasDescriptor::Attribute_NonDeviceIpc: out = KMemoryState_NonDeviceIpc; break;
default: return svc::ResultInvalidCombination();
default: R_THROW(svc::ResultInvalidCombination());
}

return ResultSuccess();
R_SUCCEED();
}

constexpr ALWAYS_INLINE Result GetMapAliasTestStateAndAttributeMask(u32 &out_state, u32 &out_attr_mask, KMemoryState state) {
@@ -307,10 +307,10 @@ namespace ams::kern {
out_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
break;
default:
return svc::ResultInvalidCombination();
R_THROW(svc::ResultInvalidCombination());
}

return ResultSuccess();
R_SUCCEED();
}

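The two mapping helpers above keep the same shape after conversion: each case writes the out-parameter and breaks, the default rejects the combination, and the function ends by reporting success; only the return spelling changes (R_THROW for the rejection, R_SUCCEED at the end). A compact standalone illustration of that shape, using plain enums and an int in place of Result and svc::ResultInvalidCombination:

#include <cstdio>

enum class Attr { Ipc, NonSecureIpc, NonDeviceIpc, Invalid };
enum class State { Ipc, NonSecureIpc, NonDeviceIpc };

/* 0 == success, negative == failure; a stand-in for the kernel's Result type. */
int GetMemoryStateForAttr(State *out, Attr attr) {
    switch (attr) {
        case Attr::Ipc:          *out = State::Ipc;          break;
        case Attr::NonSecureIpc: *out = State::NonSecureIpc; break;
        case Attr::NonDeviceIpc: *out = State::NonDeviceIpc; break;
        default:                 return -1;   /* would be R_THROW(svc::ResultInvalidCombination()) */
    }
    return 0;                                 /* would be R_SUCCEED() */
}

int main() {
    State s;
    std::printf("%d %d\n", GetMemoryStateForAttr(&s, Attr::Ipc), GetMemoryStateForAttr(&s, Attr::Invalid));
}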
ALWAYS_INLINE void CleanupSpecialData(KProcess &dst_process, u32 *dst_msg_ptr, size_t dst_buffer_size) {
@ -388,7 +388,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result CleanupServerMap(KSessionRequest *request, KProcess *server_process) {
|
||||
|
@ -413,7 +413,7 @@ namespace ams::kern {
|
|||
R_TRY(server_page_table.CleanupForIpcServer(request->GetExchangeServerAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i)));
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result CleanupClientMap(KSessionRequest *request, KProcessPageTable *client_page_table) {
|
||||
|
@ -435,7 +435,7 @@ namespace ams::kern {
|
|||
R_TRY(client_page_table->CleanupForIpcClient(request->GetExchangeClientAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i)));
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result CleanupMap(KSessionRequest *request, KProcess *server_process, KProcessPageTable *client_page_table) {
|
||||
|
@ -445,7 +445,7 @@ namespace ams::kern {
|
|||
/* Cleanup the client map. */
|
||||
R_TRY(CleanupClientMap(request, client_page_table));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result ProcessReceiveMessageMapAliasDescriptors(int &offset, KProcessPageTable &dst_page_table, KProcessPageTable &src_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, KSessionRequest *request, KMemoryPermission perm, bool send) {
|
||||
|
@ -471,7 +471,7 @@ namespace ams::kern {
|
|||
R_TRY(dst_page_table.SetupForIpc(std::addressof(dst_address), size, src_address, src_page_table, perm, dst_state, send));
|
||||
|
||||
/* Ensure that we clean up on failure. */
|
||||
auto setup_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
dst_page_table.CleanupForIpcServer(dst_address, size, dst_state);
|
||||
src_page_table.CleanupForIpcClient(src_address, size, dst_state);
|
||||
};
|
||||
|
@ -484,15 +484,12 @@ namespace ams::kern {
|
|||
} else {
|
||||
R_TRY(request->PushReceive(src_address, dst_address, size, dst_state));
|
||||
}
|
||||
|
||||
/* We successfully pushed the mapping. */
|
||||
setup_guard.Cancel();
|
||||
}
|
||||
|
||||
/* Set the output descriptor. */
|
||||
dst_msg.Set(cur_offset, ipc::MessageBuffer::MapAliasDescriptor(GetVoidPointer(dst_address), size, src_desc.GetAttribute()));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result ReceiveMessage(bool &recv_list_broken, uintptr_t dst_message_buffer, size_t dst_buffer_size, KPhysicalAddress dst_message_paddr, KThread &src_thread, uintptr_t src_message_buffer, size_t src_buffer_size, KServerSession *session, KSessionRequest *request) {
|
||||
|
@ -579,7 +576,7 @@ namespace ams::kern {
|
|||
int offset = dst_msg.Set(src_header);
|
||||
|
||||
/* Set up a guard to make sure that we end up in a clean state on error. */
|
||||
auto cleanup_guard = SCOPE_GUARD {
|
||||
ON_RESULT_FAILURE {
|
||||
/* Cleanup mappings. */
|
||||
CleanupMap(request, std::addressof(dst_process), std::addressof(src_page_table));
|
||||
|
||||
|
@ -678,8 +675,7 @@ namespace ams::kern {
|
|||
}
|
||||
|
||||
/* We succeeded! */
|
||||
cleanup_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result ProcessSendMessageReceiveMapping(KProcessPageTable &dst_page_table, KProcessAddress client_address, KProcessAddress server_address, size_t size, KMemoryState src_state) {
|
||||
|
@ -720,7 +716,7 @@ namespace ams::kern {
|
|||
mapping_src_end));
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result ProcessSendMessagePointerDescriptors(int &offset, int &pointer_key, KProcessPageTable &dst_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ReceiveList &dst_recv_list, bool dst_user) {
|
||||
|
@ -759,7 +755,7 @@ namespace ams::kern {
|
|||
/* Set the output descriptor. */
|
||||
dst_msg.Set(cur_offset, ipc::MessageBuffer::PointerDescriptor(reinterpret_cast<void *>(recv_pointer), recv_size, src_desc.GetIndex()));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result SendMessage(uintptr_t src_message_buffer, size_t src_buffer_size, KPhysicalAddress src_message_paddr, KThread &dst_thread, uintptr_t dst_message_buffer, size_t dst_buffer_size, KServerSession *session, KSessionRequest *request) {
|
||||
|
@ -820,124 +816,125 @@ namespace ams::kern {
|
|||
int pointer_key = 0;
|
||||
bool processed_special_data = false;
|
||||
|
||||
/* Set up a guard to make sure that we end up in a clean state on error. */
|
||||
auto cleanup_guard = SCOPE_GUARD {
|
||||
/* Cleanup special data. */
|
||||
if (processed_special_data) {
|
||||
if (src_header.GetHasSpecialHeader()) {
|
||||
CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size);
|
||||
/* Send the message. */
|
||||
{
|
||||
/* Make sure that we end up in a clean state on error. */
|
||||
ON_RESULT_FAILURE {
|
||||
/* Cleanup special data. */
|
||||
if (processed_special_data) {
|
||||
if (src_header.GetHasSpecialHeader()) {
|
||||
CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size);
|
||||
}
|
||||
} else {
|
||||
CleanupServerHandles(src_user ? src_message_buffer : 0, src_buffer_size, src_message_paddr);
|
||||
}
|
||||
} else {
|
||||
CleanupServerHandles(src_user ? src_message_buffer : 0, src_buffer_size, src_message_paddr);
|
||||
|
||||
/* Cleanup mappings. */
|
||||
CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table));
|
||||
};
|
||||
|
||||
/* Ensure that the headers fit. */
|
||||
R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <= src_buffer_size, svc::ResultInvalidCombination());
|
||||
R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <= dst_buffer_size, svc::ResultInvalidCombination());
|
||||
|
||||
/* Ensure the receive list offset is after the end of raw data. */
|
||||
if (dst_header.GetReceiveListOffset()) {
|
||||
R_UNLESS(dst_header.GetReceiveListOffset() >= ipc::MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) + dst_header.GetRawCount(), svc::ResultInvalidCombination());
|
||||
}
|
||||
|
||||
/* Cleanup mappings. */
|
||||
CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table));
|
||||
};
|
||||
/* Ensure that the destination buffer is big enough to receive the source. */
|
||||
R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), svc::ResultMessageTooLarge());
|
||||
|
||||
/* Ensure that the headers fit. */
|
||||
R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <= src_buffer_size, svc::ResultInvalidCombination());
|
||||
R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <= dst_buffer_size, svc::ResultInvalidCombination());
|
||||
/* Replies must have no buffers. */
|
||||
R_UNLESS(src_header.GetSendCount() == 0, svc::ResultInvalidCombination());
|
||||
R_UNLESS(src_header.GetReceiveCount() == 0, svc::ResultInvalidCombination());
|
||||
R_UNLESS(src_header.GetExchangeCount() == 0, svc::ResultInvalidCombination());
|
||||
|
||||
/* Ensure the receive list offset is after the end of raw data. */
|
||||
if (dst_header.GetReceiveListOffset()) {
|
||||
R_UNLESS(dst_header.GetReceiveListOffset() >= ipc::MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) + dst_header.GetRawCount(), svc::ResultInvalidCombination());
|
||||
}
|
||||
/* Get the receive list. */
|
||||
const s32 dst_recv_list_idx = ipc::MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header);
|
||||
ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header, dst_special_header, dst_buffer_size, src_end_offset, dst_recv_list_idx, !dst_user);
|
||||
|
||||
/* Ensure that the destination buffer is big enough to receive the source. */
|
||||
R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), svc::ResultMessageTooLarge());
|
||||
/* Handle any receive buffers. */
|
||||
for (size_t i = 0; i < request->GetReceiveCount(); ++i) {
|
||||
R_TRY(ProcessSendMessageReceiveMapping(dst_page_table, request->GetReceiveClientAddress(i), request->GetReceiveServerAddress(i), request->GetReceiveSize(i), request->GetReceiveMemoryState(i)));
|
||||
}
|
||||
|
||||
/* Replies must have no buffers. */
|
||||
R_UNLESS(src_header.GetSendCount() == 0, svc::ResultInvalidCombination());
|
||||
R_UNLESS(src_header.GetReceiveCount() == 0, svc::ResultInvalidCombination());
|
||||
R_UNLESS(src_header.GetExchangeCount() == 0, svc::ResultInvalidCombination());
|
||||
/* Handle any exchange buffers. */
|
||||
for (size_t i = 0; i < request->GetExchangeCount(); ++i) {
|
||||
R_TRY(ProcessSendMessageReceiveMapping(dst_page_table, request->GetExchangeClientAddress(i), request->GetExchangeServerAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i)));
|
||||
}
|
||||
|
||||
/* Get the receive list. */
|
||||
const s32 dst_recv_list_idx = ipc::MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header);
|
||||
ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header, dst_special_header, dst_buffer_size, src_end_offset, dst_recv_list_idx, !dst_user);
|
||||
/* Set the header. */
|
||||
offset = dst_msg.Set(src_header);
|
||||
|
||||
/* Handle any receive buffers. */
|
||||
for (size_t i = 0; i < request->GetReceiveCount(); ++i) {
|
||||
R_TRY(ProcessSendMessageReceiveMapping(dst_page_table, request->GetReceiveClientAddress(i), request->GetReceiveServerAddress(i), request->GetReceiveSize(i), request->GetReceiveMemoryState(i)));
|
||||
}
|
||||
/* Process any special data. */
|
||||
MESOSPHERE_ASSERT(GetCurrentThreadPointer() == std::addressof(src_thread));
|
||||
processed_special_data = true;
|
||||
if (src_header.GetHasSpecialHeader()) {
|
||||
R_TRY(ProcessMessageSpecialData<true>(offset, dst_process, src_process, src_thread, dst_msg, src_msg, src_special_header));
|
||||
}
|
||||
|
||||
/* Handle any exchange buffers. */
|
||||
for (size_t i = 0; i < request->GetExchangeCount(); ++i) {
|
||||
R_TRY(ProcessSendMessageReceiveMapping(dst_page_table, request->GetExchangeClientAddress(i), request->GetExchangeServerAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i)));
|
||||
}
|
||||
/* Process any pointer buffers. */
|
||||
for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
|
||||
R_TRY(ProcessSendMessagePointerDescriptors(offset, pointer_key, dst_page_table, dst_msg, src_msg, dst_recv_list, dst_user && dst_header.GetReceiveListCount() == ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer));
|
||||
}
|
||||
|
||||
/* Set the header. */
|
||||
offset = dst_msg.Set(src_header);
|
||||
/* Clear any map alias buffers. */
|
||||
for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
|
||||
offset = dst_msg.Set(offset, ipc::MessageBuffer::MapAliasDescriptor());
|
||||
}
|
||||
|
||||
/* Process any special data. */
|
||||
MESOSPHERE_ASSERT(GetCurrentThreadPointer() == std::addressof(src_thread));
|
||||
processed_special_data = true;
|
||||
if (src_header.GetHasSpecialHeader()) {
|
||||
R_TRY(ProcessMessageSpecialData<true>(offset, dst_process, src_process, src_thread, dst_msg, src_msg, src_special_header));
|
||||
}
|
||||
/* Process any raw data. */
|
||||
if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
|
||||
/* Get the offset and size. */
|
||||
const size_t offset_words = offset * sizeof(u32);
|
||||
const size_t raw_size = raw_count * sizeof(u32);
|
||||
|
||||
/* Process any pointer buffers. */
|
||||
for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
|
||||
R_TRY(ProcessSendMessagePointerDescriptors(offset, pointer_key, dst_page_table, dst_msg, src_msg, dst_recv_list, dst_user && dst_header.GetReceiveListCount() == ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer));
|
||||
}
|
||||
/* Fast case is TLS -> TLS, do raw memcpy if we can. */
|
||||
if (!dst_user && !src_user) {
|
||||
std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size);
|
||||
} else if (src_user) {
|
||||
/* Determine how much fast size we can copy. */
|
||||
const size_t max_fast_size = std::min<size_t>(offset_words + raw_size, PageSize);
|
||||
const size_t fast_size = max_fast_size - offset_words;
|
||||
|
||||
/* Clear any map alias buffers. */
|
||||
for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
|
||||
offset = dst_msg.Set(offset, ipc::MessageBuffer::MapAliasDescriptor());
|
||||
}
|
||||
/* Determine the dst permission. User buffer should be unmapped + read, TLS should be user readable. */
|
||||
const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite);
|
||||
|
||||
/* Process any raw data. */
|
||||
if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
|
||||
/* Get the offset and size. */
|
||||
const size_t offset_words = offset * sizeof(u32);
|
||||
const size_t raw_size = raw_count * sizeof(u32);
|
||||
/* Perform the fast part of the copy. */
|
||||
R_TRY(dst_page_table.CopyMemoryFromKernelToLinear(dst_message_buffer + offset_words, fast_size,
|
||||
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
|
||||
dst_perm,
|
||||
KMemoryAttribute_Uncached, KMemoryAttribute_None,
|
||||
reinterpret_cast<uintptr_t>(src_msg_ptr) + offset_words));
|
||||
|
||||
/* Fast case is TLS -> TLS, do raw memcpy if we can. */
|
||||
if (!dst_user && !src_user) {
|
||||
std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size);
|
||||
} else if (src_user) {
|
||||
/* Determine how much fast size we can copy. */
|
||||
const size_t max_fast_size = std::min<size_t>(offset_words + raw_size, PageSize);
|
||||
const size_t fast_size = max_fast_size - offset_words;
|
||||
/* If the fast part of the copy didn't get everything, perform the slow part of the copy. */
|
||||
if (fast_size < raw_size) {
|
||||
R_TRY(dst_page_table.CopyMemoryFromHeapToHeap(dst_page_table, dst_message_buffer + max_fast_size, raw_size - fast_size,
|
||||
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
|
||||
dst_perm,
|
||||
KMemoryAttribute_Uncached, KMemoryAttribute_None,
|
||||
src_message_buffer + max_fast_size,
|
||||
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
|
||||
static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelRead),
|
||||
KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_Locked));
|
||||
}
|
||||
} else /* if (dst_user) */ {
|
||||
/* The destination is a user buffer, so it should be unmapped + readable. */
|
||||
constexpr KMemoryPermission DestinationPermission = static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite);
|
||||
|
||||
/* Determine the dst permission. User buffer should be unmapped + read, TLS should be user readable. */
|
||||
const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite);
|
||||
|
||||
/* Perform the fast part of the copy. */
|
||||
R_TRY(dst_page_table.CopyMemoryFromKernelToLinear(dst_message_buffer + offset_words, fast_size,
|
||||
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
|
||||
dst_perm,
|
||||
KMemoryAttribute_Uncached, KMemoryAttribute_None,
|
||||
reinterpret_cast<uintptr_t>(src_msg_ptr) + offset_words));
|
||||
|
||||
/* If the fast part of the copy didn't get everything, perform the slow part of the copy. */
|
||||
if (fast_size < raw_size) {
|
||||
R_TRY(dst_page_table.CopyMemoryFromHeapToHeap(dst_page_table, dst_message_buffer + max_fast_size, raw_size - fast_size,
|
||||
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
|
||||
dst_perm,
|
||||
KMemoryAttribute_Uncached, KMemoryAttribute_None,
|
||||
src_message_buffer + max_fast_size,
|
||||
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
|
||||
static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelRead),
|
||||
KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_Locked));
|
||||
/* Copy the memory. */
|
||||
R_TRY(dst_page_table.CopyMemoryFromUserToLinear(dst_message_buffer + offset_words, raw_size,
|
||||
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
|
||||
DestinationPermission,
|
||||
KMemoryAttribute_Uncached, KMemoryAttribute_None,
|
||||
src_message_buffer + offset_words));
|
||||
}
|
||||
} else /* if (dst_user) */ {
|
||||
/* The destination is a user buffer, so it should be unmapped + readable. */
|
||||
constexpr KMemoryPermission DestinationPermission = static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite);
|
||||
|
||||
/* Copy the memory. */
|
||||
R_TRY(dst_page_table.CopyMemoryFromUserToLinear(dst_message_buffer + offset_words, raw_size,
|
||||
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
|
||||
DestinationPermission,
|
||||
KMemoryAttribute_Uncached, KMemoryAttribute_None,
|
||||
src_message_buffer + offset_words));
|
||||
}
|
||||
}
|
||||
|
||||
/* We succeeded. Perform cleanup with validation. */
|
||||
cleanup_guard.Cancel();
|
||||
|
||||
return CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table));
|
||||
/* Perform (and validate) any remaining cleanup. */
|
||||
R_RETURN(CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table)));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void ReplyAsyncError(KProcess *to_process, uintptr_t to_msg_buf, size_t to_msg_buf_size, Result result) {
@@ -1065,7 +1062,7 @@ namespace ams::kern {
}
}

return result;
R_RETURN(result);
}

Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buffer_size, KPhysicalAddress server_message_paddr) {
@@ -1161,7 +1158,7 @@ namespace ams::kern {
}
}

return result;
R_RETURN(result);
}

Result KServerSession::OnRequest(KSessionRequest *request) {
@@ -1200,7 +1197,7 @@ namespace ams::kern {
GetCurrentThread().BeginWait(std::addressof(wait_queue));
}

return GetCurrentThread().GetWaitResult();
R_RETURN(GetCurrentThread().GetWaitResult());
}

bool KServerSession::IsSignaledImpl() const {

@@ -40,22 +40,22 @@ namespace ams::kern {
/* Set the mapping. */
mapping->Set(client, server, size, state);

return ResultSuccess();
R_SUCCEED();
}

Result KSessionRequest::SessionMappings::PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
MESOSPHERE_ASSERT(m_num_recv == 0);
MESOSPHERE_ASSERT(m_num_exch == 0);
return this->PushMap(client, server, size, state, m_num_send++);
R_RETURN(this->PushMap(client, server, size, state, m_num_send++));
}

Result KSessionRequest::SessionMappings::PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
MESOSPHERE_ASSERT(m_num_exch == 0);
return this->PushMap(client, server, size, state, m_num_send + m_num_recv++);
R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv++));
}

Result KSessionRequest::SessionMappings::PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++);
R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++));
}

void KSessionRequest::SessionMappings::Finalize() {

@@ -54,7 +54,7 @@ namespace ams::kern {
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress())), 0, block.GetSize());
}

return ResultSuccess();
R_SUCCEED();
}

void KSharedMemory::Finalize() {
@@ -88,7 +88,7 @@ namespace ams::kern {
}

/* Map the memory. */
return table->MapPageGroup(address, m_page_group, KMemoryState_Shared, ConvertToKMemoryPermission(map_perm));
R_RETURN(table->MapPageGroup(address, m_page_group, KMemoryState_Shared, ConvertToKMemoryPermission(map_perm)));
}

Result KSharedMemory::Unmap(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process) {
@@ -99,7 +99,7 @@ namespace ams::kern {
R_UNLESS(m_page_group.GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());

/* Unmap the memory. */
return table->UnmapPageGroup(address, m_page_group, KMemoryState_Shared);
R_RETURN(table->UnmapPageGroup(address, m_page_group, KMemoryState_Shared));
}

}

@@ -103,7 +103,7 @@ namespace ams::kern {
/* Check if the thread should terminate. */
if (thread->IsTerminationRequested()) {
slp.CancelSleep();
return svc::ResultTerminationRequested();
R_THROW(svc::ResultTerminationRequested());
}

/* Check if any of the objects are already signaled. */
@@ -113,21 +113,21 @@ namespace ams::kern {
if (objects[i]->IsSignaled()) {
*out_index = i;
slp.CancelSleep();
return ResultSuccess();
R_SUCCEED();
}
}

/* Check if the timeout is zero. */
if (timeout == 0) {
slp.CancelSleep();
return svc::ResultTimedOut();
R_THROW(svc::ResultTimedOut());
}

/* Check if waiting was canceled. */
if (thread->IsWaitCancelled()) {
slp.CancelSleep();
thread->ClearWaitCancelled();
return svc::ResultCancelled();
R_THROW(svc::ResultCancelled());
}

/* Add the waiters. */
@@ -153,7 +153,7 @@ namespace ams::kern {
*out_index = thread->GetSyncedIndex();

/* Get the wait result. */
return thread->GetWaitResult();
R_RETURN(thread->GetWaitResult());
}

void KSynchronizationObject::NotifyAvailable(Result result) {

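The wait path above now spells out its exits: failing results leave through R_THROW, plain success through R_SUCCEED, and a forwarded Result through R_RETURN. Below is a compact sketch of how such early-exit macros can sit on top of a plain Result type; the definitions are illustrative stand-ins, not the actual Atmosphere macros (which also interact with the result guards).

#include <cstdio>

/* Illustrative stand-ins; not the actual Atmosphere definitions. */
struct Result {
    int value;
    constexpr bool IsFailure() const { return value != 0; }
};
constexpr Result ResultSuccess()  { return Result{0}; }
constexpr Result ResultTimedOut() { return Result{1}; }

/* Early-exit macros: the real ones also notify any active result guards. */
#define R_SUCCEED()     return ResultSuccess()
#define R_THROW(res)    return (res)
#define R_RETURN(expr)  return (expr)

/* Hypothetical forwarded result, standing in for thread->GetWaitResult(). */
Result GetWaitResultSketch(bool ok) { return Result{ok ? 0 : 2}; }

Result WaitSketch(int timeout, bool already_signaled, bool wait_ok) {
    if (already_signaled) {
        R_SUCCEED();                          /* success exit */
    }
    if (timeout == 0) {
        R_THROW(ResultTimedOut());            /* error exit */
    }
    R_RETURN(GetWaitResultSketch(wait_ok));   /* forward another Result */
}

int main() {
    std::printf("%d %d %d\n",
                WaitSketch(0, true,  true).value,
                WaitSketch(0, false, true).value,
                WaitSketch(1, false, false).value);
    return 0;
}
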
@@ -177,7 +177,7 @@ namespace ams::kern {

Result KSystemControlBase::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
MESOSPHERE_UNUSED(out, address, mask, value);
return svc::ResultNotImplemented();
R_THROW(svc::ResultNotImplemented());
}

/* Randomness. */
@@ -278,7 +278,7 @@ namespace ams::kern {
R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());

*out = KPageTable::GetHeapVirtualAddress(paddr);
return ResultSuccess();
R_SUCCEED();
}

void KSystemControlBase::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {

@ -235,7 +235,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KThread::InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
|
||||
|
@ -250,23 +250,23 @@ namespace ams::kern {
|
|||
/* Map the stack page. */
|
||||
KProcessAddress stack_top = Null<KProcessAddress>;
|
||||
{
|
||||
/* If we fail to map, avoid leaking the page. */
|
||||
ON_RESULT_FAILURE { KPageBuffer::Free(page); };
|
||||
|
||||
/* Perform the mapping. */
|
||||
KProcessAddress stack_bottom = Null<KProcessAddress>;
|
||||
auto page_guard = SCOPE_GUARD { KPageBuffer::Free(page); };
|
||||
R_TRY(Kernel::GetKernelPageTable().MapPages(std::addressof(stack_bottom), 1, PageSize, page->GetPhysicalAddress(), stack_region.GetAddress(),
|
||||
stack_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
|
||||
page_guard.Cancel();
|
||||
|
||||
|
||||
/* Calculate top of the stack. */
|
||||
stack_top = stack_bottom + PageSize;
|
||||
}
|
||||
|
||||
/* Initialize the thread. */
|
||||
auto map_guard = SCOPE_GUARD { CleanupKernelStack(GetInteger(stack_top)); };
|
||||
R_TRY(thread->Initialize(func, arg, GetVoidPointer(stack_top), user_stack_top, prio, core, owner, type));
|
||||
map_guard.Cancel();
|
||||
/* If we fail, cleanup the stack we mapped. */
|
||||
ON_RESULT_FAILURE { CleanupKernelStack(GetInteger(stack_top)); };
|
||||
|
||||
return ResultSuccess();
|
||||
/* Initialize the thread. */
|
||||
R_RETURN(thread->Initialize(func, arg, GetVoidPointer(stack_top), user_stack_top, prio, core, owner, type));
|
||||
}
|
||||
|
||||
void KThread::PostDestroy(uintptr_t arg) {
|
||||
|
@ -576,7 +576,7 @@ namespace ams::kern {
|
|||
*out_affinity_mask = m_virtual_affinity_mask;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KThread::GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
|
||||
|
@ -595,7 +595,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KThread::SetCoreMask(int32_t core_id, u64 v_affinity_mask) {
|
||||
|
@ -700,7 +700,7 @@ namespace ams::kern {
|
|||
} while (retry_update);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KThread::SetBasePriority(s32 priority) {
|
||||
|
@ -752,7 +752,7 @@ namespace ams::kern {
|
|||
m_base_priority = IdleThreadPriority;
|
||||
KScheduler::OnThreadPriorityChanged(this, old_priority);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KThread::RequestSuspend(SuspendType type) {
|
||||
|
@ -923,7 +923,7 @@ namespace ams::kern {
|
|||
} while (thread_is_current);
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result KThread::GetThreadContext3(ams::svc::ThreadContext *out) {
|
||||
|
@ -944,7 +944,7 @@ namespace ams::kern {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KThread::AddWaiterImpl(KThread *thread) {
|
||||
|
@ -1121,7 +1121,7 @@ namespace ams::kern {
|
|||
|
||||
/* Set our state and finish. */
|
||||
this->SetState(KThread::ThreadState_Runnable);
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1165,15 +1165,15 @@ namespace ams::kern {
|
|||
MESOSPHERE_ASSERT_THIS();
|
||||
MESOSPHERE_ASSERT(this != GetCurrentThreadPointer());
|
||||
|
||||
/* Request the thread terminate. */
|
||||
/* Request the thread terminate if it hasn't already. */
|
||||
if (const auto new_state = this->RequestTerminate(); new_state != ThreadState_Terminated) {
|
||||
/* If the thread isn't terminated, wait for it to terminate. */
|
||||
s32 index;
|
||||
KSynchronizationObject *objects[] = { this };
|
||||
return KSynchronizationObject::Wait(std::addressof(index), objects, 1, ams::svc::WaitInfinite);
|
||||
} else {
|
||||
return ResultSuccess();
|
||||
R_TRY(KSynchronizationObject::Wait(std::addressof(index), objects, 1, ams::svc::WaitInfinite));
|
||||
}
|
||||
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
KThread::ThreadState KThread::RequestTerminate() {
|
||||
|
@ -1248,7 +1248,7 @@ namespace ams::kern {
|
|||
/* Check if the thread should terminate. */
|
||||
if (this->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return svc::ResultTerminationRequested();
|
||||
R_THROW(svc::ResultTerminationRequested());
|
||||
}
|
||||
|
||||
/* Wait for the sleep to end. */
|
||||
|
@ -1256,7 +1256,7 @@ namespace ams::kern {
|
|||
this->BeginWait(std::addressof(wait_queue));
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void KThread::BeginWait(KThreadQueue *queue) {
|
||||
|
@ -1357,7 +1357,7 @@ namespace ams::kern {
|
|||
|
||||
/* We successfully iterated the list. */
|
||||
*out_num_threads = count;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -26,14 +26,10 @@ namespace ams::kern {
/* Allocate a new page. */
KPageBuffer *page_buf = KPageBuffer::Allocate();
R_UNLESS(page_buf != nullptr, svc::ResultOutOfMemory());
auto page_buf_guard = SCOPE_GUARD { KPageBuffer::Free(page_buf); };
ON_RESULT_FAILURE { KPageBuffer::Free(page_buf); };

/* Map the address in. */
R_TRY(m_owner->GetPageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, page_buf->GetPhysicalAddress(), KMemoryState_ThreadLocal, KMemoryPermission_UserReadWrite));

/* We succeeded. */
page_buf_guard.Cancel();
return ResultSuccess();
R_RETURN(m_owner->GetPageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, page_buf->GetPhysicalAddress(), KMemoryState_ThreadLocal, KMemoryPermission_UserReadWrite));
}

Result KThreadLocalPage::Finalize() {
@@ -48,7 +44,7 @@ namespace ams::kern {

/* Free the page. */
KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(phys_addr));
return ResultSuccess();
R_SUCCEED();
}

KProcessAddress KThreadLocalPage::Reserve() {

@@ -41,7 +41,7 @@ namespace ams::kern {

/* We succeeded. */
pg_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}

void KTransferMemory::Finalize() {
@@ -86,7 +86,7 @@ namespace ams::kern {
/* Mark ourselves as mapped. */
m_is_mapped = true;

return ResultSuccess();
R_SUCCEED();
}

Result KTransferMemory::Unmap(KProcessAddress address, size_t size) {
@@ -106,7 +106,7 @@ namespace ams::kern {
MESOSPHERE_ASSERT(m_is_mapped);
m_is_mapped = false;

return ResultSuccess();
R_SUCCEED();
}

}

@@ -61,7 +61,7 @@ namespace ams::kern {
/* Check that the thread isn't terminating. */
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return svc::ResultTerminationRequested();
R_THROW(svc::ResultTerminationRequested());
}

/* Handle the case where timeout is non-negative/infinite. */
@@ -69,7 +69,7 @@ namespace ams::kern {
/* Check if we're already waiting. */
if (m_next_thread != nullptr) {
slp.CancelSleep();
return svc::ResultBusy();
R_THROW(svc::ResultBusy());
}

/* If timeout is zero, handle the special case by canceling all waiting threads. */
@@ -79,7 +79,7 @@ namespace ams::kern {
}

slp.CancelSleep();
return ResultSuccess();
R_SUCCEED();
}
}

@@ -96,7 +96,7 @@ namespace ams::kern {
cur_thread->BeginWait(std::addressof(wait_queue));
}

return ResultSuccess();
R_SUCCEED();
}

}

@@ -56,7 +56,7 @@ namespace ams::kern::svc {
/* Set the activity. */
R_TRY(thread->SetActivity(thread_activity));

return ResultSuccess();
R_SUCCEED();
}

Result SetProcessActivity(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) {
@@ -73,7 +73,7 @@ namespace ams::kern::svc {
/* Set the activity. */
R_TRY(process->SetActivity(process_activity));

return ResultSuccess();
R_SUCCEED();
}

}
@@ -81,21 +81,21 @@ namespace ams::kern::svc {
/* ============================= 64 ABI ============================= */

Result SetThreadActivity64(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) {
return SetThreadActivity(thread_handle, thread_activity);
R_RETURN(SetThreadActivity(thread_handle, thread_activity));
}

Result SetProcessActivity64(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) {
return SetProcessActivity(process_handle, process_activity);
R_RETURN(SetProcessActivity(process_handle, process_activity));
}

/* ============================= 64From32 ABI ============================= */

Result SetThreadActivity64From32(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) {
return SetThreadActivity(thread_handle, thread_activity);
R_RETURN(SetThreadActivity(thread_handle, thread_activity));
}

Result SetProcessActivity64From32(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) {
return SetProcessActivity(process_handle, process_activity);
R_RETURN(SetProcessActivity(process_handle, process_activity));
}

}

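The SVC code above shows the other recurring shapes in this commit: the shared implementation validates with R_UNLESS, propagates sub-results with R_TRY and ends with R_SUCCEED, while the thin 64/64From32 ABI entry points simply forward via R_RETURN. The sketch below illustrates that layering under the same stand-in definitions; names such as SetThreadActivitySketch are hypothetical, only the macro names come from the diff.

#include <cstdio>

/* Illustrative stand-ins; not the actual Atmosphere definitions. */
struct Result {
    int value;
    constexpr bool IsFailure() const { return value != 0; }
};
constexpr Result ResultSuccess()       { return Result{0}; }
constexpr Result ResultInvalidHandle() { return Result{1}; }

#define R_SUCCEED()         return ResultSuccess()
#define R_RETURN(expr)      return (expr)
/* Bail out with `res` unless `cond` holds. */
#define R_UNLESS(cond, res) do { if (!(cond)) { return (res); } } while (0)
/* Propagate failure from a Result-returning expression. */
#define R_TRY(expr)         do { const Result r_ = (expr); if (r_.IsFailure()) { return r_; } } while (0)

/* Hypothetical kernel-side operation; only handle 42 "exists" here. */
Result SetActivityImpl(int handle) { return Result{handle == 42 ? 0 : 2}; }

/* Shared implementation: validate, do the work, report success. */
Result SetThreadActivitySketch(int handle) {
    R_UNLESS(handle != 0, ResultInvalidHandle());
    R_TRY(SetActivityImpl(handle));
    R_SUCCEED();
}

/* Thin per-ABI wrapper: just forwards the Result. */
Result SetThreadActivity64Sketch(int handle) {
    R_RETURN(SetThreadActivitySketch(handle));
}

int main() {
    std::printf("%d %d %d\n",
                SetThreadActivity64Sketch(42).value,
                SetThreadActivity64Sketch(0).value,
                SetThreadActivity64Sketch(7).value);
    return 0;
}
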
@ -69,7 +69,7 @@ namespace ams::kern::svc {
|
|||
timeout = timeout_ns;
|
||||
}
|
||||
|
||||
return GetCurrentProcess().WaitAddressArbiter(address, arb_type, value, timeout);
|
||||
R_RETURN(GetCurrentProcess().WaitAddressArbiter(address, arb_type, value, timeout));
|
||||
}
|
||||
|
||||
Result SignalToAddress(uintptr_t address, ams::svc::SignalType signal_type, int32_t value, int32_t count) {
|
||||
|
@ -78,7 +78,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress());
|
||||
R_UNLESS(IsValidSignalType(signal_type), svc::ResultInvalidEnumValue());
|
||||
|
||||
return GetCurrentProcess().SignalAddressArbiter(address, signal_type, value, count);
|
||||
R_RETURN(GetCurrentProcess().SignalAddressArbiter(address, signal_type, value, count));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -86,21 +86,21 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result WaitForAddress64(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) {
|
||||
return WaitForAddress(address, arb_type, value, timeout_ns);
|
||||
R_RETURN(WaitForAddress(address, arb_type, value, timeout_ns));
|
||||
}
|
||||
|
||||
Result SignalToAddress64(ams::svc::Address address, ams::svc::SignalType signal_type, int32_t value, int32_t count) {
|
||||
return SignalToAddress(address, signal_type, value, count);
|
||||
R_RETURN(SignalToAddress(address, signal_type, value, count));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result WaitForAddress64From32(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) {
|
||||
return WaitForAddress(address, arb_type, value, timeout_ns);
|
||||
R_RETURN(WaitForAddress(address, arb_type, value, timeout_ns));
|
||||
}
|
||||
|
||||
Result SignalToAddress64From32(ams::svc::Address address, ams::svc::SignalType signal_type, int32_t value, int32_t count) {
|
||||
return SignalToAddress(address, signal_type, value, count);
|
||||
R_RETURN(SignalToAddress(address, signal_type, value, count));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ namespace ams::kern::svc {
|
|||
/* Query the physical mapping. */
|
||||
R_TRY(pt.QueryPhysicalAddress(out_info, address));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result QueryIoMapping(uintptr_t *out_address, size_t *out_size, uint64_t phys_addr, size_t size) {
|
||||
|
@ -61,7 +61,7 @@ namespace ams::kern::svc {
|
|||
/* Use the size as the found size. */
|
||||
found_size = size;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
};
|
||||
|
||||
if (aligned) {
|
||||
|
@ -109,7 +109,7 @@ namespace ams::kern::svc {
|
|||
if (out_size != nullptr) {
|
||||
*out_size = found_size;
|
||||
}
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -117,18 +117,18 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result QueryPhysicalAddress64(ams::svc::lp64::PhysicalMemoryInfo *out_info, ams::svc::Address address) {
|
||||
return QueryPhysicalAddress(out_info, address);
|
||||
R_RETURN(QueryPhysicalAddress(out_info, address));
|
||||
}
|
||||
|
||||
Result QueryIoMapping64(ams::svc::Address *out_address, ams::svc::Size *out_size, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
|
||||
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
|
||||
static_assert(sizeof(*out_size) == sizeof(size_t));
|
||||
return QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), reinterpret_cast<size_t *>(out_size), physical_address, size);
|
||||
R_RETURN(QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), reinterpret_cast<size_t *>(out_size), physical_address, size));
|
||||
}
|
||||
|
||||
Result LegacyQueryIoMapping64(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
|
||||
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
|
||||
return QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), nullptr, physical_address, size);
|
||||
R_RETURN(QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), nullptr, physical_address, size));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
@ -142,18 +142,18 @@ namespace ams::kern::svc {
|
|||
.virtual_address = static_cast<u32>(info.virtual_address),
|
||||
.size = static_cast<u32>(info.size),
|
||||
};
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result QueryIoMapping64From32(ams::svc::Address *out_address, ams::svc::Size *out_size, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
|
||||
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
|
||||
static_assert(sizeof(*out_size) == sizeof(size_t));
|
||||
return QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), reinterpret_cast<size_t *>(out_size), physical_address, size);
|
||||
R_RETURN(QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), reinterpret_cast<size_t *>(out_size), physical_address, size));
|
||||
}
|
||||
|
||||
Result LegacyQueryIoMapping64From32(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) {
|
||||
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
|
||||
return QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), nullptr, physical_address, size);
|
||||
R_RETURN(QueryIoMapping(reinterpret_cast<uintptr_t *>(out_address), nullptr, physical_address, size));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -62,7 +62,7 @@ namespace ams::kern::svc {
|
|||
}
|
||||
MESOSPHERE_ASSERT(remaining == 0);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void FlushEntireDataCache() {
|
||||
|
@ -88,7 +88,7 @@ namespace ams::kern::svc {
|
|||
/* Flush the cache. */
|
||||
R_TRY(cpu::FlushDataCache(reinterpret_cast<void *>(address), size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result InvalidateProcessDataCache(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
|
||||
|
@ -104,7 +104,7 @@ namespace ams::kern::svc {
|
|||
/* Invalidate the cache. */
|
||||
R_TRY(process->GetPageTable().InvalidateProcessDataCache(address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result StoreProcessDataCache(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
|
||||
|
@ -123,14 +123,14 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Perform the operation. */
|
||||
if (process.GetPointerUnsafe() == GetCurrentProcessPointer()) {
|
||||
return cpu::StoreDataCache(reinterpret_cast<void *>(address), size);
|
||||
R_RETURN(cpu::StoreDataCache(reinterpret_cast<void *>(address), size));
|
||||
} else {
|
||||
class StoreCacheOperation : public CacheOperation {
|
||||
public:
|
||||
virtual void Operate(void *address, size_t size) const override { cpu::StoreDataCache(address, size); }
|
||||
} operation;
|
||||
|
||||
return DoProcessCacheOperation(operation, page_table, address, size);
|
||||
R_RETURN(DoProcessCacheOperation(operation, page_table, address, size));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -150,14 +150,14 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Perform the operation. */
|
||||
if (process.GetPointerUnsafe() == GetCurrentProcessPointer()) {
|
||||
return cpu::FlushDataCache(reinterpret_cast<void *>(address), size);
|
||||
R_RETURN(cpu::FlushDataCache(reinterpret_cast<void *>(address), size));
|
||||
} else {
|
||||
class FlushCacheOperation : public CacheOperation {
|
||||
public:
|
||||
virtual void Operate(void *address, size_t size) const override { cpu::FlushDataCache(address, size); }
|
||||
} operation;
|
||||
|
||||
return DoProcessCacheOperation(operation, page_table, address, size);
|
||||
R_RETURN(DoProcessCacheOperation(operation, page_table, address, size));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -170,19 +170,19 @@ namespace ams::kern::svc {
|
|||
}
|
||||
|
||||
Result FlushDataCache64(ams::svc::Address address, ams::svc::Size size) {
|
||||
return FlushDataCache(address, size);
|
||||
R_RETURN(FlushDataCache(address, size));
|
||||
}
|
||||
|
||||
Result InvalidateProcessDataCache64(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
|
||||
return InvalidateProcessDataCache(process_handle, address, size);
|
||||
R_RETURN(InvalidateProcessDataCache(process_handle, address, size));
|
||||
}
|
||||
|
||||
Result StoreProcessDataCache64(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
|
||||
return StoreProcessDataCache(process_handle, address, size);
|
||||
R_RETURN(StoreProcessDataCache(process_handle, address, size));
|
||||
}
|
||||
|
||||
Result FlushProcessDataCache64(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
|
||||
return FlushProcessDataCache(process_handle, address, size);
|
||||
R_RETURN(FlushProcessDataCache(process_handle, address, size));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
@ -192,19 +192,19 @@ namespace ams::kern::svc {
|
|||
}
|
||||
|
||||
Result FlushDataCache64From32(ams::svc::Address address, ams::svc::Size size) {
|
||||
return FlushDataCache(address, size);
|
||||
R_RETURN(FlushDataCache(address, size));
|
||||
}
|
||||
|
||||
Result InvalidateProcessDataCache64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
|
||||
return InvalidateProcessDataCache(process_handle, address, size);
|
||||
R_RETURN(InvalidateProcessDataCache(process_handle, address, size));
|
||||
}
|
||||
|
||||
Result StoreProcessDataCache64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
|
||||
return StoreProcessDataCache(process_handle, address, size);
|
||||
R_RETURN(StoreProcessDataCache(process_handle, address, size));
|
||||
}
|
||||
|
||||
Result FlushProcessDataCache64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size) {
|
||||
return FlushProcessDataCache(process_handle, address, size);
|
||||
R_RETURN(FlushProcessDataCache(process_handle, address, size));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -61,7 +61,7 @@ namespace ams::kern::svc {
|
|||
/* Add the code memory to the handle table. */
|
||||
R_TRY(GetCurrentProcess().GetHandleTable().Add(out, code_mem));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result ControlCodeMemory(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
|
||||
|
@ -132,10 +132,10 @@ namespace ams::kern::svc {
|
|||
}
|
||||
break;
|
||||
default:
|
||||
return svc::ResultInvalidEnumValue();
|
||||
R_THROW(svc::ResultInvalidEnumValue());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -143,21 +143,21 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result CreateCodeMemory64(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size) {
|
||||
return CreateCodeMemory(out_handle, address, size);
|
||||
R_RETURN(CreateCodeMemory(out_handle, address, size));
|
||||
}
|
||||
|
||||
Result ControlCodeMemory64(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
|
||||
return ControlCodeMemory(code_memory_handle, operation, address, size, perm);
|
||||
R_RETURN(ControlCodeMemory(code_memory_handle, operation, address, size, perm));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result CreateCodeMemory64From32(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size) {
|
||||
return CreateCodeMemory(out_handle, address, size);
|
||||
R_RETURN(CreateCodeMemory(out_handle, address, size));
|
||||
}
|
||||
|
||||
Result ControlCodeMemory64From32(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
|
||||
return ControlCodeMemory(code_memory_handle, operation, address, size, perm);
|
||||
R_RETURN(ControlCodeMemory(code_memory_handle, operation, address, size, perm));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -47,7 +47,7 @@ namespace ams::kern::svc {
|
|||
}
|
||||
|
||||
/* Wait on the condition variable. */
|
||||
return GetCurrentProcess().WaitConditionVariable(address, util::AlignDown(cv_key, sizeof(u32)), tag, timeout);
|
||||
R_RETURN(GetCurrentProcess().WaitConditionVariable(address, util::AlignDown(cv_key, sizeof(u32)), tag, timeout));
|
||||
}
|
||||
|
||||
void SignalProcessWideKey(uintptr_t cv_key, int32_t count) {
|
||||
|
@ -60,7 +60,7 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result WaitProcessWideKeyAtomic64(ams::svc::Address address, ams::svc::Address cv_key, uint32_t tag, int64_t timeout_ns) {
|
||||
return WaitProcessWideKeyAtomic(address, cv_key, tag, timeout_ns);
|
||||
R_RETURN(WaitProcessWideKeyAtomic(address, cv_key, tag, timeout_ns));
|
||||
}
|
||||
|
||||
void SignalProcessWideKey64(ams::svc::Address cv_key, int32_t count) {
|
||||
|
@ -70,7 +70,7 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result WaitProcessWideKeyAtomic64From32(ams::svc::Address address, ams::svc::Address cv_key, uint32_t tag, int64_t timeout_ns) {
|
||||
return WaitProcessWideKeyAtomic(address, cv_key, tag, timeout_ns);
|
||||
R_RETURN(WaitProcessWideKeyAtomic(address, cv_key, tag, timeout_ns));
|
||||
}
|
||||
|
||||
void SignalProcessWideKey64From32(ams::svc::Address cv_key, int32_t count) {
|
||||
|
|
|
@ -59,7 +59,7 @@ namespace ams::kern::svc {
|
|||
/* Add the new debug object to the handle table. */
|
||||
R_TRY(handle_table.Add(out_handle, debug));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result BreakDebugProcess(ams::svc::Handle debug_handle) {
|
||||
|
@ -73,7 +73,7 @@ namespace ams::kern::svc {
|
|||
/* Break the process. */
|
||||
R_TRY(debug->BreakProcess());
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result TerminateDebugProcess(ams::svc::Handle debug_handle) {
|
||||
|
@ -87,7 +87,7 @@ namespace ams::kern::svc {
|
|||
/* Terminate the process. */
|
||||
R_TRY(debug->TerminateProcess());
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
template<typename EventInfoType>
|
||||
|
@ -106,7 +106,7 @@ namespace ams::kern::svc {
|
|||
/* Copy the info out to the user. */
|
||||
R_TRY(out_info.CopyFrom(std::addressof(info)));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result ContinueDebugEventImpl(ams::svc::Handle debug_handle, uint32_t flags, const uint64_t *thread_ids, int32_t num_thread_ids) {
|
||||
|
@ -117,7 +117,7 @@ namespace ams::kern::svc {
|
|||
/* Continue the event. */
|
||||
R_TRY(debug->ContinueDebug(flags, thread_ids, num_thread_ids));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result ContinueDebugEvent(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer<const uint64_t *> user_thread_ids, int32_t num_thread_ids) {
|
||||
|
@ -143,7 +143,7 @@ namespace ams::kern::svc {
|
|||
/* Continue the event. */
|
||||
R_TRY(ContinueDebugEventImpl(debug_handle, flags, thread_ids, num_thread_ids));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result LegacyContinueDebugEvent(ams::svc::Handle debug_handle, uint32_t flags, uint64_t thread_id) {
|
||||
|
@ -160,7 +160,7 @@ namespace ams::kern::svc {
|
|||
/* Continue the event. */
|
||||
R_TRY(ContinueDebugEventImpl(debug_handle, flags, std::addressof(thread_id), 1));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetDebugThreadContext(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) {
|
||||
|
@ -178,7 +178,7 @@ namespace ams::kern::svc {
|
|||
/* Copy the context to userspace. */
|
||||
R_TRY(out_context.CopyFrom(std::addressof(context)));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result SetDebugThreadContext(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer<const ams::svc::ThreadContext *> user_context, uint32_t context_flags) {
|
||||
|
@ -216,7 +216,7 @@ namespace ams::kern::svc {
|
|||
/* Set the thread context. */
|
||||
R_TRY(debug->SetThreadContext(context, thread_id, context_flags));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result QueryDebugProcessMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, uintptr_t address) {
|
||||
|
@ -227,7 +227,7 @@ namespace ams::kern::svc {
|
|||
/* Query the mapping's info. */
|
||||
R_TRY(debug->QueryMemoryInfo(out_memory_info, out_page_info, address));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
|
@ -257,7 +257,7 @@ namespace ams::kern::svc {
|
|||
R_TRY(out_memory_info.CopyFrom(std::addressof(converted_info)));
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result ReadDebugProcessMemory(uintptr_t buffer, ams::svc::Handle debug_handle, uintptr_t address, size_t size) {
|
||||
|
@ -273,7 +273,7 @@ namespace ams::kern::svc {
|
|||
/* Read the memory. */
|
||||
R_TRY(debug->ReadMemory(buffer, address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result WriteDebugProcessMemory(ams::svc::Handle debug_handle, uintptr_t buffer, uintptr_t address, size_t size) {
|
||||
|
@ -292,7 +292,7 @@ namespace ams::kern::svc {
|
|||
/* Write the memory. */
|
||||
R_TRY(debug->WriteMemory(buffer, address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {
|
||||
|
@ -302,7 +302,7 @@ namespace ams::kern::svc {
|
|||
/* Set the breakpoint. */
|
||||
R_TRY(KDebug::SetHardwareBreakPoint(name, flags, value));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetDebugThreadParam(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) {
|
||||
|
@ -382,7 +382,7 @@ namespace ams::kern::svc {
|
|||
}
|
||||
break;
|
||||
default:
|
||||
return svc::ResultInvalidState();
|
||||
R_THROW(svc::ResultInvalidState());
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@ -413,10 +413,10 @@ namespace ams::kern::svc {
|
|||
}
|
||||
break;
|
||||
default:
|
||||
return ams::svc::ResultInvalidEnumValue();
|
||||
R_THROW(ams::svc::ResultInvalidEnumValue());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -424,109 +424,109 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result DebugActiveProcess64(ams::svc::Handle *out_handle, uint64_t process_id) {
|
||||
return DebugActiveProcess(out_handle, process_id);
|
||||
R_RETURN(DebugActiveProcess(out_handle, process_id));
|
||||
}
|
||||
|
||||
Result BreakDebugProcess64(ams::svc::Handle debug_handle) {
|
||||
return BreakDebugProcess(debug_handle);
|
||||
R_RETURN(BreakDebugProcess(debug_handle));
|
||||
}
|
||||
|
||||
Result TerminateDebugProcess64(ams::svc::Handle debug_handle) {
|
||||
return TerminateDebugProcess(debug_handle);
|
||||
R_RETURN(TerminateDebugProcess(debug_handle));
|
||||
}
|
||||
|
||||
Result GetDebugEvent64(KUserPointer<ams::svc::lp64::DebugEventInfo *> out_info, ams::svc::Handle debug_handle) {
|
||||
return GetDebugEvent(out_info, debug_handle);
|
||||
R_RETURN(GetDebugEvent(out_info, debug_handle));
|
||||
}
|
||||
|
||||
Result ContinueDebugEvent64(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer<const uint64_t *> thread_ids, int32_t num_thread_ids) {
|
||||
return ContinueDebugEvent(debug_handle, flags, thread_ids, num_thread_ids);
|
||||
R_RETURN(ContinueDebugEvent(debug_handle, flags, thread_ids, num_thread_ids));
|
||||
}
|
||||
|
||||
Result LegacyContinueDebugEvent64(ams::svc::Handle debug_handle, uint32_t flags, uint64_t thread_id) {
|
||||
return LegacyContinueDebugEvent(debug_handle, flags, thread_id);
|
||||
R_RETURN(LegacyContinueDebugEvent(debug_handle, flags, thread_id));
|
||||
}
|
||||
|
||||
Result GetDebugThreadContext64(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) {
|
||||
return GetDebugThreadContext(out_context, debug_handle, thread_id, context_flags);
|
||||
R_RETURN(GetDebugThreadContext(out_context, debug_handle, thread_id, context_flags));
|
||||
}
|
||||
|
||||
Result SetDebugThreadContext64(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer<const ams::svc::ThreadContext *> context, uint32_t context_flags) {
|
||||
return SetDebugThreadContext(debug_handle, thread_id, context, context_flags);
|
||||
R_RETURN(SetDebugThreadContext(debug_handle, thread_id, context, context_flags));
|
||||
}
|
||||
|
||||
Result QueryDebugProcessMemory64(KUserPointer<ams::svc::lp64::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, ams::svc::Address address) {
|
||||
return QueryDebugProcessMemory(out_memory_info, out_page_info, debug_handle, address);
|
||||
R_RETURN(QueryDebugProcessMemory(out_memory_info, out_page_info, debug_handle, address));
|
||||
}
|
||||
|
||||
Result ReadDebugProcessMemory64(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) {
|
||||
return ReadDebugProcessMemory(buffer, debug_handle, address, size);
|
||||
R_RETURN(ReadDebugProcessMemory(buffer, debug_handle, address, size));
|
||||
}
|
||||
|
||||
Result WriteDebugProcessMemory64(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) {
|
||||
return WriteDebugProcessMemory(debug_handle, buffer, address, size);
|
||||
R_RETURN(WriteDebugProcessMemory(debug_handle, buffer, address, size));
|
||||
}
|
||||
|
||||
Result SetHardwareBreakPoint64(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {
|
||||
return SetHardwareBreakPoint(name, flags, value);
|
||||
R_RETURN(SetHardwareBreakPoint(name, flags, value));
|
||||
}
|
||||
|
||||
Result GetDebugThreadParam64(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) {
|
||||
return GetDebugThreadParam(out_64, out_32, debug_handle, thread_id, param);
|
||||
R_RETURN(GetDebugThreadParam(out_64, out_32, debug_handle, thread_id, param));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result DebugActiveProcess64From32(ams::svc::Handle *out_handle, uint64_t process_id) {
|
||||
return DebugActiveProcess(out_handle, process_id);
|
||||
R_RETURN(DebugActiveProcess(out_handle, process_id));
|
||||
}
|
||||
|
||||
Result BreakDebugProcess64From32(ams::svc::Handle debug_handle) {
|
||||
return BreakDebugProcess(debug_handle);
|
||||
R_RETURN(BreakDebugProcess(debug_handle));
|
||||
}
|
||||
|
||||
Result TerminateDebugProcess64From32(ams::svc::Handle debug_handle) {
|
||||
return TerminateDebugProcess(debug_handle);
|
||||
R_RETURN(TerminateDebugProcess(debug_handle));
|
||||
}
|
||||
|
||||
Result GetDebugEvent64From32(KUserPointer<ams::svc::ilp32::DebugEventInfo *> out_info, ams::svc::Handle debug_handle) {
|
||||
return GetDebugEvent(out_info, debug_handle);
|
||||
R_RETURN(GetDebugEvent(out_info, debug_handle));
|
||||
}
|
||||
|
||||
Result ContinueDebugEvent64From32(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer<const uint64_t *> thread_ids, int32_t num_thread_ids) {
|
||||
return ContinueDebugEvent(debug_handle, flags, thread_ids, num_thread_ids);
|
||||
R_RETURN(ContinueDebugEvent(debug_handle, flags, thread_ids, num_thread_ids));
|
||||
}
|
||||
|
||||
Result LegacyContinueDebugEvent64From32(ams::svc::Handle debug_handle, uint32_t flags, uint64_t thread_id) {
|
||||
return LegacyContinueDebugEvent(debug_handle, flags, thread_id);
|
||||
R_RETURN(LegacyContinueDebugEvent(debug_handle, flags, thread_id));
|
||||
}
|
||||
|
||||
Result GetDebugThreadContext64From32(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) {
|
||||
return GetDebugThreadContext(out_context, debug_handle, thread_id, context_flags);
|
||||
R_RETURN(GetDebugThreadContext(out_context, debug_handle, thread_id, context_flags));
|
||||
}
|
||||
|
||||
Result SetDebugThreadContext64From32(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer<const ams::svc::ThreadContext *> context, uint32_t context_flags) {
|
||||
return SetDebugThreadContext(debug_handle, thread_id, context, context_flags);
|
||||
R_RETURN(SetDebugThreadContext(debug_handle, thread_id, context, context_flags));
|
||||
}
|
||||
|
||||
Result QueryDebugProcessMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, ams::svc::Address address) {
|
||||
return QueryDebugProcessMemory(out_memory_info, out_page_info, debug_handle, address);
|
||||
R_RETURN(QueryDebugProcessMemory(out_memory_info, out_page_info, debug_handle, address));
|
||||
}
|
||||
|
||||
Result ReadDebugProcessMemory64From32(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) {
|
||||
return ReadDebugProcessMemory(buffer, debug_handle, address, size);
|
||||
R_RETURN(ReadDebugProcessMemory(buffer, debug_handle, address, size));
|
||||
}
|
||||
|
||||
Result WriteDebugProcessMemory64From32(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) {
|
||||
return WriteDebugProcessMemory(debug_handle, buffer, address, size);
|
||||
R_RETURN(WriteDebugProcessMemory(debug_handle, buffer, address, size));
|
||||
}
|
||||
|
||||
Result SetHardwareBreakPoint64From32(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {
|
||||
return SetHardwareBreakPoint(name, flags, value);
|
||||
R_RETURN(SetHardwareBreakPoint(name, flags, value));
|
||||
}
|
||||
|
||||
Result GetDebugThreadParam64From32(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) {
|
||||
return GetDebugThreadParam(out_64, out_32, debug_handle, thread_id, param);
|
||||
R_RETURN(GetDebugThreadParam(out_64, out_32, debug_handle, thread_id, param));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(debug_str.GetUnsafePointer()), len), svc::ResultInvalidCurrentMemory());
|
||||
|
||||
/* Output the string. */
|
||||
return KDebugLog::PrintUserString(debug_str, len);
|
||||
R_RETURN(KDebugLog::PrintUserString(debug_str, len));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -37,13 +37,13 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result OutputDebugString64(KUserPointer<const char *> debug_str, ams::svc::Size len) {
|
||||
return OutputDebugString(debug_str, len);
|
||||
R_RETURN(OutputDebugString(debug_str, len));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result OutputDebugString64From32(KUserPointer<const char *> debug_str, ams::svc::Size len) {
|
||||
return OutputDebugString(debug_str, len);
|
||||
R_RETURN(OutputDebugString(debug_str, len));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -48,7 +48,7 @@ namespace ams::kern::svc {
|
|||
/* Add to the handle table. */
|
||||
R_TRY(GetCurrentProcess().GetHandleTable().Add(out, das));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result AttachDeviceAddressSpace(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
|
||||
|
@ -57,7 +57,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(das.IsNotNull(), svc::ResultInvalidHandle());
|
||||
|
||||
/* Attach. */
|
||||
return das->Attach(device_name);
|
||||
R_RETURN(das->Attach(device_name));
|
||||
}
|
||||
|
||||
Result DetachDeviceAddressSpace(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
|
||||
|
@ -66,7 +66,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(das.IsNotNull(), svc::ResultInvalidHandle());
|
||||
|
||||
/* Detach. */
|
||||
return das->Detach(device_name);
|
||||
R_RETURN(das->Detach(device_name));
|
||||
}
|
||||
|
||||
constexpr bool IsValidDeviceMemoryPermission(ams::svc::MemoryPermission device_perm) {
|
||||
|
@ -104,7 +104,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(page_table.Contains(process_address, size), svc::ResultInvalidCurrentMemory());
|
||||
|
||||
/* Map. */
|
||||
return das->MapByForce(std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm);
|
||||
R_RETURN(das->MapByForce(std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm));
|
||||
}
|
||||
|
||||
Result MapDeviceAddressSpaceAligned(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
|
||||
|
@ -132,7 +132,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(page_table.Contains(process_address, size), svc::ResultInvalidCurrentMemory());
|
||||
|
||||
/* Map. */
|
||||
return das->MapAligned(std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm);
|
||||
R_RETURN(das->MapAligned(std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm));
|
||||
}
|
||||
|
||||
Result UnmapDeviceAddressSpace(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address) {
|
||||
|
@ -157,7 +157,7 @@ namespace ams::kern::svc {
|
|||
auto &page_table = process->GetPageTable();
|
||||
R_UNLESS(page_table.Contains(process_address, size), svc::ResultInvalidCurrentMemory());
|
||||
|
||||
return das->Unmap(std::addressof(page_table), KProcessAddress(process_address), size, device_address);
|
||||
R_RETURN(das->Unmap(std::addressof(page_table), KProcessAddress(process_address), size, device_address));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -165,53 +165,53 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result CreateDeviceAddressSpace64(ams::svc::Handle *out_handle, uint64_t das_address, uint64_t das_size) {
|
||||
return CreateDeviceAddressSpace(out_handle, das_address, das_size);
|
||||
R_RETURN(CreateDeviceAddressSpace(out_handle, das_address, das_size));
|
||||
}
|
||||
|
||||
Result AttachDeviceAddressSpace64(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
|
||||
return AttachDeviceAddressSpace(device_name, das_handle);
|
||||
R_RETURN(AttachDeviceAddressSpace(device_name, das_handle));
|
||||
}
|
||||
|
||||
Result DetachDeviceAddressSpace64(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
|
||||
return DetachDeviceAddressSpace(device_name, das_handle);
|
||||
R_RETURN(DetachDeviceAddressSpace(device_name, das_handle));
|
||||
}
|
||||
|
||||
Result MapDeviceAddressSpaceByForce64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
|
||||
return MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, device_perm);
|
||||
R_RETURN(MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, device_perm));
|
||||
}
|
||||
|
||||
Result MapDeviceAddressSpaceAligned64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
|
||||
return MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm);
|
||||
R_RETURN(MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm));
|
||||
}
|
||||
|
||||
Result UnmapDeviceAddressSpace64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) {
|
||||
return UnmapDeviceAddressSpace(das_handle, process_handle, process_address, size, device_address);
|
||||
R_RETURN(UnmapDeviceAddressSpace(das_handle, process_handle, process_address, size, device_address));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result CreateDeviceAddressSpace64From32(ams::svc::Handle *out_handle, uint64_t das_address, uint64_t das_size) {
|
||||
return CreateDeviceAddressSpace(out_handle, das_address, das_size);
|
||||
R_RETURN(CreateDeviceAddressSpace(out_handle, das_address, das_size));
|
||||
}
|
||||
|
||||
Result AttachDeviceAddressSpace64From32(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
|
||||
return AttachDeviceAddressSpace(device_name, das_handle);
|
||||
R_RETURN(AttachDeviceAddressSpace(device_name, das_handle));
|
||||
}
|
||||
|
||||
Result DetachDeviceAddressSpace64From32(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) {
|
||||
return DetachDeviceAddressSpace(device_name, das_handle);
|
||||
R_RETURN(DetachDeviceAddressSpace(device_name, das_handle));
|
||||
}
|
||||
|
||||
Result MapDeviceAddressSpaceByForce64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
|
||||
return MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, device_perm);
|
||||
R_RETURN(MapDeviceAddressSpaceByForce(das_handle, process_handle, process_address, size, device_address, device_perm));
|
||||
}
|
||||
|
||||
Result MapDeviceAddressSpaceAligned64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) {
|
||||
return MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm);
|
||||
R_RETURN(MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm));
|
||||
}
|
||||
|
||||
Result UnmapDeviceAddressSpace64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) {
|
||||
return UnmapDeviceAddressSpace(das_handle, process_handle, process_address, size, device_address);
|
||||
R_RETURN(UnmapDeviceAddressSpace(das_handle, process_handle, process_address, size, device_address));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ namespace ams::kern::svc {
|
|||
KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
|
||||
R_UNLESS(event.IsNotNull(), svc::ResultInvalidHandle());
|
||||
|
||||
return event->Signal();
|
||||
R_RETURN(event->Signal());
|
||||
}
|
||||
|
||||
Result ClearEvent(ams::svc::Handle event_handle) {
|
||||
|
@@ -40,7 +40,7 @@ namespace ams::kern::svc {
{
KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
if (event.IsNotNull()) {
return event->Clear();
R_RETURN(event->Clear());
}
}

@@ -49,14 +49,14 @@ namespace ams::kern::svc {
KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
if (readable_event.IsNotNull()) {
if (auto * const interrupt_event = readable_event->DynamicCast<KInterruptEvent *>(); interrupt_event != nullptr) {
return interrupt_event->Clear();
R_RETURN(interrupt_event->Clear());
} else {
return readable_event->Clear();
R_RETURN(readable_event->Clear());
}
}
}

return svc::ResultInvalidHandle();
R_THROW(svc::ResultInvalidHandle());
}
|
||||
|
||||
Result CreateEvent(ams::svc::Handle *out_write, ams::svc::Handle *out_read) {
|
||||
|
@@ -107,14 +107,10 @@ namespace ams::kern::svc {
R_TRY(handle_table.Add(out_write, event));

/* Ensure that we maintain a clean handle state on exit. */
auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_write); };
ON_RESULT_FAILURE { handle_table.Remove(*out_write); };

/* Add the readable event to the handle table. */
R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));

/* We succeeded! */
handle_guard.Cancel();
return ResultSuccess();
R_RETURN(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
}

}
|
||||
|
@ -122,29 +118,29 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result SignalEvent64(ams::svc::Handle event_handle) {
|
||||
return SignalEvent(event_handle);
|
||||
R_RETURN(SignalEvent(event_handle));
|
||||
}
|
||||
|
||||
Result ClearEvent64(ams::svc::Handle event_handle) {
|
||||
return ClearEvent(event_handle);
|
||||
R_RETURN(ClearEvent(event_handle));
|
||||
}
|
||||
|
||||
Result CreateEvent64(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) {
|
||||
return CreateEvent(out_write_handle, out_read_handle);
|
||||
R_RETURN(CreateEvent(out_write_handle, out_read_handle));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result SignalEvent64From32(ams::svc::Handle event_handle) {
|
||||
return SignalEvent(event_handle);
|
||||
R_RETURN(SignalEvent(event_handle));
|
||||
}
|
||||
|
||||
Result ClearEvent64From32(ams::svc::Handle event_handle) {
|
||||
return ClearEvent(event_handle);
|
||||
R_RETURN(ClearEvent(event_handle));
|
||||
}
|
||||
|
||||
Result CreateEvent64From32(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) {
|
||||
return CreateEvent(out_write_handle, out_read_handle);
|
||||
R_RETURN(CreateEvent(out_write_handle, out_read_handle));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -32,10 +32,10 @@ namespace ams::kern::svc {
*out = GetInitialProcessIdMax();
break;
default:
return svc::ResultInvalidCombination();
R_THROW(svc::ResultInvalidCombination());
}

return ResultSuccess();
R_SUCCEED();
}
|
||||
|
||||
Result GetInfoImpl(u64 *out, ams::svc::InfoType info_type, KProcess *process) {
|
||||
|
@ -109,7 +109,7 @@ namespace ams::kern::svc {
|
|||
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetInfo(u64 *out, ams::svc::InfoType info_type, ams::svc::Handle handle, u64 info_subtype) {
|
||||
|
@@ -144,7 +144,7 @@ namespace ams::kern::svc {
#if defined(MESOSPHERE_ENABLE_GET_INFO_OF_DEBUG_PROCESS)
/* If the process is valid, use it. */
if (process.IsNotNull()) {
return GetInfoImpl(out, info_type, process.GetPointerUnsafe());
R_RETURN(GetInfoImpl(out, info_type, process.GetPointerUnsafe()));
}

/* Otherwise, as a mesosphere extension check if we were passed a usable KDebug. */

@@ -160,13 +160,13 @@ namespace ams::kern::svc {
ON_SCOPE_EXIT { debug->CloseProcess(); };

/* Return the info. */
return GetInfoImpl(out, info_type, debug->GetProcessUnsafe());
R_RETURN(GetInfoImpl(out, info_type, debug->GetProcessUnsafe()));
#else
/* Verify that the process is valid. */
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());

/* Return the relevant info. */
return GetInfoImpl(out, info_type, process.GetPointerUnsafe());
R_RETURN(GetInfoImpl(out, info_type, process.GetPointerUnsafe()));
#endif
}
break;
|
||||
|
@ -315,7 +315,7 @@ namespace ams::kern::svc {
|
|||
}
|
||||
break;
|
||||
default:
|
||||
return svc::ResultInvalidCombination();
|
||||
R_THROW(svc::ResultInvalidCombination());
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@@ -343,10 +343,10 @@ namespace ams::kern::svc {
/* For debug, log the invalid info call. */
MESOSPHERE_LOG("GetInfo(%p, %u, %08x, %lu) was called\n", out, static_cast<u32>(info_type), static_cast<u32>(handle), info_subtype);
}
return svc::ResultInvalidEnumValue();
R_THROW(svc::ResultInvalidEnumValue());
}

return ResultSuccess();
R_SUCCEED();
}
|
||||
|
||||
constexpr bool IsValidMemoryPool(u64 pool) {
|
||||
|
@ -398,10 +398,10 @@ namespace ams::kern::svc {
|
|||
}
|
||||
break;
|
||||
default:
|
||||
return svc::ResultInvalidEnumValue();
|
||||
R_THROW(svc::ResultInvalidEnumValue());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -409,21 +409,21 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result GetInfo64(uint64_t *out, ams::svc::InfoType info_type, ams::svc::Handle handle, uint64_t info_subtype) {
|
||||
return GetInfo(out, info_type, handle, info_subtype);
|
||||
R_RETURN(GetInfo(out, info_type, handle, info_subtype));
|
||||
}
|
||||
|
||||
Result GetSystemInfo64(uint64_t *out, ams::svc::SystemInfoType info_type, ams::svc::Handle handle, uint64_t info_subtype) {
|
||||
return GetSystemInfo(out, info_type, handle, info_subtype);
|
||||
R_RETURN(GetSystemInfo(out, info_type, handle, info_subtype));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result GetInfo64From32(uint64_t *out, ams::svc::InfoType info_type, ams::svc::Handle handle, uint64_t info_subtype) {
|
||||
return GetInfo(out, info_type, handle, info_subtype);
|
||||
R_RETURN(GetInfo(out, info_type, handle, info_subtype));
|
||||
}
|
||||
|
||||
Result GetSystemInfo64From32(uint64_t *out, ams::svc::SystemInfoType info_type, ams::svc::Handle handle, uint64_t info_subtype) {
|
||||
return GetSystemInfo(out, info_type, handle, info_subtype);
|
||||
R_RETURN(GetSystemInfo(out, info_type, handle, info_subtype));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -56,7 +56,7 @@ namespace ams::kern::svc {
|
|||
/* Add the event to the handle table. */
|
||||
R_TRY(handle_table.Add(out, event));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -64,13 +64,13 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result CreateInterruptEvent64(ams::svc::Handle *out_read_handle, int32_t interrupt_id, ams::svc::InterruptType interrupt_type) {
|
||||
return CreateInterruptEvent(out_read_handle, interrupt_id, interrupt_type);
|
||||
R_RETURN(CreateInterruptEvent(out_read_handle, interrupt_id, interrupt_type));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result CreateInterruptEvent64From32(ams::svc::Handle *out_read_handle, int32_t interrupt_id, ams::svc::InterruptType interrupt_type) {
|
||||
return CreateInterruptEvent(out_read_handle, interrupt_id, interrupt_type);
|
||||
R_RETURN(CreateInterruptEvent(out_read_handle, interrupt_id, interrupt_type));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -69,10 +69,10 @@ namespace ams::kern::svc {
|
|||
/* Add the io pool to the handle table. */
|
||||
R_TRY(GetCurrentProcess().GetHandleTable().Add(out, io_pool));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
} else {
|
||||
MESOSPHERE_UNUSED(out, pool_type);
|
||||
return svc::ResultNotImplemented();
|
||||
R_THROW(svc::ResultNotImplemented());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -111,10 +111,10 @@ namespace ams::kern::svc {
|
|||
/* Add the io region to the handle table. */
|
||||
R_TRY(handle_table.Add(out, io_region));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
} else {
|
||||
MESOSPHERE_UNUSED(out, io_pool_handle, phys_addr, size, mapping, perm);
|
||||
return svc::ResultNotImplemented();
|
||||
R_THROW(svc::ResultNotImplemented());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -140,10 +140,10 @@ namespace ams::kern::svc {
|
|||
R_TRY(io_region->Map(address, size, map_perm));
|
||||
|
||||
/* We succeeded. */
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
} else {
|
||||
MESOSPHERE_UNUSED(io_region_handle, address, size, map_perm);
|
||||
return svc::ResultNotImplemented();
|
||||
R_THROW(svc::ResultNotImplemented());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -166,10 +166,10 @@ namespace ams::kern::svc {
|
|||
R_TRY(io_region->Unmap(address, size));
|
||||
|
||||
/* We succeeded. */
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
} else {
|
||||
MESOSPHERE_UNUSED(io_region_handle, address, size);
|
||||
return svc::ResultNotImplemented();
|
||||
R_THROW(svc::ResultNotImplemented());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -178,37 +178,37 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result CreateIoPool64(ams::svc::Handle *out_handle, ams::svc::IoPoolType pool_type) {
|
||||
return CreateIoPool(out_handle, pool_type);
|
||||
R_RETURN(CreateIoPool(out_handle, pool_type));
|
||||
}
|
||||
|
||||
Result CreateIoRegion64(ams::svc::Handle *out_handle, ams::svc::Handle io_pool, ams::svc::PhysicalAddress physical_address, ams::svc::Size size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission perm) {
|
||||
return CreateIoRegion(out_handle, io_pool, physical_address, size, mapping, perm);
|
||||
R_RETURN(CreateIoRegion(out_handle, io_pool, physical_address, size, mapping, perm));
|
||||
}
|
||||
|
||||
Result MapIoRegion64(ams::svc::Handle io_region, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) {
|
||||
return MapIoRegion(io_region, address, size, perm);
|
||||
R_RETURN(MapIoRegion(io_region, address, size, perm));
|
||||
}
|
||||
|
||||
Result UnmapIoRegion64(ams::svc::Handle io_region, ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapIoRegion(io_region, address, size);
|
||||
R_RETURN(UnmapIoRegion(io_region, address, size));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result CreateIoPool64From32(ams::svc::Handle *out_handle, ams::svc::IoPoolType pool_type) {
|
||||
return CreateIoPool(out_handle, pool_type);
|
||||
R_RETURN(CreateIoPool(out_handle, pool_type));
|
||||
}
|
||||
|
||||
Result CreateIoRegion64From32(ams::svc::Handle *out_handle, ams::svc::Handle io_pool, ams::svc::PhysicalAddress physical_address, ams::svc::Size size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission perm) {
|
||||
return CreateIoRegion(out_handle, io_pool, physical_address, size, mapping, perm);
|
||||
R_RETURN(CreateIoRegion(out_handle, io_pool, physical_address, size, mapping, perm));
|
||||
}
|
||||
|
||||
Result MapIoRegion64From32(ams::svc::Handle io_region, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) {
|
||||
return MapIoRegion(io_region, address, size, perm);
|
||||
R_RETURN(MapIoRegion(io_region, address, size, perm));
|
||||
}
|
||||
|
||||
Result UnmapIoRegion64From32(ams::svc::Handle io_region, ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapIoRegion(io_region, address, size);
|
||||
R_RETURN(UnmapIoRegion(io_region, address, size));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ namespace ams::kern::svc {
|
|||
MESOSPHERE_ASSERT(parent.IsNotNull());
|
||||
|
||||
/* Send the request. */
|
||||
return session->SendSyncRequest(message, buffer_size);
|
||||
R_RETURN(session->SendSyncRequest(message, buffer_size));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result ReplyAndReceiveImpl(int32_t *out_index, uintptr_t message, size_t buffer_size, KPhysicalAddress message_paddr, KSynchronizationObject **objs, int32_t num_objects, ams::svc::Handle reply_target, int64_t timeout_ns) {
|
||||
|
@@ -44,13 +44,10 @@ namespace ams::kern::svc {
R_UNLESS(session.IsNotNull(), svc::ResultInvalidHandle());

/* If we fail to reply, we want to set the output index to -1. */
auto reply_idx_guard = SCOPE_GUARD { *out_index = -1; };
ON_RESULT_FAILURE { *out_index = -1; };

/* Send the reply. */
R_TRY(session->SendReply(message, buffer_size, message_paddr));

/* Cancel our guard. */
reply_idx_guard.Cancel();
}

/* Receive a message. */

@@ -81,7 +78,7 @@ namespace ams::kern::svc {
s32 index;
Result result = KSynchronizationObject::Wait(std::addressof(index), objs, num_objects, timeout);
if (svc::ResultTimedOut::Includes(result)) {
return result;
R_THROW(result);
}

/* Receive the request. */

@@ -96,7 +93,7 @@ namespace ams::kern::svc {
}

*out_index = index;
return result;
R_RETURN(result);
}
}
}
|
||||
|
@ -129,11 +126,11 @@ namespace ams::kern::svc {
|
|||
}
|
||||
};
|
||||
|
||||
return ReplyAndReceiveImpl(out_index, message, buffer_size, message_paddr, objs, num_handles, reply_target, timeout_ns);
|
||||
R_RETURN(ReplyAndReceiveImpl(out_index, message, buffer_size, message_paddr, objs, num_handles, reply_target, timeout_ns));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result SendSyncRequest(ams::svc::Handle session_handle) {
|
||||
return SendSyncRequestImpl(0, 0, session_handle);
|
||||
R_RETURN(SendSyncRequestImpl(0, 0, session_handle));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result SendSyncRequestWithUserBuffer(uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) {
|
||||
|
@@ -149,16 +146,17 @@ namespace ams::kern::svc {
/* Lock the message buffer. */
R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size));

/* Ensure that even if we fail, we unlock the message buffer when done. */
auto unlock_guard = SCOPE_GUARD { page_table.UnlockForIpcUserBuffer(message, buffer_size); };
{
/* If we fail to send the message, unlock the message buffer. */
ON_RESULT_FAILURE { page_table.UnlockForIpcUserBuffer(message, buffer_size); };

/* Send the request. */
MESOSPHERE_ASSERT(message != 0);
R_TRY(SendSyncRequestImpl(message, buffer_size, session_handle));
/* Send the request. */
MESOSPHERE_ASSERT(message != 0);
R_TRY(SendSyncRequestImpl(message, buffer_size, session_handle));
}

/* We sent the request successfully, so cancel our guard and check the unlock result. */
unlock_guard.Cancel();
return page_table.UnlockForIpcUserBuffer(message, buffer_size);
/* We successfully processed, so try to unlock the message buffer. */
R_RETURN(page_table.UnlockForIpcUserBuffer(message, buffer_size));
}
|
||||
|
||||
ALWAYS_INLINE Result SendAsyncRequestWithUserBufferImpl(ams::svc::Handle *out_event_handle, uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) {
|
||||
|
@@ -201,14 +199,10 @@ namespace ams::kern::svc {
R_TRY(handle_table.Add(out_event_handle, std::addressof(event->GetReadableEvent())));

/* Ensure that if we fail to send the request, we close the readable handle. */
auto read_guard = SCOPE_GUARD { handle_table.Remove(*out_event_handle); };
ON_RESULT_FAILURE { handle_table.Remove(*out_event_handle); };

/* Send the async request. */
R_TRY(session->SendAsyncRequest(event, message, buffer_size));

/* We succeeded. */
read_guard.Cancel();
return ResultSuccess();
R_RETURN(session->SendAsyncRequest(event, message, buffer_size));
}
|
||||
|
||||
ALWAYS_INLINE Result SendAsyncRequestWithUserBuffer(ams::svc::Handle *out_event_handle, uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) {
|
||||
|
@@ -224,23 +218,18 @@ namespace ams::kern::svc {
/* Lock the message buffer. */
R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size));

/* Ensure that if we fail, we unlock the message buffer. */
auto unlock_guard = SCOPE_GUARD { page_table.UnlockForIpcUserBuffer(message, buffer_size); };
/* Ensure that if we fail and aren't terminating that we unlock the user buffer. */
ON_RESULT_FAILURE_BESIDES(svc::ResultTerminationRequested) {
page_table.UnlockForIpcUserBuffer(message, buffer_size);
};

/* Send the request. */
MESOSPHERE_ASSERT(message != 0);
const Result result = SendAsyncRequestWithUserBufferImpl(out_event_handle, message, buffer_size, session_handle);

/* If the request succeeds (or the thread is terminating), don't unlock the user buffer. */
if (R_SUCCEEDED(result) || svc::ResultTerminationRequested::Includes(result)) {
unlock_guard.Cancel();
}

return result;
R_RETURN(SendAsyncRequestWithUserBufferImpl(out_event_handle, message, buffer_size, session_handle));
}
|
||||
|
||||
ALWAYS_INLINE Result ReplyAndReceive(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
|
||||
return ReplyAndReceiveImpl(out_index, 0, 0, Null<KPhysicalAddress>, handles, num_handles, reply_target, timeout_ns);
|
||||
R_RETURN(ReplyAndReceiveImpl(out_index, 0, 0, Null<KPhysicalAddress>, handles, num_handles, reply_target, timeout_ns));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result ReplyAndReceiveWithUserBuffer(int32_t *out_index, uintptr_t message, size_t buffer_size, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
|
||||
|
@@ -257,16 +246,17 @@ namespace ams::kern::svc {
KPhysicalAddress message_paddr;
R_TRY(page_table.LockForIpcUserBuffer(std::addressof(message_paddr), message, buffer_size));

/* Ensure that even if we fail, we unlock the message buffer when done. */
auto unlock_guard = SCOPE_GUARD { page_table.UnlockForIpcUserBuffer(message, buffer_size); };
{
/* If we fail to send the message, unlock the message buffer. */
ON_RESULT_FAILURE { page_table.UnlockForIpcUserBuffer(message, buffer_size); };

/* Send the request. */
MESOSPHERE_ASSERT(message != 0);
R_TRY(ReplyAndReceiveImpl(out_index, message, buffer_size, message_paddr, handles, num_handles, reply_target, timeout_ns));
/* Reply/Receive the request. */
MESOSPHERE_ASSERT(message != 0);
R_TRY(ReplyAndReceiveImpl(out_index, message, buffer_size, message_paddr, handles, num_handles, reply_target, timeout_ns));
}

/* We sent the request successfully, so cancel our guard and check the unlock result. */
unlock_guard.Cancel();
return page_table.UnlockForIpcUserBuffer(message, buffer_size);
/* We successfully processed, so try to unlock the message buffer. */
R_RETURN(page_table.UnlockForIpcUserBuffer(message, buffer_size));
}
|
||||
|
||||
}
|
||||
|
@ -274,45 +264,45 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result SendSyncRequest64(ams::svc::Handle session_handle) {
|
||||
return SendSyncRequest(session_handle);
|
||||
R_RETURN(SendSyncRequest(session_handle));
|
||||
}
|
||||
|
||||
Result SendSyncRequestWithUserBuffer64(ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
|
||||
return SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle);
|
||||
R_RETURN(SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle));
|
||||
}
|
||||
|
||||
Result SendAsyncRequestWithUserBuffer64(ams::svc::Handle *out_event_handle, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
|
||||
return SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle);
|
||||
R_RETURN(SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle));
|
||||
}
|
||||
|
||||
Result ReplyAndReceive64(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
|
||||
return ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns);
|
||||
R_RETURN(ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns));
|
||||
}
|
||||
|
||||
Result ReplyAndReceiveWithUserBuffer64(int32_t *out_index, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
|
||||
return ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns);
|
||||
R_RETURN(ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result SendSyncRequest64From32(ams::svc::Handle session_handle) {
|
||||
return SendSyncRequest(session_handle);
|
||||
R_RETURN(SendSyncRequest(session_handle));
|
||||
}
|
||||
|
||||
Result SendSyncRequestWithUserBuffer64From32(ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
|
||||
return SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle);
|
||||
R_RETURN(SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle));
|
||||
}
|
||||
|
||||
Result SendAsyncRequestWithUserBuffer64From32(ams::svc::Handle *out_event_handle, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
|
||||
return SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle);
|
||||
R_RETURN(SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle));
|
||||
}
|
||||
|
||||
Result ReplyAndReceive64From32(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
|
||||
return ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns);
|
||||
R_RETURN(ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns));
|
||||
}
|
||||
|
||||
Result ReplyAndReceiveWithUserBuffer64From32(int32_t *out_index, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
|
||||
return ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns);
|
||||
R_RETURN(ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ namespace ams::kern::svc {
|
|||
/* Send the request. */
|
||||
R_TRY(session->SendSyncRequest(args));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
ALWAYS_INLINE Result ReplyAndReceiveLight(ams::svc::Handle session_handle, u32 *args) {
|
||||
|
@ -40,7 +40,7 @@ namespace ams::kern::svc {
|
|||
/* Handle the request. */
|
||||
R_TRY(session->ReplyAndReceive(args));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -48,21 +48,21 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result SendSyncRequestLight64(ams::svc::Handle session_handle, u32 *args) {
|
||||
return SendSyncRequestLight(session_handle, args);
|
||||
R_RETURN(SendSyncRequestLight(session_handle, args));
|
||||
}
|
||||
|
||||
Result ReplyAndReceiveLight64(ams::svc::Handle session_handle, u32 *args) {
|
||||
return ReplyAndReceiveLight(session_handle, args);
|
||||
R_RETURN(ReplyAndReceiveLight(session_handle, args));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result SendSyncRequestLight64From32(ams::svc::Handle session_handle, u32 *args) {
|
||||
return SendSyncRequestLight(session_handle, args);
|
||||
R_RETURN(SendSyncRequestLight(session_handle, args));
|
||||
}
|
||||
|
||||
Result ReplyAndReceiveLight64From32(ams::svc::Handle session_handle, u32 *args) {
|
||||
return ReplyAndReceiveLight(session_handle, args);
|
||||
R_RETURN(ReplyAndReceiveLight(session_handle, args));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -30,7 +30,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(!IsKernelAddress(address), svc::ResultInvalidCurrentMemory());
|
||||
R_UNLESS(util::IsAligned(address, sizeof(u32)), svc::ResultInvalidAddress());
|
||||
|
||||
return KConditionVariable::WaitForAddress(thread_handle, address, tag);
|
||||
R_RETURN(KConditionVariable::WaitForAddress(thread_handle, address, tag));
|
||||
}
|
||||
|
||||
Result ArbitrateUnlock(uintptr_t address) {
|
||||
|
@ -38,7 +38,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(!IsKernelAddress(address), svc::ResultInvalidCurrentMemory());
|
||||
R_UNLESS(util::IsAligned(address, sizeof(u32)), svc::ResultInvalidAddress());
|
||||
|
||||
return KConditionVariable::SignalToAddress(address);
|
||||
R_RETURN(KConditionVariable::SignalToAddress(address));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -46,21 +46,21 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result ArbitrateLock64(ams::svc::Handle thread_handle, ams::svc::Address address, uint32_t tag) {
|
||||
return ArbitrateLock(thread_handle, address, tag);
|
||||
R_RETURN(ArbitrateLock(thread_handle, address, tag));
|
||||
}
|
||||
|
||||
Result ArbitrateUnlock64(ams::svc::Address address) {
|
||||
return ArbitrateUnlock(address);
|
||||
R_RETURN(ArbitrateUnlock(address));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result ArbitrateLock64From32(ams::svc::Handle thread_handle, ams::svc::Address address, uint32_t tag) {
|
||||
return ArbitrateLock(thread_handle, address, tag);
|
||||
R_RETURN(ArbitrateLock(thread_handle, address, tag));
|
||||
}
|
||||
|
||||
Result ArbitrateUnlock64From32(ams::svc::Address address) {
|
||||
return ArbitrateUnlock(address);
|
||||
R_RETURN(ArbitrateUnlock(address));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -47,7 +47,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory());
|
||||
|
||||
/* Set the memory attribute. */
|
||||
return page_table.SetMemoryPermission(address, size, perm);
|
||||
R_RETURN(page_table.SetMemoryPermission(address, size, perm));
|
||||
}
|
||||
|
||||
Result SetMemoryAttribute(uintptr_t address, size_t size, uint32_t mask, uint32_t attr) {
|
||||
|
@ -67,7 +67,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory());
|
||||
|
||||
/* Set the memory attribute. */
|
||||
return page_table.SetMemoryAttribute(address, size, mask, attr);
|
||||
R_RETURN(page_table.SetMemoryAttribute(address, size, mask, attr));
|
||||
}
|
||||
|
||||
Result MapMemory(uintptr_t dst_address, uintptr_t src_address, size_t size) {
|
||||
|
@ -91,7 +91,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_Stack), svc::ResultInvalidMemoryRegion());
|
||||
|
||||
/* Map the memory. */
|
||||
return page_table.MapMemory(dst_address, src_address, size);
|
||||
R_RETURN(page_table.MapMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
Result UnmapMemory(uintptr_t dst_address, uintptr_t src_address, size_t size) {
|
||||
|
@ -115,7 +115,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_Stack), svc::ResultInvalidMemoryRegion());
|
||||
|
||||
/* Unmap the memory. */
|
||||
return page_table.UnmapMemory(dst_address, src_address, size);
|
||||
R_RETURN(page_table.UnmapMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -123,37 +123,37 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result SetMemoryPermission64(ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) {
|
||||
return SetMemoryPermission(address, size, perm);
|
||||
R_RETURN(SetMemoryPermission(address, size, perm));
|
||||
}
|
||||
|
||||
Result SetMemoryAttribute64(ams::svc::Address address, ams::svc::Size size, uint32_t mask, uint32_t attr) {
|
||||
return SetMemoryAttribute(address, size, mask, attr);
|
||||
R_RETURN(SetMemoryAttribute(address, size, mask, attr));
|
||||
}
|
||||
|
||||
Result MapMemory64(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
|
||||
return MapMemory(dst_address, src_address, size);
|
||||
R_RETURN(MapMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
Result UnmapMemory64(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
|
||||
return UnmapMemory(dst_address, src_address, size);
|
||||
R_RETURN(UnmapMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result SetMemoryPermission64From32(ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) {
|
||||
return SetMemoryPermission(address, size, perm);
|
||||
R_RETURN(SetMemoryPermission(address, size, perm));
|
||||
}
|
||||
|
||||
Result SetMemoryAttribute64From32(ams::svc::Address address, ams::svc::Size size, uint32_t mask, uint32_t attr) {
|
||||
return SetMemoryAttribute(address, size, mask, attr);
|
||||
R_RETURN(SetMemoryAttribute(address, size, mask, attr));
|
||||
}
|
||||
|
||||
Result MapMemory64From32(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
|
||||
return MapMemory(dst_address, src_address, size);
|
||||
R_RETURN(MapMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
Result UnmapMemory64From32(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) {
|
||||
return UnmapMemory(dst_address, src_address, size);
|
||||
R_RETURN(UnmapMemory(dst_address, src_address, size));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -32,7 +32,7 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Set the output. */
|
||||
*out_address = GetInteger(address);
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result SetUnsafeLimit(size_t limit) {
|
||||
|
@ -43,7 +43,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(limit <= Kernel::GetMemoryManager().GetSize(KMemoryManager::Pool_Unsafe), svc::ResultOutOfRange());
|
||||
|
||||
/* Set the size. */
|
||||
return Kernel::GetUnsafeMemory().SetLimitSize(limit);
|
||||
R_RETURN(Kernel::GetUnsafeMemory().SetLimitSize(limit));
|
||||
}
|
||||
|
||||
Result MapPhysicalMemory(uintptr_t address, size_t size) {
|
||||
|
@ -64,7 +64,7 @@ namespace ams::kern::svc {
|
|||
/* Map the memory. */
|
||||
R_TRY(page_table.MapPhysicalMemory(address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemory(uintptr_t address, size_t size) {
|
||||
|
@ -85,7 +85,7 @@ namespace ams::kern::svc {
|
|||
/* Unmap the memory. */
|
||||
R_TRY(page_table.UnmapPhysicalMemory(address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result MapPhysicalMemoryUnsafe(uintptr_t address, size_t size) {
|
||||
|
@ -106,7 +106,7 @@ namespace ams::kern::svc {
|
|||
/* Map the memory. */
|
||||
R_TRY(page_table.MapPhysicalMemoryUnsafe(address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemoryUnsafe(uintptr_t address, size_t size) {
|
||||
|
@ -124,7 +124,7 @@ namespace ams::kern::svc {
|
|||
/* Unmap the memory. */
|
||||
R_TRY(page_table.UnmapPhysicalMemoryUnsafe(address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -133,54 +133,54 @@ namespace ams::kern::svc {
|
|||
|
||||
Result SetHeapSize64(ams::svc::Address *out_address, ams::svc::Size size) {
|
||||
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
|
||||
return SetHeapSize(reinterpret_cast<uintptr_t *>(out_address), size);
|
||||
R_RETURN(SetHeapSize(reinterpret_cast<uintptr_t *>(out_address), size));
|
||||
}
|
||||
|
||||
Result MapPhysicalMemory64(ams::svc::Address address, ams::svc::Size size) {
|
||||
return MapPhysicalMemory(address, size);
|
||||
R_RETURN(MapPhysicalMemory(address, size));
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemory64(ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapPhysicalMemory(address, size);
|
||||
R_RETURN(UnmapPhysicalMemory(address, size));
|
||||
}
|
||||
|
||||
Result MapPhysicalMemoryUnsafe64(ams::svc::Address address, ams::svc::Size size) {
|
||||
return MapPhysicalMemoryUnsafe(address, size);
|
||||
R_RETURN(MapPhysicalMemoryUnsafe(address, size));
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemoryUnsafe64(ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapPhysicalMemoryUnsafe(address, size);
|
||||
R_RETURN(UnmapPhysicalMemoryUnsafe(address, size));
|
||||
}
|
||||
|
||||
Result SetUnsafeLimit64(ams::svc::Size limit) {
|
||||
return SetUnsafeLimit(limit);
|
||||
R_RETURN(SetUnsafeLimit(limit));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result SetHeapSize64From32(ams::svc::Address *out_address, ams::svc::Size size) {
|
||||
static_assert(sizeof(*out_address) == sizeof(uintptr_t));
|
||||
return SetHeapSize(reinterpret_cast<uintptr_t *>(out_address), size);
|
||||
R_RETURN(SetHeapSize(reinterpret_cast<uintptr_t *>(out_address), size));
|
||||
}
|
||||
|
||||
Result MapPhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) {
|
||||
return MapPhysicalMemory(address, size);
|
||||
R_RETURN(MapPhysicalMemory(address, size));
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapPhysicalMemory(address, size);
|
||||
R_RETURN(UnmapPhysicalMemory(address, size));
|
||||
}
|
||||
|
||||
Result MapPhysicalMemoryUnsafe64From32(ams::svc::Address address, ams::svc::Size size) {
|
||||
return MapPhysicalMemoryUnsafe(address, size);
|
||||
R_RETURN(MapPhysicalMemoryUnsafe(address, size));
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemoryUnsafe64From32(ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapPhysicalMemoryUnsafe(address, size);
|
||||
R_RETURN(UnmapPhysicalMemoryUnsafe(address, size));
|
||||
}
|
||||
|
||||
Result SetUnsafeLimit64From32(ams::svc::Size limit) {
|
||||
return SetUnsafeLimit(limit);
|
||||
R_RETURN(SetUnsafeLimit(limit));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -52,13 +52,10 @@ namespace ams::kern::svc {

/* Register the handle in the table. */
R_TRY(handle_table.Add(out_server_handle, std::addressof(port->GetServerPort())));
auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_server_handle); };
ON_RESULT_FAILURE { handle_table.Remove(*out_server_handle); };

/* Create a new object name. */
R_TRY(KObjectName::NewFromName(std::addressof(port->GetClientPort()), name));

/* We succeeded, so don't leak the handle. */
handle_guard.Cancel();
} else /* if (max_sessions == 0) */ {
/* Ensure that this else case is correct. */
MESOSPHERE_AUDIT(max_sessions == 0);

@@ -70,7 +67,7 @@ namespace ams::kern::svc {
R_TRY(KObjectName::Delete<KClientPort>(name));
}

return ResultSuccess();
R_SUCCEED();
}
|
||||
|
||||
Result CreatePort(ams::svc::Handle *out_server, ams::svc::Handle *out_client, int32_t max_sessions, bool is_light, uintptr_t name) {
|
||||
|
@@ -100,14 +97,10 @@ namespace ams::kern::svc {
R_TRY(handle_table.Add(out_client, std::addressof(port->GetClientPort())));

/* Ensure that we maintain a clean handle state on exit. */
auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_client); };
ON_RESULT_FAILURE { handle_table.Remove(*out_client); };

/* Add the server to the handle table. */
R_TRY(handle_table.Add(out_server, std::addressof(port->GetServerPort())));

/* We succeeded! */
handle_guard.Cancel();
return ResultSuccess();
R_RETURN(handle_table.Add(out_server, std::addressof(port->GetServerPort())));
}
|
||||
|
||||
Result ConnectToNamedPort(ams::svc::Handle *out, KUserPointer<const char *> user_name) {
|
||||
|
@@ -128,7 +121,7 @@ namespace ams::kern::svc {
/* Reserve a handle for the port. */
/* NOTE: Nintendo really does write directly to the output handle here. */
R_TRY(handle_table.Reserve(out));
auto handle_guard = SCOPE_GUARD { handle_table.Unreserve(*out); };
ON_RESULT_FAILURE { handle_table.Unreserve(*out); };

/* Create a session. */
KClientSession *session;

@@ -139,8 +132,7 @@ namespace ams::kern::svc {
session->Close();

/* We succeeded. */
handle_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
|
||||
|
||||
Result ConnectToPort(ams::svc::Handle *out, ams::svc::Handle port) {
|
||||
|
@@ -154,7 +146,7 @@ namespace ams::kern::svc {
/* Reserve a handle for the port. */
/* NOTE: Nintendo really does write directly to the output handle here. */
R_TRY(handle_table.Reserve(out));
auto handle_guard = SCOPE_GUARD { handle_table.Unreserve(*out); };
ON_RESULT_FAILURE { handle_table.Unreserve(*out); };

/* Create the session. */
KAutoObject *session;

@@ -169,8 +161,7 @@ namespace ams::kern::svc {
session->Close();

/* We succeeded. */
handle_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
|
||||
|
||||
}
|
||||
|
@ -178,37 +169,37 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result ConnectToNamedPort64(ams::svc::Handle *out_handle, KUserPointer<const char *> name) {
|
||||
return ConnectToNamedPort(out_handle, name);
|
||||
R_RETURN(ConnectToNamedPort(out_handle, name));
|
||||
}
|
||||
|
||||
Result CreatePort64(ams::svc::Handle *out_server_handle, ams::svc::Handle *out_client_handle, int32_t max_sessions, bool is_light, ams::svc::Address name) {
|
||||
return CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name);
|
||||
R_RETURN(CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name));
|
||||
}
|
||||
|
||||
Result ManageNamedPort64(ams::svc::Handle *out_server_handle, KUserPointer<const char *> name, int32_t max_sessions) {
|
||||
return ManageNamedPort(out_server_handle, name, max_sessions);
|
||||
R_RETURN(ManageNamedPort(out_server_handle, name, max_sessions));
|
||||
}
|
||||
|
||||
Result ConnectToPort64(ams::svc::Handle *out_handle, ams::svc::Handle port) {
|
||||
return ConnectToPort(out_handle, port);
|
||||
R_RETURN(ConnectToPort(out_handle, port));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result ConnectToNamedPort64From32(ams::svc::Handle *out_handle, KUserPointer<const char *> name) {
|
||||
return ConnectToNamedPort(out_handle, name);
|
||||
R_RETURN(ConnectToNamedPort(out_handle, name));
|
||||
}
|
||||
|
||||
Result CreatePort64From32(ams::svc::Handle *out_server_handle, ams::svc::Handle *out_client_handle, int32_t max_sessions, bool is_light, ams::svc::Address name) {
|
||||
return CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name);
|
||||
R_RETURN(CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name));
|
||||
}
|
||||
|
||||
Result ManageNamedPort64From32(ams::svc::Handle *out_server_handle, KUserPointer<const char *> name, int32_t max_sessions) {
|
||||
return ManageNamedPort(out_server_handle, name, max_sessions);
|
||||
R_RETURN(ManageNamedPort(out_server_handle, name, max_sessions));
|
||||
}
|
||||
|
||||
Result ConnectToPort64From32(ams::svc::Handle *out_handle, ams::svc::Handle port) {
|
||||
return ConnectToPort(out_handle, port);
|
||||
R_RETURN(ConnectToPort(out_handle, port));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -65,7 +65,7 @@ namespace ams::kern::svc {
|
|||
*out_process_id = d->GetProcessUnsafe()->GetProcessId();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetProcessList(int32_t *out_num_processes, KUserPointer<uint64_t *> out_process_ids, int32_t max_out_count) {
|
||||
|
@ -78,7 +78,7 @@ namespace ams::kern::svc {
|
|||
}
|
||||
|
||||
/* Get the process list. */
|
||||
return KProcess::GetProcessList(out_num_processes, out_process_ids, max_out_count);
|
||||
R_RETURN(KProcess::GetProcessList(out_num_processes, out_process_ids, max_out_count));
|
||||
}
|
||||
|
||||
Result CreateProcess(ams::svc::Handle *out, const ams::svc::CreateProcessParameter ¶ms, KUserPointer<const uint32_t *> user_caps, int32_t num_caps) {
|
||||
|
@ -135,7 +135,7 @@ namespace ams::kern::svc {
|
|||
}
|
||||
break;
|
||||
default:
|
||||
return svc::ResultInvalidEnumValue();
|
||||
R_THROW(svc::ResultInvalidEnumValue());
|
||||
}
|
||||
|
||||
/* Validate the pool partition. */
|
||||
|
@ -147,7 +147,7 @@ namespace ams::kern::svc {
|
|||
case ams::svc::CreateProcessFlag_PoolPartitionSystemNonSecure:
|
||||
break;
|
||||
default:
|
||||
return svc::ResultInvalidEnumValue();
|
||||
R_THROW(svc::ResultInvalidEnumValue());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -243,7 +243,7 @@ namespace ams::kern::svc {
|
|||
/* Add the process to the handle table. */
|
||||
R_TRY(handle_table.Add(out, process));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
|
@ -254,7 +254,7 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Invoke the implementation. */
|
||||
if constexpr (std::same_as<T, ams::svc::CreateProcessParameter>) {
|
||||
return CreateProcess(out, params, user_caps, num_caps);
|
||||
R_RETURN(CreateProcess(out, params, user_caps, num_caps));
|
||||
} else {
|
||||
/* Convert the parameters. */
|
||||
ams::svc::CreateProcessParameter converted_params;
|
||||
|
@ -270,7 +270,7 @@ namespace ams::kern::svc {
|
|||
converted_params.system_resource_num_pages = params.system_resource_num_pages;
|
||||
|
||||
/* Invoke. */
|
||||
return CreateProcess(out, converted_params, user_caps, num_caps);
|
||||
R_RETURN(CreateProcess(out, converted_params, user_caps, num_caps));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -294,7 +294,7 @@ namespace ams::kern::svc {
|
|||
process->SetIdealCoreId(core_id);
|
||||
|
||||
/* Run the process. */
|
||||
return process->Run(priority, static_cast<size_t>(main_thread_stack_size));
|
||||
R_RETURN(process->Run(priority, static_cast<size_t>(main_thread_stack_size)));
|
||||
}
|
||||
|
||||
Result TerminateProcess(ams::svc::Handle process_handle) {
|
||||
|
@ -316,7 +316,7 @@ namespace ams::kern::svc {
|
|||
ExitProcess();
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetProcessInfo(int64_t *out, ams::svc::Handle process_handle, ams::svc::ProcessInfoType info_type) {
|
||||
|
@ -352,10 +352,10 @@ namespace ams::kern::svc {
|
|||
}
|
||||
break;
|
||||
default:
|
||||
return svc::ResultInvalidEnumValue();
|
||||
R_THROW(svc::ResultInvalidEnumValue());
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -367,27 +367,27 @@ namespace ams::kern::svc {
|
|||
}
|
||||
|
||||
Result GetProcessId64(uint64_t *out_process_id, ams::svc::Handle process_handle) {
|
||||
return GetProcessId(out_process_id, process_handle);
|
||||
R_RETURN(GetProcessId(out_process_id, process_handle));
|
||||
}
|
||||
|
||||
Result GetProcessList64(int32_t *out_num_processes, KUserPointer<uint64_t *> out_process_ids, int32_t max_out_count) {
|
||||
return GetProcessList(out_num_processes, out_process_ids, max_out_count);
|
||||
R_RETURN(GetProcessList(out_num_processes, out_process_ids, max_out_count));
|
||||
}
|
||||
|
||||
Result CreateProcess64(ams::svc::Handle *out_handle, KUserPointer<const ams::svc::lp64::CreateProcessParameter *> parameters, KUserPointer<const uint32_t *> caps, int32_t num_caps) {
|
||||
return CreateProcess(out_handle, parameters, caps, num_caps);
|
||||
R_RETURN(CreateProcess(out_handle, parameters, caps, num_caps));
|
||||
}
|
||||
|
||||
Result StartProcess64(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) {
|
||||
return StartProcess(process_handle, priority, core_id, main_thread_stack_size);
|
||||
R_RETURN(StartProcess(process_handle, priority, core_id, main_thread_stack_size));
|
||||
}
|
||||
|
||||
Result TerminateProcess64(ams::svc::Handle process_handle) {
|
||||
return TerminateProcess(process_handle);
|
||||
R_RETURN(TerminateProcess(process_handle));
|
||||
}
|
||||
|
||||
Result GetProcessInfo64(int64_t *out_info, ams::svc::Handle process_handle, ams::svc::ProcessInfoType info_type) {
|
||||
return GetProcessInfo(out_info, process_handle, info_type);
|
||||
R_RETURN(GetProcessInfo(out_info, process_handle, info_type));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
@ -397,27 +397,27 @@ namespace ams::kern::svc {
|
|||
}
|
||||
|
||||
Result GetProcessId64From32(uint64_t *out_process_id, ams::svc::Handle process_handle) {
|
||||
return GetProcessId(out_process_id, process_handle);
|
||||
R_RETURN(GetProcessId(out_process_id, process_handle));
|
||||
}
|
||||
|
||||
Result GetProcessList64From32(int32_t *out_num_processes, KUserPointer<uint64_t *> out_process_ids, int32_t max_out_count) {
|
||||
return GetProcessList(out_num_processes, out_process_ids, max_out_count);
|
||||
R_RETURN(GetProcessList(out_num_processes, out_process_ids, max_out_count));
|
||||
}
|
||||
|
||||
Result CreateProcess64From32(ams::svc::Handle *out_handle, KUserPointer<const ams::svc::ilp32::CreateProcessParameter *> parameters, KUserPointer<const uint32_t *> caps, int32_t num_caps) {
|
||||
return CreateProcess(out_handle, parameters, caps, num_caps);
|
||||
R_RETURN(CreateProcess(out_handle, parameters, caps, num_caps));
|
||||
}
|
||||
|
||||
Result StartProcess64From32(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) {
|
||||
return StartProcess(process_handle, priority, core_id, main_thread_stack_size);
|
||||
R_RETURN(StartProcess(process_handle, priority, core_id, main_thread_stack_size));
|
||||
}
|
||||
|
||||
Result TerminateProcess64From32(ams::svc::Handle process_handle) {
|
||||
return TerminateProcess(process_handle);
|
||||
R_RETURN(TerminateProcess(process_handle));
|
||||
}
|
||||
|
||||
Result GetProcessInfo64From32(int64_t *out_info, ams::svc::Handle process_handle, ams::svc::ProcessInfoType info_type) {
|
||||
return GetProcessInfo(out_info, process_handle, info_type);
|
||||
R_RETURN(GetProcessInfo(out_info, process_handle, info_type));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -54,7 +54,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory());
|
||||
|
||||
/* Set the memory permission. */
|
||||
return page_table.SetProcessMemoryPermission(address, size, perm);
|
||||
R_RETURN(page_table.SetProcessMemoryPermission(address, size, perm));
|
||||
}
|
||||
|
||||
Result MapProcessMemory(uintptr_t dst_address, ams::svc::Handle process_handle, uint64_t src_address, size_t size) {
|
||||
|
@ -96,7 +96,7 @@ namespace ams::kern::svc {
|
|||
/* Map the group. */
|
||||
R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState_SharedCode, KMemoryPermission_UserReadWrite));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result UnmapProcessMemory(uintptr_t dst_address, ams::svc::Handle process_handle, uint64_t src_address, size_t size) {
|
||||
|
@ -125,7 +125,7 @@ namespace ams::kern::svc {
|
|||
/* Unmap the memory. */
|
||||
R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result MapProcessCodeMemory(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
|
||||
|
@ -152,7 +152,7 @@ namespace ams::kern::svc {
|
|||
/* Map the memory. */
|
||||
R_TRY(page_table.MapCodeMemory(dst_address, src_address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result UnmapProcessCodeMemory(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
|
||||
|
@ -179,7 +179,7 @@ namespace ams::kern::svc {
|
|||
/* Unmap the memory. */
|
||||
R_TRY(page_table.UnmapCodeMemory(dst_address, src_address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -187,45 +187,45 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result SetProcessMemoryPermission64(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
|
||||
return SetProcessMemoryPermission(process_handle, address, size, perm);
|
||||
R_RETURN(SetProcessMemoryPermission(process_handle, address, size, perm));
|
||||
}
|
||||
|
||||
Result MapProcessMemory64(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
|
||||
return MapProcessMemory(dst_address, process_handle, src_address, size);
|
||||
R_RETURN(MapProcessMemory(dst_address, process_handle, src_address, size));
|
||||
}
|
||||
|
||||
Result UnmapProcessMemory64(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
|
||||
return UnmapProcessMemory(dst_address, process_handle, src_address, size);
|
||||
R_RETURN(UnmapProcessMemory(dst_address, process_handle, src_address, size));
|
||||
}
|
||||
|
||||
Result MapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
|
||||
return MapProcessCodeMemory(process_handle, dst_address, src_address, size);
|
||||
R_RETURN(MapProcessCodeMemory(process_handle, dst_address, src_address, size));
|
||||
}
|
||||
|
||||
Result UnmapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
|
||||
return UnmapProcessCodeMemory(process_handle, dst_address, src_address, size);
|
||||
R_RETURN(UnmapProcessCodeMemory(process_handle, dst_address, src_address, size));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result SetProcessMemoryPermission64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
|
||||
return SetProcessMemoryPermission(process_handle, address, size, perm);
|
||||
R_RETURN(SetProcessMemoryPermission(process_handle, address, size, perm));
|
||||
}
|
||||
|
||||
Result MapProcessMemory64From32(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
|
||||
return MapProcessMemory(dst_address, process_handle, src_address, size);
|
||||
R_RETURN(MapProcessMemory(dst_address, process_handle, src_address, size));
|
||||
}
|
||||
|
||||
Result UnmapProcessMemory64From32(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
|
||||
return UnmapProcessMemory(dst_address, process_handle, src_address, size);
|
||||
R_RETURN(UnmapProcessMemory(dst_address, process_handle, src_address, size));
|
||||
}
|
||||
|
||||
Result MapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
|
||||
return MapProcessCodeMemory(process_handle, dst_address, src_address, size);
|
||||
R_RETURN(MapProcessCodeMemory(process_handle, dst_address, src_address, size));
|
||||
}
|
||||
|
||||
Result UnmapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
|
||||
return UnmapProcessCodeMemory(process_handle, dst_address, src_address, size);
|
||||
R_RETURN(UnmapProcessCodeMemory(process_handle, dst_address, src_address, size));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -32,7 +32,7 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Write output. */
|
||||
*out_memory_info = info.GetSvcMemoryInfo();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
|
@ -62,14 +62,14 @@ namespace ams::kern::svc {
|
|||
R_TRY(out_memory_info.CopyFrom(std::addressof(converted_info)));
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
|
||||
template<typename T>
|
||||
Result QueryMemory(KUserPointer<T *> out_memory_info, ams::svc::PageInfo *out_page_info, uintptr_t address) {
|
||||
/* Query memory is just QueryProcessMemory on the current process. */
|
||||
return QueryProcessMemory(out_memory_info, out_page_info, ams::svc::PseudoHandle::CurrentProcess, address);
|
||||
R_RETURN(QueryProcessMemory(out_memory_info, out_page_info, ams::svc::PseudoHandle::CurrentProcess, address));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -77,21 +77,21 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result QueryMemory64(KUserPointer<ams::svc::lp64::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Address address) {
|
||||
return QueryMemory(out_memory_info, out_page_info, address);
|
||||
R_RETURN(QueryMemory(out_memory_info, out_page_info, address));
|
||||
}
|
||||
|
||||
Result QueryProcessMemory64(KUserPointer<ams::svc::lp64::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) {
|
||||
return QueryProcessMemory(out_memory_info, out_page_info, process_handle, address);
|
||||
R_RETURN(QueryProcessMemory(out_memory_info, out_page_info, process_handle, address));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result QueryMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Address address) {
|
||||
return QueryMemory(out_memory_info, out_page_info, address);
|
||||
R_RETURN(QueryMemory(out_memory_info, out_page_info, address));
|
||||
}
|
||||
|
||||
Result QueryProcessMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) {
|
||||
return QueryProcessMemory(out_memory_info, out_page_info, process_handle, address);
|
||||
R_RETURN(QueryProcessMemory(out_memory_info, out_page_info, process_handle, address));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@ namespace ams::kern::svc {
|
|||
*out = 0;
|
||||
|
||||
/* Read/write the register. */
|
||||
return KSystemControl::ReadWriteRegister(out, address, mask, value);
|
||||
R_RETURN(KSystemControl::ReadWriteRegister(out, address, mask, value));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -34,13 +34,13 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result ReadWriteRegister64(uint32_t *out_value, ams::svc::PhysicalAddress address, uint32_t mask, uint32_t value) {
|
||||
return ReadWriteRegister(out_value, address, mask, value);
|
||||
R_RETURN(ReadWriteRegister(out_value, address, mask, value));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result ReadWriteRegister64From32(uint32_t *out_value, ams::svc::PhysicalAddress address, uint32_t mask, uint32_t value) {
|
||||
return ReadWriteRegister(out_value, address, mask, value);
|
||||
R_RETURN(ReadWriteRegister(out_value, address, mask, value));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ namespace ams::kern::svc {
|
|||
/* Get the limit value. */
|
||||
*out_limit_value = resource_limit->GetLimitValue(which);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetResourceLimitCurrentValue(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
|
||||
|
@ -50,7 +50,7 @@ namespace ams::kern::svc {
|
|||
/* Get the current value. */
|
||||
*out_current_value = resource_limit->GetCurrentValue(which);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetResourceLimitPeakValue(int64_t *out_peak_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
|
||||
|
@ -64,7 +64,7 @@ namespace ams::kern::svc {
|
|||
/* Get the peak value. */
|
||||
*out_peak_value = resource_limit->GetPeakValue(which);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result CreateResourceLimit(ams::svc::Handle *out_handle) {
|
||||
|
@ -84,7 +84,7 @@ namespace ams::kern::svc {
|
|||
/* Add the limit to the handle table. */
|
||||
R_TRY(GetCurrentProcess().GetHandleTable().Add(out_handle, resource_limit));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result SetResourceLimitLimitValue(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) {
|
||||
|
@ -98,7 +98,7 @@ namespace ams::kern::svc {
|
|||
/* Set the limit value. */
|
||||
R_TRY(resource_limit->SetLimitValue(which, limit_value));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -106,45 +106,45 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result GetResourceLimitLimitValue64(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
|
||||
return GetResourceLimitLimitValue(out_limit_value, resource_limit_handle, which);
|
||||
R_RETURN(GetResourceLimitLimitValue(out_limit_value, resource_limit_handle, which));
|
||||
}
|
||||
|
||||
Result GetResourceLimitCurrentValue64(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
|
||||
return GetResourceLimitCurrentValue(out_current_value, resource_limit_handle, which);
|
||||
R_RETURN(GetResourceLimitCurrentValue(out_current_value, resource_limit_handle, which));
|
||||
}
|
||||
|
||||
Result GetResourceLimitPeakValue64(int64_t *out_peak_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
|
||||
return GetResourceLimitPeakValue(out_peak_value, resource_limit_handle, which);
|
||||
R_RETURN(GetResourceLimitPeakValue(out_peak_value, resource_limit_handle, which));
|
||||
}
|
||||
|
||||
Result CreateResourceLimit64(ams::svc::Handle *out_handle) {
|
||||
return CreateResourceLimit(out_handle);
|
||||
R_RETURN(CreateResourceLimit(out_handle));
|
||||
}
|
||||
|
||||
Result SetResourceLimitLimitValue64(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) {
|
||||
return SetResourceLimitLimitValue(resource_limit_handle, which, limit_value);
|
||||
R_RETURN(SetResourceLimitLimitValue(resource_limit_handle, which, limit_value));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result GetResourceLimitLimitValue64From32(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
|
||||
return GetResourceLimitLimitValue(out_limit_value, resource_limit_handle, which);
|
||||
R_RETURN(GetResourceLimitLimitValue(out_limit_value, resource_limit_handle, which));
|
||||
}
|
||||
|
||||
Result GetResourceLimitCurrentValue64From32(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
|
||||
return GetResourceLimitCurrentValue(out_current_value, resource_limit_handle, which);
|
||||
R_RETURN(GetResourceLimitCurrentValue(out_current_value, resource_limit_handle, which));
|
||||
}
|
||||
|
||||
Result GetResourceLimitPeakValue64From32(int64_t *out_peak_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
|
||||
return GetResourceLimitPeakValue(out_peak_value, resource_limit_handle, which);
|
||||
R_RETURN(GetResourceLimitPeakValue(out_peak_value, resource_limit_handle, which));
|
||||
}
|
||||
|
||||
Result CreateResourceLimit64From32(ams::svc::Handle *out_handle) {
|
||||
return CreateResourceLimit(out_handle);
|
||||
R_RETURN(CreateResourceLimit(out_handle));
|
||||
}
|
||||
|
||||
Result SetResourceLimitLimitValue64From32(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) {
|
||||
return SetResourceLimitLimitValue(resource_limit_handle, which, limit_value);
|
||||
R_RETURN(SetResourceLimitLimitValue(resource_limit_handle, which, limit_value));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -43,21 +43,17 @@ namespace ams::kern::svc {
/* Try to allocate a session from unused slab memory. */
session = T::CreateFromUnusedSlabMemory();
R_UNLESS(session != nullptr, svc::ResultLimitReached());
ON_RESULT_FAILURE { session->Close(); };

/* If we're creating a KSession, we want to add two KSessionRequests to the heap, to prevent request exhaustion. */
/* NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's no reason to not do this statically. */
if constexpr (std::same_as<T, KSession>) {
/* Ensure that if we fail to allocate our session requests, we close the session we created. */
auto session_guard = SCOPE_GUARD { session->Close(); };
{
for (size_t i = 0; i < 2; ++i) {
KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
R_UNLESS(request != nullptr, svc::ResultLimitReached());
for (size_t i = 0; i < 2; ++i) {
KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
R_UNLESS(request != nullptr, svc::ResultLimitReached());

request->Close();
}
request->Close();
}
session_guard.Cancel();
}

/* We successfully allocated a session, so add the object we allocated to the resource limit. */
@@ -86,21 +82,17 @@ namespace ams::kern::svc {
R_TRY(handle_table.Add(out_server, std::addressof(session->GetServerSession())));

/* Ensure that we maintaing a clean handle state on exit. */
auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_server); };
ON_RESULT_FAILURE { handle_table.Remove(*out_server); };

/* Add the client session to the handle table. */
R_TRY(handle_table.Add(out_client, std::addressof(session->GetClientSession())));

/* We succeeded! */
handle_guard.Cancel();
return ResultSuccess();
R_RETURN(handle_table.Add(out_client, std::addressof(session->GetClientSession())));
}

Result CreateSession(ams::svc::Handle *out_server, ams::svc::Handle *out_client, bool is_light, uintptr_t name) {
if (is_light) {
return CreateSession<KLightSession>(out_server, out_client, name);
R_RETURN(CreateSession<KLightSession>(out_server, out_client, name));
} else {
return CreateSession<KSession>(out_server, out_client, name);
R_RETURN(CreateSession<KSession>(out_server, out_client, name));
}
}

@@ -114,7 +106,7 @@ namespace ams::kern::svc {

/* Reserve an entry for the new session. */
R_TRY(handle_table.Reserve(out));
auto handle_guard = SCOPE_GUARD { handle_table.Unreserve(*out); };
ON_RESULT_FAILURE { handle_table.Unreserve(*out); };

/* Accept the session. */
KAutoObject *session;
@@ -129,10 +121,9 @@ namespace ams::kern::svc {

/* Register the session. */
handle_table.Register(*out, session);
handle_guard.Cancel();
session->Close();

return ResultSuccess();
R_SUCCEED();
}

}
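Most of the non-trivial churn in these session hunks is the replacement of a manually cancelled SCOPE_GUARD with ON_RESULT_FAILURE. A hedged sketch of the shape of that conversion; Acquire, Release and DoRegistration are placeholders, not functions from the commit:

/* Before: cleanup is armed unconditionally and must be cancelled on every success path. */
Result RegisterOld() {
    R_TRY(Acquire());
    auto guard = SCOPE_GUARD { Release(); };

    R_TRY(DoRegistration());

    guard.Cancel(); /* easy to forget when a new early return is added */
    return ResultSuccess();
}

/* After: the cleanup body runs only if the function exits through a failure path. */
Result RegisterNew() {
    R_TRY(Acquire());
    ON_RESULT_FAILURE { Release(); };

    R_TRY(DoRegistration());
    R_SUCCEED();
}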
@ -140,21 +131,21 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result CreateSession64(ams::svc::Handle *out_server_session_handle, ams::svc::Handle *out_client_session_handle, bool is_light, ams::svc::Address name) {
|
||||
return CreateSession(out_server_session_handle, out_client_session_handle, is_light, name);
|
||||
R_RETURN(CreateSession(out_server_session_handle, out_client_session_handle, is_light, name));
|
||||
}
|
||||
|
||||
Result AcceptSession64(ams::svc::Handle *out_handle, ams::svc::Handle port) {
|
||||
return AcceptSession(out_handle, port);
|
||||
R_RETURN(AcceptSession(out_handle, port));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result CreateSession64From32(ams::svc::Handle *out_server_session_handle, ams::svc::Handle *out_client_session_handle, bool is_light, ams::svc::Address name) {
|
||||
return CreateSession(out_server_session_handle, out_client_session_handle, is_light, name);
|
||||
R_RETURN(CreateSession(out_server_session_handle, out_client_session_handle, is_light, name));
|
||||
}
|
||||
|
||||
Result AcceptSession64From32(ams::svc::Handle *out_handle, ams::svc::Handle port) {
|
||||
return AcceptSession(out_handle, port);
|
||||
R_RETURN(AcceptSession(out_handle, port));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -60,14 +60,10 @@ namespace ams::kern::svc {
R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size));

/* Ensure that we clean up the shared memory if we fail to map it. */
auto guard = SCOPE_GUARD { process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); };
ON_RESULT_FAILURE { process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); };

/* Map the shared memory. */
R_TRY(shmem->Map(std::addressof(page_table), address, size, std::addressof(process), map_perm));

/* We succeeded. */
guard.Cancel();
return ResultSuccess();
R_RETURN(shmem->Map(std::addressof(page_table), address, size, std::addressof(process), map_perm));
}

Result UnmapSharedMemory(ams::svc::Handle shmem_handle, uintptr_t address, size_t size) {
|
||||
|
@ -94,7 +90,7 @@ namespace ams::kern::svc {
|
|||
/* Remove the shared memory from the process. */
|
||||
process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result CreateSharedMemory(ams::svc::Handle *out, size_t size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) {
|
||||
|
@ -122,7 +118,7 @@ namespace ams::kern::svc {
|
|||
/* Add the shared memory to the handle table. */
|
||||
R_TRY(GetCurrentProcess().GetHandleTable().Add(out, shmem));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -130,29 +126,29 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result MapSharedMemory64(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) {
|
||||
return MapSharedMemory(shmem_handle, address, size, map_perm);
|
||||
R_RETURN(MapSharedMemory(shmem_handle, address, size, map_perm));
|
||||
}
|
||||
|
||||
Result UnmapSharedMemory64(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapSharedMemory(shmem_handle, address, size);
|
||||
R_RETURN(UnmapSharedMemory(shmem_handle, address, size));
|
||||
}
|
||||
|
||||
Result CreateSharedMemory64(ams::svc::Handle *out_handle, ams::svc::Size size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) {
|
||||
return CreateSharedMemory(out_handle, size, owner_perm, remote_perm);
|
||||
R_RETURN(CreateSharedMemory(out_handle, size, owner_perm, remote_perm));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result MapSharedMemory64From32(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) {
|
||||
return MapSharedMemory(shmem_handle, address, size, map_perm);
|
||||
R_RETURN(MapSharedMemory(shmem_handle, address, size, map_perm));
|
||||
}
|
||||
|
||||
Result UnmapSharedMemory64From32(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapSharedMemory(shmem_handle, address, size);
|
||||
R_RETURN(UnmapSharedMemory(shmem_handle, address, size));
|
||||
}
|
||||
|
||||
Result CreateSharedMemory64From32(ams::svc::Handle *out_handle, ams::svc::Size size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) {
|
||||
return CreateSharedMemory(out_handle, size, owner_perm, remote_perm);
|
||||
R_RETURN(CreateSharedMemory(out_handle, size, owner_perm, remote_perm));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -24,7 +24,7 @@ namespace ams::kern::svc {
|
|||
Result CloseHandle(ams::svc::Handle handle) {
|
||||
/* Remove the handle. */
|
||||
R_UNLESS(GetCurrentProcess().GetHandleTable().Remove(handle), svc::ResultInvalidHandle());
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result ResetSignal(ams::svc::Handle handle) {
|
||||
|
@ -36,9 +36,9 @@ namespace ams::kern::svc {
|
|||
KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(handle);
|
||||
if (readable_event.IsNotNull()) {
|
||||
if (auto * const interrupt_event = readable_event->DynamicCast<KInterruptEvent *>(); interrupt_event != nullptr) {
|
||||
return interrupt_event->Reset();
|
||||
R_RETURN(interrupt_event->Reset());
|
||||
} else {
|
||||
return readable_event->Reset();
|
||||
R_RETURN(readable_event->Reset());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -47,11 +47,11 @@ namespace ams::kern::svc {
|
|||
{
|
||||
KScopedAutoObject process = handle_table.GetObject<KProcess>(handle);
|
||||
if (process.IsNotNull()) {
|
||||
return process->Reset();
|
||||
R_RETURN(process->Reset());
|
||||
}
|
||||
}
|
||||
|
||||
return svc::ResultInvalidHandle();
|
||||
R_THROW(svc::ResultInvalidHandle());
|
||||
}
|
||||
|
||||
Result WaitSynchronizationImpl(int32_t *out_index, KSynchronizationObject **objs, int32_t num_handles, int64_t timeout_ns) {
|
||||
|
@ -67,7 +67,7 @@ namespace ams::kern::svc {
|
|||
timeout = timeout_ns;
|
||||
}
|
||||
|
||||
return KSynchronizationObject::Wait(out_index, objs, num_handles, timeout);
|
||||
R_RETURN(KSynchronizationObject::Wait(out_index, objs, num_handles, timeout));
|
||||
}
|
||||
|
||||
Result WaitSynchronization(int32_t *out_index, KUserPointer<const ams::svc::Handle *> user_handles, int32_t num_handles, int64_t timeout_ns) {
|
||||
|
@ -103,7 +103,7 @@ namespace ams::kern::svc {
|
|||
R_CONVERT(svc::ResultSessionClosed, ResultSuccess())
|
||||
} R_END_TRY_CATCH;
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
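The R_CONVERT / R_END_TRY_CATCH pair above is the tail of Atmosphere's try/catch-style result construct; the opening R_TRY_CATCH(...) sits outside this hunk. A hedged usage sketch, with Receive() standing in for whatever call is actually being wrapped:

Result PumpOnce() {
    /* Treat a closed session as success; any other failure propagates unchanged. */
    R_TRY_CATCH(Receive()) {
        R_CONVERT(svc::ResultSessionClosed, ResultSuccess())
    } R_END_TRY_CATCH;

    R_SUCCEED();
}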
|
||||
|
||||
Result CancelSynchronization(ams::svc::Handle handle) {
|
||||
|
@ -113,7 +113,7 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Cancel the thread's wait. */
|
||||
thread->WaitCancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
void SynchronizePreemptionState() {
|
||||
|
@ -136,19 +136,19 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result CloseHandle64(ams::svc::Handle handle) {
|
||||
return CloseHandle(handle);
|
||||
R_RETURN(CloseHandle(handle));
|
||||
}
|
||||
|
||||
Result ResetSignal64(ams::svc::Handle handle) {
|
||||
return ResetSignal(handle);
|
||||
R_RETURN(ResetSignal(handle));
|
||||
}
|
||||
|
||||
Result WaitSynchronization64(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, int64_t timeout_ns) {
|
||||
return WaitSynchronization(out_index, handles, num_handles, timeout_ns);
|
||||
R_RETURN(WaitSynchronization(out_index, handles, num_handles, timeout_ns));
|
||||
}
|
||||
|
||||
Result CancelSynchronization64(ams::svc::Handle handle) {
|
||||
return CancelSynchronization(handle);
|
||||
R_RETURN(CancelSynchronization(handle));
|
||||
}
|
||||
|
||||
void SynchronizePreemptionState64() {
|
||||
|
@ -158,19 +158,19 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result CloseHandle64From32(ams::svc::Handle handle) {
|
||||
return CloseHandle(handle);
|
||||
R_RETURN(CloseHandle(handle));
|
||||
}
|
||||
|
||||
Result ResetSignal64From32(ams::svc::Handle handle) {
|
||||
return ResetSignal(handle);
|
||||
R_RETURN(ResetSignal(handle));
|
||||
}
|
||||
|
||||
Result WaitSynchronization64From32(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, int64_t timeout_ns) {
|
||||
return WaitSynchronization(out_index, handles, num_handles, timeout_ns);
|
||||
R_RETURN(WaitSynchronization(out_index, handles, num_handles, timeout_ns));
|
||||
}
|
||||
|
||||
Result CancelSynchronization64From32(ams::svc::Handle handle) {
|
||||
return CancelSynchronization(handle);
|
||||
R_RETURN(CancelSynchronization(handle));
|
||||
}
|
||||
|
||||
void SynchronizePreemptionState64From32() {
|
||||
|
|
|
@ -66,7 +66,7 @@ namespace ams::kern::svc {
|
|||
/* Add the thread to the handle table. */
|
||||
R_TRY(process.GetHandleTable().Add(out, thread));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result StartThread(ams::svc::Handle thread_handle) {
|
||||
|
@ -75,7 +75,7 @@ namespace ams::kern::svc {
|
|||
R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
|
||||
|
||||
/* Try to start the thread. */
|
||||
return thread->Run();
|
||||
R_RETURN(thread->Run());
|
||||
}
|
||||
|
||||
void ExitThread() {
|
||||
|
@ -121,7 +121,7 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Get the thread's priority. */
|
||||
*out_priority = thread->GetPriority();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result SetThreadPriority(ams::svc::Handle thread_handle, int32_t priority) {
|
||||
|
@ -141,7 +141,7 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Set the thread priority. */
|
||||
thread->SetBasePriority(priority);
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetThreadCoreMask(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) {
|
||||
|
@ -152,7 +152,7 @@ namespace ams::kern::svc {
|
|||
/* Get the core mask. */
|
||||
R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result SetThreadCoreMask(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) {
|
||||
|
@ -184,7 +184,7 @@ namespace ams::kern::svc {
|
|||
/* Set the core mask. */
|
||||
R_TRY(thread->SetCoreMask(core_id, affinity_mask));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetThreadId(uint64_t *out_thread_id, ams::svc::Handle thread_handle) {
|
||||
|
@ -194,7 +194,7 @@ namespace ams::kern::svc {
|
|||
|
||||
/* Get the thread's id. */
|
||||
*out_thread_id = thread->GetId();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetThreadContext3(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle thread_handle) {
|
||||
|
@ -213,7 +213,7 @@ namespace ams::kern::svc {
|
|||
/* Copy the thread context to user space. */
|
||||
R_TRY(out_context.CopyFrom(std::addressof(context)));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetThreadList(int32_t *out_num_threads, KUserPointer<uint64_t *> out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) {
|
||||
|
@ -252,7 +252,7 @@ namespace ams::kern::svc {
|
|||
}
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -260,11 +260,11 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result CreateThread64(ams::svc::Handle *out_handle, ams::svc::ThreadFunc func, ams::svc::Address arg, ams::svc::Address stack_bottom, int32_t priority, int32_t core_id) {
|
||||
return CreateThread(out_handle, func, arg, stack_bottom, priority, core_id);
|
||||
R_RETURN(CreateThread(out_handle, func, arg, stack_bottom, priority, core_id));
|
||||
}
|
||||
|
||||
Result StartThread64(ams::svc::Handle thread_handle) {
|
||||
return StartThread(thread_handle);
|
||||
R_RETURN(StartThread(thread_handle));
|
||||
}
|
||||
|
||||
void ExitThread64() {
|
||||
|
@ -276,41 +276,41 @@ namespace ams::kern::svc {
|
|||
}
|
||||
|
||||
Result GetThreadPriority64(int32_t *out_priority, ams::svc::Handle thread_handle) {
|
||||
return GetThreadPriority(out_priority, thread_handle);
|
||||
R_RETURN(GetThreadPriority(out_priority, thread_handle));
|
||||
}
|
||||
|
||||
Result SetThreadPriority64(ams::svc::Handle thread_handle, int32_t priority) {
|
||||
return SetThreadPriority(thread_handle, priority);
|
||||
R_RETURN(SetThreadPriority(thread_handle, priority));
|
||||
}
|
||||
|
||||
Result GetThreadCoreMask64(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) {
|
||||
return GetThreadCoreMask(out_core_id, out_affinity_mask, thread_handle);
|
||||
R_RETURN(GetThreadCoreMask(out_core_id, out_affinity_mask, thread_handle));
|
||||
}
|
||||
|
||||
Result SetThreadCoreMask64(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) {
|
||||
return SetThreadCoreMask(thread_handle, core_id, affinity_mask);
|
||||
R_RETURN(SetThreadCoreMask(thread_handle, core_id, affinity_mask));
|
||||
}
|
||||
|
||||
Result GetThreadId64(uint64_t *out_thread_id, ams::svc::Handle thread_handle) {
|
||||
return GetThreadId(out_thread_id, thread_handle);
|
||||
R_RETURN(GetThreadId(out_thread_id, thread_handle));
|
||||
}
|
||||
|
||||
Result GetThreadContext364(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle thread_handle) {
|
||||
return GetThreadContext3(out_context, thread_handle);
|
||||
R_RETURN(GetThreadContext3(out_context, thread_handle));
|
||||
}
|
||||
|
||||
Result GetThreadList64(int32_t *out_num_threads, KUserPointer<uint64_t *> out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) {
|
||||
return GetThreadList(out_num_threads, out_thread_ids, max_out_count, debug_handle);
|
||||
R_RETURN(GetThreadList(out_num_threads, out_thread_ids, max_out_count, debug_handle));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result CreateThread64From32(ams::svc::Handle *out_handle, ams::svc::ThreadFunc func, ams::svc::Address arg, ams::svc::Address stack_bottom, int32_t priority, int32_t core_id) {
|
||||
return CreateThread(out_handle, func, arg, stack_bottom, priority, core_id);
|
||||
R_RETURN(CreateThread(out_handle, func, arg, stack_bottom, priority, core_id));
|
||||
}
|
||||
|
||||
Result StartThread64From32(ams::svc::Handle thread_handle) {
|
||||
return StartThread(thread_handle);
|
||||
R_RETURN(StartThread(thread_handle));
|
||||
}
|
||||
|
||||
void ExitThread64From32() {
|
||||
|
@ -322,31 +322,31 @@ namespace ams::kern::svc {
|
|||
}
|
||||
|
||||
Result GetThreadPriority64From32(int32_t *out_priority, ams::svc::Handle thread_handle) {
|
||||
return GetThreadPriority(out_priority, thread_handle);
|
||||
R_RETURN(GetThreadPriority(out_priority, thread_handle));
|
||||
}
|
||||
|
||||
Result SetThreadPriority64From32(ams::svc::Handle thread_handle, int32_t priority) {
|
||||
return SetThreadPriority(thread_handle, priority);
|
||||
R_RETURN(SetThreadPriority(thread_handle, priority));
|
||||
}
|
||||
|
||||
Result GetThreadCoreMask64From32(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) {
|
||||
return GetThreadCoreMask(out_core_id, out_affinity_mask, thread_handle);
|
||||
R_RETURN(GetThreadCoreMask(out_core_id, out_affinity_mask, thread_handle));
|
||||
}
|
||||
|
||||
Result SetThreadCoreMask64From32(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) {
|
||||
return SetThreadCoreMask(thread_handle, core_id, affinity_mask);
|
||||
R_RETURN(SetThreadCoreMask(thread_handle, core_id, affinity_mask));
|
||||
}
|
||||
|
||||
Result GetThreadId64From32(uint64_t *out_thread_id, ams::svc::Handle thread_handle) {
|
||||
return GetThreadId(out_thread_id, thread_handle);
|
||||
R_RETURN(GetThreadId(out_thread_id, thread_handle));
|
||||
}
|
||||
|
||||
Result GetThreadContext364From32(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle thread_handle) {
|
||||
return GetThreadContext3(out_context, thread_handle);
|
||||
R_RETURN(GetThreadContext3(out_context, thread_handle));
|
||||
}
|
||||
|
||||
Result GetThreadList64From32(int32_t *out_num_threads, KUserPointer<uint64_t *> out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) {
|
||||
return GetThreadList(out_num_threads, out_thread_ids, max_out_count, debug_handle);
|
||||
R_RETURN(GetThreadList(out_num_threads, out_thread_ids, max_out_count, debug_handle));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -46,7 +46,7 @@ namespace ams::kern::svc {
|
|||
*out_flags |= ams::svc::LastThreadInfoFlag_ThreadInSystemCall;
|
||||
}
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result SynchronizeCurrentProcessToFutureTime(int64_t ns) {
|
||||
|
@ -68,7 +68,7 @@ namespace ams::kern::svc {
|
|||
/* Synchronize to the desired time. */
|
||||
R_TRY(wait_object->Synchronize(timeout));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetDebugFutureThreadInfo(ams::svc::LastThreadContext *out_context, uint64_t *out_thread_id, ams::svc::Handle debug_handle, int64_t ns) {
|
||||
|
@ -85,7 +85,7 @@ namespace ams::kern::svc {
|
|||
/* Get the running thread info. */
|
||||
R_TRY(debug->GetRunningThreadInfo(out_context, out_thread_id));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result LegacyGetFutureThreadInfo(ams::svc::LastThreadContext *out_context, uintptr_t *out_tls_address, uint32_t *out_flags, int64_t ns) {
|
||||
|
@ -98,7 +98,7 @@ namespace ams::kern::svc {
|
|||
/* Get the thread info. */
|
||||
R_TRY(GetLastThreadInfoImpl(out_context, out_tls_address, out_flags));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetLastThreadInfo(ams::svc::LastThreadContext *out_context, uintptr_t *out_tls_address, uint32_t *out_flags) {
|
||||
|
@ -108,7 +108,7 @@ namespace ams::kern::svc {
|
|||
/* Get the thread info. */
|
||||
R_TRY(GetLastThreadInfoImpl(out_context, out_tls_address, out_flags));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -116,16 +116,16 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result GetDebugFutureThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, uint64_t *out_thread_id, ams::svc::Handle debug_handle, int64_t ns) {
|
||||
return GetDebugFutureThreadInfo(out_context, out_thread_id, debug_handle, ns);
|
||||
R_RETURN(GetDebugFutureThreadInfo(out_context, out_thread_id, debug_handle, ns));
|
||||
}
|
||||
|
||||
Result LegacyGetFutureThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags, int64_t ns) {
|
||||
return LegacyGetFutureThreadInfo(out_context, reinterpret_cast<uintptr_t *>(out_tls_address), out_flags, ns);
|
||||
R_RETURN(LegacyGetFutureThreadInfo(out_context, reinterpret_cast<uintptr_t *>(out_tls_address), out_flags, ns));
|
||||
}
|
||||
|
||||
Result GetLastThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) {
|
||||
static_assert(sizeof(*out_tls_address) == sizeof(uintptr_t));
|
||||
return GetLastThreadInfo(out_context, reinterpret_cast<uintptr_t *>(out_tls_address), out_flags);
|
||||
R_RETURN(GetLastThreadInfo(out_context, reinterpret_cast<uintptr_t *>(out_tls_address), out_flags));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
@ -140,7 +140,7 @@ namespace ams::kern::svc {
|
|||
.lr = static_cast<u32>(context.lr),
|
||||
.pc = static_cast<u32>(context.pc),
|
||||
};
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result LegacyGetFutureThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags, int64_t ns) {
|
||||
|
@ -155,7 +155,7 @@ namespace ams::kern::svc {
|
|||
.lr = static_cast<u32>(context.lr),
|
||||
.pc = static_cast<u32>(context.pc),
|
||||
};
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result GetLastThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) {
|
||||
|
@ -170,7 +170,7 @@ namespace ams::kern::svc {
|
|||
.lr = static_cast<u32>(context.lr),
|
||||
.pc = static_cast<u32>(context.pc),
|
||||
};
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -53,7 +53,7 @@ namespace ams::kern::svc {
|
|||
R_TRY(trmem->Map(address, size, map_perm));
|
||||
|
||||
/* We succeeded. */
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result UnmapTransferMemory(ams::svc::Handle trmem_handle, uintptr_t address, size_t size) {
|
||||
|
@ -73,7 +73,7 @@ namespace ams::kern::svc {
|
|||
/* Unmap the transfer memory. */
|
||||
R_TRY(trmem->Unmap(address, size));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result CreateTransferMemory(ams::svc::Handle *out, uintptr_t address, size_t size, ams::svc::MemoryPermission map_perm) {
|
||||
|
@ -116,7 +116,7 @@ namespace ams::kern::svc {
|
|||
/* Add the transfer memory to the handle table. */
|
||||
R_TRY(handle_table.Add(out, trmem));
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -124,29 +124,29 @@ namespace ams::kern::svc {
|
|||
/* ============================= 64 ABI ============================= */
|
||||
|
||||
Result MapTransferMemory64(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission owner_perm) {
|
||||
return MapTransferMemory(trmem_handle, address, size, owner_perm);
|
||||
R_RETURN(MapTransferMemory(trmem_handle, address, size, owner_perm));
|
||||
}
|
||||
|
||||
Result UnmapTransferMemory64(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapTransferMemory(trmem_handle, address, size);
|
||||
R_RETURN(UnmapTransferMemory(trmem_handle, address, size));
|
||||
}
|
||||
|
||||
Result CreateTransferMemory64(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) {
|
||||
return CreateTransferMemory(out_handle, address, size, map_perm);
|
||||
R_RETURN(CreateTransferMemory(out_handle, address, size, map_perm));
|
||||
}
|
||||
|
||||
/* ============================= 64From32 ABI ============================= */
|
||||
|
||||
Result MapTransferMemory64From32(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission owner_perm) {
|
||||
return MapTransferMemory(trmem_handle, address, size, owner_perm);
|
||||
R_RETURN(MapTransferMemory(trmem_handle, address, size, owner_perm));
|
||||
}
|
||||
|
||||
Result UnmapTransferMemory64From32(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size) {
|
||||
return UnmapTransferMemory(trmem_handle, address, size);
|
||||
R_RETURN(UnmapTransferMemory(trmem_handle, address, size));
|
||||
}
|
||||
|
||||
Result CreateTransferMemory64From32(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) {
|
||||
return CreateTransferMemory(out_handle, address, size, map_perm);
|
||||
R_RETURN(CreateTransferMemory(out_handle, address, size, map_perm));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -97,7 +97,7 @@ namespace ams::fssrv::impl {

/* Clear the map, and ensure we remain clear if we fail after this point. */
this->ClearImpl();
auto clear_guard = SCOPE_GUARD { this->ClearImpl(); };
ON_RESULT_FAILURE { this->ClearImpl(); };

/* Add each info to the list. */
for (int i = 0; i < count; ++i) {
@@ -115,8 +115,7 @@ namespace ams::fssrv::impl {
}

/* We successfully imported the map. */
clear_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
private:
void ClearImpl() {

@@ -143,12 +143,11 @@ namespace ams::fssystem::save {
Result StoreOrDestroyBuffer(const MemoryRange &range, CacheEntry *entry) {
AMS_ASSERT(entry != nullptr);

auto buf_guard = SCOPE_GUARD { this->DestroyBuffer(entry, range); };
ON_RESULT_FAILURE { this->DestroyBuffer(entry, range); };

R_TRY(this->StoreAssociateBuffer(range, *entry));

buf_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}

Result FlushCacheEntry(CacheIndex index, bool invalidate);

@@ -325,19 +325,18 @@ namespace ams::kvdb {
/* Allocate memory for value. */
void *new_value = m_memory_resource->Allocate(value_size);
R_UNLESS(new_value != nullptr, kvdb::ResultAllocationFailed());
auto value_guard = SCOPE_GUARD { m_memory_resource->Deallocate(new_value, value_size); };

/* If we fail before adding to the index, deallocate our value. */
ON_RESULT_FAILURE { m_memory_resource->Deallocate(new_value, value_size); };

/* Read key and value. */
Key key;
R_TRY(reader.ReadEntry(std::addressof(key), sizeof(key), new_value, value_size));
R_TRY(m_index.AddUnsafe(key, new_value, value_size));

/* We succeeded, so cancel the value guard to prevent deallocation. */
value_guard.Cancel();
}
}

return ResultSuccess();
R_SUCCEED();
}

Result Save(bool destructive = false) {

@@ -98,15 +98,14 @@ namespace ams::sf::hipc {
/* Allocate session. */
ServerSession *session_memory = this->AllocateSession();
R_UNLESS(session_memory != nullptr, sf::hipc::ResultOutOfSessionMemory());
ON_RESULT_FAILURE { this->DestroySession(session_memory); };

/* Register session. */
auto register_guard = SCOPE_GUARD { this->DestroySession(session_memory); };
R_TRY(ctor(session_memory));

/* Save new session to output. */
register_guard.Cancel();
*out = session_memory;
return ResultSuccess();
R_SUCCEED();
}
void DestroySession(ServerSession *session);

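For reading these hunks it helps to keep rough equivalents of the basic macros in mind. The definitions below are simplified stand-ins written for this document only, not the real ones from the Atmosphere result headers, which carry additional bookkeeping and drive the ON_RESULT_FAILURE machinery:

/* Reading aids only -- deliberately oversimplified. */
#define R_SUCCEED()     return ResultSuccess()  /* replaces: return ResultSuccess();        */
#define R_THROW(_res)   return (_res)           /* replaces: return SomeErrorResult();      */
#define R_RETURN(_rc)   return (_rc)            /* replaces: return CallReturningAResult(); */

/* ON_RESULT_FAILURE { cleanup; }; runs its body only when the enclosing function later    */
/* exits through a failure path (R_THROW, a failed R_TRY, a failed R_UNLESS), which is why */
/* the paired SCOPE_GUARD + Cancel() calls disappear throughout this diff.                 */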
@@ -96,6 +96,8 @@ namespace ams::tipc {
};
static_assert(std::is_standard_layout<DeferrableBaseImpl>::value);

#define TIPC_REGISTER_RETRY_ON_RESULT_REQUEST_DEFERRED(KEY) ON_RESULT_INCLUDED(tipc::ResultRequestDeferred) { this->RegisterRetry(KEY); }

template<size_t _MessageBufferRequiredSize>
class DeferrableBaseImplWithBuffer : public DeferrableBaseImpl {
private:

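The new TIPC_REGISTER_RETRY_ON_RESULT_REQUEST_DEFERRED macro builds on ON_RESULT_INCLUDED, which, by analogy with ON_RESULT_FAILURE, attaches a handler that runs only when the function exits with the named result. A hedged sketch of how a handler class might use it; the class, RegisterRetry and ProcessRequestImpl are illustrative, not taken from the commit:

class MyDeferrableHandler {
    public:
        Result ProcessRequest(u64 key) {
            /* If ProcessRequestImpl() exits with tipc::ResultRequestDeferred, remember the */
            /* key so the request can be retried later; other results pass through as-is.   */
            TIPC_REGISTER_RETRY_ON_RESULT_REQUEST_DEFERRED(key);

            R_RETURN(this->ProcessRequestImpl(key));
        }
    private:
        void RegisterRetry(u64 key);          /* what the macro expects to find on the class */
        Result ProcessRequestImpl(u64 key);
};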
@@ -145,38 +145,44 @@ namespace ams::tipc {
/* Get the method id. */
const auto method_id = svc::ipc::MessageBuffer::MessageHeader(message_buffer).GetTag();

/* Ensure that we clean up any handles that get sent our way. */
auto handle_guard = SCOPE_GUARD {
const svc::ipc::MessageBuffer::MessageHeader message_header(message_buffer);
const svc::ipc::MessageBuffer::SpecialHeader special_header(message_buffer, message_header);
/* Process for the method id. */
{
/* Ensure that if we fail, we clean up any handles that get sent our way. */
ON_RESULT_FAILURE {
const svc::ipc::MessageBuffer::MessageHeader message_header(message_buffer);
const svc::ipc::MessageBuffer::SpecialHeader special_header(message_buffer, message_header);

/* Determine the offset to the start of handles. */
auto offset = message_buffer.GetSpecialDataIndex(message_header, special_header);
if (special_header.GetHasProcessId()) {
offset += sizeof(u64) / sizeof(u32);
}
/* Determine the offset to the start of handles. */
auto offset = message_buffer.GetSpecialDataIndex(message_header, special_header);
if (special_header.GetHasProcessId()) {
offset += sizeof(u64) / sizeof(u32);
}

/* Close all copy handles. */
for (auto i = 0; i < special_header.GetCopyHandleCount(); ++i) {
svc::CloseHandle(message_buffer.GetHandle(offset));
offset += sizeof(ams::svc::Handle) / sizeof(u32);
}
};
/* Close all copy handles. */
for (auto i = 0; i < special_header.GetCopyHandleCount(); ++i) {
svc::CloseHandle(message_buffer.GetHandle(offset));
offset += sizeof(ams::svc::Handle) / sizeof(u32);
}
};

/* Check that the method id is valid. */
R_UNLESS(method_id != MethodId_Invalid, tipc::ResultInvalidMethod());
/* Check that the method id is valid. */
R_UNLESS(method_id != MethodId_Invalid, tipc::ResultInvalidMethod());

/* If we're closing the object, do so. */
if (method_id == MethodId_CloseSession) {
/* Validate the command format. */
{
/* Process the request. */
if (method_id != MethodId_CloseSession) {
/* Process the generic method for the object. */
R_TRY(object.GetObject()->ProcessRequest());
} else {
/* Validate that the close request is of valid format. */
using CloseSessionCommandMeta = impl::CommandMetaInfo<MethodId_CloseSession, std::tuple<>>;
using CloseSessionProcessor = impl::CommandProcessor<CloseSessionCommandMeta>;

/* Validate that the command is valid. */
R_TRY(CloseSessionProcessor::ValidateCommandFormat(message_buffer));
}
}

/* If we were asked to close the object, do so. */
if (method_id == MethodId_CloseSession) {
/* Get the object handle. */
const auto handle = object.GetHandle();

@@ -187,17 +193,12 @@ namespace ams::tipc {
/* NOTE: Nintendo does not check that this succeeds. */
R_ABORT_UNLESS(svc::CloseHandle(handle));

/* Return result to signify we closed the object (and don't close input handles). */
handle_guard.Cancel();
return tipc::ResultSessionClosed();
/* Return an error to signify we closed the object. */
R_THROW(tipc::ResultSessionClosed());
}

/* Process the generic method for the object. */
R_TRY(object.GetObject()->ProcessRequest());

/* We successfully processed, so we don't need to clean up handles. */
handle_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
};

@ -202,7 +202,7 @@ namespace ams::ncm {
|
|||
/* Obtain a reader and get the storage id. */
|
||||
const auto reader = content_meta.GetReader();
|
||||
const auto storage_id = reader.GetStorageId();
|
||||
R_SUCCEED_IF(storage_id== StorageId::None);
|
||||
R_SUCCEED_IF(storage_id == StorageId::None);
|
||||
|
||||
/* Open the relevant content storage. */
|
||||
ContentStorage content_storage;
|
||||
|
@ -340,10 +340,10 @@ namespace ams::ncm {
|
|||
InstallContentMeta content_meta;
|
||||
R_TRY(m_data->Get(std::addressof(content_meta), i));
|
||||
|
||||
/* Update the data (and check result) when we are done. */
|
||||
const auto DoUpdate = [&]() ALWAYS_INLINE_LAMBDA { return m_data->Update(content_meta, i); };
|
||||
/* Write all prepared content infos. */
|
||||
{
|
||||
auto update_guard = SCOPE_GUARD { DoUpdate(); };
|
||||
/* If we fail while writing, update (but don't check the result). */
|
||||
ON_RESULT_FAILURE { m_data->Update(content_meta, i); };
|
||||
|
||||
/* Create a writer. */
|
||||
const auto writer = content_meta.GetWriter();
|
||||
|
@ -358,11 +358,10 @@ namespace ams::ncm {
|
|||
content_info->install_state = InstallState::Installed;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cancel so we can check the result of updating. */
|
||||
update_guard.Cancel();
|
||||
}
|
||||
R_TRY(DoUpdate());
|
||||
|
||||
/* Update our data. */
|
||||
R_TRY(m_data->Update(content_meta, i));
|
||||
}
|
||||
|
||||
/* Execution has finished, signal this and update the state. */
|
||||
|
@ -370,7 +369,7 @@ namespace ams::ncm {
|
|||
|
||||
this->SetProgressState(InstallProgressState::Downloaded);
|
||||
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result InstallTaskBase::PrepareAndExecute() {
|
||||
|
@ -446,13 +445,10 @@ namespace ams::ncm {
|
|||
continue;
|
||||
}
|
||||
|
||||
/* Helper for performing an update. */
|
||||
const auto DoUpdate = [&]() ALWAYS_INLINE_LAMBDA { return m_data->Update(content_meta, i); };
|
||||
|
||||
/* Commit the current meta. */
|
||||
{
|
||||
/* Ensure that if something goes wrong during commit, we still try to update. */
|
||||
auto update_guard = SCOPE_GUARD { DoUpdate(); };
|
||||
ON_RESULT_FAILURE { m_data->Update(content_meta, i); };
|
||||
|
||||
/* Open a writer. */
|
||||
const auto writer = content_meta.GetWriter();
|
||||
|
@ -488,13 +484,10 @@ namespace ams::ncm {
|
|||
|
||||
/* Mark storage id to be committed later. */
|
||||
commit_list.Push(reader.GetStorageId());
|
||||
|
||||
/* We successfully commited this meta, so we want to check for errors when updating. */
|
||||
update_guard.Cancel();
|
||||
}
|
||||
|
||||
/* Try to update, checking for failure. */
|
||||
R_TRY(DoUpdate());
|
||||
/* Try to update our data. */
|
||||
R_TRY(m_data->Update(content_meta, i));
|
||||
}
|
||||
|
||||
/* Commit all applicable content meta databases. */
|
||||
|
@ -513,10 +506,9 @@ namespace ams::ncm {
|
|||
}
|
||||
|
||||
Result InstallTaskBase::Commit(const StorageContentMetaKey *keys, s32 num_keys) {
|
||||
auto fatal_guard = SCOPE_GUARD { SetProgressState(InstallProgressState::Fatal); };
|
||||
R_TRY(this->SetLastResultOnFailure(this->CommitImpl(keys, num_keys)));
|
||||
fatal_guard.Cancel();
|
||||
return ResultSuccess();
|
||||
ON_RESULT_FAILURE { this->SetProgressState(InstallProgressState::Fatal); };
|
||||
|
||||
R_RETURN(this->SetLastResultOnFailure(this->CommitImpl(keys, num_keys)));
|
||||
}
|
||||
|
||||
Result InstallTaskBase::IncludesExFatDriver(bool *out) {
|
||||
|
@ -642,9 +634,8 @@ namespace ams::ncm {
|
|||
R_TRY(m_data->Get(std::addressof(content_meta), i));
|
||||
|
||||
/* Update the data (and check result) when we are done. */
|
||||
const auto DoUpdate = [&]() ALWAYS_INLINE_LAMBDA { return m_data->Update(content_meta, i); };
|
||||
{
|
||||
auto update_guard = SCOPE_GUARD { DoUpdate(); };
|
||||
ON_RESULT_FAILURE { m_data->Update(content_meta, i); };
|
||||
|
||||
/* Automatically choose a suitable storage id. */
|
||||
auto reader = content_meta.GetReader();
|
||||
|
@ -705,15 +696,14 @@ namespace ams::ncm {
|
|||
/* Add the size of this content info to the total size. */
|
||||
total_size += content_info->GetSize();
|
||||
}
|
||||
|
||||
/* Cancel so that we can check the result of updating. */
|
||||
update_guard.Cancel();
|
||||
}
|
||||
R_TRY(DoUpdate());
|
||||
|
||||
/* Try to update our data. */
|
||||
R_TRY(m_data->Update(content_meta, i));
|
||||
}
|
||||
|
||||
this->SetTotalSize(total_size);
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result InstallTaskBase::WriteContentMetaToPlaceHolder(InstallContentInfo *out_install_content_info, ContentStorage *storage, const InstallContentMetaInfo &meta_info, util::optional<bool> is_temporary) {
|
||||
|
@ -722,7 +712,7 @@ namespace ams::ncm {
|
|||
|
||||
/* Create the placeholder. */
|
||||
R_TRY(storage->CreatePlaceHolder(placeholder_id, meta_info.content_id, meta_info.content_size));
|
||||
auto placeholder_guard = SCOPE_GUARD { storage->DeletePlaceHolder(placeholder_id); };
|
||||
ON_RESULT_FAILURE { storage->DeletePlaceHolder(placeholder_id); }
|
||||
|
||||
/* Output install content info. */
|
||||
*out_install_content_info = this->MakeInstallContentInfoFrom(meta_info, placeholder_id, is_temporary);
|
||||
|
@ -731,9 +721,8 @@ namespace ams::ncm {
|
|||
R_TRY(this->WritePlaceHolder(meta_info.key, out_install_content_info));
|
||||
|
||||
/* Don't delete the placeholder. Set state to installed. */
|
||||
placeholder_guard.Cancel();
|
||||
out_install_content_info->install_state = InstallState::Installed;
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result InstallTaskBase::PrepareContentMeta(const InstallContentMetaInfo &meta_info, util::optional<ContentMetaKey> expected_key, util::optional<u32> source_version) {
|
||||
|
@ -749,10 +738,11 @@ namespace ams::ncm {
|
|||
Path path;
|
||||
content_storage.GetPlaceHolderPath(std::addressof(path), content_info.GetPlaceHolderId());
|
||||
|
||||
const bool is_temporary = content_info.is_temporary;
|
||||
auto temporary_guard = SCOPE_GUARD { content_storage.DeletePlaceHolder(content_info.GetPlaceHolderId()); };
|
||||
/* If we fail, delete the placeholder. */
|
||||
ON_RESULT_FAILURE { content_storage.DeletePlaceHolder(content_info.GetPlaceHolderId()); };
|
||||
|
||||
/* Create a new temporary InstallContentInfo if relevant. */
|
||||
const bool is_temporary = content_info.is_temporary;
|
||||
if (is_temporary) {
|
||||
content_info = {
|
||||
.digest = content_info.digest,
|
||||
|
@ -782,11 +772,12 @@ namespace ams::ncm {
|
|||
/* Push the data. */
|
||||
R_TRY(m_data->Push(meta.Get(), meta.GetSize()));
|
||||
|
||||
/* Don't delete the placeholder if not temporary. */
|
||||
if (!is_temporary) {
|
||||
temporary_guard.Cancel();
|
||||
/* If the placeholder is temporary, delete it. */
|
||||
if (is_temporary) {
|
||||
content_storage.DeletePlaceHolder(content_info.GetPlaceHolderId());
|
||||
}
|
||||
return ResultSuccess();
|
||||
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result InstallTaskBase::PrepareContentMeta(ContentId content_id, s64 size, ContentMetaType meta_type, AutoBuffer *buffer) {
|
||||
|
@ -815,7 +806,7 @@ namespace ams::ncm {
|
|||
|
||||
Result InstallTaskBase::PrepareSystemUpdateDependency() {
|
||||
/* Cleanup on failure. */
|
||||
auto guard = SCOPE_GUARD { this->Cleanup(); };
|
||||
ON_RESULT_FAILURE { this->Cleanup(); };
|
||||
|
||||
/* Count the number of content meta entries. */
|
||||
s32 count;
|
||||
|
@ -852,8 +843,7 @@ namespace ams::ncm {
|
|||
}
|
||||
}
|
||||
|
||||
guard.Cancel();
|
||||
return ResultSuccess();
|
||||
R_SUCCEED();
|
||||
}
|
||||
|
||||
Result InstallTaskBase::GetSystemUpdateTaskApplyInfo(SystemUpdateTaskApplyInfo *out) {
|
||||
|
|
Some files were not shown because too many files have changed in this diff.