From 34fb48b4129c630ab2242bdf6dd60b4c5df0c1a5 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Sat, 22 Feb 2020 05:42:46 -0800
Subject: [PATCH] kern: mem access prep for svc streams, TODO_IMPLEMENT -> UNIMPLEMENTED

---
 .../arm64/kern_userspace_memory_access.hpp        |   1 +
 .../include/mesosphere/kern_panic.hpp             |   2 +-
 .../source/arch/arm64/kern_k_page_table.cpp       |   2 +-
 .../arm64/kern_k_supervisor_page_table.cpp        |   2 +-
 .../arm64/kern_userspace_memory_access_asm.s      | 380 +++++++++++++++++-
 .../nintendo/nx/kern_k_system_control.cpp         |   2 +-
 .../source/kern_k_page_table_base.cpp             |  10 +-
 libraries/libmesosphere/source/kern_k_process.cpp |   6 +-
 .../source/kern_k_synchronization.cpp             |   2 +-
 libraries/libmesosphere/source/kern_k_thread.cpp  |  12 +-
 .../source/kern_k_wait_object.cpp                 |   2 +-
 11 files changed, 396 insertions(+), 25 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp
index 2fe110342..db67b0acb 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp
@@ -36,6 +36,7 @@ namespace ams::kern::arch::arm64 {
 
             static bool ClearMemory(void *dst, size_t size);
             static bool ClearMemoryAligned32Bit(void *dst, size_t size);
+            static bool ClearMemoryAligned64Bit(void *dst, size_t size);
             static bool ClearMemorySize32Bit(void *dst);
 
             static bool StoreDataCache(uintptr_t start, uintptr_t end);
diff --git a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp
index fb6252124..3df06b178 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp
@@ -66,7 +66,7 @@ namespace ams::kern {
 #endif
 
 #define MESOSPHERE_TODO(arg) ({ constexpr const char *__mesosphere_todo = arg; static_cast<void>(__mesosphere_todo); MESOSPHERE_PANIC("TODO (%s): %s\n", __PRETTY_FUNCTION__, __mesosphere_todo); })
-#define MESOSPHERE_TODO_IMPLEMENT() MESOSPHERE_TODO("Implement")
+#define MESOSPHERE_UNIMPLEMENTED() MESOSPHERE_PANIC("%s: Unimplemented\n", __PRETTY_FUNCTION__)
 
 #define MESOSPHERE_ABORT() MESOSPHERE_PANIC("Abort()\n");
 #define MESOSPHERE_INIT_ABORT() do { /* ... */ } while (true)
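Note the semantics of the replacement above: MESOSPHERE_UNIMPLEMENTED() panics directly with the enclosing function's signature, so each stub identifies itself in panic output without a per-call-site string. A standalone sketch of the same pattern (the SKETCH_* names are illustrative stand-ins; the real MESOSPHERE_PANIC records far more state than a printf):

    #include <cstdio>
    #include <cstdlib>

    /* Illustrative stand-ins for the kernel macros. */
    #define SKETCH_PANIC(...) do { std::fprintf(stderr, __VA_ARGS__); std::abort(); } while (false)
    #define SKETCH_UNIMPLEMENTED() SKETCH_PANIC("%s: Unimplemented\n", __PRETTY_FUNCTION__)

    void Finalize() {
        SKETCH_UNIMPLEMENTED(); /* prints "void Finalize(): Unimplemented", then aborts */
    }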
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index bc9e8fa81..db1c1e27b 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -212,7 +212,7 @@ namespace ams::kern::arch::arm64 {
     }
 
     Result KPageTable::Finalize() {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp
index d8a75c7dd..846766e5d 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp
@@ -40,6 +40,6 @@ namespace ams::kern::arch::arm64 {
     }
 
     void KSupervisorPageTable::Finalize(s32 core_id) {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 }
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s
index 6dd53045f..d210466c9 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s
+++ b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s
@@ -18,15 +18,231 @@
 .section .text._ZN3ams4kern4arch5arm6432UserspaceAccessFunctionAreaBeginEv, "ax", %progbits
 .global _ZN3ams4kern4arch5arm6432UserspaceAccessFunctionAreaBeginEv
 .type _ZN3ams4kern4arch5arm6432UserspaceAccessFunctionAreaBeginEv, %function
+.balign 0x10
 _ZN3ams4kern4arch5arm6432UserspaceAccessFunctionAreaBeginEv:
     /* NOTE: This is not a real function, and only exists as a label for safety. */
 
 /* ================ All Userspace Access Functions after this line. ================ */
 
+/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUser(void *dst, const void *src, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18CopyMemoryFromUserEPvPKvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyMemoryFromUserEPvPKvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyMemoryFromUserEPvPKvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess18CopyMemoryFromUserEPvPKvm:
+    /* Check if there's anything to copy. */
+    cmp     x2, #0
+    b.eq    2f
+
+    /* Keep track of the last address. */
+    add     x3, x1, x2
+
+1:  /* We're copying memory byte-by-byte. */
+    ldtrb   w2, [x1]
+    strb    w2, [x0], #1
+    add     x1, x1, #1
+    cmp     x1, x3
+    b.ne    1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
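Everything between the area-begin and area-end labels reads user memory through ldtr/ldtrb, AArch64's unprivileged load instructions: although the code runs at EL1, each access is checked against EL0 permissions, so a bad user pointer faults rather than reading memory only the kernel may see. The begin/end labels exist so the synchronous-exception handler can recognize a fault whose PC lies inside this area and unwind it as a failed access rather than a kernel crash, which is why every routine reports success in x0. A hedged sketch of a caller (ReadUserArgument and the error mapping are illustrative, not the kernel's actual API):

    /* Hypothetical SVC-side caller: copy an argument block out of user memory,
       turning a faulted copy into an error code instead of a panic. */
    template<typename T>
    Result ReadUserArgument(T *out, const T *user_src) {
        if (!UserspaceAccess::CopyMemoryFromUser(out, user_src, sizeof(*out))) {
            return svc::ResultInvalidCurrentMemory(); /* assumed error mapping */
        }
        return ResultSuccess();
    }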
+/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm:
+    /* Check if there are 0x40 bytes to copy */
+    cmp     x2, #0x3F
+    b.ls    1f
+    ldtr    x4, [x1, #0x00]
+    ldtr    x5, [x1, #0x08]
+    ldtr    x6, [x1, #0x10]
+    ldtr    x7, [x1, #0x18]
+    ldtr    x8, [x1, #0x20]
+    ldtr    x9, [x1, #0x28]
+    ldtr    x10, [x1, #0x30]
+    ldtr    x11, [x1, #0x38]
+    stp     x4, x5, [x0, #0x00]
+    stp     x6, x7, [x0, #0x10]
+    stp     x8, x9, [x0, #0x20]
+    stp     x10, x11, [x0, #0x30]
+    add     x0, x0, #0x40
+    add     x1, x1, #0x40
+    sub     x2, x2, #0x40
+    b       _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned32BitEPvPKvm
+
+1:  /* We have less than 0x40 bytes to copy. */
+    cmp     x2, #0
+    b.eq    2f
+    ldtr    w4, [x1]
+    str     w4, [x0], #4
+    add     x1, x1, #4
+    sub     x2, x2, #4
+    b       1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm:
+    /* Check if there are 0x40 bytes to copy */
+    cmp     x2, #0x3F
+    b.ls    1f
+    ldtr    x4, [x1, #0x00]
+    ldtr    x5, [x1, #0x08]
+    ldtr    x6, [x1, #0x10]
+    ldtr    x7, [x1, #0x18]
+    ldtr    x8, [x1, #0x20]
+    ldtr    x9, [x1, #0x28]
+    ldtr    x10, [x1, #0x30]
+    ldtr    x11, [x1, #0x38]
+    stp     x4, x5, [x0, #0x00]
+    stp     x6, x7, [x0, #0x10]
+    stp     x8, x9, [x0, #0x20]
+    stp     x10, x11, [x0, #0x30]
+    add     x0, x0, #0x40
+    add     x1, x1, #0x40
+    sub     x2, x2, #0x40
+    b       _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm
+
+1:  /* We have less than 0x40 bytes to copy. */
+    cmp     x2, #0
+    b.eq    2f
+    ldtr    x4, [x1]
+    str     x4, [x0], #8
+    add     x1, x1, #8
+    sub     x2, x2, #8
+    b       1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize32Bit(void *dst, const void *src) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv:
+    /* Just load and store a u32. */
+    ldtr    w2, [x1]
+    str     w2, [x0]
+
+    /* We're done. */
+    mov     x0, #1
+    ret
+
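The Aligned32Bit/Aligned64Bit suffixes describe the granularity the caller guarantees for the buffer and size, not the width of the bulk loop: both variants move 0x40 bytes per iteration through eight registers and differ only in whether the tail drains in 4- or 8-byte units. The control flow, restated as self-contained C++ (a model only; it cannot reproduce the unprivileged-access semantics of ldtr):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    /* Mirrors CopyMemoryFromUserAligned64Bit: bulk-copy 0x40-byte blocks,
       then drain the tail one 8-byte unit at a time. The tail loop exits
       only when size reaches exactly 0, so size must be a multiple of 8. */
    bool CopyAligned64BitModel(void *dst, const void *src, std::size_t size) {
        auto       *d = static_cast<std::uint8_t *>(dst);
        const auto *s = static_cast<const std::uint8_t *>(src);
        while (size > 0x3F) {           /* cmp x2, #0x3F; b.ls 1f */
            std::memcpy(d, s, 0x40);    /* eight ldtr loads, four stp stores */
            d += 0x40; s += 0x40; size -= 0x40;
        }
        while (size != 0) {             /* 1: tail loop */
            std::memcpy(d, s, 8);
            d += 8; s += 8; size -= 8;
        }
        return true;                    /* mov x0, #1; ret */
    }

The byte-wise CopyMemoryFromUser above remains the fallback for arbitrary alignment; the unrolled variants trade that generality for one branch per 64 bytes.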
+/* ams::kern::arch::arm64::UserspaceAccess::CopyStringFromUser(void *dst, const void *src, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm:
+    /* Check if there's anything to copy. */
+    cmp     x2, #0
+    b.eq    3f
+
+    /* Keep track of the start address and last address. */
+    mov     x4, x1
+    add     x3, x1, x2
+
+1:  /* We're copying memory byte-by-byte. */
+    ldtrb   w2, [x1]
+    strb    w2, [x0], #1
+    add     x1, x1, #1
+
+    /* If we read a null terminator, we're done. */
+    cmp     w2, #0
+    b.eq    2f
+
+    /* Check if we're done. */
+    cmp     x1, x3
+    b.ne    1b
+
+2:  /* We're done, and we copied some amount of data from the string. */
+    sub     x0, x1, x4
+    ret
+
+3:  /* We're done, and there was no string data. */
+    mov     x0, #0
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryToUser(void *dst, const void *src, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess16CopyMemoryToUserEPvPKvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess16CopyMemoryToUserEPvPKvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess16CopyMemoryToUserEPvPKvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess16CopyMemoryToUserEPvPKvm:
+    /* Check if there's anything to copy. */
+    cmp     x2, #0
+    b.eq    2f
+
+    /* Keep track of the last address. */
+    add     x3, x1, x2
+
+1:  /* We're copying memory byte-by-byte. */
+    ldrb    w2, [x1], #1
+    sttrb   w2, [x0]
+    add     x0, x0, #1
+    cmp     x1, x3
+    b.ne    1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryToUserAligned32Bit(void *dst, const void *src, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm:
+    /* Check if there are 0x40 bytes to copy */
+    cmp     x2, #0x3F
+    b.ls    1f
+    ldp     x4, x5, [x1, #0x00]
+    ldp     x6, x7, [x1, #0x10]
+    ldp     x8, x9, [x1, #0x20]
+    ldp     x10, x11, [x1, #0x30]
+    sttr    x4, [x0, #0x00]
+    sttr    x5, [x0, #0x08]
+    sttr    x6, [x0, #0x10]
+    sttr    x7, [x0, #0x18]
+    sttr    x8, [x0, #0x20]
+    sttr    x9, [x0, #0x28]
+    sttr    x10, [x0, #0x30]
+    sttr    x11, [x0, #0x38]
+    add     x0, x0, #0x40
+    add     x1, x1, #0x40
+    sub     x2, x2, #0x40
+    b       _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned32BitEPvPKvm
+
+1:  /* We have less than 0x40 bytes to copy. */
+    cmp     x2, #0
+    b.eq    2f
+    ldr     w4, [x1], #4
+    sttr    w4, [x0]
+    add     x0, x0, #4
+    sub     x2, x2, #4
+    b       1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
 /* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryToUserAligned64Bit(void *dst, const void *src, size_t size) */
 .section .text._ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm, "ax", %progbits
 .global _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm
 .type _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm, %function
+.balign 0x10
 _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm:
     /* Check if there are 0x40 bytes to copy */
     cmp     x2, #0x3F
@@ -49,22 +265,174 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm:
     b       _ZN3ams4kern4arch5arm6415UserspaceAccess28CopyMemoryToUserAligned64BitEPvPKvm
 
 1:  /* We have less than 0x40 bytes to copy. */
-    cmp     x2, #0x0
+    cmp     x2, #0
     b.eq    2f
-    ldr     x4, [x1], #0x8
+    ldr     x4, [x1], #8
     sttr    x4, [x0]
-    add     x0, x0, #0x8
-    sub     x2, x2, #0x8
+    add     x0, x0, #8
+    sub     x2, x2, #8
     b       1b
 
 2:  /* We're done. */
     mov     x0, #1
     ret
 
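Unlike the fixed-size copies, the string routines return a byte count rather than a success flag: the count includes the null terminator when one is found within the buffer, and is 0 when the size was 0, so a caller can distinguish a terminated copy from a truncated one by inspecting the last byte written. A self-contained model of that contract:

    #include <cstddef>

    /* Model of CopyStringFromUser's return value (again without the
       unprivileged-load semantics). Returns bytes copied, terminator
       included if found; dst[ret - 1] == '\0' iff the string fit. */
    std::size_t CopyStringModel(char *dst, const char *src, std::size_t size) {
        if (size == 0) {
            return 0;               /* 3: no string data */
        }
        std::size_t i = 0;
        do {
            dst[i] = src[i];        /* byte-by-byte, as in the asm loop */
        } while (dst[i++] != '\0' && i < size);
        return i;                   /* 2: sub x0, x1, x4 */
    }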
+/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryToUserSize32Bit(void *dst, const void *src) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess25CopyMemoryToUserSize32BitEPvPKv, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess25CopyMemoryToUserSize32BitEPvPKv
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess25CopyMemoryToUserSize32BitEPvPKv, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess25CopyMemoryToUserSize32BitEPvPKv:
+    /* Just load and store a u32. */
+    ldr     w2, [x1]
+    sttr    w2, [x0]
+
+    /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::CopyStringToUser(void *dst, const void *src, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess16CopyStringToUserEPvPKvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess16CopyStringToUserEPvPKvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess16CopyStringToUserEPvPKvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess16CopyStringToUserEPvPKvm:
+    /* Check if there's anything to copy. */
+    cmp     x2, #0
+    b.eq    3f
+
+    /* Keep track of the start address and last address. */
+    mov     x4, x1
+    add     x3, x1, x2
+
+1:  /* We're copying memory byte-by-byte. */
+    ldrb    w2, [x1], #1
+    sttrb   w2, [x0]
+    add     x0, x0, #1
+
+    /* If we read a null terminator, we're done. */
+    cmp     w2, #0
+    b.eq    2f
+
+    /* Check if we're done. */
+    cmp     x1, x3
+    b.ne    1b
+
+2:  /* We're done, and we copied some amount of data from the string. */
+    sub     x0, x1, x4
+    ret
+
+3:  /* We're done, and there was no string data. */
+    mov     x0, #0
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::ClearMemory(void *dst, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess11ClearMemoryEPvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess11ClearMemoryEPvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess11ClearMemoryEPvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess11ClearMemoryEPvm:
+    /* Check if there's anything to clear. */
+    cmp     x1, #0
+    b.eq    2f
+
+    /* Keep track of the last address. */
+    add     x2, x0, x1
+
+1:  /* We're clearing memory byte-by-byte. */
+    sttrb   wzr, [x0]
+    add     x0, x0, #1
+    cmp     x0, x2
+    b.ne    1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::ClearMemoryAligned32Bit(void *dst, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm:
+    /* Check if there are 0x40 bytes to clear. */
+    cmp     x1, #0x3F
+    b.ls    1f
+    sttr    xzr, [x0, #0x00]
+    sttr    xzr, [x0, #0x08]
+    sttr    xzr, [x0, #0x10]
+    sttr    xzr, [x0, #0x18]
+    sttr    xzr, [x0, #0x20]
+    sttr    xzr, [x0, #0x28]
+    sttr    xzr, [x0, #0x30]
+    sttr    xzr, [x0, #0x38]
+    add     x0, x0, #0x40
+    sub     x1, x1, #0x40
+    b       _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned32BitEPvm
+
+1:  /* We have less than 0x40 bytes to clear. */
+    cmp     x1, #0
+    b.eq    2f
+    sttr    wzr, [x0]
+    add     x0, x0, #4
+    sub     x1, x1, #4
+    b       1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
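Three clear variants exist for the same reason the copies are split: the widest store a routine may use is bounded by what the caller can guarantee about alignment and size granularity. A hypothetical dispatcher makes the selection explicit (ClearUserMemory is illustrative; the kernel's callers would pick a variant statically at each call site):

    bool ClearUserMemory(void *dst, std::size_t size) {
        const std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(dst);
        if ((addr | size) % 8 == 0) {        /* both 8-byte multiples */
            return UserspaceAccess::ClearMemoryAligned64Bit(dst, size); /* 8-byte sttr xzr */
        } else if ((addr | size) % 4 == 0) { /* both 4-byte multiples */
            return UserspaceAccess::ClearMemoryAligned32Bit(dst, size); /* 4-byte sttr wzr */
        } else {
            return UserspaceAccess::ClearMemory(dst, size);             /* byte-wise sttrb */
        }
    }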
+/* ams::kern::arch::arm64::UserspaceAccess::ClearMemoryAligned64Bit(void *dst, size_t size) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm:
+    /* Check if there are 0x40 bytes to clear. */
+    cmp     x1, #0x3F
+    b.ls    1f
+    sttr    xzr, [x0, #0x00]
+    sttr    xzr, [x0, #0x08]
+    sttr    xzr, [x0, #0x10]
+    sttr    xzr, [x0, #0x18]
+    sttr    xzr, [x0, #0x20]
+    sttr    xzr, [x0, #0x28]
+    sttr    xzr, [x0, #0x30]
+    sttr    xzr, [x0, #0x38]
+    add     x0, x0, #0x40
+    sub     x1, x1, #0x40
+    b       _ZN3ams4kern4arch5arm6415UserspaceAccess23ClearMemoryAligned64BitEPvm
+
+1:  /* We have less than 0x40 bytes to clear. */
+    cmp     x1, #0
+    b.eq    2f
+    sttr    xzr, [x0]
+    add     x0, x0, #8
+    sub     x1, x1, #8
+    b       1b
+
+2:  /* We're done. */
+    mov     x0, #1
+    ret
+
+/* ams::kern::arch::arm64::UserspaceAccess::ClearMemorySize32Bit(void *dst) */
+.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv, "ax", %progbits
+.global _ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv
+.type _ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv:
+    /* Just store a zero. */
+    sttr    wzr, [x0]
+
+    /* We're done. */
+    mov     x0, #1
+    ret
+
 /* ams::kern::arch::arm64::UserspaceAccess::StoreDataCache(uintptr_t start, uintptr_t end) */
 .section .text._ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm, "ax", %progbits
 .global _ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm
 .type _ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm, %function
+.balign 0x10
 _ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm:
     /* Check if we have any work to do. */
     cmp     x1, x0
@@ -84,6 +452,7 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm:
 .section .text._ZN3ams4kern4arch5arm6415UserspaceAccess14FlushDataCacheEmm, "ax", %progbits
 .global _ZN3ams4kern4arch5arm6415UserspaceAccess14FlushDataCacheEmm
 .type _ZN3ams4kern4arch5arm6415UserspaceAccess14FlushDataCacheEmm, %function
+.balign 0x10
 _ZN3ams4kern4arch5arm6415UserspaceAccess14FlushDataCacheEmm:
     /* Check if we have any work to do. */
     cmp     x1, x0
@@ -103,6 +472,7 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess14FlushDataCacheEmm:
 .section .text._ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm, "ax", %progbits
 .global _ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm
 .type _ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm, %function
+.balign 0x10
 _ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm:
     /* Check if we have any work to do. */
     cmp     x1, x0
@@ -122,6 +492,7 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm:
 .section .text._ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm, "ax", %progbits
 .global _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm
 .type _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm, %function
+.balign 0x10
 _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm:
     /* Check if we have any work to do. */
     cmp     x1, x0
@@ -143,5 +514,6 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm:
 .section .text._ZN3ams4kern4arch5arm6430UserspaceAccessFunctionAreaEndEv, "ax", %progbits
 .global _ZN3ams4kern4arch5arm6430UserspaceAccessFunctionAreaEndEv
 .type _ZN3ams4kern4arch5arm6430UserspaceAccessFunctionAreaEndEv, %function
+.balign 0x10
 _ZN3ams4kern4arch5arm6430UserspaceAccessFunctionAreaEndEv:
     /* NOTE: This is not a real function, and only exists as a label for safety. */
\ No newline at end of file
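The cache-maintenance routines gain only 16-byte alignment padding in this change; their loop bodies sit outside the hunks' context lines. For orientation, the conventional AArch64 pattern such a routine follows, sketched under the assumption of a fixed 64-byte line (a real implementation would derive the line size from CTR_EL0):

    #include <cstdint>

    /* Model of a clean-by-VA walk such as StoreDataCache presumably performs:
       "dc cvac" cleans one data-cache line to the point of coherency. */
    void StoreDataCacheModel(std::uintptr_t start, std::uintptr_t end) {
        constexpr std::uintptr_t LineSize = 64; /* assumed; read CTR_EL0 in real code */
        for (std::uintptr_t cur = start & ~(LineSize - 1); cur < end; cur += LineSize) {
            __asm__ __volatile__("dc cvac, %0" :: "r"(cur) : "memory");
        }
        __asm__ __volatile__("dsb sy" ::: "memory"); /* wait for maintenance to complete */
    }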
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
index 4a2aff49d..790022ffc 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
@@ -291,7 +291,7 @@ namespace ams::kern::board::nintendo::nx {
     }
 
     void KSystemControl::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     /* Randomness. */
diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index 24aeb1c44..4b4eb1d3d 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -686,7 +686,7 @@ namespace ams::kern {
     }
 
     Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
@@ -750,7 +750,7 @@ namespace ams::kern {
     }
 
     Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     Result KPageTableBase::SetMaxHeapSize(size_t size) {
@@ -867,11 +867,11 @@ namespace ams::kern {
     }
 
     Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@@ -915,7 +915,7 @@ namespace ams::kern {
     }
 
     Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp
index 9d45c0d35..892e701a4 100644
--- a/libraries/libmesosphere/source/kern_k_process.cpp
+++ b/libraries/libmesosphere/source/kern_k_process.cpp
@@ -26,7 +26,7 @@ namespace ams::kern {
     }
 
     void KProcess::Finalize() {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
    }
 
     Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params) {
@@ -153,7 +153,7 @@ namespace ams::kern {
     }
 
     void KProcess::DoWorkerTask() {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     Result KProcess::CreateThreadLocalRegion(KProcessAddress *out) {
@@ -370,7 +370,7 @@ namespace ams::kern {
     }
 
     void KProcess::SetPreemptionState() {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
 }
diff --git a/libraries/libmesosphere/source/kern_k_synchronization.cpp b/libraries/libmesosphere/source/kern_k_synchronization.cpp
index 6bd768eb6..46817e035 100644
--- a/libraries/libmesosphere/source/kern_k_synchronization.cpp
+++ b/libraries/libmesosphere/source/kern_k_synchronization.cpp
@@ -20,7 +20,7 @@ namespace ams::kern {
     Result KSynchronization::Wait(s32 *out_index, KSynchronizationObject **objects, const s32 num_objects, s64 timeout) {
         MESOSPHERE_ASSERT_THIS();
 
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     void KSynchronization::OnAvailable(KSynchronizationObject *object) {
diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp
index 57218ae56..2fb52b283 100644
--- a/libraries/libmesosphere/source/kern_k_thread.cpp
+++ b/libraries/libmesosphere/source/kern_k_thread.cpp
@@ -28,11 +28,9 @@ namespace ams::kern {
             const uintptr_t stack_bottom = stack_top - PageSize;
 
             KPhysicalAddress stack_paddr = Null<KPhysicalAddress>;
-            MESOSPHERE_TODO("MESOSPHERE_ABORT_UNLESS(Kernel::GetSupervisorPageTable().GetPhysicalAddress(&stack_paddr, stack_bottom));");
-            (void)stack_bottom;
+            MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(&stack_paddr, stack_bottom));
 
-            MESOSPHERE_TODO("MESOSPHERE_R_ABORT_UNLESS(Kernel::GetSupervisorPageTable().Unmap(...);");
-            (void)stack_paddr;
+            MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPages(stack_bottom, 1, KMemoryState_Kernel));
 
             /* Free the stack page. */
             KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(stack_paddr));
@@ -253,7 +251,7 @@ namespace ams::kern {
     }
 
     void KThread::Finalize() {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     bool KThread::IsSignaled() const {
@@ -281,7 +279,7 @@ namespace ams::kern {
     }
 
     void KThread::DoWorkerTask() {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
     void KThread::DisableCoreMigration() {
@@ -588,7 +586,7 @@ namespace ams::kern {
     void KThread::Exit() {
         MESOSPHERE_ASSERT_THIS();
 
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
 
         MESOSPHERE_PANIC("KThread::Exit() would return");
     }
diff --git a/libraries/libmesosphere/source/kern_k_wait_object.cpp b/libraries/libmesosphere/source/kern_k_wait_object.cpp
index 87d374b96..dc7ef1f13 100644
--- a/libraries/libmesosphere/source/kern_k_wait_object.cpp
+++ b/libraries/libmesosphere/source/kern_k_wait_object.cpp
@@ -18,7 +18,7 @@ namespace ams::kern {
 
     void KWaitObject::OnTimer() {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_UNIMPLEMENTED();
     }
 
 }
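One ordering detail in the kern_k_thread.cpp hunk deserves emphasis: the stack page's physical address is resolved while the kernel mapping still exists, because once UnmapPages removes the translation there is nothing left to query. Restated as a sketch (FreeMappedPage is an illustrative name; the calls are the ones the hunk itself introduces):

    void FreeMappedPage(uintptr_t va) {
        /* 1. Translate while the mapping is still live. */
        KPhysicalAddress pa = Null<KPhysicalAddress>;
        MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(&pa, va));

        /* 2. Remove the kernel mapping (one page, kernel memory state). */
        MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPages(va, 1, KMemoryState_Kernel));

        /* 3. Return the physical page to the allocator. */
        KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(pa));
    }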