diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index 59ea13690..16a450a63 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -77,11 +77,11 @@ namespace ams::kern::init { } protected: constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const { - return (this->attributes >> offset) & ((1 << count) - 1); + return (this->attributes >> offset) & ((1ul << count) - 1); } constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const { - return this->attributes & (((1 << count) - 1) << offset); + return this->attributes & (((1ul << count) - 1) << offset); } public: constexpr ALWAYS_INLINE bool IsUserExecuteNever() const { return this->GetBits(54, 1) != 0; } @@ -128,9 +128,9 @@ namespace ams::kern::init { return this->SelectBits(12, 36); } - constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs) const { + constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const { /* Check whether this has the same permission/etc as the desired attributes. */ - return (this->GetBlock() | rhs.GetRawAttributes()) == this->GetRawAttributes(); + return L1PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes(); } }; @@ -156,16 +156,16 @@ namespace ams::kern::init { return this->SelectBits(12, 36); } - constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs) const { + constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const { /* Check whether this has the same permission/etc as the desired attributes. 
*/ - return (this->GetBlock() | rhs.GetRawAttributes()) == this->GetRawAttributes(); + return L2PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes(); } }; class L3PageTableEntry : public PageTableEntry { public: constexpr ALWAYS_INLINE L3PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig) - : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1) + : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x3) { /* ... */ } @@ -176,9 +176,9 @@ namespace ams::kern::init { return this->SelectBits(12, 36); } - constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs) const { + constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, bool contig) const { /* Check whether this has the same permission/etc as the desired attributes. */ - return (this->GetBlock() | rhs.GetRawAttributes()) == this->GetRawAttributes(); + return L3PageTableEntry(this->GetBlock(), rhs, contig).GetRawAttributes() == this->GetRawAttributes(); } }; @@ -247,7 +247,7 @@ namespace ams::kern::init { if (!l1_entry->IsTable()) { KPhysicalAddress new_table = allocator.Allocate(); ClearNewPageTable(new_table); - *l1_entry = L1PageTableEntry(phys_addr, attr.IsPrivilegedExecuteNever()); + *l1_entry = L1PageTableEntry(new_table, attr.IsPrivilegedExecuteNever()); cpu::DataSynchronizationBarrierInnerShareable(); } @@ -281,7 +281,7 @@ namespace ams::kern::init { if (!l2_entry->IsTable()) { KPhysicalAddress new_table = allocator.Allocate(); ClearNewPageTable(new_table); - *l2_entry = L2PageTableEntry(phys_addr, attr.IsPrivilegedExecuteNever()); + *l2_entry = L2PageTableEntry(new_table, attr.IsPrivilegedExecuteNever()); cpu::DataSynchronizationBarrierInnerShareable(); } @@ -373,7 +373,7 @@ namespace ams::kern::init { const KPhysicalAddress block = l1_entry->GetBlock(); MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); 
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L1BlockSize)); - MESOSPHERE_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before)); + MESOSPHERE_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false)); /* Invalidate the existing L1 block. */ *static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry; @@ -404,7 +404,7 @@ namespace ams::kern::init { /* Invalidate the existing contiguous L2 block. */ for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { /* Ensure that the entry is valid. */ - MESOSPHERE_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before)); + MESOSPHERE_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true)); static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry; } cpu::DataSynchronizationBarrierInnerShareable(); @@ -422,7 +422,7 @@ namespace ams::kern::init { MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize)); MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize)); MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2BlockSize)); - MESOSPHERE_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before)); + MESOSPHERE_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false)); /* Invalidate the existing L2 block. */ *static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry; @@ -456,7 +456,7 @@ namespace ams::kern::init { /* Invalidate the existing contiguous L3 block. */ for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { /* Ensure that the entry is valid. 
*/ - MESOSPHERE_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before)); + MESOSPHERE_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true)); static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry; } cpu::DataSynchronizationBarrierInnerShareable(); @@ -474,7 +474,7 @@ namespace ams::kern::init { MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize)); MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize)); MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3BlockSize)); - MESOSPHERE_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before)); + MESOSPHERE_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false)); /* Invalidate the existing L3 block. */ *static_cast<PageTableEntry *>(l3_entry) = InvalidPageTableEntry; diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp index 3e3bfe09a..c21345b56 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp @@ -55,7 +55,7 @@ namespace ams::kern::arm64::cpu { ALWAYS_INLINE GenericRegisterAccessor(u64 v) : value(v) { /* ... 
*/ } protected: constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const { - return (this->value >> offset) & ((1 << count) - 1); + return (this->value >> offset) & ((1ul << count) - 1); } }; diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp index 458be858a..ba9898ad6 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp @@ -86,13 +86,11 @@ namespace ams::kern { } u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) { - /* This is a biased random, but this is okay for now. */ - /* TODO: unbiased random? */ const u64 range_size = ((max + 1) - min); const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size; while (true) { if (const u64 rnd = GenerateRandomU64(); rnd < effective_max) { - return rnd % effective_max; + return min + (rnd % range_size); } } } diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp index 4047e7be7..8669ff32f 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp @@ -83,7 +83,7 @@ namespace ams::kern::smc { void GenerateRandomBytes(void *dst, size_t size) { /* Call SmcGenerateRandomBytes() */ /* TODO: Lock this to ensure only one core calls at once. 
*/ - SecureMonitorArguments args = { FunctionId_GetConfig, size }; + SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size }; MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0])); CallPrivilegedSecureMonitorFunction(args); MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success)); diff --git a/libraries/libvapours/include/vapours/util/util_fourcc.hpp b/libraries/libvapours/include/vapours/util/util_fourcc.hpp index 7191d8a39..9b0c93060 100644 --- a/libraries/libvapours/include/vapours/util/util_fourcc.hpp +++ b/libraries/libvapours/include/vapours/util/util_fourcc.hpp @@ -21,20 +21,6 @@ namespace ams::util { template<char A, char B, char C, char D> struct FourCC { - /* TODO: C++20 std::endian */ - static constexpr u32 Code = (static_cast<u32>(A) << 0x18) | - (static_cast<u32>(B) << 0x10) | - (static_cast<u32>(C) << 0x08) | - (static_cast<u32>(D) << 0x00); - - static constexpr const char String[] = {D, C, B, A}; - - static_assert(sizeof(Code) == 4); - static_assert(sizeof(String) == 4); - }; - - template<char A, char B, char C, char D> - struct ReverseFourCC { /* TODO: C++20 std::endian */ static constexpr u32 Code = (static_cast<u32>(A) << 0x00) | (static_cast<u32>(B) << 0x08) | (static_cast<u32>(C) << 0x10) | (static_cast<u32>(D) << 0x18); static constexpr const char String[] = {A, B, C, D}; static_assert(sizeof(Code) == 4); @@ -47,4 +33,18 @@ namespace ams::util { static_assert(sizeof(String) == 4); }; + template<char A, char B, char C, char D> + struct ReverseFourCC { + /* TODO: C++20 std::endian */ + static constexpr u32 Code = (static_cast<u32>(A) << 0x18) | + (static_cast<u32>(B) << 0x10) | + (static_cast<u32>(C) << 0x08) | + (static_cast<u32>(D) << 0x00); + + static constexpr const char String[] = {D, C, B, A}; + + static_assert(sizeof(Code) == 4); + static_assert(sizeof(String) == 4); + }; + } \ No newline at end of file diff --git a/mesosphere/kernel_ldr/source/kern_init_loader.cpp b/mesosphere/kernel_ldr/source/kern_init_loader.cpp index 6e1c91160..f42e80f3b 100644 --- a/mesosphere/kernel_ldr/source/kern_init_loader.cpp +++ b/mesosphere/kernel_ldr/source/kern_init_loader.cpp @@ -44,8 +44,10 @@ namespace ams::kern::init::loader { this->next_address = address; } - ALWAYS_INLINE void Finalize() { + 
ALWAYS_INLINE uintptr_t Finalize() { + const uintptr_t final_address = this->next_address; this->next_address = Null<uintptr_t>; + return final_address; } public: virtual KPhysicalAddress Allocate() override { @@ -255,10 +257,10 @@ namespace ams::kern::init::loader { /* We don't have ams::os, this may go in hw:: or something. */ const uintptr_t rx_offset = layout->rx_offset; const uintptr_t rx_end_offset = layout->rx_end_offset; - const uintptr_t ro_offset = layout->rx_offset; + const uintptr_t ro_offset = layout->ro_offset; const uintptr_t ro_end_offset = layout->ro_end_offset; - const uintptr_t rw_offset = layout->rx_offset; - const uintptr_t rw_end_offset = layout->rw_end_offset; + const uintptr_t rw_offset = layout->rw_offset; + /* UNUSED: const uintptr_t rw_end_offset = layout->rw_end_offset; */ const uintptr_t bss_end_offset = layout->bss_end_offset; MESOSPHERE_ABORT_UNLESS(util::IsAligned(rx_offset, 0x1000)); MESOSPHERE_ABORT_UNLESS(util::IsAligned(rx_end_offset, 0x1000)); @@ -315,7 +317,7 @@ namespace ams::kern::init::loader { ttbr1_table.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator); /* Clear kernel .bss. */ - std::memset(GetVoidPointer(virtual_base_address + bss_offset), 0, bss_end_offset - rw_end_offset); + std::memset(GetVoidPointer(virtual_base_address + bss_offset), 0, bss_end_offset - bss_offset); /* Apply relocations to the kernel. 
*/ const Elf::Elf64::Dyn *kernel_dynamic = reinterpret_cast<const Elf::Elf64::Dyn *>(GetInteger(virtual_base_address) + dynamic_offset); @@ -331,8 +333,8 @@ namespace ams::kern::init::loader { return GetInteger(virtual_base_address) - base_address; } - void Finalize() { - g_initial_page_allocator.Finalize(); + uintptr_t Finalize() { + return g_initial_page_allocator.Finalize(); } } \ No newline at end of file diff --git a/mesosphere/kernel_ldr/source/start.s b/mesosphere/kernel_ldr/source/start.s index 65cb4bb23..f8aed3d78 100644 --- a/mesosphere/kernel_ldr/source/start.s +++ b/mesosphere/kernel_ldr/source/start.s @@ -78,6 +78,9 @@ _start: /* Call ams::kern::init::loader::Finalize() */ bl _ZN3ams4kern4init6loader8FinalizeEv + /* X0 is now the next address for the page allocator. */ + /* We will return this to the kernel. */ + /* Return to the newly-relocated kernel. */ ldr x1, [sp, #0x18] /* Return address to Kernel */ ldr x2, [sp, #0x00] /* Relocated kernel base address diff. */