Mirror of https://github.com/Atmosphere-NX/Atmosphere, synced 2024-11-14 00:56:35 +00:00
kern: add (and use) generic KSystemControlBase
parent 1f8bf41f0b
commit 273f4a87ae
23 changed files with 704 additions and 988 deletions
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_select_cpu.hpp>
+#include <mesosphere/kern_select_interrupt_manager.hpp>
+
+namespace ams::kern::arch::arm64::smc {
+
+    template<int SmcId, bool DisableInterrupt>
+    void SecureMonitorCall(u64 *buf) {
+        /* Load arguments into registers. */
+        register u64 x0 asm("x0") = buf[0];
+        register u64 x1 asm("x1") = buf[1];
+        register u64 x2 asm("x2") = buf[2];
+        register u64 x3 asm("x3") = buf[3];
+        register u64 x4 asm("x4") = buf[4];
+        register u64 x5 asm("x5") = buf[5];
+        register u64 x6 asm("x6") = buf[6];
+        register u64 x7 asm("x7") = buf[7];
+
+        /* Perform the call. */
+        if constexpr (DisableInterrupt) {
+            KScopedInterruptDisable di;
+
+            /* Backup the current thread pointer. */
+            const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
+
+            __asm__ __volatile__("smc %c[smc_id]"
+                                : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
+                                : [smc_id]"i"(SmcId)
+                                : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
+                                );
+
+            /* Restore the current thread pointer into X18. */
+            cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
+        } else {
+            /* Backup the current thread pointer. */
+            const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
+
+            __asm__ __volatile__("smc %c[smc_id]"
+                                : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
+                                : [smc_id]"i"(SmcId)
+                                : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
+                                );
+
+            /* Restore the current thread pointer into X18. */
+            cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
+        }
+
+        /* Store arguments to output. */
+        buf[0] = x0;
+        buf[1] = x1;
+        buf[2] = x2;
+        buf[3] = x3;
+        buf[4] = x4;
+        buf[5] = x5;
+        buf[6] = x6;
+        buf[7] = x7;
+    }
+
+    enum PsciFunction {
+        PsciFunction_CpuSuspend = 0xC4000001,
+        PsciFunction_CpuOff     = 0x84000002,
+        PsciFunction_CpuOn      = 0xC4000003,
+    };
+
+    template<int SmcId, bool DisableInterrupt>
+    u64 PsciCall(PsciFunction function, u64 x1 = 0, u64 x2 = 0, u64 x3 = 0, u64 x4 = 0, u64 x5 = 0, u64 x6 = 0, u64 x7 = 0) {
+        ams::svc::lp64::SecureMonitorArguments args = { { function, x1, x2, x3, x4, x5, x6, x7 } };
+
+        SecureMonitorCall<SmcId, DisableInterrupt>(args.r);
+
+        return args.r[0];
+    }
+
+    template<int SmcId, bool DisableInterrupt>
+    u64 CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
+        return PsciCall<SmcId, DisableInterrupt>(PsciFunction_CpuOn, core_id, entrypoint, arg);
+    }
+
+}
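Note: the snippet below is illustrative only and is not part of the diff. It shows how a board layer drives the new shared helper; the SMC id and interrupt behaviour are template parameters, so the Nintendo Switch code later in this commit can issue a supervisor-mode PSCI CPU_ON like this (SmcId_Supervisor is the board-specific id introduced further down):

    /* Power a core on via the generic arm64 helper; returns the PSCI status (0 on success). */
    const u64 psci_result = ::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, true>(core_id, GetInteger(entry_phys_addr), context_id);
    MESOSPHERE_ABORT_UNLESS(psci_result == 0);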
@@ -15,9 +15,12 @@
  */
 #pragma once
 #include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_k_typed_address.hpp>
 
 namespace ams::kern {
 
+    constexpr inline KPhysicalAddress MainMemoryAddress = 0x80000000;
+
     constexpr inline size_t MainMemorySize = 4_GB;
     constexpr inline size_t MainMemorySizeMax = 8_GB;
 
@@ -15,23 +15,17 @@
  */
 #pragma once
 #include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_k_system_control_base.hpp>
 
-namespace ams::kern {
-
-    struct InitialProcessBinaryLayout;
-
-}
-
 namespace ams::kern::board::nintendo::nx {
 
-    class KSystemControl {
+    class KSystemControl : public KSystemControlBase {
         public:
-            class Init {
+            class Init : public KSystemControlBase::Init {
                 public:
                     /* Initialization. */
+                    static size_t GetRealMemorySize();
                     static size_t GetIntendedMemorySize();
-                    static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
-                    static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
                     static bool ShouldIncreaseThreadResourceLimit();
                     static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
                     static size_t GetApplicationPoolSize();
@@ -40,7 +34,7 @@ namespace ams::kern::board::nintendo::nx {
                     static u8 GetDebugLogUartPort();
 
                     /* Randomness. */
-                    static void GenerateRandomBytes(void *dst, size_t size);
+                    static void GenerateRandom(u64 *dst, size_t count);
                     static u64 GenerateRandomRange(u64 min, u64 max);
             };
         public:
@@ -50,7 +44,7 @@ namespace ams::kern::board::nintendo::nx {
             static NOINLINE u32 GetCreateProcessMemoryPool();
 
             /* Randomness. */
-            static void GenerateRandomBytes(void *dst, size_t size);
+            static void GenerateRandom(u64 *dst, size_t count);
             static u64 GenerateRandomRange(u64 min, u64 max);
             static u64 GenerateRandomU64();
 
@@ -58,23 +52,12 @@ namespace ams::kern::board::nintendo::nx {
             static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
             static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
 
-            static ALWAYS_INLINE u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
-                u32 v;
-                ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
-                return v;
-            }
-
-            static ALWAYS_INLINE void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
-                u32 v;
-                ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
-            }
-
             /* Power management. */
             static void SleepSystem();
             static NORETURN void StopSystem(void *arg = nullptr);
 
             /* User access. */
-            static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
+            static void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args);
 
             /* Secure Memory. */
             static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
@@ -15,9 +15,12 @@
  */
 #pragma once
 #include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_k_typed_address.hpp>
 
 namespace ams::kern {
 
+    constexpr inline KPhysicalAddress MainMemoryAddress = 0x40000000;
+
     constexpr inline size_t MainMemorySize = 4_GB;
     constexpr inline size_t MainMemorySizeMax = 8_GB;
 
@@ -15,71 +15,14 @@
  */
 #pragma once
 #include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_k_system_control_base.hpp>
 
-namespace ams::kern {
-
-    struct InitialProcessBinaryLayout;
-
-}
-
 namespace ams::kern::board::qemu::virt {
 
-    class KSystemControl {
+    class KSystemControl : public KSystemControlBase {
         public:
-            class Init {
-                public:
-                    /* Initialization. */
-                    static size_t GetIntendedMemorySize();
-                    static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
-                    static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
-                    static bool ShouldIncreaseThreadResourceLimit();
-                    static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
-                    static size_t GetApplicationPoolSize();
-                    static size_t GetAppletPoolSize();
-                    static size_t GetMinimumNonSecureSystemPoolSize();
-                    static u8 GetDebugLogUartPort();
-
-                    /* Randomness. */
-                    static void GenerateRandomBytes(void *dst, size_t size);
-                    static u64 GenerateRandomRange(u64 min, u64 max);
-            };
-        public:
-            /* Initialization. */
-            static NOINLINE void InitializePhase1();
-            static NOINLINE void InitializePhase2();
-            static NOINLINE u32 GetCreateProcessMemoryPool();
-
-            /* Randomness. */
-            static void GenerateRandomBytes(void *dst, size_t size);
-            static u64 GenerateRandomRange(u64 min, u64 max);
-            static u64 GenerateRandomU64();
-
-            /* Privileged Access. */
-            static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
-            static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
-
-            static ALWAYS_INLINE u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
-                u32 v;
-                ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
-                return v;
-            }
-
-            static ALWAYS_INLINE void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
-                u32 v;
-                ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
-            }
-
-            /* Power management. */
-            static void SleepSystem();
-            static NORETURN void StopSystem(void *arg = nullptr);
-
             /* User access. */
             static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
-
-            /* Secure Memory. */
-            static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
-            static Result AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool);
-            static void FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool);
     };
 
 }
@@ -212,13 +212,17 @@ namespace ams::kern {
             static NOINLINE auto GetKernelPageTableHeapRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelPtHeap); }
             static NOINLINE auto GetKernelInitPageTableRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelInitPt); }
 
-            static NOINLINE auto GetKernelPoolManagementRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolManagement); }
             static NOINLINE auto GetKernelPoolPartitionRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolPartition); }
+            static NOINLINE auto GetKernelPoolManagementRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolManagement); }
             static NOINLINE auto GetKernelSystemPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemPool); }
             static NOINLINE auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemNonSecurePool); }
             static NOINLINE auto GetKernelAppletPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramAppletPool); }
             static NOINLINE auto GetKernelApplicationPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramApplicationPool); }
+
+            static NOINLINE bool HasKernelSystemNonSecurePoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramSystemNonSecurePool) != nullptr; }
+            static NOINLINE bool HasKernelAppletPoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramAppletPool) != nullptr; }
+            static NOINLINE bool HasKernelApplicationPoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramApplicationPool) != nullptr; }
 
             static NOINLINE auto GetKernelTraceBufferRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelTraceBuffer); }
     };
 
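Note (assumption, not part of the diff): the new Has*PoolRegion predicates let generic code ask whether a board actually defines a given DRAM pool before touching it; a caller might use them roughly like this hypothetical sketch:

    if (KMemoryLayout::HasKernelApplicationPoolRegion()) {
        const auto extents = KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents();
        /* ...extents describes the application pool's physical range... */
    }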
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_k_spin_lock.hpp>
+
+namespace ams::kern {
+
+    struct InitialProcessBinaryLayout;
+
+}
+
+namespace ams::kern {
+
+    class KSystemControlBase {
+        protected:
+            /* Nintendo uses std::mt19937_t for randomness. */
+            /* To save space (and because mt19337_t isn't secure anyway), */
+            /* We will use TinyMT. */
+            static constinit inline bool s_initialized_random_generator;
+            static constinit inline util::TinyMT s_random_generator{util::ConstantInitialize};
+            static constinit inline KSpinLock s_random_lock;
+        public:
+            class Init {
+                public:
+                    /* Initialization. */
+                    static size_t GetRealMemorySize();
+                    static size_t GetIntendedMemorySize();
+                    static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
+                    static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
+                    static bool ShouldIncreaseThreadResourceLimit();
+                    static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
+                    static size_t GetApplicationPoolSize();
+                    static size_t GetAppletPoolSize();
+                    static size_t GetMinimumNonSecureSystemPoolSize();
+                    static u8 GetDebugLogUartPort();
+
+                    /* Randomness. */
+                    static void GenerateRandom(u64 *dst, size_t count);
+                    static u64 GenerateRandomRange(u64 min, u64 max);
+            };
+        public:
+            /* Initialization. */
+            static NOINLINE void InitializePhase1(bool skip_target_system = false);
+            static NOINLINE void InitializePhase2();
+            static NOINLINE u32 GetCreateProcessMemoryPool();
+
+            /* Randomness. */
+            static void GenerateRandom(u64 *dst, size_t count);
+            static u64 GenerateRandomRange(u64 min, u64 max);
+            static u64 GenerateRandomU64();
+
+            /* Register access Access. */
+            static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
+            static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
+
+            static u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address);
+            static void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value);
+
+            /* Power management. */
+            static void SleepSystem();
+            static NORETURN void StopSystem(void *arg = nullptr);
+
+            /* User access. */
+#if defined(ATMOSPHERE_ARCH_ARM64)
+            static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
+#endif
+
+            /* Secure Memory. */
+            static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
+            static Result AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool);
+            static void FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool);
+        protected:
+            template<typename F>
+            static ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) {
+                /* Handle the case where the difference is too large to represent. */
+                if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
+                    return f();
+                }
+
+                /* Iterate until we get a value in range. */
+                const u64 range_size    = ((max + 1) - min);
+                const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
+                while (true) {
+                    if (const u64 rnd = f(); rnd < effective_max) {
+                        return min + (rnd % range_size);
+                    }
+                }
+            }
+
+            /* User access. */
+#if defined(ATMOSPHERE_ARCH_ARM64)
+            static void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args);
+#endif
+    };
+
+}
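Note (illustrative, not part of the diff): GenerateUniformRange is rejection sampling — any draw at or above effective_max is discarded so the final modulo does not bias the distribution. Derived classes reuse it by passing any u64 source, as the Nintendo board code does later in this commit:

    u64 KSystemControl::Init::GenerateRandomRange(u64 min, u64 max) {
        return KSystemControlBase::GenerateUniformRange(min, max, GenerateRandomU64ForInit);
    }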
@@ -21,6 +21,7 @@ namespace ams::kern {
 
     class KTargetSystem {
         private:
+            friend class KSystemControlBase;
             friend class KSystemControl;
         private:
            static inline constinit bool s_is_debug_mode;
@@ -15,6 +15,7 @@
  */
 #pragma once
 #include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_k_system_control_base.hpp>
 
 #ifdef ATMOSPHERE_BOARD_NINTENDO_NX
     #include <mesosphere/board/nintendo/nx/kern_k_system_control.hpp>
@@ -33,3 +34,18 @@
 #else
     #error "Unknown board for KSystemControl"
 #endif
+
+namespace ams::kern {
+
+    ALWAYS_INLINE u32 KSystemControlBase::ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
+        u32 v;
+        KSystemControl::ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
+        return v;
+    }
+
+    ALWAYS_INLINE void KSystemControlBase::WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
+        u32 v;
+        KSystemControl::ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
+    }
+
+}
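Note (illustrative, not part of the diff): these inline definitions live after the board header is selected because they forward to the concrete KSystemControl. The mask/value convention is the one already visible above — a mask of 0x00000000 is a pure read, a mask of 0xFFFFFFFF a full write. A hypothetical caller (address is a placeholder):

    const u32 old_value = KSystemControlBase::ReadRegisterPrivileged(address);   /* mask 0x00000000, value 0 */
    KSystemControlBase::WriteRegisterPrivileged(address, old_value | 0x1);       /* mask 0xFFFFFFFFu         */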
@@ -1130,7 +1130,7 @@ namespace ams::kern::board::nintendo::nx {
                 size_t cur_size;
                 {
                     /* Get the current contiguous range. */
-                    KPageTableBase::MemoryRange contig_range = {};
+                    KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
                     R_TRY(page_table->OpenMemoryRangeForMapDeviceAddressSpace(std::addressof(contig_range), process_address + mapped_size, size - mapped_size, ConvertToKMemoryPermission(device_perm), is_aligned));
 
                     /* Ensure we close the range when we're done. */
@@ -1288,7 +1288,7 @@ namespace ams::kern::board::nintendo::nx {
         MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
 
         /* We need to traverse the ranges that make up our mapping, to make sure they're all good. Start by getting a contiguous range. */
-        KPageTableBase::MemoryRange contig_range = {};
+        KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
         if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), process_address, size))) {
             return false;
         }
@@ -73,7 +73,7 @@ namespace ams::kern::board::nintendo::nx {
 
     void PowerOnCpu(int core_id, KPhysicalAddress entry_phys_addr, u64 context_id) {
         /* Request the secure monitor power on the core. */
-        smc::CpuOn(cpu::MultiprocessorAffinityRegisterAccessor().GetCpuOnArgument() | core_id, GetInteger(entry_phys_addr), context_id);
+        ::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, true>(cpu::MultiprocessorAffinityRegisterAccessor().GetCpuOnArgument() | core_id, GetInteger(entry_phys_addr), context_id);
     }
 
     void WaitOtherCpuPowerOff() {
@@ -21,7 +21,6 @@ namespace ams::kern::board::nintendo::nx {
 
     namespace {
 
-        constexpr uintptr_t DramPhysicalAddress = 0x80000000;
         constexpr size_t SecureAlignment = 128_KB;
 
         /* Global variables for panic. */
@@ -38,22 +37,6 @@ namespace ams::kern::board::nintendo::nx {
         constinit KPhysicalAddress g_secure_region_phys_addr = Null<KPhysicalAddress>;
         constinit size_t g_secure_region_size = 0;
 
-        /* Global variables for randomness. */
-        /* Nintendo uses std::mt19937_t for randomness. */
-        /* To save space (and because mt19337_t isn't secure anyway), */
-        /* We will use TinyMT. */
-        constinit bool g_initialized_random_generator;
-        constinit util::TinyMT g_random_generator{util::ConstantInitialize};
-        constinit KSpinLock g_random_lock;
-
-        ALWAYS_INLINE size_t GetRealMemorySizeForInit() {
-            /* TODO: Move this into a header for the MC in general. */
-            constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
-            u32 config_value;
-            MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
-            return static_cast<size_t>(config_value & 0x3FFF) << 20;
-        }
-
         ALWAYS_INLINE util::BitPack32 GetKernelConfigurationForInit() {
             u64 value = 0;
             smc::init::GetConfig(&value, 1, smc::ConfigItem::KernelConfiguration);
@@ -86,7 +69,7 @@ namespace ams::kern::board::nintendo::nx {
 
         ALWAYS_INLINE u64 GenerateRandomU64ForInit() {
             u64 value;
-            smc::init::GenerateRandomBytes(&value, sizeof(value));
+            smc::init::GenerateRandomBytes(std::addressof(value), sizeof(value));
             return value;
         }
 
@@ -96,27 +79,6 @@ namespace ams::kern::board::nintendo::nx {
             return value;
         }
 
-        ALWAYS_INLINE u64 GenerateRandomU64FromGenerator() {
-            return g_random_generator.GenerateRandomU64();
-        }
-
-        template<typename F>
-        ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) {
-            /* Handle the case where the difference is too large to represent. */
-            if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
-                return f();
-            }
-
-            /* Iterate until we get a value in range. */
-            const u64 range_size    = ((max + 1) - min);
-            const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
-            while (true) {
-                if (const u64 rnd = f(); rnd < effective_max) {
-                    return min + (rnd % range_size);
-                }
-            }
-        }
-
         ALWAYS_INLINE u64 GetConfigU64(smc::ConfigItem which) {
             u64 value;
             smc::GetConfig(&value, 1, which);
@@ -324,6 +286,14 @@ namespace ams::kern::board::nintendo::nx {
     }
 
     /* Initialization. */
+    size_t KSystemControl::Init::GetRealMemorySize() {
+        /* TODO: Move this into a header for the MC in general. */
+        constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
+        u32 config_value;
+        MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
+        return static_cast<size_t>(config_value & 0x3FFF) << 20;
+    }
+
     size_t KSystemControl::Init::GetIntendedMemorySize() {
         switch (GetKernelConfigurationForInit().Get<smc::KernelConfiguration::MemorySize>()) {
             case smc::MemorySize_4GB:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) {
|
|
||||||
const size_t real_dram_size = GetRealMemorySizeForInit();
|
|
||||||
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
|
|
||||||
if (intended_dram_size * 2 < real_dram_size) {
|
|
||||||
return base_address;
|
|
||||||
} else {
|
|
||||||
return base_address + ((real_dram_size - intended_dram_size) / 2);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void KSystemControl::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
|
|
||||||
*out = {
|
|
||||||
.address = GetInteger(GetKernelPhysicalBaseAddress(DramPhysicalAddress)) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax,
|
|
||||||
._08 = 0,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
|
bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
|
||||||
return GetKernelConfigurationForInit().Get<smc::KernelConfiguration::IncreaseThreadResourceLimit>();
|
return GetKernelConfigurationForInit().Get<smc::KernelConfiguration::IncreaseThreadResourceLimit>();
|
||||||
}
|
}
|
||||||
|
@@ -424,17 +377,17 @@ namespace ams::kern::board::nintendo::nx {
     }
 
     void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
-        smc::init::CpuOn(core_id, entrypoint, arg);
+        MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, false>(core_id, entrypoint, arg)) == 0);
     }
 
     /* Randomness for Initialization. */
-    void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) {
-        MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
-        smc::init::GenerateRandomBytes(dst, size);
+    void KSystemControl::Init::GenerateRandom(u64 *dst, size_t count) {
+        MESOSPHERE_INIT_ABORT_UNLESS(count <= 7);
+        smc::init::GenerateRandomBytes(dst, count * sizeof(u64));
     }
 
     u64 KSystemControl::Init::GenerateRandomRange(u64 min, u64 max) {
-        return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
+        return KSystemControlBase::GenerateUniformRange(min, max, GenerateRandomU64ForInit);
     }
 
     /* System Initialization. */
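Note (illustrative, not part of the diff): the randomness interface now deals in u64 words rather than raw bytes; the count <= 7 bound matches the old size <= 0x38 check, since one secure-monitor call returns at most seven result registers (x1–x7). A hypothetical caller:

    u64 seeds[2];
    KSystemControl::Init::GenerateRandom(seeds, 2); /* 2 * sizeof(u64) = 16 bytes, within the 7-word limit */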
@@ -443,8 +396,8 @@ namespace ams::kern::board::nintendo::nx {
         {
             u64 seed;
             smc::GenerateRandomBytes(std::addressof(seed), sizeof(seed));
-            g_random_generator.Initialize(reinterpret_cast<u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
-            g_initialized_random_generator = true;
+            s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
+            s_initialized_random_generator = true;
         }
 
         /* Set IsDebugMode. */
@@ -483,25 +436,8 @@ namespace ams::kern::board::nintendo::nx {
             smc::ConfigureCarveout(0, carveout.GetAddress(), carveout.GetSize());
         }
 
-        /* System ResourceLimit initialization. */
-        {
-            /* Construct the resource limit object. */
-            KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit();
-            KAutoObject::Create<KResourceLimit>(std::addressof(sys_res_limit));
-            sys_res_limit.Initialize();
-
-            /* Set the initial limits. */
-            const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes();
-            const auto &slab_counts = init::GetSlabResourceCounts();
-            MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size));
-            MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread));
-            MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent));
-            MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory));
-            MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession));
-
-            /* Reserve system memory. */
-            MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size));
-        }
+        /* Initialize the system resource limit (and potentially other things). */
+        KSystemControlBase::InitializePhase1(true);
     }
 
     void KSystemControl::InitializePhase2() {
@@ -520,11 +456,8 @@ namespace ams::kern::board::nintendo::nx {
             g_secure_applet_memory_address = KMemoryLayout::GetLinearVirtualAddress(secure_applet_memory_phys_addr);
         }
 
-        /* Initialize KTrace. */
-        if constexpr (IsKTraceEnabled) {
-            const auto &ktrace = KMemoryLayout::GetKernelTraceBufferRegion();
-            KTrace::Initialize(ktrace.GetAddress(), ktrace.GetSize());
-        }
+        /* Initialize KTrace (and potentially other init). */
+        KSystemControlBase::InitializePhase2();
     }
 
     u32 KSystemControl::GetCreateProcessMemoryPool() {
@@ -546,29 +479,29 @@ namespace ams::kern::board::nintendo::nx {
     }
 
     /* Randomness. */
-    void KSystemControl::GenerateRandomBytes(void *dst, size_t size) {
-        MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
-        smc::GenerateRandomBytes(dst, size);
+    void KSystemControl::GenerateRandom(u64 *dst, size_t count) {
+        MESOSPHERE_INIT_ABORT_UNLESS(count <= 7);
+        smc::GenerateRandomBytes(dst, count * sizeof(u64));
     }
 
     u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
         KScopedInterruptDisable intr_disable;
-        KScopedSpinLock lk(g_random_lock);
+        KScopedSpinLock lk(s_random_lock);
 
 
-        if (AMS_LIKELY(g_initialized_random_generator)) {
-            return GenerateUniformRange(min, max, GenerateRandomU64FromGenerator);
+        if (AMS_LIKELY(s_initialized_random_generator)) {
+            return KSystemControlBase::GenerateUniformRange(min, max, [] ALWAYS_INLINE_LAMBDA () -> u64 { return s_random_generator.GenerateRandomU64(); });
        } else {
-            return GenerateUniformRange(min, max, GenerateRandomU64FromSmc);
+            return KSystemControlBase::GenerateUniformRange(min, max, GenerateRandomU64FromSmc);
        }
     }
 
     u64 KSystemControl::GenerateRandomU64() {
         KScopedInterruptDisable intr_disable;
-        KScopedSpinLock lk(g_random_lock);
+        KScopedSpinLock lk(s_random_lock);
 
-        if (AMS_LIKELY(g_initialized_random_generator)) {
-            return GenerateRandomU64FromGenerator();
+        if (AMS_LIKELY(s_initialized_random_generator)) {
+            return s_random_generator.GenerateRandomU64();
        } else {
            return GenerateRandomU64FromSmc();
        }
@@ -672,52 +605,18 @@ namespace ams::kern::board::nintendo::nx {
     }
 
     /* User access. */
-    void KSystemControl::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
-        /* Get the function id for the current call. */
-        u64 function_id = args->r[0];
-
-        /* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */
-        auto &page_table = GetCurrentProcess().GetPageTable();
-        auto *bim = page_table.GetBlockInfoManager();
-
-        constexpr size_t MaxMappedRegisters = 7;
-        std::array<KPageGroup, MaxMappedRegisters> page_groups = { KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), };
-
-        for (size_t i = 0; i < MaxMappedRegisters; i++) {
-            const size_t reg_id = i + 1;
-            if (function_id & (1ul << (8 + reg_id))) {
-                /* Create and open a new page group for the address. */
-                KVirtualAddress virt_addr = args->r[reg_id];
-
-                if (R_SUCCEEDED(page_table.MakeAndOpenPageGroup(std::addressof(page_groups[i]), util::AlignDown(GetInteger(virt_addr), PageSize), 1, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None))) {
-                    /* Translate the virtual address to a physical address. */
-                    const auto it = page_groups[i].begin();
-                    MESOSPHERE_ASSERT(it != page_groups[i].end());
-                    MESOSPHERE_ASSERT(it->GetNumPages() == 1);
-
-                    args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
-                } else {
-                    /* If we couldn't map, we should clear the address. */
-                    args->r[reg_id] = 0;
-                }
-            }
-        }
-
+    void KSystemControl::CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
         /* Invoke the secure monitor. */
-        smc::CallSecureMonitorFromUser(args);
-
-        /* Make sure that we close any pages that we opened. */
-        for (size_t i = 0; i < MaxMappedRegisters; i++) {
-            page_groups[i].Close();
-        }
+        return smc::CallSecureMonitorFromUser(args);
     }
 
     /* Secure Memory. */
     size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
         if (pool == KMemoryManager::Pool_Applet) {
             return 0;
+        } else {
+            return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
         }
-        return size;
     }
 
     Result KSystemControl::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
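Note (assumption, not shown in this excerpt): the pointer-argument page mapping that used to live here has been lifted into the generic layer; the board now only supplies CallSecureMonitorFromUserImpl. A rough, hedged sketch of how the generic wrapper presumably chains to it:

    /* Hedged sketch of the (not shown) generic wrapper in KSystemControlBase. */
    void KSystemControlBase::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
        /* ...map any pointer arguments indicated by the function id... */
        KSystemControl::CallSecureMonitorFromUserImpl(args);
        /* ...close any mappings that were opened... */
    }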
@@ -20,10 +20,6 @@ namespace ams::kern::board::nintendo::nx::smc {
 
     namespace {
 
-        struct SecureMonitorArguments {
-            u64 x[8];
-        };
-
         enum UserFunctionId : u32 {
             UserFunctionId_SetConfig = 0xC3000401,
             UserFunctionId_GetConfigUser = 0xC3000002,
@@ -45,9 +41,6 @@ namespace ams::kern::board::nintendo::nx::smc {
         };
 
         enum FunctionId : u32 {
-            FunctionId_CpuSuspend = 0xC4000001,
-            FunctionId_CpuOff = 0x84000002,
-            FunctionId_CpuOn = 0xC4000003,
            FunctionId_GetConfig = 0xC3000004,
            FunctionId_GenerateRandomBytes = 0xC3000005,
            FunctionId_Panic = 0xC3000006,
@@ -58,171 +51,60 @@ namespace ams::kern::board::nintendo::nx::smc {
            FunctionId_SetConfig = 0xC3000409,
        };
 
-        void CallPrivilegedSecureMonitorFunction(SecureMonitorArguments &args) {
-            /* Load arguments into registers. */
-            register u64 x0 asm("x0") = args.x[0];
-            register u64 x1 asm("x1") = args.x[1];
-            register u64 x2 asm("x2") = args.x[2];
-            register u64 x3 asm("x3") = args.x[3];
-            register u64 x4 asm("x4") = args.x[4];
-            register u64 x5 asm("x5") = args.x[5];
-            register u64 x6 asm("x6") = args.x[6];
-            register u64 x7 asm("x7") = args.x[7];
-
-            /* Actually make the call. */
-            {
-                /* Disable interrupts while making the call. */
-                KScopedInterruptDisable intr_disable;
-
-                {
-                    /* Backup the current thread pointer. */
-                    const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
-
-                    __asm__ __volatile__("smc #1"
-                                        : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
-                                        :
-                                        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
-                                        );
-
-                    /* Restore the current thread pointer into X18. */
-                    cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
-
-                    /* Store arguments to output. */
-                    args.x[0] = x0;
-                    args.x[1] = x1;
-                    args.x[2] = x2;
-                    args.x[3] = x3;
-                    args.x[4] = x4;
-                    args.x[5] = x5;
-                    args.x[6] = x6;
-                    args.x[7] = x7;
-                }
-            }
-        }
-
-        void CallUserSecureMonitorFunction(ams::svc::lp64::SecureMonitorArguments *args) {
-            /* Load arguments into registers. */
-            register u64 x0 asm("x0") = args->r[0];
-            register u64 x1 asm("x1") = args->r[1];
-            register u64 x2 asm("x2") = args->r[2];
-            register u64 x3 asm("x3") = args->r[3];
-            register u64 x4 asm("x4") = args->r[4];
-            register u64 x5 asm("x5") = args->r[5];
-            register u64 x6 asm("x6") = args->r[6];
-            register u64 x7 asm("x7") = args->r[7];
-
-            /* Actually make the call. */
-            {
-                /* Disable interrupts while making the call. */
-                KScopedInterruptDisable intr_disable;
-
-                {
-                    /* Backup the current thread pointer. */
-                    const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
-
-                    __asm__ __volatile__("smc #0"
-                                        : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
-                                        :
-                                        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
-                                        );
-
-                    /* Restore the current thread pointer into X18. */
-                    cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
-
-                    /* Store arguments to output. */
-                    args->r[0] = x0;
-                    args->r[1] = x1;
-                    args->r[2] = x2;
-                    args->r[3] = x3;
-                    args->r[4] = x4;
-                    args->r[5] = x5;
-                    args->r[6] = x6;
-                    args->r[7] = x7;
-                }
-            }
-        }
-
-        void CallPrivilegedSecureMonitorFunctionForInit(SecureMonitorArguments &args) {
-            /* Load arguments into registers. */
-            register u64 x0 asm("x0") = args.x[0];
-            register u64 x1 asm("x1") = args.x[1];
-            register u64 x2 asm("x2") = args.x[2];
-            register u64 x3 asm("x3") = args.x[3];
-            register u64 x4 asm("x4") = args.x[4];
-            register u64 x5 asm("x5") = args.x[5];
-            register u64 x6 asm("x6") = args.x[6];
-            register u64 x7 asm("x7") = args.x[7];
-
-            /* Actually make the call. */
-            __asm__ __volatile__("smc #1"
-                                : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
-                                :
-                                : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
-                                );
-
-            /* Store arguments to output. */
-            args.x[0] = x0;
-            args.x[1] = x1;
-            args.x[2] = x2;
-            args.x[3] = x3;
-            args.x[4] = x4;
-            args.x[5] = x5;
-            args.x[6] = x6;
-            args.x[7] = x7;
-        }
-
         /* Global lock for generate random bytes. */
-        KSpinLock g_generate_random_lock;
+        constinit KSpinLock g_generate_random_lock;
 
     }
 
     /* SMC functionality needed for init. */
     namespace init {
 
-        void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
-            SecureMonitorArguments args = { FunctionId_CpuOn, core_id, entrypoint, arg };
-            CallPrivilegedSecureMonitorFunctionForInit(args);
-        }
-
         void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
-            SecureMonitorArguments args = { FunctionId_GetConfig, static_cast<u32>(config_item) };
-            CallPrivilegedSecureMonitorFunctionForInit(args);
-            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
+            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
+            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
 
             for (size_t i = 0; i < num_qwords && i < 7; i++) {
-                out[i] = args.x[1 + i];
+                out[i] = args.r[1 + i];
             }
         }
 
         void GenerateRandomBytes(void *dst, size_t size) {
             /* Call SmcGenerateRandomBytes() */
-            SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size };
-            MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0]));
+            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
+            MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
 
-            CallPrivilegedSecureMonitorFunctionForInit(args);
-            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
+            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
 
             /* Copy output. */
-            std::memcpy(dst, std::addressof(args.x[1]), size);
+            std::memcpy(dst, std::addressof(args.r[1]), size);
         }
 
         bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
-            SecureMonitorArguments args = { FunctionId_ReadWriteRegister, address, mask, value };
-            CallPrivilegedSecureMonitorFunctionForInit(args);
-            *out = args.x[1];
-            return static_cast<SmcResult>(args.x[0]) == SmcResult::Success;
+            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
+            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
+
+            *out = args.r[1];
+
+            return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
         }
 
     }
 
     bool TryGetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
-        SecureMonitorArguments args = { FunctionId_GetConfig, static_cast<u32>(config_item) };
-        CallPrivilegedSecureMonitorFunction(args);
-        if (static_cast<SmcResult>(args.x[0]) != SmcResult::Success) {
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+        if (AMS_UNLIKELY(static_cast<SmcResult>(args.r[0]) != SmcResult::Success)) {
             return false;
         }
 
         for (size_t i = 0; i < num_qwords && i < 7; i++) {
-            out[i] = args.x[1 + i];
+            out[i] = args.r[1 + i];
         }
 
         return true;
@ -233,55 +115,58 @@ namespace ams::kern::board::nintendo::nx::smc {
|
||||||
}
|
}
|
||||||
|
|
||||||
bool SetConfig(ConfigItem config_item, u64 value) {
|
bool SetConfig(ConfigItem config_item, u64 value) {
|
||||||
SecureMonitorArguments args = { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value };
|
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
|
||||||
CallPrivilegedSecureMonitorFunction(args);
|
|
||||||
return static_cast<SmcResult>(args.x[0]) == SmcResult::Success;
|
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
|
||||||
|
|
||||||
|
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
|
bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
|
||||||
SecureMonitorArguments args = { FunctionId_ReadWriteRegister, address, mask, value };
|
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
|
||||||
CallPrivilegedSecureMonitorFunction(args);
|
|
||||||
*out = static_cast<u32>(args.x[1]);
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
return static_cast<SmcResult>(args.x[0]) == SmcResult::Success;
*out = static_cast<u32>(args.r[1]);
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}

void ConfigureCarveout(size_t which, uintptr_t address, size_t size) {
    SecureMonitorArguments args = { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) };
    ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
    CallPrivilegedSecureMonitorFunction(args);
    MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
}

void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
    ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
    SecureMonitorArguments args = { FunctionId_CpuOn, core_id, static_cast<u64>(entrypoint), static_cast<u64>(arg) };
    CallPrivilegedSecureMonitorFunction(args);
    MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
    MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
}

void GenerateRandomBytes(void *dst, size_t size) {
    /* Setup for call. */
    SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size };
    ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
    MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0]));
    MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));

    /* Make call. */
    {
        KScopedInterruptDisable intr_disable;
        KScopedSpinLock lk(g_generate_random_lock);
        CallPrivilegedSecureMonitorFunction(args);
        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
    }
    MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
    MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));

    /* Copy output. */
    std::memcpy(dst, std::addressof(args.x[1]), size);
    std::memcpy(dst, std::addressof(args.r[1]), size);
}

void NORETURN Panic(u32 color) {
    SecureMonitorArguments args = { FunctionId_Panic, color };
    ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_Panic, color } };
    CallPrivilegedSecureMonitorFunction(args);
    ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);

    AMS_INFINITE_LOOP();
}

void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
    CallUserSecureMonitorFunction(args);
    ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User, true>(args->r);
}
}
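The board-specific wrappers above now funnel through the generic SecureMonitorCall<SmcId, DisableInterrupt> helper instead of hand-rolled per-board assembly. As a hedged sketch of the resulting shape (the FunctionId_SetConfig value and the exact argument layout are not shown in this hunk, so both are assumptions), a privileged setter reduces to:

    /* Sketch only: function id and register layout are assumed, not taken from this diff. */
    bool SetConfig(ConfigItem config_item, u64 value) {
        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u64>(config_item), 0, value } };

        /* SmcId_Supervisor selects the kernel-owned SMC id; true keeps interrupts disabled around the call. */
        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);

        return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
    }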
@ -15,10 +15,16 @@
|
||||||
*/
|
*/
|
||||||
#pragma once
|
#pragma once
|
||||||
#include <mesosphere.hpp>
|
#include <mesosphere.hpp>
|
||||||
|
#include <mesosphere/arch/arm64/kern_secure_monitor_base.hpp>
|
||||||
|
|
||||||
namespace ams::kern::board::nintendo::nx::smc {
|
namespace ams::kern::board::nintendo::nx::smc {
|
||||||
|
|
||||||
/* Types. */
|
/* Types. */
|
||||||
|
enum SmcId {
|
||||||
|
SmcId_User = 0,
|
||||||
|
SmcId_Supervisor = 1,
|
||||||
|
};
|
||||||
|
|
||||||
enum MemorySize {
|
enum MemorySize {
|
||||||
MemorySize_4GB = 0,
|
MemorySize_4GB = 0,
|
||||||
MemorySize_6GB = 1,
|
MemorySize_6GB = 1,
|
||||||
|
@ -105,15 +111,12 @@ namespace ams::kern::board::nintendo::nx::smc {
|
||||||
|
|
||||||
bool SetConfig(ConfigItem config_item, u64 value);
|
bool SetConfig(ConfigItem config_item, u64 value);
|
||||||
|
|
||||||
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
|
|
||||||
|
|
||||||
void NORETURN Panic(u32 color);
|
void NORETURN Panic(u32 color);
|
||||||
|
|
||||||
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
|
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
|
||||||
|
|
||||||
namespace init {
|
namespace init {
|
||||||
|
|
||||||
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
|
|
||||||
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
|
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
|
||||||
void GenerateRandomBytes(void *dst, size_t size);
|
void GenerateRandomBytes(void *dst, size_t size);
|
||||||
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
|
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
|
||||||
|
|
|
@ -18,447 +18,10 @@
|
||||||
|
|
||||||
namespace ams::kern::board::qemu::virt {
|
namespace ams::kern::board::qemu::virt {
|
||||||
|
|
||||||
namespace {
|
|
||||||
|
|
||||||
constexpr uintptr_t DramPhysicalAddress = 0x40000000;
|
|
||||||
constexpr size_t SecureAlignment = 128_KB;
|
|
||||||
|
|
||||||
/* Global variables for secure memory. */
|
|
||||||
constexpr size_t SecureAppletMemorySize = 4_MB;
|
|
||||||
constinit KSpinLock g_secure_applet_lock;
|
|
||||||
constinit bool g_secure_applet_memory_used = false;
|
|
||||||
constinit KVirtualAddress g_secure_applet_memory_address = Null<KVirtualAddress>;
|
|
||||||
|
|
||||||
constinit KSpinLock g_secure_region_lock;
|
|
||||||
constinit bool g_secure_region_used = false;
|
|
||||||
constinit KPhysicalAddress g_secure_region_phys_addr = Null<KPhysicalAddress>;
|
|
||||||
constinit size_t g_secure_region_size = 0;
|
|
||||||
|
|
||||||
/* Global variables for randomness. */
|
|
||||||
constinit bool g_initialized_random_generator;
|
|
||||||
constinit util::TinyMT g_random_generator;
|
|
||||||
constinit KSpinLock g_random_lock;
|
|
||||||
|
|
||||||
ALWAYS_INLINE u64 GenerateRandomU64FromGenerator() {
|
|
||||||
return g_random_generator.GenerateRandomU64();
|
|
||||||
}
|
|
||||||
|
|
||||||
template<typename F>
|
|
||||||
ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) {
|
|
||||||
/* Handle the case where the difference is too large to represent. */
|
|
||||||
if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
|
|
||||||
return f();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Iterate until we get a value in range. */
|
|
||||||
const u64 range_size = ((max + 1) - min);
|
|
||||||
const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
|
|
||||||
while (true) {
|
|
||||||
if (const u64 rnd = f(); rnd < effective_max) {
|
|
||||||
return min + (rnd % range_size);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
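GenerateUniformRange above uses rejection sampling: any raw value at or above the largest multiple of the range size is discarded, so every remainder is equally likely. A standalone illustration of the same arithmetic, with std::mt19937_64 standing in for the kernel generator:

    #include <cstdint>
    #include <cstdio>
    #include <limits>
    #include <random>

    /* Mirrors the helper above: full-range requests pass straight through, everything else rejects
     * values that would bias the modulo. */
    static uint64_t UniformRange(uint64_t min, uint64_t max, std::mt19937_64 &rng) {
        if (min == std::numeric_limits<uint64_t>::min() && max == std::numeric_limits<uint64_t>::max()) {
            return rng();
        }
        const uint64_t range_size    = (max + 1) - min;
        const uint64_t effective_max = (std::numeric_limits<uint64_t>::max() / range_size) * range_size;
        while (true) {
            const uint64_t rnd = rng();
            if (rnd < effective_max) {
                return min + (rnd % range_size);
            }
        }
    }

    int main() {
        std::mt19937_64 rng(0);
        std::printf("%llu\n", static_cast<unsigned long long>(UniformRange(1, 6, rng)));
    }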
|
|
||||||
|
|
||||||
|
|
||||||
/* TODO */
|
|
||||||
|
|
||||||
ALWAYS_INLINE size_t GetRealMemorySizeForInit() {
|
|
||||||
return 4_GB;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool SetSecureRegion(KPhysicalAddress phys_addr, size_t size) {
|
|
||||||
/* Ensure address and size are aligned. */
|
|
||||||
if (!util::IsAligned(GetInteger(phys_addr), SecureAlignment)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if (!util::IsAligned(size, SecureAlignment)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Disable interrupts and acquire the secure region lock. */
|
|
||||||
KScopedInterruptDisable di;
|
|
||||||
KScopedSpinLock lk(g_secure_region_lock);
|
|
||||||
|
|
||||||
/* If size is non-zero, we're allocating the secure region. Otherwise, we're freeing it. */
|
|
||||||
if (size != 0) {
|
|
||||||
/* Verify that the secure region is free. */
|
|
||||||
if (g_secure_region_used) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Set the secure region. */
|
|
||||||
g_secure_region_used = true;
|
|
||||||
g_secure_region_phys_addr = phys_addr;
|
|
||||||
g_secure_region_size = size;
|
|
||||||
} else {
|
|
||||||
/* Verify that the secure region is in use. */
|
|
||||||
if (!g_secure_region_used) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Verify that the address being freed is the secure region. */
|
|
||||||
if (phys_addr != g_secure_region_phys_addr) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Clear the secure region. */
|
|
||||||
g_secure_region_used = false;
|
|
||||||
g_secure_region_phys_addr = Null<KPhysicalAddress>;
|
|
||||||
g_secure_region_size = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
// /* Configure the carveout with the secure monitor. */
|
|
||||||
// smc::ConfigureCarveout(1, GetInteger(phys_addr), size);
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
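SetSecureRegion doubles as allocator and releaser: a non-zero size claims the region, and a second call with the same physical address and a size of zero releases it. A hypothetical caller would pair the two like this:

    /* Hypothetical usage of the toggle convention above. */
    if (SetSecureRegion(paddr, size)) {
        /* ... the carveout is now reserved for secure use ... */
        MESOSPHERE_ABORT_UNLESS(SetSecureRegion(paddr, 0)); /* Passing size == 0 releases it. */
    }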
|
|
||||||
|
|
||||||
Result AllocateSecureMemoryForApplet(KVirtualAddress *out, size_t size) {
|
|
||||||
/* Verify that the size is valid. */
|
|
||||||
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
|
|
||||||
R_UNLESS(size <= SecureAppletMemorySize, svc::ResultOutOfMemory());
|
|
||||||
|
|
||||||
/* Disable interrupts and acquire the secure applet lock. */
|
|
||||||
KScopedInterruptDisable di;
|
|
||||||
KScopedSpinLock lk(g_secure_applet_lock);
|
|
||||||
|
|
||||||
/* Check that memory is reserved for secure applet use. */
|
|
||||||
MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address != Null<KVirtualAddress>);
|
|
||||||
|
|
||||||
/* Verify that the secure applet memory isn't already being used. */
|
|
||||||
R_UNLESS(!g_secure_applet_memory_used, svc::ResultOutOfMemory());
|
|
||||||
|
|
||||||
/* Return the secure applet memory. */
|
|
||||||
g_secure_applet_memory_used = true;
|
|
||||||
*out = g_secure_applet_memory_address;
|
|
||||||
|
|
||||||
return ResultSuccess();
|
|
||||||
}
|
|
||||||
|
|
||||||
void FreeSecureMemoryForApplet(KVirtualAddress address, size_t size) {
|
|
||||||
/* Disable interrupts and acquire the secure applet lock. */
|
|
||||||
KScopedInterruptDisable di;
|
|
||||||
KScopedSpinLock lk(g_secure_applet_lock);
|
|
||||||
|
|
||||||
/* Verify that the memory being freed is correct. */
|
|
||||||
MESOSPHERE_ABORT_UNLESS(address == g_secure_applet_memory_address);
|
|
||||||
MESOSPHERE_ABORT_UNLESS(size <= SecureAppletMemorySize);
|
|
||||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
|
|
||||||
MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_used);
|
|
||||||
|
|
||||||
/* Release the secure applet memory. */
|
|
||||||
g_secure_applet_memory_used = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
void EnsureRandomGeneratorSeeded() {
|
|
||||||
if (AMS_UNLIKELY(!g_initialized_random_generator)) {
|
|
||||||
u64 seed = UINT64_C(0xF5F5F5F5F5F5F5F5);
|
|
||||||
g_random_generator.Initialize(reinterpret_cast<u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
|
|
||||||
g_initialized_random_generator = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
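The fixed 0xF5F5... seed above is a placeholder; the KSystemControlBase added later in this commit seeds from the hardware timer tick instead. A standalone sketch of that seeding pattern, with std::chrono standing in for the kernel tick counter:

    #include <chrono>
    #include <cstdint>
    #include <random>

    /* Returns a generator seeded from a monotonic clock; the kernel version uses KHardwareTimer::GetTick(). */
    inline std::mt19937_64 MakeSeededGenerator() {
        const std::uint64_t seed = static_cast<std::uint64_t>(std::chrono::steady_clock::now().time_since_epoch().count());
        return std::mt19937_64(seed);
    }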
|
|
||||||
|
|
||||||
/* Initialization. */
|
|
||||||
size_t KSystemControl::Init::GetIntendedMemorySize() {
|
|
||||||
return 4_GB;
|
|
||||||
}
|
|
||||||
|
|
||||||
KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) {
|
|
||||||
const size_t real_dram_size = GetRealMemorySizeForInit();
|
|
||||||
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
|
|
||||||
if (intended_dram_size * 2 < real_dram_size) {
|
|
||||||
return base_address;
|
|
||||||
} else {
|
|
||||||
return base_address + ((real_dram_size - intended_dram_size) / 2);
|
|
||||||
}
|
|
||||||
}
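When the intended memory size is at least half of the real DRAM size, the kernel base is shifted so the intended region sits centered in DRAM; otherwise it stays at the DRAM base. A standalone check of that rule with illustrative sizes:

    #include <cstdint>
    #include <cstdio>

    /* Same rule as GetKernelPhysicalBaseAddress above; the sizes below are illustrative. */
    static uint64_t GetKernelPhysicalBase(uint64_t base, uint64_t real, uint64_t intended) {
        return (intended * 2 < real) ? base : base + ((real - intended) / 2);
    }

    int main() {
        const uint64_t GiB = 1024ull * 1024ull * 1024ull;
        /* 6 GiB of real DRAM running a 4 GiB layout: base moves up by (6 - 4) / 2 = 1 GiB. */
        std::printf("0x%llx\n", static_cast<unsigned long long>(GetKernelPhysicalBase(0x80000000ull, 6 * GiB, 4 * GiB)));
        /* 4 GiB of real DRAM running a 4 GiB layout: base is unchanged. */
        std::printf("0x%llx\n", static_cast<unsigned long long>(GetKernelPhysicalBase(0x40000000ull, 4 * GiB, 4 * GiB)));
    }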
|
|
||||||
|
|
||||||
void KSystemControl::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
|
|
||||||
*out = {
|
|
||||||
.address = GetInteger(GetKernelPhysicalBaseAddress(DramPhysicalAddress)) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax,
|
|
||||||
._08 = 0,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
size_t KSystemControl::Init::GetApplicationPoolSize() {
|
|
||||||
/* Get the base pool size. */
|
|
||||||
const size_t base_pool_size = 3285_MB;
|
|
||||||
|
|
||||||
/* Return (possibly) adjusted size. */
|
|
||||||
return base_pool_size;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t KSystemControl::Init::GetAppletPoolSize() {
|
|
||||||
/* Get the base pool size. */
|
|
||||||
const size_t base_pool_size = 507_MB;
|
|
||||||
|
|
||||||
/* Return (possibly) adjusted size. */
|
|
||||||
constexpr size_t ExtraSystemMemoryForAtmosphere = 40_MB;
|
|
||||||
return base_pool_size - ExtraSystemMemoryForAtmosphere - KTraceBufferSize;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() {
|
|
||||||
return 0x29C8000;
|
|
||||||
}
|
|
||||||
|
|
||||||
void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
|
|
||||||
smc::init::CpuOn(core_id, entrypoint, arg);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Randomness for Initialization. */
|
|
||||||
void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) {
|
|
||||||
EnsureRandomGeneratorSeeded();
|
|
||||||
|
|
||||||
u8 *dst_8 = static_cast<u8 *>(dst);
|
|
||||||
while (size > 0) {
|
|
||||||
const u64 random = GenerateRandomU64FromGenerator();
|
|
||||||
std::memcpy(dst_8, std::addressof(random), std::min(size, sizeof(u64)));
|
|
||||||
size -= std::min(size, sizeof(u64));
|
|
||||||
}
|
|
||||||
}
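The loop above copies at most eight random bytes per iteration, but as written it never advances dst_8, which only matters if a caller ever asks for more than sizeof(u64) bytes. A standalone sketch that advances the destination, assuming larger buffers are possible:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    /* generate_u64 stands in for GenerateRandomU64FromGenerator(). */
    template<typename F>
    void FillRandomBytes(void *dst, std::size_t size, F generate_u64) {
        std::uint8_t *dst_8 = static_cast<std::uint8_t *>(dst);
        while (size > 0) {
            const std::uint64_t random = generate_u64();
            const std::size_t copy = std::min(size, sizeof(random));
            std::memcpy(dst_8, &random, copy);
            dst_8 += copy; /* Advance so later chunks don't overwrite the first eight bytes. */
            size  -= copy;
        }
    }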
|
|
||||||
|
|
||||||
u64 KSystemControl::Init::GenerateRandomRange(u64 min, u64 max) {
|
|
||||||
EnsureRandomGeneratorSeeded();
|
|
||||||
|
|
||||||
return GenerateUniformRange(min, max, GenerateRandomU64FromGenerator);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* System Initialization. */
|
|
||||||
void KSystemControl::InitializePhase1() {
|
|
||||||
/* Set IsDebugMode. */
|
|
||||||
{
|
|
||||||
KTargetSystem::SetIsDebugMode(true);
|
|
||||||
|
|
||||||
/* If debug mode, we want to initialize uart logging. */
|
|
||||||
KTargetSystem::EnableDebugLogging(true);
|
|
||||||
KDebugLog::Initialize();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Set Kernel Configuration. */
|
|
||||||
{
|
|
||||||
KTargetSystem::EnableDebugMemoryFill(false);
|
|
||||||
KTargetSystem::EnableUserExceptionHandlers(true);
|
|
||||||
KTargetSystem::EnableDynamicResourceLimits(true);
|
|
||||||
KTargetSystem::EnableUserPmuAccess(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Set Kernel Debugging. */
|
|
||||||
{
|
|
||||||
/* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */
|
|
||||||
/* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */
|
|
||||||
KTargetSystem::EnableKernelDebugging(true);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* System ResourceLimit initialization. */
|
|
||||||
{
|
|
||||||
/* Construct the resource limit object. */
|
|
||||||
KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit();
|
|
||||||
KAutoObject::Create(std::addressof(sys_res_limit));
|
|
||||||
sys_res_limit.Initialize();
|
|
||||||
|
|
||||||
/* Set the initial limits. */
|
|
||||||
const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes();
|
|
||||||
const auto &slab_counts = init::GetSlabResourceCounts();
|
|
||||||
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size));
|
|
||||||
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread));
|
|
||||||
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent));
|
|
||||||
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory));
|
|
||||||
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession));
|
|
||||||
|
|
||||||
/* Reserve system memory. */
|
|
||||||
MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void KSystemControl::InitializePhase2() {
|
|
||||||
/* Reserve secure applet memory. */
|
|
||||||
if (GetTargetFirmware() >= TargetFirmware_5_0_0) {
|
|
||||||
MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address == Null<KVirtualAddress>);
|
|
||||||
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, SecureAppletMemorySize));
|
|
||||||
|
|
||||||
constexpr auto SecureAppletAllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
|
|
||||||
const KPhysicalAddress secure_applet_memory_phys_addr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption);
|
|
||||||
MESOSPHERE_ABORT_UNLESS(secure_applet_memory_phys_addr != Null<KPhysicalAddress>);
|
|
||||||
|
|
||||||
g_secure_applet_memory_address = KMemoryLayout::GetLinearVirtualAddress(secure_applet_memory_phys_addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Initialize KTrace. */
|
|
||||||
if constexpr (IsKTraceEnabled) {
|
|
||||||
const auto &ktrace = KMemoryLayout::GetKernelTraceBufferRegion();
|
|
||||||
KTrace::Initialize(ktrace.GetAddress(), ktrace.GetSize());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
u32 KSystemControl::GetCreateProcessMemoryPool() {
|
|
||||||
return KMemoryManager::Pool_Unsafe;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Privileged Access. */
|
|
||||||
void KSystemControl::ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
|
|
||||||
MESOSPHERE_UNUSED(out, address, mask, value);
|
|
||||||
MESOSPHERE_UNIMPLEMENTED();
|
|
||||||
}
|
|
||||||
|
|
||||||
Result KSystemControl::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
|
|
||||||
MESOSPHERE_UNUSED(out, address, mask, value);
|
|
||||||
MESOSPHERE_UNIMPLEMENTED();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Randomness. */
|
|
||||||
void KSystemControl::GenerateRandomBytes(void *dst, size_t size) {
|
|
||||||
KScopedInterruptDisable intr_disable;
|
|
||||||
KScopedSpinLock lk(g_random_lock);
|
|
||||||
|
|
||||||
u8 *dst_8 = static_cast<u8 *>(dst);
|
|
||||||
while (size > 0) {
|
|
||||||
const u64 random = GenerateRandomU64FromGenerator();
|
|
||||||
std::memcpy(dst_8, std::addressof(random), std::min(size, sizeof(u64)));
|
|
||||||
size -= std::min(size, sizeof(u64));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
|
|
||||||
KScopedInterruptDisable intr_disable;
|
|
||||||
KScopedSpinLock lk(g_random_lock);
|
|
||||||
|
|
||||||
return GenerateUniformRange(min, max, GenerateRandomU64FromGenerator);
|
|
||||||
}
|
|
||||||
|
|
||||||
u64 KSystemControl::GenerateRandomU64() {
|
|
||||||
KScopedInterruptDisable intr_disable;
|
|
||||||
KScopedSpinLock lk(g_random_lock);
|
|
||||||
|
|
||||||
return GenerateRandomU64FromGenerator();
|
|
||||||
}
|
|
||||||
|
|
||||||
void KSystemControl::SleepSystem() {
|
|
||||||
MESOSPHERE_LOG("SleepSystem() was called\n");
|
|
||||||
MESOSPHERE_UNIMPLEMENTED();
|
|
||||||
}
|
|
||||||
|
|
||||||
void KSystemControl::StopSystem(void *arg) {
|
|
||||||
MESOSPHERE_UNUSED(arg);
|
|
||||||
AMS_INFINITE_LOOP();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* User access. */
|
/* User access. */
|
||||||
void KSystemControl::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
|
void KSystemControl::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
|
||||||
/* Get the function id for the current call. */
|
|
||||||
u64 function_id = args->r[0];
|
|
||||||
|
|
||||||
/* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */
|
|
||||||
auto &page_table = GetCurrentProcess().GetPageTable();
|
|
||||||
auto *bim = page_table.GetBlockInfoManager();
|
|
||||||
|
|
||||||
constexpr size_t MaxMappedRegisters = 7;
|
|
||||||
std::array<KPageGroup, MaxMappedRegisters> page_groups = { KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), };
|
|
||||||
|
|
||||||
for (size_t i = 0; i < MaxMappedRegisters; i++) {
|
|
||||||
const size_t reg_id = i + 1;
|
|
||||||
if (function_id & (1ul << (8 + reg_id))) {
|
|
||||||
/* Create and open a new page group for the address. */
|
|
||||||
KVirtualAddress virt_addr = args->r[reg_id];
|
|
||||||
|
|
||||||
if (R_SUCCEEDED(page_table.MakeAndOpenPageGroup(std::addressof(page_groups[i]), util::AlignDown(GetInteger(virt_addr), PageSize), 1, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None))) {
|
|
||||||
/* Translate the virtual address to a physical address. */
|
|
||||||
const auto it = page_groups[i].begin();
|
|
||||||
MESOSPHERE_ASSERT(it != page_groups[i].end());
|
|
||||||
MESOSPHERE_ASSERT(it->GetNumPages() == 1);
|
|
||||||
|
|
||||||
args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
|
|
||||||
} else {
|
|
||||||
/* If we couldn't map, we should clear the address. */
|
|
||||||
args->r[reg_id] = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Invoke the secure monitor. */
|
/* Invoke the secure monitor. */
|
||||||
smc::CallSecureMonitorFromUser(args);
|
return smc::CallSecureMonitorFromUser(args);
|
||||||
|
|
||||||
/* Make sure that we close any pages that we opened. */
|
|
||||||
for (size_t i = 0; i < MaxMappedRegisters; i++) {
|
|
||||||
page_groups[i].Close();
|
|
||||||
}
|
|
||||||
}
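The mapping loop above relies on a bit convention in the function id: bit (8 + n) marks register n as a user pointer that must be rewritten to a physical address before the SMC. That convention is inferred from the loop itself rather than from a published ABI, so treat the helper below as a sketch:

    /* Uses the kernel's u64/size_t typedefs from the surrounding code. */
    constexpr bool IsPointerArgument(u64 function_id, size_t reg_id) {
        return (function_id & (1ul << (8 + reg_id))) != 0;
    }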
|
|
||||||
|
|
||||||
/* Secure Memory. */
|
|
||||||
size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
|
|
||||||
if (pool == KMemoryManager::Pool_Applet) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
return size;
|
|
||||||
}
|
|
||||||
|
|
||||||
Result KSystemControl::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
|
|
||||||
/* Applet secure memory is handled separately. */
|
|
||||||
if (pool == KMemoryManager::Pool_Applet) {
|
|
||||||
return AllocateSecureMemoryForApplet(out, size);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Ensure the size is aligned. */
|
|
||||||
const size_t alignment = (pool == KMemoryManager::Pool_System ? PageSize : SecureAlignment);
|
|
||||||
R_UNLESS(util::IsAligned(size, alignment), svc::ResultInvalidSize());
|
|
||||||
|
|
||||||
/* Allocate the memory. */
|
|
||||||
const size_t num_pages = size / PageSize;
|
|
||||||
const KPhysicalAddress paddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
|
|
||||||
R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
|
|
||||||
|
|
||||||
/* Ensure we don't leak references to the memory on error. */
|
|
||||||
auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(paddr, num_pages); };
|
|
||||||
|
|
||||||
/* If the memory isn't already secure, set it as secure. */
|
|
||||||
if (pool != KMemoryManager::Pool_System) {
|
|
||||||
/* Set the secure region. */
|
|
||||||
R_UNLESS(SetSecureRegion(paddr, size), svc::ResultOutOfMemory());
|
|
||||||
}
|
|
||||||
|
|
||||||
/* We succeeded. */
|
|
||||||
mem_guard.Cancel();
|
|
||||||
*out = KPageTable::GetHeapVirtualAddress(paddr);
|
|
||||||
return ResultSuccess();
|
|
||||||
}
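The SCOPE_GUARD/Cancel() pairing above keeps the freshly allocated pages from leaking on the early error returns; the guard closes them unless the success path cancels it. A minimal standalone equivalent of that pattern:

    #include <utility>

    /* Runs the stored callable on destruction unless Cancel() was called first. */
    template<typename F>
    class ScopeGuard {
        public:
            explicit ScopeGuard(F f) : m_f(std::move(f)), m_active(true) { }
            ~ScopeGuard() { if (m_active) { m_f(); } }
            void Cancel() { m_active = false; }

            ScopeGuard(const ScopeGuard &) = delete;
            ScopeGuard &operator=(const ScopeGuard &) = delete;
        private:
            F m_f;
            bool m_active;
    };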
|
|
||||||
|
|
||||||
void KSystemControl::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {
|
|
||||||
/* Applet secure memory is handled separately. */
|
|
||||||
if (pool == KMemoryManager::Pool_Applet) {
|
|
||||||
return FreeSecureMemoryForApplet(address, size);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Ensure the size is aligned. */
|
|
||||||
const size_t alignment = (pool == KMemoryManager::Pool_System ? PageSize : SecureAlignment);
|
|
||||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), alignment));
|
|
||||||
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, alignment));
|
|
||||||
|
|
||||||
/* If the memory isn't secure system, reset the secure region. */
|
|
||||||
if (pool != KMemoryManager::Pool_System) {
|
|
||||||
/* Check that the size being freed is the current secure region size. */
|
|
||||||
MESOSPHERE_ABORT_UNLESS(g_secure_region_size == size);
|
|
||||||
|
|
||||||
/* Get the physical address. */
|
|
||||||
const KPhysicalAddress paddr = KPageTable::GetHeapPhysicalAddress(address);
|
|
||||||
MESOSPHERE_ABORT_UNLESS(paddr != Null<KPhysicalAddress>);
|
|
||||||
|
|
||||||
/* Check that the memory being freed is the current secure region. */
|
|
||||||
MESOSPHERE_ABORT_UNLESS(paddr == g_secure_region_phys_addr);
|
|
||||||
|
|
||||||
/* Free the secure region. */
|
|
||||||
MESOSPHERE_ABORT_UNLESS(SetSecureRegion(paddr, 0));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Close the secure region's pages. */
|
|
||||||
Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
|
@ -20,10 +20,6 @@ namespace ams::kern::board::qemu::virt::smc {
|
||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
|
||||||
struct SecureMonitorArguments {
|
|
||||||
u64 x[8];
|
|
||||||
};
|
|
||||||
|
|
||||||
enum UserFunctionId : u32 {
|
enum UserFunctionId : u32 {
|
||||||
UserFunctionId_SetConfig = 0xC3000401,
|
UserFunctionId_SetConfig = 0xC3000401,
|
||||||
UserFunctionId_GetConfig = 0xC3000002,
|
UserFunctionId_GetConfig = 0xC3000002,
|
||||||
|
@ -44,102 +40,6 @@ namespace ams::kern::board::qemu::virt::smc {
|
||||||
UserFunctionId_PrepareEsCommonTitleKey = 0xC3000012,
|
UserFunctionId_PrepareEsCommonTitleKey = 0xC3000012,
|
||||||
};
|
};
|
||||||
|
|
||||||
enum FunctionId : u32 {
|
|
||||||
FunctionId_CpuSuspend = 0xC4000001,
|
|
||||||
FunctionId_CpuOff = 0x84000002,
|
|
||||||
FunctionId_CpuOn = 0xC4000003,
|
|
||||||
};
|
|
||||||
|
|
||||||
void CallPrivilegedSecureMonitorFunction(SecureMonitorArguments &args) {
|
|
||||||
/* Load arguments into registers. */
|
|
||||||
register u64 x0 asm("x0") = args.x[0];
|
|
||||||
register u64 x1 asm("x1") = args.x[1];
|
|
||||||
register u64 x2 asm("x2") = args.x[2];
|
|
||||||
register u64 x3 asm("x3") = args.x[3];
|
|
||||||
register u64 x4 asm("x4") = args.x[4];
|
|
||||||
register u64 x5 asm("x5") = args.x[5];
|
|
||||||
register u64 x6 asm("x6") = args.x[6];
|
|
||||||
register u64 x7 asm("x7") = args.x[7];
|
|
||||||
|
|
||||||
/* Actually make the call. */
|
|
||||||
{
|
|
||||||
/* Disable interrupts while making the call. */
|
|
||||||
KScopedInterruptDisable intr_disable;
|
|
||||||
|
|
||||||
{
|
|
||||||
/* Backup the current thread pointer. */
|
|
||||||
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
|
|
||||||
|
|
||||||
__asm__ __volatile__("smc #0"
|
|
||||||
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
|
|
||||||
:
|
|
||||||
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
|
|
||||||
);
|
|
||||||
|
|
||||||
/* Restore the current thread pointer into X18. */
|
|
||||||
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
|
|
||||||
|
|
||||||
/* Store arguments to output. */
|
|
||||||
args.x[0] = x0;
|
|
||||||
args.x[1] = x1;
|
|
||||||
args.x[2] = x2;
|
|
||||||
args.x[3] = x3;
|
|
||||||
args.x[4] = x4;
|
|
||||||
args.x[5] = x5;
|
|
||||||
args.x[6] = x6;
|
|
||||||
args.x[7] = x7;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void CallPrivilegedSecureMonitorFunctionForInit(SecureMonitorArguments &args) {
|
|
||||||
/* Load arguments into registers. */
|
|
||||||
register u64 x0 asm("x0") = args.x[0];
|
|
||||||
register u64 x1 asm("x1") = args.x[1];
|
|
||||||
register u64 x2 asm("x2") = args.x[2];
|
|
||||||
register u64 x3 asm("x3") = args.x[3];
|
|
||||||
register u64 x4 asm("x4") = args.x[4];
|
|
||||||
register u64 x5 asm("x5") = args.x[5];
|
|
||||||
register u64 x6 asm("x6") = args.x[6];
|
|
||||||
register u64 x7 asm("x7") = args.x[7];
|
|
||||||
|
|
||||||
/* Actually make the call. */
|
|
||||||
__asm__ __volatile__("smc #0"
|
|
||||||
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
|
|
||||||
:
|
|
||||||
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
|
|
||||||
);
|
|
||||||
|
|
||||||
/* Store arguments to output. */
|
|
||||||
args.x[0] = x0;
|
|
||||||
args.x[1] = x1;
|
|
||||||
args.x[2] = x2;
|
|
||||||
args.x[3] = x3;
|
|
||||||
args.x[4] = x4;
|
|
||||||
args.x[5] = x5;
|
|
||||||
args.x[6] = x6;
|
|
||||||
args.x[7] = x7;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Global lock for generate random bytes. */
|
|
||||||
KSpinLock g_generate_random_lock;
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/* SMC functionality needed for init. */
|
|
||||||
namespace init {
|
|
||||||
|
|
||||||
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
|
|
||||||
SecureMonitorArguments args = { FunctionId_CpuOn, core_id, entrypoint, arg };
|
|
||||||
CallPrivilegedSecureMonitorFunctionForInit(args);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
|
|
||||||
SecureMonitorArguments args = { FunctionId_CpuOn, core_id, static_cast<u64>(entrypoint), static_cast<u64>(arg) };
|
|
||||||
CallPrivilegedSecureMonitorFunction(args);
|
|
||||||
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
|
|
||||||
}
|
}
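Elsewhere in this commit the boilerplate above is replaced by a generic helper (KSystemControlBase calls smc::CpuOn<0, false>(core_id, entrypoint, arg) and checks for zero). The exact signature of that helper is not shown in this hunk, so the following is only a hedged sketch of its likely shape on top of SecureMonitorCall:

    /* Sketch only: parameter order and return handling are assumptions. FunctionId_CpuOn (0xC4000003)
     * and SecureMonitorCall<SmcId, DisableInterrupt> are taken from the surrounding diff. */
    template<int SmcId, bool DisableInterrupt>
    u64 CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
        u64 x[8] = { FunctionId_CpuOn, core_id, static_cast<u64>(entrypoint), static_cast<u64>(arg) };
        SecureMonitorCall<SmcId, DisableInterrupt>(x);
        return x[0]; /* PSCI status is returned in X0; 0 indicates success. */
    }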
|
||||||
|
|
||||||
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
|
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
|
||||||
|
|
|
@ -63,14 +63,6 @@ namespace ams::kern::board::qemu::virt::smc {
|
||||||
NotPermitted = 6,
|
NotPermitted = 6,
|
||||||
};
|
};
|
||||||
|
|
||||||
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
|
|
||||||
|
|
||||||
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
|
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
|
||||||
|
|
||||||
namespace init {
|
|
||||||
|
|
||||||
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
|
@ -19,7 +19,6 @@ namespace ams::kern {
|
||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
|
||||||
constexpr uintptr_t DramPhysicalAddress = 0x80000000;
|
|
||||||
constexpr size_t ReservedEarlyDramSize = 0x60000;
|
constexpr size_t ReservedEarlyDramSize = 0x60000;
|
||||||
|
|
||||||
constexpr size_t CarveoutAlignment = 0x20000;
|
constexpr size_t CarveoutAlignment = 0x20000;
|
||||||
|
@ -100,7 +99,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
void SetupDramPhysicalMemoryRegions() {
|
void SetupDramPhysicalMemoryRegions() {
|
||||||
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
|
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
|
||||||
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
|
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress);
|
||||||
|
|
||||||
/* Insert blocks into the tree. */
|
/* Insert blocks into the tree. */
|
||||||
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
|
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
|
||||||
|
@ -173,16 +172,21 @@ namespace ams::kern {
|
||||||
InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
|
InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
|
||||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
|
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
|
||||||
|
|
||||||
/* Insert the pool management region. */
|
/* Determine final total overhead size. */
|
||||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
|
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
|
||||||
const uintptr_t pool_management_start = unsafe_system_pool_start - total_overhead_size;
|
|
||||||
|
/* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the four UserPool regions are contiguous. */
|
||||||
|
|
||||||
|
/* Insert the system pool. */
|
||||||
|
const uintptr_t system_pool_start = pool_partitions_start + total_overhead_size;
|
||||||
|
const size_t system_pool_size = unsafe_system_pool_start - system_pool_start;
|
||||||
|
InsertPoolPartitionRegionIntoBothTrees(system_pool_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
|
||||||
|
|
||||||
|
/* Insert the pool management region. */
|
||||||
|
const uintptr_t pool_management_start = pool_partitions_start;
|
||||||
const size_t pool_management_size = total_overhead_size;
|
const size_t pool_management_size = total_overhead_size;
|
||||||
u32 pool_management_attr = 0;
|
u32 pool_management_attr = 0;
|
||||||
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
|
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
|
||||||
|
|
||||||
/* Insert the system pool. */
|
|
||||||
const uintptr_t system_pool_size = pool_management_start - pool_partitions_start;
|
|
||||||
InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
|
|
||||||
} else {
|
} else {
|
||||||
/* On < 5.0.0, setup a legacy 2-pool layout for backwards compatibility. */
|
/* On < 5.0.0, setup a legacy 2-pool layout for backwards compatibility. */
|
||||||
|
|
||||||
|
@ -249,14 +253,18 @@ namespace ams::kern {
|
||||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
|
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Insert the secure pool. */
|
/* Validate the true overhead size. */
|
||||||
InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, secure_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
|
|
||||||
|
|
||||||
/* Insert the pool management region. */
|
|
||||||
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= approximate_total_overhead_size);
|
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= approximate_total_overhead_size);
|
||||||
|
|
||||||
const uintptr_t pool_management_start = pool_partitions_start + secure_pool_size;
|
/* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the UserPool regions are contiguous. */
|
||||||
const size_t pool_management_size = unsafe_memory_start - pool_management_start;
|
|
||||||
|
/* Insert the secure pool. */
|
||||||
|
const uintptr_t secure_pool_start = unsafe_memory_start - secure_pool_size;
|
||||||
|
InsertPoolPartitionRegionIntoBothTrees(secure_pool_start, secure_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
|
||||||
|
|
||||||
|
/* Insert the pool management region. */
|
||||||
|
const uintptr_t pool_management_start = pool_partitions_start;
|
||||||
|
const size_t pool_management_size = secure_pool_start - pool_management_start;
|
||||||
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= pool_management_size);
|
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= pool_management_size);
|
||||||
|
|
||||||
u32 pool_management_attr = 0;
|
u32 pool_management_attr = 0;
|
||||||
|
|
|
@ -19,12 +19,8 @@ namespace ams::kern {
|
||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
|
||||||
constexpr uintptr_t DramPhysicalAddress = 0x40000000;
|
|
||||||
constexpr size_t ReservedEarlyDramSize = 0x00080000;
|
constexpr size_t ReservedEarlyDramSize = 0x00080000;
|
||||||
|
|
||||||
constexpr size_t CarveoutAlignment = 0x20000;
|
|
||||||
constexpr size_t CarveoutSizeMax = 512_MB - CarveoutAlignment;
|
|
||||||
|
|
||||||
template<typename... T> requires (std::same_as<T, KMemoryRegionAttr> && ...)
|
template<typename... T> requires (std::same_as<T, KMemoryRegionAttr> && ...)
|
||||||
constexpr ALWAYS_INLINE KMemoryRegionType GetMemoryRegionType(KMemoryRegionType base, T... attr) {
|
constexpr ALWAYS_INLINE KMemoryRegionType GetMemoryRegionType(KMemoryRegionType base, T... attr) {
|
||||||
return util::FromUnderlying<KMemoryRegionType>(util::ToUnderlying(base) | (util::ToUnderlying<T>(attr) | ...));
|
return util::FromUnderlying<KMemoryRegionType>(util::ToUnderlying(base) | (util::ToUnderlying<T>(attr) | ...));
|
||||||
|
@ -32,6 +28,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
void InsertPoolPartitionRegionIntoBothTrees(size_t start, size_t size, KMemoryRegionType phys_type, KMemoryRegionType virt_type, u32 &cur_attr) {
|
void InsertPoolPartitionRegionIntoBothTrees(size_t start, size_t size, KMemoryRegionType phys_type, KMemoryRegionType virt_type, u32 &cur_attr) {
|
||||||
const u32 attr = cur_attr++;
|
const u32 attr = cur_attr++;
|
||||||
|
|
||||||
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(start, size, phys_type, attr));
|
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(start, size, phys_type, attr));
|
||||||
const KMemoryRegion *phys = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(phys_type, attr);
|
const KMemoryRegion *phys = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(phys_type, attr);
|
||||||
MESOSPHERE_INIT_ABORT_UNLESS(phys != nullptr);
|
MESOSPHERE_INIT_ABORT_UNLESS(phys != nullptr);
|
||||||
|
@ -50,7 +47,7 @@ namespace ams::kern {
|
||||||
|
|
||||||
void SetupDramPhysicalMemoryRegions() {
|
void SetupDramPhysicalMemoryRegions() {
|
||||||
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
|
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
|
||||||
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
|
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress);
|
||||||
|
|
||||||
/* Insert blocks into the tree. */
|
/* Insert blocks into the tree. */
|
||||||
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
|
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
|
||||||
|
@ -76,9 +73,6 @@ namespace ams::kern {
|
||||||
const KMemoryRegion *kernel_dram_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramKernelBase);
|
const KMemoryRegion *kernel_dram_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramKernelBase);
|
||||||
MESOSPHERE_INIT_ABORT_UNLESS(kernel_dram_region != nullptr);
|
MESOSPHERE_INIT_ABORT_UNLESS(kernel_dram_region != nullptr);
|
||||||
|
|
||||||
const uintptr_t kernel_dram_start = kernel_dram_region->GetAddress();
|
|
||||||
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(kernel_dram_start, CarveoutAlignment));
|
|
||||||
|
|
||||||
/* Find the start of the pool partitions region. */
|
/* Find the start of the pool partitions region. */
|
||||||
const KMemoryRegion *pool_partitions_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(KMemoryRegionType_DramPoolPartition, 0);
|
const KMemoryRegion *pool_partitions_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(KMemoryRegionType_DramPoolPartition, 0);
|
||||||
MESOSPHERE_INIT_ABORT_UNLESS(pool_partitions_region != nullptr);
|
MESOSPHERE_INIT_ABORT_UNLESS(pool_partitions_region != nullptr);
|
||||||
|
@ -93,13 +87,16 @@ namespace ams::kern {
|
||||||
/* Decide on starting addresses for our pools. */
|
/* Decide on starting addresses for our pools. */
|
||||||
const uintptr_t application_pool_start = pool_end - application_pool_size;
|
const uintptr_t application_pool_start = pool_end - application_pool_size;
|
||||||
const uintptr_t applet_pool_start = application_pool_start - applet_pool_size;
|
const uintptr_t applet_pool_start = application_pool_start - applet_pool_size;
|
||||||
const uintptr_t unsafe_system_pool_start = std::min(kernel_dram_start + CarveoutSizeMax, util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
|
const uintptr_t unsafe_system_pool_start = util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, PageSize);
|
||||||
const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;
|
const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;
|
||||||
|
|
||||||
/* We want to arrange application pool depending on where the middle of dram is. */
|
/* We want to arrange application pool depending on where the middle of dram is. */
|
||||||
const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
|
const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
|
||||||
u32 cur_pool_attr = 0;
|
u32 cur_pool_attr = 0;
|
||||||
size_t total_overhead_size = 0;
|
size_t total_overhead_size = 0;
|
||||||
|
|
||||||
|
/* Insert the application pool. */
|
||||||
|
if (application_pool_size > 0) {
|
||||||
if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
|
if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
|
||||||
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
|
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
|
||||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
|
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
|
||||||
|
@ -111,25 +108,35 @@ namespace ams::kern {
|
||||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
|
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
|
||||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
|
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/* Insert the applet pool. */
|
/* Insert the applet pool. */
|
||||||
|
if (applet_pool_size > 0) {
|
||||||
InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
|
InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
|
||||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);
|
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);
|
||||||
|
}
|
||||||
|
|
||||||
/* Insert the nonsecure system pool. */
|
/* Insert the nonsecure system pool. */
|
||||||
|
if (unsafe_system_pool_size > 0) {
|
||||||
InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
|
InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
|
||||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
|
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Determine final total overhead size. */
|
||||||
|
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
|
||||||
|
|
||||||
|
/* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the four UserPool regions are contiguous. */
|
||||||
|
|
||||||
|
/* Insert the system pool. */
|
||||||
|
const uintptr_t system_pool_start = pool_partitions_start + total_overhead_size;
|
||||||
|
const size_t system_pool_size = unsafe_system_pool_start - system_pool_start;
|
||||||
|
InsertPoolPartitionRegionIntoBothTrees(system_pool_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
|
||||||
|
|
||||||
/* Insert the pool management region. */
|
/* Insert the pool management region. */
|
||||||
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
|
const uintptr_t pool_management_start = pool_partitions_start;
|
||||||
const uintptr_t pool_management_start = unsafe_system_pool_start - total_overhead_size;
|
|
||||||
const size_t pool_management_size = total_overhead_size;
|
const size_t pool_management_size = total_overhead_size;
|
||||||
u32 pool_management_attr = 0;
|
u32 pool_management_attr = 0;
|
||||||
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
|
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
|
||||||
|
|
||||||
/* Insert the system pool. */
|
|
||||||
const uintptr_t system_pool_size = pool_management_start - pool_partitions_start;
|
|
||||||
InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
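The reordering in this hunk puts the pool management metadata at the bottom of the pool partition region so that the System, SystemNonSecure, Applet, and Application pools stay contiguous, as the NOTE above explains. A standalone sketch of the address arithmetic with illustrative sizes:

    #include <cstdint>
    #include <cstdio>

    int main() {
        /* Illustrative addresses and sizes, not the real layout. */
        const uint64_t pool_partitions_start    = 0x81000000ull;
        const uint64_t unsafe_system_pool_start = 0x84000000ull;
        const uint64_t total_overhead_size      = 0x00200000ull;

        /* Management metadata sits at the bottom of the partition region... */
        const uint64_t pool_management_start = pool_partitions_start;
        const uint64_t pool_management_size  = total_overhead_size;

        /* ...and the system pool fills the gap up to the non-secure system pool, keeping the
         * user-visible pools contiguous above it. */
        const uint64_t system_pool_start = pool_partitions_start + total_overhead_size;
        const uint64_t system_pool_size  = unsafe_system_pool_start - system_pool_start;

        std::printf("management: start=%#llx size=%#llx\n", (unsigned long long)pool_management_start, (unsigned long long)pool_management_size);
        std::printf("system:     start=%#llx size=%#llx\n", (unsigned long long)system_pool_start, (unsigned long long)system_pool_size);
        return 0;
    }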
|
||||||
|
|
|
@ -238,7 +238,7 @@ namespace ams::kern {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Generate random entropy. */
|
/* Generate random entropy. */
|
||||||
KSystemControl::GenerateRandomBytes(m_entropy, sizeof(m_entropy));
|
KSystemControl::GenerateRandom(m_entropy, util::size(m_entropy));
|
||||||
|
|
||||||
/* Clear remaining fields. */
|
/* Clear remaining fields. */
|
||||||
m_num_running_threads = 0;
|
m_num_running_threads = 0;
|
||||||
|
|
295
libraries/libmesosphere/source/kern_k_system_control_base.cpp
Normal file
295
libraries/libmesosphere/source/kern_k_system_control_base.cpp
Normal file
|
@ -0,0 +1,295 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) Atmosphère-NX
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms and conditions of the GNU General Public License,
|
||||||
|
* version 2, as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||||
|
* more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
#include <mesosphere.hpp>
|
||||||
|
#if defined(ATMOSPHERE_ARCH_ARM64)
|
||||||
|
#include <mesosphere/arch/arm64/kern_secure_monitor_base.hpp>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace ams::kern {
|
||||||
|
|
||||||
|
/* Initialization. */
|
||||||
|
size_t KSystemControlBase::Init::GetRealMemorySize() {
|
||||||
|
return ams::kern::MainMemorySize;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t KSystemControlBase::Init::GetIntendedMemorySize() {
|
||||||
|
return ams::kern::MainMemorySize;
|
||||||
|
}
|
||||||
|
|
||||||
|
KPhysicalAddress KSystemControlBase::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
|
||||||
|
const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
|
||||||
|
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
|
||||||
|
if (intended_dram_size * 2 < real_dram_size) {
|
||||||
|
return base_address;
|
||||||
|
} else {
|
||||||
|
return base_address + ((real_dram_size - intended_dram_size) / 2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
|
||||||
|
*out = {
|
||||||
|
.address = GetInteger(KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress)) + KSystemControl::Init::GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax,
|
||||||
|
._08 = 0,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool KSystemControlBase::Init::ShouldIncreaseThreadResourceLimit() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
size_t KSystemControlBase::Init::GetApplicationPoolSize() {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t KSystemControlBase::Init::GetAppletPoolSize() {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t KSystemControlBase::Init::GetMinimumNonSecureSystemPoolSize() {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
u8 KSystemControlBase::Init::GetDebugLogUartPort() {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void KSystemControlBase::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
|
||||||
|
#if defined(ATMOSPHERE_ARCH_ARM64)
|
||||||
|
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0, false>(core_id, entrypoint, arg)) == 0);
|
||||||
|
#else
|
||||||
|
AMS_INFINITE_LOOP();
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Randomness for Initialization. */
|
||||||
|
void KSystemControlBase::Init::GenerateRandom(u64 *dst, size_t count) {
|
||||||
|
if (AMS_UNLIKELY(!s_initialized_random_generator)) {
|
||||||
|
const u64 seed = KHardwareTimer::GetTick();
|
||||||
|
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
|
||||||
|
s_initialized_random_generator = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (size_t i = 0; i < count; ++i) {
|
||||||
|
dst[i] = s_random_generator.GenerateRandomU64();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 KSystemControlBase::Init::GenerateRandomRange(u64 min, u64 max) {
|
||||||
|
if (AMS_UNLIKELY(!s_initialized_random_generator)) {
|
||||||
|
const u64 seed = KHardwareTimer::GetTick();
|
||||||
|
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
|
||||||
|
s_initialized_random_generator = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return KSystemControlBase::GenerateUniformRange(min, max, [] ALWAYS_INLINE_LAMBDA () -> u64 { return s_random_generator.GenerateRandomU64(); });
|
||||||
|
}
|
||||||
|
|
||||||
|
/* System Initialization. */
|
||||||
|
void KSystemControlBase::InitializePhase1(bool skip_target_system) {
|
||||||
|
/* Initialize the rng, if we somehow haven't already. */
|
||||||
|
if (AMS_UNLIKELY(!s_initialized_random_generator)) {
|
||||||
|
const u64 seed = KHardwareTimer::GetTick();
|
||||||
|
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
|
||||||
|
s_initialized_random_generator = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Configure KTargetSystem, if we haven't already by an implementation SystemControl. */
|
||||||
|
if (!skip_target_system) {
|
||||||
|
/* Set IsDebugMode. */
|
||||||
|
{
|
||||||
|
KTargetSystem::SetIsDebugMode(true);
|
||||||
|
|
||||||
|
/* If debug mode, we want to initialize uart logging. */
|
||||||
|
KTargetSystem::EnableDebugLogging(true);
|
||||||
|
KDebugLog::Initialize();
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Set Kernel Configuration. */
|
||||||
|
{
|
||||||
|
KTargetSystem::EnableDebugMemoryFill(false);
|
||||||
|
KTargetSystem::EnableUserExceptionHandlers(true);
|
||||||
|
KTargetSystem::EnableDynamicResourceLimits(true);
|
||||||
|
KTargetSystem::EnableUserPmuAccess(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Set Kernel Debugging. */
|
||||||
|
{
|
||||||
|
/* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */
|
||||||
|
/* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */
|
||||||
|
KTargetSystem::EnableKernelDebugging(true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* System ResourceLimit initialization. */
|
||||||
|
{
|
||||||
|
/* Construct the resource limit object. */
|
||||||
|
KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit();
|
||||||
|
KAutoObject::Create<KResourceLimit>(std::addressof(sys_res_limit));
|
||||||
|
sys_res_limit.Initialize();
|
||||||
|
|
||||||
|
/* Set the initial limits. */
|
||||||
|
const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes();
|
||||||
|
const auto &slab_counts = init::GetSlabResourceCounts();
|
||||||
|
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size));
|
||||||
|
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread));
|
||||||
|
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent));
|
||||||
|
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory));
|
||||||
|
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession));
|
||||||
|
|
||||||
|
/* Reserve system memory. */
|
||||||
|
MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void KSystemControlBase::InitializePhase2() {
|
||||||
|
/* Initialize KTrace. */
|
||||||
|
if constexpr (IsKTraceEnabled) {
|
||||||
|
const auto &ktrace = KMemoryLayout::GetKernelTraceBufferRegion();
|
||||||
|
KTrace::Initialize(ktrace.GetAddress(), ktrace.GetSize());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
u32 KSystemControlBase::GetCreateProcessMemoryPool() {
|
||||||
|
return KMemoryManager::Pool_System;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Privileged Access. */
|
||||||
|
void KSystemControlBase::ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
|
||||||
|
/* TODO */
|
||||||
|
MESOSPHERE_UNUSED(out, address, mask, value);
|
||||||
|
MESOSPHERE_UNIMPLEMENTED();
|
||||||
|
}
|
||||||
|
|
||||||
|
Result KSystemControlBase::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
|
||||||
|
MESOSPHERE_UNUSED(out, address, mask, value);
|
||||||
|
return svc::ResultNotImplemented();
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Randomness. */
|
||||||
|
void KSystemControlBase::GenerateRandom(u64 *dst, size_t count) {
|
||||||
|
KScopedInterruptDisable intr_disable;
|
||||||
|
KScopedSpinLock lk(s_random_lock);
|
||||||
|
|
||||||
|
for (size_t i = 0; i < count; ++i) {
|
||||||
|
dst[i] = s_random_generator.GenerateRandomU64();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 KSystemControlBase::GenerateRandomRange(u64 min, u64 max) {
|
||||||
|
KScopedInterruptDisable intr_disable;
|
||||||
|
KScopedSpinLock lk(s_random_lock);
|
||||||
|
|
||||||
|
return KSystemControlBase::GenerateUniformRange(min, max, [] ALWAYS_INLINE_LAMBDA () -> u64 { return s_random_generator.GenerateRandomU64(); });
|
||||||
|
}
|
||||||
|
|
||||||
|
u64 KSystemControlBase::GenerateRandomU64() {
|
||||||
|
KScopedInterruptDisable intr_disable;
|
||||||
|
KScopedSpinLock lk(s_random_lock);
|
||||||
|
|
||||||
|
return s_random_generator.GenerateRandomU64();
|
||||||
|
}

    void KSystemControlBase::SleepSystem() {
        MESOSPHERE_LOG("SleepSystem() was called\n");
    }

    void KSystemControlBase::StopSystem(void *) {
        MESOSPHERE_LOG("KSystemControlBase::StopSystem\n");
        AMS_INFINITE_LOOP();
    }

    /* User access. */
#if defined(ATMOSPHERE_ARCH_ARM64)
    void KSystemControlBase::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
        /* Get the function id for the current call. */
        u64 function_id = args->r[0];

        /* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */
        auto &page_table = GetCurrentProcess().GetPageTable();
        auto *bim = page_table.GetBlockInfoManager();

        constexpr size_t MaxMappedRegisters = 7;
        std::array<KPageGroup, MaxMappedRegisters> page_groups = { KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), };

        for (size_t i = 0; i < MaxMappedRegisters; i++) {
            const size_t reg_id = i + 1;
            if (function_id & (1ul << (8 + reg_id))) {
                /* Create and open a new page group for the address. */
                KVirtualAddress virt_addr = args->r[reg_id];

                if (R_SUCCEEDED(page_table.MakeAndOpenPageGroup(std::addressof(page_groups[i]), util::AlignDown(GetInteger(virt_addr), PageSize), 1, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None))) {
                    /* Translate the virtual address to a physical address. */
                    const auto it = page_groups[i].begin();
                    MESOSPHERE_ASSERT(it != page_groups[i].end());
                    MESOSPHERE_ASSERT(it->GetNumPages() == 1);

                    args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
                } else {
                    /* If we couldn't map, we should clear the address. */
                    args->r[reg_id] = 0;
                }
            }
        }

        /* Invoke the secure monitor. */
        KSystemControl::CallSecureMonitorFromUserImpl(args);

        /* Make sure that we close any pages that we opened. */
        for (size_t i = 0; i < MaxMappedRegisters; i++) {
            page_groups[i].Close();
        }
    }
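
    /* Note (illustrative sketch, not part of this file or commit): in the loop above, bit        */
    /* (8 + reg_id) of the SMC function id marks r[reg_id] (for reg_id 1..7) as a pointer         */
    /* argument whose user virtual address is replaced with the corresponding physical address    */
    /* before the monitor is invoked. The helper below is a hypothetical illustration of how      */
    /* such flags could be encoded; it is not upstream code.                                      */
    #if 0
    constexpr u64 EncodePointerArgumentFlags(u64 function_id, std::initializer_list<size_t> pointer_register_ids) {
        for (const size_t reg_id : pointer_register_ids) {
            /* Only r[1]..r[7] can be flagged as pointer arguments. */
            if (1 <= reg_id && reg_id <= 7) {
                function_id |= (u64(1) << (8 + reg_id));
            }
        }
        return function_id;
    }

    /* Example: flag r[1] and r[2] as pointer arguments for some base function id. */
    /* const u64 function_id = EncodePointerArgumentFlags(base_function_id, { 1, 2 }); */
    #endif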

    void KSystemControlBase::CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
        /* By default, we don't actually support secure monitor, so just set args to a failure code. */
        args->r[0] = 1;
    }
#endif

    /* Secure Memory. */
    size_t KSystemControlBase::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
        MESOSPHERE_UNUSED(pool);
        return size;
    }

    Result KSystemControlBase::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
        /* Ensure the size is aligned. */
        constexpr size_t Alignment = PageSize;
        R_UNLESS(util::IsAligned(size, Alignment), svc::ResultInvalidSize());

        /* Allocate the memory. */
        const size_t num_pages = size / PageSize;
        const KPhysicalAddress paddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, Alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
        R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());

        *out = KPageTable::GetHeapVirtualAddress(paddr);
        return ResultSuccess();
    }

    void KSystemControlBase::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {
        /* Ensure the size is aligned. */
        constexpr size_t Alignment = PageSize;
        MESOSPHERE_UNUSED(pool);
        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), Alignment));
        MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, Alignment));

        /* Close the secure region's pages. */
        Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize);
    }
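
    /* Note (illustrative sketch, not part of this file or commit): AllocateSecureMemory() above  */
    /* opens a reference on the pages it hands out (via AllocateAndOpenContinuous), and           */
    /* FreeSecureMemory() closes that reference, so every allocation must be balanced by a free   */
    /* of the same size. A hypothetical caller, assuming the System pool and a page-aligned       */
    /* size, might look like the sketch below.                                                    */
    #if 0
    Result SecureMemoryUsageSketch() {
        constexpr size_t SecureSize = 4 * PageSize;

        /* Allocate page-aligned secure memory. */
        KVirtualAddress secure_mem = Null<KVirtualAddress>;
        R_TRY(KSystemControl::AllocateSecureMemory(std::addressof(secure_mem), SecureSize, KMemoryManager::Pool_System));

        /* ... use the memory via its heap virtual address ... */

        /* Release the reference taken by the allocation. */
        KSystemControl::FreeSecureMemory(secure_mem, SecureSize, KMemoryManager::Pool_System);
        return ResultSuccess();
    }
    #endif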

}

@@ -139,14 +139,20 @@ namespace ams::kern {
         PrintMemoryRegion("          InitPageTable", KMemoryLayout::GetKernelInitPageTableRegionPhysicalExtents());
         PrintMemoryRegion("          MemoryPoolRegion", KMemoryLayout::GetKernelPoolPartitionRegionPhysicalExtents());
         if (GetTargetFirmware() >= TargetFirmware_5_0_0) {
-            PrintMemoryRegion("              Management", KMemoryLayout::GetKernelPoolManagementRegionPhysicalExtents());
             PrintMemoryRegion("              System", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
+            PrintMemoryRegion("              Management", KMemoryLayout::GetKernelPoolManagementRegionPhysicalExtents());
+            if (KMemoryLayout::HasKernelSystemNonSecurePoolRegion()) {
                 PrintMemoryRegion("              SystemUnsafe", KMemoryLayout::GetKernelSystemNonSecurePoolRegionPhysicalExtents());
+            }
+            if (KMemoryLayout::HasKernelAppletPoolRegion()) {
                 PrintMemoryRegion("              Applet", KMemoryLayout::GetKernelAppletPoolRegionPhysicalExtents());
+            }
+            if (KMemoryLayout::HasKernelApplicationPoolRegion()) {
                 PrintMemoryRegion("              Application", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
+            }
         } else {
-            PrintMemoryRegion("              Secure", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
             PrintMemoryRegion("              Management", KMemoryLayout::GetKernelPoolManagementRegionPhysicalExtents());
+            PrintMemoryRegion("              Secure", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
             PrintMemoryRegion("              Unsafe", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
         }
         if constexpr (IsKTraceEnabled) {

@@ -36,7 +36,7 @@ namespace ams::kern::svc {
         size_t remaining = size;
         while (remaining > 0) {
             /* Get a contiguous range to operate on. */
-            KPageTableBase::MemoryRange contig_range = {};
+            KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
             R_TRY(page_table.OpenMemoryRangeForProcessCacheOperation(std::addressof(contig_range), cur_address, aligned_end - cur_address));

             /* Close the range when we're done operating on it. */
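
/* Note (illustrative sketch, not part of this commit): both initializers in the hunk above zero  */
/* the structure. "= {}" value-initializes every member, while the designated-initializer form    */
/* spells the members out explicitly at the definition. A minimal standalone illustration, using  */
/* a hypothetical struct rather than the kernel type, follows.                                    */
#if 0
struct MemoryRange { unsigned long address; unsigned long size; };

MemoryRange a = {};                              /* address == 0, size == 0 (value-initialized). */
MemoryRange b = { .address = 0, .size = 0 };     /* Same result, with the members named.         */
#endif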