/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern {

    namespace {
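
        /* Process ids are allocated from two disjoint ranges: kernel-created initial */
        /* processes draw from [InitialProcessIdMin, InitialProcessIdMax], and all */
        /* subsequently created processes draw from [ProcessIdMin, ProcessIdMax]. */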
        constexpr u64 InitialProcessIdMin = 1;
        constexpr u64 InitialProcessIdMax = 0x50;

        constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
        constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();

        constinit util::Atomic<u64> g_initial_process_id = InitialProcessIdMin;
        constinit util::Atomic<u64> g_process_id         = ProcessIdMin;

        Result TerminateChildren(KProcess *process, const KThread *thread_to_not_terminate) {
            /* Request that all child threads terminate. */
            {
                KScopedLightLock proc_lk(process->GetListLock());
                KScopedSchedulerLock sl;

                if (thread_to_not_terminate != nullptr && process->GetPinnedThread(GetCurrentCoreId()) == thread_to_not_terminate) {
                    /* NOTE: Here Nintendo unpins the current thread instead of the thread_to_not_terminate. */
                    /* This is valid because the only caller which uses non-nullptr as argument uses GetCurrentThreadPointer(), */
                    /* but it's still notable because it seems incorrect at first glance. */
                    process->UnpinCurrentThread();
                }

                auto &thread_list = process->GetThreadList();
                for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
                    if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) {
                        if (thread->GetState() != KThread::ThreadState_Terminated) {
                            thread->RequestTerminate();
                        }
                    }
                }
            }

            /* Wait for all child threads to terminate. */
            while (true) {
                /* Get the next child. */
                KThread *cur_child = nullptr;
                {
                    KScopedLightLock proc_lk(process->GetListLock());

                    auto &thread_list = process->GetThreadList();
                    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
                        if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) {
                            if (thread->GetState() != KThread::ThreadState_Terminated) {
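                                /* Open a reference to the child, so that it cannot be destroyed */
                                /* while we terminate it below with the list lock dropped. */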
                                if (AMS_LIKELY(thread->Open())) {
                                    cur_child = thread;
                                    break;
                                }
                            }
                        }
                    }
                }

                /* If we didn't find any non-terminated children, we're done. */
                if (cur_child == nullptr) {
                    break;
                }

                /* Terminate and close the thread. */
                ON_SCOPE_EXIT { cur_child->Close(); };

                if (Result terminate_result = cur_child->Terminate(); svc::ResultTerminationRequested::Includes(terminate_result)) {
                    return terminate_result;
                }
            }

            return ResultSuccess();
        }
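
        /* Thread queue used by KProcess::EnterUserException() to hand off the process's */
        /* exception thread slot to the next waiter when it is woken. */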
        class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
            private:
                KThread **m_exception_thread;
            public:
                constexpr ThreadQueueImplForKProcessEnterUserException(KThread **t) : KThreadQueue(), m_exception_thread(t) { /* ... */ }

                virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
                    /* Set the exception thread. */
                    *m_exception_thread = waiting_thread;

                    /* Invoke the base end wait handler. */
                    KThreadQueue::EndWait(waiting_thread, wait_result);
                }

                virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
                    /* Remove the thread as a waiter on its mutex owner. */
                    waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);

                    /* Invoke the base cancel wait handler. */
                    KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
                }
        };

    }

    void KProcess::Finalize() {
        /* Delete the process local region. */
        this->DeleteThreadLocalRegion(m_plr_address);

        /* Get the used memory size. */
        const size_t used_memory_size = this->GetUsedUserPhysicalMemorySize();

        /* Finalize the page table. */
        m_page_table.Finalize();

        /* Free the system resource. */
        if (m_system_resource_address != Null<KVirtualAddress>) {
            /* Check that we have no outstanding allocations. */
            MESOSPHERE_ABORT_UNLESS(m_memory_block_slab_manager.GetUsed() == 0);
            MESOSPHERE_ABORT_UNLESS(m_block_info_manager.GetUsed() == 0);
            MESOSPHERE_ABORT_UNLESS(m_page_table_manager.GetUsed() == 0);

            /* Free the memory. */
            KSystemControl::FreeSecureMemory(m_system_resource_address, m_system_resource_num_pages * PageSize, m_memory_pool);

            /* Clear our tracking variables. */
            m_system_resource_address   = Null<KVirtualAddress>;
            m_system_resource_num_pages = 0;

            /* Finalize optimized memory. If memory wasn't optimized, this is a no-op. */
            Kernel::GetMemoryManager().FinalizeOptimizedMemory(this->GetId(), m_memory_pool);
        }

        /* Release memory to the resource limit. */
        if (m_resource_limit != nullptr) {
            MESOSPHERE_ABORT_UNLESS(used_memory_size >= m_memory_release_hint);
            m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, used_memory_size, used_memory_size - m_memory_release_hint);
            m_resource_limit->Close();
        }

        /* Free all shared memory infos. */
        {
            auto it = m_shared_memory_list.begin();
            while (it != m_shared_memory_list.end()) {
                KSharedMemoryInfo *info = std::addressof(*it);
                KSharedMemory *shmem    = info->GetSharedMemory();
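
                /* Each reference held on the info mirrors a reference held on the shared */
                /* memory itself, so close one shared memory reference per info reference. */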
                while (!info->Close()) {
                    shmem->Close();
                }
                shmem->Close();

                it = m_shared_memory_list.erase(it);
                KSharedMemoryInfo::Free(info);
            }
        }

        /* Close all references to our io regions. */
        {
            auto it = m_io_region_list.begin();
            while (it != m_io_region_list.end()) {
                KIoRegion *io_region = std::addressof(*it);
                it = m_io_region_list.erase(it);

                io_region->Close();
            }
        }

        /* Our thread local page list must be empty at this point. */
        MESOSPHERE_ABORT_UNLESS(m_partially_used_tlp_tree.empty());
        MESOSPHERE_ABORT_UNLESS(m_fully_used_tlp_tree.empty());

        /* Log that we finalized for debug. */
        MESOSPHERE_LOG("KProcess::Finalize() pid=%ld name=%-12s\n", m_process_id, m_name);

        /* Perform inherited finalization. */
        KSynchronizationObject::Finalize();
    }

    Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params) {
        /* Validate that the intended kernel version is high enough for us to support. */
        R_UNLESS(m_capabilities.GetIntendedKernelVersion() >= ams::svc::RequiredKernelVersion, svc::ResultInvalidCombination());

        /* Validate that the intended kernel version isn't too high for us to support. */
        R_UNLESS(m_capabilities.GetIntendedKernelVersion() <= ams::svc::SupportedKernelVersion, svc::ResultInvalidCombination());

        /* Create and clear the process local region. */
        R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
        m_plr_heap_address = this->GetThreadLocalRegionPointer(m_plr_address);
        std::memset(m_plr_heap_address, 0, ams::svc::ThreadLocalRegionSize);

        /* Copy in the name from parameters. */
        static_assert(sizeof(params.name) < sizeof(m_name));
        std::memcpy(m_name, params.name, sizeof(params.name));
        m_name[sizeof(params.name)] = 0;

        /* Set misc fields. */
        m_state                   = State_Created;
        m_main_thread_stack_size  = 0;
        m_creation_time           = KHardwareTimer::GetTick();
        m_used_kernel_memory_size = 0;
        m_ideal_core_id           = 0;
        m_flags                   = params.flags;
        m_version                 = params.version;
        m_program_id              = params.program_id;
        m_code_address            = params.code_address;
        m_code_size               = params.code_num_pages * PageSize;
        m_is_application          = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
        m_is_jit_debug            = false;

        /* Set thread fields. */
        for (size_t i = 0; i < cpu::NumCores; i++) {
            m_running_threads[i]            = nullptr;
            m_pinned_threads[i]             = nullptr;
            m_running_thread_idle_counts[i] = 0;
        }

        /* Set max memory based on address space type. */
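        /* NOTE: For the 32-bit-without-alias address space, the capacity that would */
        /* otherwise back the alias region is folded into the maximum process memory below. */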
        switch ((params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask)) {
            case ams::svc::CreateProcessFlag_AddressSpace32Bit:
            case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated:
            case ams::svc::CreateProcessFlag_AddressSpace64Bit:
                m_max_process_memory = m_page_table.GetHeapRegionSize();
                break;
            case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias:
                m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize();
                break;
            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
        }

        /* Generate random entropy. */
        KSystemControl::GenerateRandomBytes(m_entropy, sizeof(m_entropy));

        /* Clear remaining fields. */
        m_num_running_threads  = 0;
        m_num_process_switches = 0;
        m_num_thread_switches  = 0;
        m_num_fpu_switches     = 0;
        m_num_supervisor_calls = 0;
        m_num_ipc_messages     = 0;

        m_is_signaled                 = false;
        m_attached_object             = nullptr;
        m_exception_thread            = nullptr;
        m_is_suspended                = false;
        m_memory_release_hint         = 0;
        m_schedule_count              = 0;
        m_is_handle_table_initialized = false;

        /* We're initialized! */
        m_is_initialized = true;

        return ResultSuccess();
    }

    Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool, bool immortal) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(res_limit != nullptr);
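
        /* Check that the code size in bytes does not overflow. */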
        MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == static_cast<size_t>(params.code_num_pages));

        /* Set members. */
        m_memory_pool               = pool;
        m_resource_limit            = res_limit;
        m_system_resource_address   = Null<KVirtualAddress>;
        m_system_resource_num_pages = 0;
        m_is_immortal               = immortal;

        /* Setup page table. */
        /* NOTE: Nintendo passes process ID despite not having set it yet. */
        /* This goes completely unused, but even so... */
        {
            const auto as_type          = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
            const bool enable_aslr      = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
            const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
            const bool is_app           = (params.flags & ams::svc::CreateProcessFlag_IsApplication) != 0;
            auto *mem_block_manager     = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
            auto *block_info_manager    = std::addressof(is_app ? Kernel::GetApplicationBlockInfoManager() : Kernel::GetSystemBlockInfoManager());
            auto *pt_manager            = std::addressof(is_app ? Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager());
            R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager, res_limit));
        }
        auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };

        /* Ensure we can insert the code region. */
        R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());

        /* Map the code region. */
        R_TRY(m_page_table.MapPageGroup(params.code_address, pg, KMemoryState_Code, KMemoryPermission_KernelRead));

        /* Initialize capabilities. */
        R_TRY(m_capabilities.Initialize(caps, num_caps, std::addressof(m_page_table)));

        /* Initialize the process id. */
        m_process_id = g_initial_process_id++;
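        /* NOTE: The increment above is an atomic fetch-add; the aborts below catch */
        /* exhaustion of the initial process id range. */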
        MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= m_process_id);
        MESOSPHERE_ABORT_UNLESS(m_process_id <= InitialProcessIdMax);

        /* Initialize the rest of the process. */
        R_TRY(this->Initialize(params));

        /* Open a reference to the resource limit. */
        m_resource_limit->Open();

        /* We succeeded! */
        pt_guard.Cancel();
        return ResultSuccess();
    }

    Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params, svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool) {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(res_limit != nullptr);

        /* Set pool and resource limit. */
        m_memory_pool    = pool;
        m_resource_limit = res_limit;
        m_is_immortal    = false;

        /* Get the memory sizes. */
        const size_t code_num_pages            = params.code_num_pages;
        const size_t system_resource_num_pages = params.system_resource_num_pages;
        const size_t code_size                 = code_num_pages * PageSize;
        const size_t system_resource_size      = system_resource_num_pages * PageSize;

        /* Reserve memory for the system resource. */
        KScopedResourceReservation memory_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, code_size + KSystemControl::CalculateRequiredSecureMemorySize(system_resource_size, pool));
        R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());

        /* Setup page table resource objects. */
        KMemoryBlockSlabManager *mem_block_manager;
        KBlockInfoManager *block_info_manager;
        KPageTableManager *pt_manager;

        m_system_resource_address   = Null<KVirtualAddress>;
        m_system_resource_num_pages = 0;

        if (system_resource_num_pages != 0) {
            /* Allocate secure memory. */
            R_TRY(KSystemControl::AllocateSecureMemory(std::addressof(m_system_resource_address), system_resource_size, pool));

            /* Set the number of system resource pages. */
            MESOSPHERE_ASSERT(m_system_resource_address != Null<KVirtualAddress>);
            m_system_resource_num_pages = system_resource_num_pages;

            /* Initialize slab heaps. */
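            /* The first rc_size bytes of the system resource block hold the page table */
            /* reference counts; the remainder backs the dynamic page manager. */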
            const size_t rc_size = util::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(system_resource_size), PageSize);
            m_dynamic_page_manager.Initialize(m_system_resource_address + rc_size, system_resource_size - rc_size);
            m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, GetPointer<KPageTableManager::RefCount>(m_system_resource_address));
            m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
            m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);

            /* Initialize managers. */
            m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_page_table_heap));
            m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_memory_block_heap));
            m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_block_info_heap));

            mem_block_manager  = std::addressof(m_memory_block_slab_manager);
            block_info_manager = std::addressof(m_block_info_manager);
            pt_manager         = std::addressof(m_page_table_manager);
        } else {
            const bool is_app  = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
            mem_block_manager  = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
            block_info_manager = std::addressof(is_app ? Kernel::GetApplicationBlockInfoManager() : Kernel::GetSystemBlockInfoManager());
            pt_manager         = std::addressof(is_app ? Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager());
        }

        /* Ensure we don't leak any secure memory we allocated. */
        auto sys_resource_guard = SCOPE_GUARD {
            if (m_system_resource_address != Null<KVirtualAddress>) {
                /* Check that we have no outstanding allocations. */
                MESOSPHERE_ABORT_UNLESS(m_memory_block_slab_manager.GetUsed() == 0);
                MESOSPHERE_ABORT_UNLESS(m_block_info_manager.GetUsed() == 0);
                MESOSPHERE_ABORT_UNLESS(m_page_table_manager.GetUsed() == 0);

                /* Free the memory. */
                KSystemControl::FreeSecureMemory(m_system_resource_address, system_resource_size, pool);

                /* Clear our tracking variables. */
                m_system_resource_address   = Null<KVirtualAddress>;
                m_system_resource_num_pages = 0;
            }
        };

        /* Setup page table. */
        /* NOTE: Nintendo passes process ID despite not having set it yet. */
        /* This goes completely unused, but even so... */
        {
            const auto as_type          = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
            const bool enable_aslr      = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
            const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
            R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager, res_limit));
        }
        auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };

        /* Ensure we can insert the code region. */
        R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion());

        /* Map the code region. */
        R_TRY(m_page_table.MapPages(params.code_address, code_num_pages, KMemoryState_Code, static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped)));

        /* Initialize capabilities. */
        R_TRY(m_capabilities.Initialize(user_caps, num_caps, std::addressof(m_page_table)));

        /* Initialize the process id. */
        m_process_id = g_process_id++;
        MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= m_process_id);
        MESOSPHERE_ABORT_UNLESS(m_process_id <= ProcessIdMax);

        /* If we should optimize memory allocations, do so. */
        if (m_system_resource_address != Null<KVirtualAddress> && (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0) {
            R_TRY(Kernel::GetMemoryManager().InitializeOptimizedMemory(m_process_id, pool));
        }

        /* Initialize the rest of the process. */
        R_TRY(this->Initialize(params));

        /* Open a reference to the resource limit. */
        m_resource_limit->Open();

        /* We succeeded, so commit our memory reservation and cancel our guards. */
        sys_resource_guard.Cancel();
        pt_guard.Cancel();
        memory_reservation.Commit();

        return ResultSuccess();
    }

    void KProcess::DoWorkerTaskImpl() {
        /* Terminate child threads. */
        TerminateChildren(this, nullptr);

        /* Finalize the handle table, if we're not immortal. */
        if (!m_is_immortal && m_is_handle_table_initialized) {
            this->FinalizeHandleTable();
        }

        /* Call the debug callback. */
        KDebug::OnExitProcess(this);

        /* Finish termination. */
        this->FinishTermination();
    }

    Result KProcess::StartTermination() {
        /* Finalize the handle table when we're done, if the process isn't immortal. */
        ON_SCOPE_EXIT {
            if (!m_is_immortal) {
                this->FinalizeHandleTable();
            }
        };

        /* Terminate child threads other than the current one. */
        return TerminateChildren(this, GetCurrentThreadPointer());
    }

    void KProcess::FinishTermination() {
        /* Only allow termination to occur if the process isn't immortal. */
        if (!m_is_immortal) {
            /* Release resource limit hint. */
            if (m_resource_limit != nullptr) {
                m_memory_release_hint = this->GetUsedUserPhysicalMemorySize();
                m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, 0, m_memory_release_hint);
            }

            /* Change state. */
            {
                KScopedSchedulerLock sl;
                this->ChangeState(State_Terminated);
            }

            /* Close. */
            this->Close();
        }
    }

    void KProcess::Exit() {
        MESOSPHERE_ASSERT_THIS();

        /* Determine whether we need to start terminating. */
        bool needs_terminate = false;
        {
            KScopedLightLock lk(m_state_lock);
            KScopedSchedulerLock sl;

            MESOSPHERE_ASSERT(m_state != State_Created);
            MESOSPHERE_ASSERT(m_state != State_CreatedAttached);
            MESOSPHERE_ASSERT(m_state != State_Crashed);
            MESOSPHERE_ASSERT(m_state != State_Terminated);
            if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_DebugBreak) {
                this->ChangeState(State_Terminating);
                needs_terminate = true;
            }
        }

        /* If we need to start termination, do so. */
        if (needs_terminate) {
            this->StartTermination();

            /* Note for debug that we're exiting the process. */
            MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", m_process_id, m_name);

            /* Register the process as a work task. */
            KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
        }

        /* Exit the current thread. */
        GetCurrentThread().Exit();
        MESOSPHERE_PANIC("Thread survived call to exit");
    }

    Result KProcess::Terminate() {
        MESOSPHERE_ASSERT_THIS();

        /* Determine whether we need to start terminating. */
        bool needs_terminate = false;
        {
            KScopedLightLock lk(m_state_lock);

            /* Check whether we're allowed to terminate. */
            R_UNLESS(m_state != State_Created,         svc::ResultInvalidState());
            R_UNLESS(m_state != State_CreatedAttached, svc::ResultInvalidState());

            KScopedSchedulerLock sl;

            if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) {
                this->ChangeState(State_Terminating);
                needs_terminate = true;
            }
        }

        /* If we need to terminate, do so. */
        if (needs_terminate) {
            /* Start termination. */
            if (R_SUCCEEDED(this->StartTermination())) {
                /* Note for debug that we're terminating the process. */
                MESOSPHERE_LOG("KProcess::Terminate() OK pid=%ld name=%-12s\n", m_process_id, m_name);

                /* Call the debug callback. */
                KDebug::OnTerminateProcess(this);

                /* Finish termination. */
                this->FinishTermination();
            } else {
                /* Note for debug that we failed to terminate the process. */
                MESOSPHERE_LOG("KProcess::Terminate() FAIL pid=%ld name=%-12s\n", m_process_id, m_name);

                /* Register the process as a work task. */
                KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
            }
        }

        return ResultSuccess();
    }

    Result KProcess::AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
        /* Lock ourselves, to prevent concurrent access. */
        KScopedLightLock lk(m_state_lock);

        /* Address and size parameters aren't used. */
        MESOSPHERE_UNUSED(address, size);

        /* Try to find an existing info for the memory. */
        KSharedMemoryInfo *info = nullptr;
        for (auto it = m_shared_memory_list.begin(); it != m_shared_memory_list.end(); ++it) {
            if (it->GetSharedMemory() == shmem) {
                info = std::addressof(*it);
                break;
            }
        }

        /* If we didn't find an info, create one. */
        if (info == nullptr) {
            /* Allocate a new info. */
            info = KSharedMemoryInfo::Allocate();
            R_UNLESS(info != nullptr, svc::ResultOutOfResource());

            /* Initialize the info and add it to our list. */
            info->Initialize(shmem);
            m_shared_memory_list.push_back(*info);
        }

        /* Open a reference to the shared memory and its info. */
        shmem->Open();
        info->Open();

        return ResultSuccess();
    }

    void KProcess::RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
        /* Lock ourselves, to prevent concurrent access. */
        KScopedLightLock lk(m_state_lock);

        /* Address and size parameters aren't used. */
        MESOSPHERE_UNUSED(address, size);

        /* Find an existing info for the memory. */
        KSharedMemoryInfo *info = nullptr;
        auto it = m_shared_memory_list.begin();
        for (/* ... */; it != m_shared_memory_list.end(); ++it) {
            if (it->GetSharedMemory() == shmem) {
                info = std::addressof(*it);
                break;
            }
        }
        MESOSPHERE_ABORT_UNLESS(info != nullptr);

        /* Close a reference to the info and its memory. */
        if (info->Close()) {
            m_shared_memory_list.erase(it);
            KSharedMemoryInfo::Free(info);
        }

        shmem->Close();
    }

    void KProcess::AddIoRegion(KIoRegion *io_region) {
        /* Lock ourselves, to prevent concurrent access. */
        KScopedLightLock lk(m_state_lock);

        /* Open a reference to the region. */
        io_region->Open();

        /* Add the region to our list. */
        m_io_region_list.push_back(*io_region);
    }

    void KProcess::RemoveIoRegion(KIoRegion *io_region) {
        /* Remove the region from our list. */
        {
            /* Lock ourselves, to prevent concurrent access. */
            KScopedLightLock lk(m_state_lock);

            /* Remove the region from our list. */
            m_io_region_list.erase(m_io_region_list.iterator_to(*io_region));
        }

        /* Close our reference to the io region. */
        io_region->Close();
    }

    Result KProcess::CreateThreadLocalRegion(KProcessAddress *out) {
        KThreadLocalPage *tlp = nullptr;
        KProcessAddress tlr   = Null<KProcessAddress>;

        /* See if we can get a region from a partially used TLP. */
        {
            KScopedSchedulerLock sl;

            if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
                tlr = it->Reserve();
                MESOSPHERE_ABORT_UNLESS(tlr != Null<KProcessAddress>);

                if (it->IsAllUsed()) {
                    tlp = std::addressof(*it);
                    m_partially_used_tlp_tree.erase(it);
                    m_fully_used_tlp_tree.insert(*tlp);
                }

                *out = tlr;
                return ResultSuccess();
            }
        }

        /* Allocate a new page. */
        tlp = KThreadLocalPage::Allocate();
        R_UNLESS(tlp != nullptr, svc::ResultOutOfMemory());
        auto tlp_guard = SCOPE_GUARD { KThreadLocalPage::Free(tlp); };

        /* Initialize the new page. */
        R_TRY(tlp->Initialize(this));

        /* Reserve a TLR. */
        tlr = tlp->Reserve();
        MESOSPHERE_ABORT_UNLESS(tlr != Null<KProcessAddress>);

        /* Insert into our tree. */
        {
            KScopedSchedulerLock sl;
            if (tlp->IsAllUsed()) {
                m_fully_used_tlp_tree.insert(*tlp);
            } else {
                m_partially_used_tlp_tree.insert(*tlp);
            }
        }

        /* We succeeded! */
        tlp_guard.Cancel();
        *out = tlr;
        return ResultSuccess();
    }

    Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
        KThreadLocalPage *page_to_free = nullptr;

        /* Release the region. */
        {
            KScopedSchedulerLock sl;

            /* Try to find the page in the partially used list. */
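            /* Both trees are keyed by the page-aligned base address of each thread local page. */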
            auto it = m_partially_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize));
            if (it == m_partially_used_tlp_tree.end()) {
                /* If we don't find it, it has to be in the fully used list. */
                it = m_fully_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize));
                R_UNLESS(it != m_fully_used_tlp_tree.end(), svc::ResultInvalidAddress());

                /* Release the region. */
                it->Release(addr);

                /* Move the page out of the fully used list. */
                KThreadLocalPage *tlp = std::addressof(*it);
                m_fully_used_tlp_tree.erase(it);
                if (tlp->IsAllFree()) {
                    page_to_free = tlp;
                } else {
                    m_partially_used_tlp_tree.insert(*tlp);
                }
            } else {
                /* Release the region. */
                it->Release(addr);

                /* Handle the all-free case. */
                KThreadLocalPage *tlp = std::addressof(*it);
                if (tlp->IsAllFree()) {
                    m_partially_used_tlp_tree.erase(it);
                    page_to_free = tlp;
                }
            }
        }

        /* If we should free the page it was in, do so. */
        if (page_to_free != nullptr) {
            page_to_free->Finalize();

            KThreadLocalPage::Free(page_to_free);
        }

        return ResultSuccess();
    }

    void *KProcess::GetThreadLocalRegionPointer(KProcessAddress addr) {
        KThreadLocalPage *tlp = nullptr;
        {
            KScopedSchedulerLock sl;
            if (auto it = m_partially_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize)); it != m_partially_used_tlp_tree.end()) {
                tlp = std::addressof(*it);
            } else if (auto it = m_fully_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize)); it != m_fully_used_tlp_tree.end()) {
                tlp = std::addressof(*it);
            } else {
                return nullptr;
            }
        }
        return static_cast<u8 *>(tlp->GetPointer()) + (GetInteger(addr) & (PageSize - 1));
    }

    bool KProcess::ReserveResource(ams::svc::LimitableResource which, s64 value) {
        if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
            return rl->Reserve(which, value);
        } else {
            return true;
        }
    }

    bool KProcess::ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout) {
        if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
            return rl->Reserve(which, value, timeout);
        } else {
            return true;
        }
    }

    void KProcess::ReleaseResource(ams::svc::LimitableResource which, s64 value) {
        if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
            rl->Release(which, value);
        }
    }

    void KProcess::ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint) {
        if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
            rl->Release(which, value, hint);
        }
    }

    void KProcess::IncrementRunningThreadCount() {
        MESOSPHERE_ASSERT(m_num_running_threads.Load() >= 0);

        ++m_num_running_threads;
    }

    void KProcess::DecrementRunningThreadCount() {
        MESOSPHERE_ASSERT(m_num_running_threads.Load() > 0);
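
        /* If we were the last running thread, terminate the process. */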
        if (const auto prev = m_num_running_threads--; prev == 1) {
            this->Terminate();
        }
    }

    bool KProcess::EnterUserException() {
        /* Get the current thread. */
        KThread *cur_thread = GetCurrentThreadPointer();
        MESOSPHERE_ASSERT(this == cur_thread->GetOwnerProcess());

        /* Check that we haven't already claimed the exception thread. */
        if (m_exception_thread == cur_thread) {
            return false;
        }

        /* Create the wait queue we'll be using. */
        ThreadQueueImplForKProcessEnterUserException wait_queue(std::addressof(m_exception_thread));

        /* Claim the exception thread. */
        {
            /* Lock the scheduler. */
            KScopedSchedulerLock sl;

            /* Check that we're not terminating. */
            if (cur_thread->IsTerminationRequested()) {
                return false;
            }

            /* If we don't have an exception thread, we can just claim it directly. */
            if (m_exception_thread == nullptr) {
                m_exception_thread = cur_thread;
                KScheduler::SetSchedulerUpdateNeeded();
                return true;
            }

            /* Otherwise, we need to wait until we don't have an exception thread. */

            /* Add the current thread as a waiter on the current exception thread. */
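            /* NOTE: The set low bit appears to mark this as a kernel-internal address key, */
            /* distinguishing it from user-mode lock addresses. */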
            cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
            m_exception_thread->AddWaiter(cur_thread);

            /* Wait to claim the exception thread. */
            cur_thread->BeginWait(std::addressof(wait_queue));
        }

        /* If our wait didn't end due to thread termination, we succeeded. */
        return !svc::ResultTerminationRequested::Includes(cur_thread->GetWaitResult());
    }

    bool KProcess::LeaveUserException() {
        return this->ReleaseUserException(GetCurrentThreadPointer());
    }

    bool KProcess::ReleaseUserException(KThread *thread) {
        KScopedSchedulerLock sl;

        if (m_exception_thread == thread) {
            m_exception_thread = nullptr;

            /* Remove waiter thread. */
            s32 num_waiters;
            if (KThread *next = thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1); next != nullptr) {
                next->EndWait(ResultSuccess());
            }

            KScheduler::SetSchedulerUpdateNeeded();

            return true;
        } else {
            return false;
        }
    }

    void KProcess::RegisterThread(KThread *thread) {
        KScopedLightLock lk(m_list_lock);

        m_thread_list.push_back(*thread);
    }

    void KProcess::UnregisterThread(KThread *thread) {
        KScopedLightLock lk(m_list_lock);

        m_thread_list.erase(m_thread_list.iterator_to(*thread));
    }

    size_t KProcess::GetUsedUserPhysicalMemorySize() const {
        const size_t norm_size  = m_page_table.GetNormalMemorySize();
        const size_t other_size = m_code_size + m_main_thread_stack_size;
        const size_t sec_size   = KSystemControl::CalculateRequiredSecureMemorySize(m_system_resource_num_pages * PageSize, m_memory_pool);

        return norm_size + other_size + sec_size;
    }

    size_t KProcess::GetTotalUserPhysicalMemorySize() const {
        /* Get the amount of free and used size. */
        const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
        const size_t used_size = this->GetUsedNonSystemUserPhysicalMemorySize();
        const size_t max_size  = m_max_process_memory;

        if (used_size + free_size > max_size) {
            return max_size;
        } else {
            return free_size + used_size;
        }
    }

    size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const {
        const size_t norm_size  = m_page_table.GetNormalMemorySize();
        const size_t other_size = m_code_size + m_main_thread_stack_size;

        return norm_size + other_size;
    }

    size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
        /* Get the amount of free and used size. */
        const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
        const size_t used_size = this->GetUsedUserPhysicalMemorySize();
        const size_t sec_size  = KSystemControl::CalculateRequiredSecureMemorySize(m_system_resource_num_pages * PageSize, m_memory_pool);
        const size_t max_size  = m_max_process_memory;
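
        /* The secure (system resource) size is excluded from the non-system total. */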
        if (used_size + free_size > max_size) {
            return max_size - sec_size;
        } else {
            return free_size + used_size - sec_size;
        }
    }

    Result KProcess::Run(s32 priority, size_t stack_size) {
        MESOSPHERE_ASSERT_THIS();

        /* Lock ourselves, to prevent concurrent access. */
        KScopedLightLock lk(m_state_lock);

        /* Validate that we're in a state where we can initialize. */
        const auto state = m_state;
        R_UNLESS(state == State_Created || state == State_CreatedAttached, svc::ResultInvalidState());

        /* Place a tentative reservation of a thread for this process. */
        KScopedResourceReservation thread_reservation(this, ams::svc::LimitableResource_ThreadCountMax);
        R_UNLESS(thread_reservation.Succeeded(), svc::ResultLimitReached());

        /* Ensure that we haven't already allocated stack. */
        MESOSPHERE_ABORT_UNLESS(m_main_thread_stack_size == 0);

        /* Ensure that we're allocating a valid stack. */
        stack_size = util::AlignUp(stack_size, PageSize);
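        /* NOTE: The second bound below guards against overflow in stack_size + m_code_size. */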
|
2020-12-18 01:18:47 +00:00
|
|
|
R_UNLESS(stack_size + m_code_size <= m_max_process_memory, svc::ResultOutOfMemory());
|
|
|
|
R_UNLESS(stack_size + m_code_size >= m_code_size, svc::ResultOutOfMemory());
|
2020-02-20 03:38:20 +00:00
|
|
|
|
|
|
|
/* Place a tentative reservation of memory for our new stack. */
|
2020-07-24 02:26:46 +00:00
|
|
|
KScopedResourceReservation mem_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, stack_size);
|
2020-02-20 03:38:20 +00:00
|
|
|
R_UNLESS(mem_reservation.Succeeded(), svc::ResultLimitReached());
|
|
|
|
|
|
|
|
/* Allocate and map our stack. */
|
|
|
|
KProcessAddress stack_top = Null<KProcessAddress>;
|
|
|
|
if (stack_size) {
|
|
|
|
KProcessAddress stack_bottom;
|
2020-12-18 01:18:47 +00:00
|
|
|
R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, KMemoryState_Stack, KMemoryPermission_UserReadWrite));
|
2020-02-20 03:38:20 +00:00
|
|
|
|
|
|
|
stack_top = stack_bottom + stack_size;
|
2020-12-18 01:18:47 +00:00
|
|
|
m_main_thread_stack_size = stack_size;
|
2020-02-20 03:38:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Ensure our stack is safe to clean up on exit. */
|
|
|
|
auto stack_guard = SCOPE_GUARD {
|
2020-12-18 01:18:47 +00:00
|
|
|
if (m_main_thread_stack_size) {
|
|
|
|
MESOSPHERE_R_ABORT_UNLESS(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size, m_main_thread_stack_size / PageSize, KMemoryState_Stack));
|
|
|
|
m_main_thread_stack_size = 0;
|
2020-02-20 03:38:20 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Set our maximum heap size. */
|
2020-12-18 01:18:47 +00:00
|
|
|
R_TRY(m_page_table.SetMaxHeapSize(m_max_process_memory - (m_main_thread_stack_size + m_code_size)));
|
2020-02-20 03:38:20 +00:00
|
|
|
|
|
|
|
/* Initialize our handle table. */
|
2021-04-07 22:16:11 +00:00
|
|
|
R_TRY(this->InitializeHandleTable(m_capabilities.GetHandleTableSize()));
|
|
|
|
auto ht_guard = SCOPE_GUARD { this->FinalizeHandleTable(); };
|
2020-02-20 03:38:20 +00:00
|
|
|
|
|
|
|
/* Create a new thread for the process. */
|
|
|
|
KThread *main_thread = KThread::Create();
|
|
|
|
R_UNLESS(main_thread != nullptr, svc::ResultOutOfResource());
|
2021-04-07 15:17:15 +00:00
|
|
|
ON_SCOPE_EXIT { main_thread->Close(); };
|
2020-02-20 03:38:20 +00:00
|
|
|
|
|
|
|
/* Initialize the thread. */
|
2020-12-18 01:18:47 +00:00
|
|
|
R_TRY(KThread::InitializeUserThread(main_thread, reinterpret_cast<KThreadFunction>(GetVoidPointer(this->GetEntryPoint())), 0, stack_top, priority, m_ideal_core_id, this));
|
2020-02-20 03:38:20 +00:00
|
|
|
|
|
|
|
/* Register the thread, and commit our reservation. */
|
|
|
|
KThread::Register(main_thread);
|
|
|
|
thread_reservation.Commit();
|
|
|
|
|
|
|
|
/* Add the thread to our handle table. */
|
|
|
|
ams::svc::Handle thread_handle;
|
2020-12-18 01:18:47 +00:00
|
|
|
R_TRY(m_handle_table.Add(std::addressof(thread_handle), main_thread));
|
2020-02-20 03:38:20 +00:00
|
|
|
|
|
|
|
/* Set the thread arguments. */
|
|
|
|
main_thread->GetContext().SetArguments(0, thread_handle);
|
|
|
|
|
|
|
|
/* Update our state. */
|
|
|
|
this->ChangeState((state == State_Created) ? State_Running : State_RunningAttached);
|
|
|
|
auto state_guard = SCOPE_GUARD { this->ChangeState(state); };
|
|
|
|
|
|
|
|
/* Run our thread. */
|
|
|
|
R_TRY(main_thread->Run());
|
|
|
|
|
2021-04-07 15:17:15 +00:00
|
|
|
/* Open a reference to represent that we're running. */
|
|
|
|
this->Open();
|
|
|
|
|
2020-02-20 03:38:20 +00:00
|
|
|
/* We succeeded! Cancel our guards. */
|
|
|
|
state_guard.Cancel();
|
|
|
|
ht_guard.Cancel();
|
|
|
|
stack_guard.Cancel();
|
|
|
|
mem_reservation.Commit();
|
|
|
|
|
2020-02-20 04:42:21 +00:00
|
|
|
/* Note for debug that we're running a new process. */
|
2020-12-18 01:18:47 +00:00
|
|
|
MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", m_process_id, m_name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore());
|
2020-02-20 04:42:21 +00:00
|
|
|
|
2020-02-20 03:38:20 +00:00
|
|
|
return ResultSuccess();
|
|
|
|
}
|
|
|
|
|
2020-07-14 09:45:06 +00:00
|
|
|
Result KProcess::Reset() {
|
|
|
|
MESOSPHERE_ASSERT_THIS();
|
|
|
|
|
|
|
|
/* Lock the process and the scheduler. */
|
2020-12-18 01:18:47 +00:00
|
|
|
KScopedLightLock lk(m_state_lock);
|
2020-07-14 09:45:06 +00:00
|
|
|
KScopedSchedulerLock sl;
|
|
|
|
|
|
|
|
/* Validate that we're in a state that we can reset. */
|
2020-12-18 01:18:47 +00:00
|
|
|
R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState());
|
|
|
|
R_UNLESS(m_is_signaled, svc::ResultInvalidState());
|
2020-07-14 09:45:06 +00:00
|
|
|
|
|
|
|
/* Clear signaled. */
|
2020-12-18 01:18:47 +00:00
|
|
|
m_is_signaled = false;
|
2020-07-14 09:45:06 +00:00
|
|
|
return ResultSuccess();
|
|
|
|
}
|
|
|
|
|
2020-07-23 10:04:43 +00:00
|
|
|
Result KProcess::SetActivity(ams::svc::ProcessActivity activity) {
|
|
|
|
/* Lock ourselves and the scheduler. */
|
2020-12-18 01:18:47 +00:00
|
|
|
KScopedLightLock lk(m_state_lock);
|
|
|
|
KScopedLightLock list_lk(m_list_lock);
|
2020-07-23 10:04:43 +00:00
|
|
|
KScopedSchedulerLock sl;
|
|
|
|
|
|
|
|
/* Validate our state. */
|
2020-12-18 01:18:47 +00:00
|
|
|
R_UNLESS(m_state != State_Terminating, svc::ResultInvalidState());
|
|
|
|
R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState());
|
2020-07-23 10:04:43 +00:00
|
|
|
|
|
|
|
/* Either pause or resume. */
|
|
|
|
if (activity == ams::svc::ProcessActivity_Paused) {
|
|
|
|
/* Verify that we're not suspended. */
|
2020-12-18 01:18:47 +00:00
|
|
|
R_UNLESS(!m_is_suspended, svc::ResultInvalidState());
|
2020-07-23 10:04:43 +00:00
|
|
|
|
|
|
|
/* Suspend all threads. */
|
|
|
|
auto end = this->GetThreadList().end();
|
|
|
|
for (auto it = this->GetThreadList().begin(); it != end; ++it) {
|
|
|
|
it->RequestSuspend(KThread::SuspendType_Process);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set ourselves as suspended. */
|
|
|
|
this->SetSuspended(true);
|
|
|
|
} else {
|
|
|
|
MESOSPHERE_ASSERT(activity == ams::svc::ProcessActivity_Runnable);
|
|
|
|
|
|
|
|
/* Verify that we're suspended. */
|
2020-12-18 01:18:47 +00:00
|
|
|
R_UNLESS(m_is_suspended, svc::ResultInvalidState());
|
2020-07-23 10:04:43 +00:00
|
|
|
|
|
|
|
/* Resume all threads. */
|
|
|
|
auto end = this->GetThreadList().end();
|
|
|
|
for (auto it = this->GetThreadList().begin(); it != end; ++it) {
|
|
|
|
it->Resume(KThread::SuspendType_Process);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set ourselves as resumed. */
|
|
|
|
this->SetSuspended(false);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ResultSuccess();
|
|
|
|
}
|
|
|
|
|
2020-08-03 06:06:29 +00:00
|
|
|
void KProcess::PinCurrentThread() {
|
|
|
|
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
|
|
|
|
|
|
|
/* Get the current thread. */
|
|
|
|
const s32 core_id = GetCurrentCoreId();
|
|
|
|
KThread *cur_thread = GetCurrentThreadPointer();
|
|
|
|
|
2021-04-07 22:30:13 +00:00
|
|
|
/* If the thread isn't terminated, pin it. */
|
|
|
|
if (!cur_thread->IsTerminationRequested()) {
|
|
|
|
/* Pin it. */
|
|
|
|
this->PinThread(core_id, cur_thread);
|
|
|
|
cur_thread->Pin();
|
2020-08-03 06:06:29 +00:00
|
|
|
|
2021-04-07 22:30:13 +00:00
|
|
|
/* An update is needed. */
|
|
|
|
KScheduler::SetSchedulerUpdateNeeded();
|
|
|
|
}
|
2020-08-03 06:06:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void KProcess::UnpinCurrentThread() {
|
|
|
|
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
|
|
|
|
|
|
|
/* Get the current thread. */
|
|
|
|
const s32 core_id = GetCurrentCoreId();
|
|
|
|
KThread *cur_thread = GetCurrentThreadPointer();
|
|
|
|
|
|
|
|
/* Unpin it. */
|
|
|
|
cur_thread->Unpin();
|
|
|
|
this->UnpinThread(core_id, cur_thread);
|
|
|
|
|
|
|
|
/* An update is needed. */
|
|
|
|
KScheduler::SetSchedulerUpdateNeeded();
|
|
|
|
}
|
|
|
|
|
2021-04-07 22:30:13 +00:00
|
|
|
void KProcess::UnpinThread(KThread *thread) {
|
|
|
|
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
|
|
|
|
|
|
|
|
/* Get the thread's core id. */
|
|
|
|
const auto core_id = thread->GetActiveCore();
|
|
|
|
|
|
|
|
/* Unpin it. */
|
|
|
|
this->UnpinThread(core_id, thread);
|
|
|
|
thread->Unpin();
|
|
|
|
|
|
|
|
/* An update is needed. */
|
|
|
|
KScheduler::SetSchedulerUpdateNeeded();
|
|
|
|
}
|
|
|
|
|
2020-07-30 23:52:11 +00:00
|
|
|
Result KProcess::GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count) {
|
|
|
|
/* Lock the list. */
|
2020-12-18 01:18:47 +00:00
|
|
|
KScopedLightLock lk(m_list_lock);
|
2020-07-30 23:52:11 +00:00
|
|
|
|
|
|
|
/* Iterate over the list. */
|
|
|
|
s32 count = 0;
|
|
|
|
auto end = this->GetThreadList().end();
|
|
|
|
for (auto it = this->GetThreadList().begin(); it != end; ++it) {
|
|
|
|
/* If we're within array bounds, write the id. */
|
|
|
|
if (count < max_out_count) {
|
|
|
|
/* Get the thread id. */
|
|
|
|
KThread *thread = std::addressof(*it);
|
|
|
|
const u64 id = thread->GetId();
|
|
|
|
|
|
|
|
/* Copy the id to userland. */
|
|
|
|
R_TRY(out_thread_ids.CopyArrayElementFrom(std::addressof(id), count));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Increment the count. */
|
|
|
|
++count;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We successfully iterated the list. */
|
|
|
|
*out_num_threads = count;
|
|
|
|
return ResultSuccess();
|
|
|
|
}

    KProcess::State KProcess::SetDebugObject(void *debug_object) {
        /* Attaching should only happen to non-null objects while the scheduler is locked. */
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
        MESOSPHERE_ASSERT(debug_object != nullptr);

        /* Cache our state to return it to the debug object. */
        const auto old_state = m_state;

        /* Set the object. */
        m_attached_object = debug_object;

        /* Check that our state is valid for attach. */
        MESOSPHERE_ASSERT(m_state == State_Created || m_state == State_Running || m_state == State_Crashed);

        /* Update our state. */
        if (m_state != State_DebugBreak) {
            if (m_state == State_Created) {
                this->ChangeState(State_CreatedAttached);
            } else {
                this->ChangeState(State_DebugBreak);
            }
        }

        return old_state;
    }
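
    /* Attach transitions implemented above, for reference: Created -> CreatedAttached;
     * Running or Crashed -> DebugBreak. The pre-attach state is returned so that
     * ClearDebugObject() can restore it on detach. */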

    void KProcess::ClearDebugObject(KProcess::State old_state) {
        /* Detaching from a process should only happen while the scheduler is locked. */
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        /* Clear the attached object. */
        m_attached_object = nullptr;

        /* Validate that the process is in an attached state. */
        MESOSPHERE_ASSERT(m_state == State_CreatedAttached || m_state == State_RunningAttached || m_state == State_DebugBreak || m_state == State_Terminating || m_state == State_Terminated);

        /* Change the state appropriately. */
        if (m_state == State_CreatedAttached) {
            this->ChangeState(State_Created);
        } else if (m_state == State_RunningAttached || m_state == State_DebugBreak) {
            /* Disallow transition back to created from running. */
            if (old_state == State_Created) {
                old_state = State_Running;
            }
            this->ChangeState(old_state);
        }
    }
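
    /* Detach transitions implemented above, for reference: CreatedAttached -> Created;
     * RunningAttached or DebugBreak -> the cached old_state, with Created coerced to
     * Running so a process that has run never appears freshly created again;
     * Terminating and Terminated are left unchanged. */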

    bool KProcess::EnterJitDebug(ams::svc::DebugEvent event, ams::svc::DebugException exception, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) {
        /* Check that we're the current process. */
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(this == GetCurrentProcessPointer());

        /* If we aren't allowed to enter jit debug, don't. */
        if ((m_flags & ams::svc::CreateProcessFlag_EnableDebug) == 0) {
            return false;
        }

        /* We're the current process, so we should be some kind of running. */
        MESOSPHERE_ASSERT(m_state != State_Created);
        MESOSPHERE_ASSERT(m_state != State_CreatedAttached);
        MESOSPHERE_ASSERT(m_state != State_Terminated);

        /* Try to enter JIT debug. */
        while (true) {
            /* Lock ourselves and the scheduler. */
            KScopedLightLock lk(m_state_lock);
            KScopedLightLock list_lk(m_list_lock);
            KScopedSchedulerLock sl;

            /* If we're attached to a debugger, we're necessarily in debug. */
            if (this->IsAttachedToDebugger()) {
                return true;
            }

            /* If the current thread is terminating, we can't enter debug. */
            if (GetCurrentThread().IsTerminationRequested()) {
                return false;
            }

            /* We're not attached to a debugger, so check that. */
            MESOSPHERE_ASSERT(m_state != State_RunningAttached);
            MESOSPHERE_ASSERT(m_state != State_DebugBreak);

            /* If we're terminating, we can't enter debug. */
            if (m_state != State_Running && m_state != State_Crashed) {
                MESOSPHERE_ASSERT(m_state == State_Terminating);
                return false;
            }

            /* If the current thread is suspended, retry. */
            if (GetCurrentThread().IsSuspended()) {
                continue;
            }

            /* Suspend all our threads. */
            {
                auto end = this->GetThreadList().end();
                for (auto it = this->GetThreadList().begin(); it != end; ++it) {
                    it->RequestSuspend(KThread::SuspendType_Debug);
                }
            }

            /* Change our state to crashed. */
            this->ChangeState(State_Crashed);

            /* Enter jit debug. */
            m_is_jit_debug             = true;
            m_jit_debug_event_type     = event;
            m_jit_debug_exception_type = exception;
            m_jit_debug_params[0]      = param1;
            m_jit_debug_params[1]      = param2;
            m_jit_debug_params[2]      = param3;
            m_jit_debug_params[3]      = param4;
            m_jit_debug_thread_id      = GetCurrentThread().GetId();

            /* Exit our retry loop. */
            break;
        }

        /* Check if our state indicates we're in jit debug. */
        {
            KScopedSchedulerLock sl;

            if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) {
                return true;
            }
        }

        return false;
    }
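
    /* NOTE: The retry loop above seems to exist because the current thread may have a
     * pending suspension; continuing releases the scoped locks at the end of the
     * iteration, allowing the suspension to take effect before state is re-examined. */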

    KEventInfo *KProcess::GetJitDebugInfo() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        if (m_is_jit_debug) {
            return KDebugBase::CreateDebugEvent(m_jit_debug_event_type, m_jit_debug_exception_type, m_jit_debug_params[0], m_jit_debug_params[1], m_jit_debug_params[2], m_jit_debug_params[3], m_jit_debug_thread_id);
        } else {
            return nullptr;
        }
    }

    void KProcess::ClearJitDebugInfo() {
        MESOSPHERE_ASSERT_THIS();
        MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

        m_is_jit_debug = false;
    }

    KProcess *KProcess::GetProcessFromId(u64 process_id) {
        /* Lock the list. */
        KProcess::ListAccessor accessor;
        const auto end = accessor.end();

        /* Iterate over the list. */
        for (auto it = accessor.begin(); it != end; ++it) {
            /* Get the process. */
            KProcess *process = static_cast<KProcess *>(std::addressof(*it));

            if (process->GetId() == process_id) {
                if (AMS_LIKELY(process->Open())) {
                    return process;
                }
            }
        }

        /* We failed to find the process. */
        return nullptr;
    }
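
    /* NOTE: Open() takes a reference on the returned process, which the caller must
     * balance. A minimal usage sketch (hypothetical caller):
     *
     *     if (KProcess *process = KProcess::GetProcessFromId(process_id); process != nullptr) {
     *         ON_SCOPE_EXIT { process->Close(); };
     *         ...
     *     }
     */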

    Result KProcess::GetProcessList(s32 *out_num_processes, ams::kern::svc::KUserPointer<u64 *> out_process_ids, s32 max_out_count) {
        /* Lock the list. */
        KProcess::ListAccessor accessor;
        const auto end = accessor.end();

        /* Iterate over the list. */
        s32 count = 0;
        for (auto it = accessor.begin(); it != end; ++it) {
            /* If we're within array bounds, write the id. */
            if (count < max_out_count) {
                /* Get the process id. */
                KProcess *process = static_cast<KProcess *>(std::addressof(*it));
                const u64 id = process->GetId();

                /* Copy the id to userland. */
                R_TRY(out_process_ids.CopyArrayElementFrom(std::addressof(id), count));
            }

            /* Increment the count. */
            ++count;
        }

        /* We successfully iterated the list. */
        *out_num_processes = count;
        return ResultSuccess();
    }
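
    /* NOTE: Unlike GetThreadList(), which takes only this process's list lock, the
     * process-wide queries above rely on KProcess::ListAccessor, which is presumed to
     * hold the global process list lock for the accessor's lifetime. */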

}