kern: implement process exit

Michael Scire 2020-07-22 23:52:29 -07:00 committed by SciresM
parent 3917ecad46
commit 28aab09b5d
10 changed files with 466 additions and 9 deletions

View file

@ -50,7 +50,13 @@ namespace ams::kern {
public:
virtual void OnFinalizeSynchronizationObject() override;
virtual bool IsSignaled() const override;
private:
static Result ProcessDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4);
public:
static Result OnDebugEvent(ams::svc::DebugEvent event, uintptr_t param0 = 0, uintptr_t param1 = 0, uintptr_t param2 = 0, uintptr_t param3 = 0, uintptr_t param4 = 0);
static Result OnExitProcess(KProcess *process);
static Result OnTerminateProcess(KProcess *process);
static Result OnExitThread(KThread *thread);
static KEventInfo *CreateDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4, u64 thread_id);
};

View file

@ -211,6 +211,12 @@ namespace ams::kern {
return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, this->cached_physical_heap_region);
}
bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, this->cached_physical_heap_region);
}
bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

View file

@ -120,6 +120,9 @@ namespace ams::kern {
KPageTableManager page_table_manager{};
private:
Result Initialize(const ams::svc::CreateProcessParameter &params);
void StartTermination();
void FinishTermination();
public:
KProcess() { /* ... */ }
virtual ~KProcess() { /* ... */ }
@ -182,6 +185,7 @@ namespace ams::kern {
ThreadList &GetThreadList() { return this->thread_list; }
const ThreadList &GetThreadList() const { return this->thread_list; }
constexpr void *GetDebugObject() const { return this->attached_object; }
KProcess::State SetDebugObject(void *debug_object);
void ClearDebugObject(KProcess::State state);

View file

@ -448,6 +448,9 @@ namespace ams::kern {
Result Run();
void Exit();
void Terminate();
ThreadState RequestTerminate();
Result Sleep(s64 timeout);
ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1; }

View file

@ -212,7 +212,103 @@ namespace ams::kern::arch::arm64 {
}
Result KPageTable::Finalize() {
MESOSPHERE_UNIMPLEMENTED();
/* Only process tables should be finalized. */
MESOSPHERE_ASSERT(!this->IsKernel());
/* Note that we've updated (to ensure we're synchronized). */
this->NoteUpdated();
/* Free all pages in the table. */
{
/* Get implementation objects. */
auto &impl = this->GetImpl();
auto &mm = Kernel::GetMemoryManager();
/* Traverse, freeing all pages. */
{
/* Get the address space size. */
const size_t as_size = this->GetAddressSpaceSize();
/* Begin the traversal. */
TraversalContext context;
TraversalEntry cur_entry = {};
bool cur_valid = false;
TraversalEntry next_entry;
bool next_valid;
size_t tot_size = 0;
next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), this->GetAddressSpaceStart());
/* Iterate over entries. */
while (true) {
if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
cur_entry.block_size += next_entry.block_size;
} else {
if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
mm.Close(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize);
}
/* Update tracking variables. */
tot_size += cur_entry.block_size;
cur_entry = next_entry;
cur_valid = next_valid;
}
if (cur_entry.block_size + tot_size >= as_size) {
break;
}
next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
}
/* Handle the last block. */
if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
mm.Close(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize);
}
}
/* Cache address space extents for convenience. */
const KProcessAddress as_start = this->GetAddressSpaceStart();
const KProcessAddress as_last = as_start + this->GetAddressSpaceSize() - 1;
/* Free all L3 tables. */
for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L2BlockSize) {
L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address);
if (l1_entry->IsTable()) {
L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, cur_address);
if (l2_entry->IsTable()) {
KVirtualAddress l3_table = GetPageTableVirtualAddress(l2_entry->GetTable());
if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
while (!this->GetPageTableManager().Close(l3_table, 1)) { /* ... */ }
this->GetPageTableManager().Free(l3_table);
}
}
}
}
/* Free all L2 tables. */
for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L1BlockSize) {
L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address);
if (l1_entry->IsTable()) {
KVirtualAddress l2_table = GetPageTableVirtualAddress(l1_entry->GetTable());
if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
while (!this->GetPageTableManager().Close(l2_table, 1)) { /* ... */ }
this->GetPageTableManager().Free(l2_table);
}
}
}
/* Free the L1 table. */
this->GetPageTableManager().Free(reinterpret_cast<uintptr_t>(impl.Finalize()));
/* Perform inherited finalization. */
KPageTableBase::Finalize();
}
/* Release our asid. */
g_asid_manager.Release(this->asid);
return ResultSuccess();
}
Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {

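The traversal in KPageTable::Finalize() above folds physically contiguous blocks together before closing their backing heap pages against the memory manager. Below is a minimal standalone sketch of that coalescing idea only, using hypothetical stand-in types (MappedRun, CoalesceAndClose) and a caller-supplied close callback rather than the kernel's traversal API.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

/* Illustrative-only model of the coalescing walk: contiguous mapped runs are
   merged so the backing pages can be closed in as few calls as possible.
   All names here are stand-ins, not kernel APIs. */
struct MappedRun {
    uint64_t phys_addr;
    size_t   size;
    bool     valid;
};

inline void CoalesceAndClose(const std::vector<MappedRun> &runs,
                             const std::function<void(uint64_t, size_t)> &close) {
    MappedRun cur = {};
    bool cur_valid = false;
    for (const MappedRun &next : runs) {
        const bool contiguous = next.valid && cur_valid &&
                                next.phys_addr == cur.phys_addr + cur.size;
        if ((!next.valid && !cur_valid) || contiguous) {
            /* Extend the current run (or the current hole). */
            cur.size += next.size;
        } else {
            /* Flush the finished run, then start a new one. */
            if (cur_valid) {
                close(cur.phys_addr, cur.size);
            }
            cur       = next;
            cur_valid = next.valid;
        }
    }
    /* Handle the final run, mirroring the "last block" step above. */
    if (cur_valid) {
        close(cur.phys_addr, cur.size);
    }
}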
View file

@ -17,6 +17,14 @@
namespace ams::kern {
namespace {
ALWAYS_INLINE KDebugBase *GetDebugObject(KProcess *process) {
return static_cast<KDebugBase *>(process->GetDebugObject());
}
}
void KDebugBase::Initialize() {
/* Clear the process and continue flags. */
this->process = nullptr;
@ -425,4 +433,55 @@ namespace ams::kern {
return (!this->event_info_list.empty()) || this->process == nullptr || this->process->IsTerminated();
}
Result KDebugBase::ProcessDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) {
MESOSPHERE_UNIMPLEMENTED();
}
Result KDebugBase::OnDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) {
if (KProcess *process = GetCurrentProcessPointer(); process != nullptr && process->IsAttachedToDebugger()) {
return ProcessDebugEvent(event, param0, param1, param2, param3, param4);
}
return ResultSuccess();
}
Result KDebugBase::OnExitProcess(KProcess *process) {
MESOSPHERE_ASSERT(process != nullptr);
if (process->IsAttachedToDebugger()) {
KScopedSchedulerLock sl;
if (KDebugBase *debug = GetDebugObject(process); debug != nullptr) {
debug->PushDebugEvent(ams::svc::DebugEvent_ExitProcess, ams::svc::ProcessExitReason_ExitProcess);
debug->NotifyAvailable();
}
}
return ResultSuccess();
}
Result KDebugBase::OnTerminateProcess(KProcess *process) {
MESOSPHERE_ASSERT(process != nullptr);
if (process->IsAttachedToDebugger()) {
KScopedSchedulerLock sl;
if (KDebugBase *debug = GetDebugObject(process); debug != nullptr) {
debug->PushDebugEvent(ams::svc::DebugEvent_ExitProcess, ams::svc::ProcessExitReason_TerminateProcess);
debug->NotifyAvailable();
}
}
return ResultSuccess();
}
Result KDebugBase::OnExitThread(KThread *thread) {
MESOSPHERE_ASSERT(thread != nullptr);
if (KProcess *process = thread->GetOwnerProcess(); process != nullptr && process->IsAttachedToDebugger()) {
R_TRY(OnDebugEvent(ams::svc::DebugEvent_ExitThread, thread->GetId(), thread->IsTerminationRequested() ? ams::svc::ThreadExitReason_TerminateThread : ams::svc::ThreadExitReason_ExitThread));
}
return ResultSuccess();
}
}

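OnExitProcess() and OnTerminateProcess() above share one shape: if a debugger is attached, queue an exit event under the scheduler lock and wake any waiter. A minimal sketch of that dispatch pattern, using standard-library primitives and hypothetical names (DebugEventQueue, ExitEvent) in place of the kernel's objects:

#include <condition_variable>
#include <deque>
#include <mutex>

/* Illustrative-only model: the event is queued under a lock, then waiters are
   notified. Names are stand-ins, not kernel APIs. */
enum class ExitEvent { ExitProcess, TerminateProcess, ExitThread };

class DebugEventQueue {
    private:
        std::mutex              m_lock;
        std::condition_variable m_cv;
        std::deque<ExitEvent>   m_events;
    public:
        void Push(ExitEvent event) {
            {
                std::scoped_lock lk(m_lock);
                m_events.push_back(event);   /* Analogous to PushDebugEvent(). */
            }
            m_cv.notify_one();               /* Analogous to NotifyAvailable(). */
        }
};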
View file

@ -273,8 +273,16 @@ namespace ams::kern {
void KPageTableBase::Finalize() {
/* Finalize memory blocks. */
this->memory_block_manager.Finalize(this->memory_block_slab_manager);
MESOSPHERE_TODO("cpu::InvalidateEntireInstructionCache();");
/* Free any unsafe mapped memory. */
if (this->mapped_unsafe_physical_memory) {
Kernel::GetUnsafeMemory().Release(this->mapped_unsafe_physical_memory);
}
/* Invalidate the entire instruction cache. */
cpu::InvalidateEntireInstructionCache();
}
KProcessAddress KPageTableBase::GetRegionAddress(KMemoryState state) const {

View file

@ -28,10 +28,118 @@ namespace ams::kern {
std::atomic<u64> g_initial_process_id = InitialProcessIdMin;
std::atomic<u64> g_process_id = ProcessIdMin;
void TerminateChildren(KProcess *process, const KThread *thread_to_not_terminate) {
/* Request that all child threads terminate. */
{
KScopedLightLock proc_lk(process->GetListLock());
KScopedSchedulerLock sl;
auto &thread_list = process->GetThreadList();
for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) {
if (thread->GetState() != KThread::ThreadState_Terminated) {
thread->RequestTerminate();
}
}
}
}
/* Wait for all child threads to terminate. */
while (true) {
/* Get the next child. */
KThread *cur_child = nullptr;
{
KScopedLightLock proc_lk(process->GetListLock());
auto &thread_list = process->GetThreadList();
for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) {
if (thread->GetState() != KThread::ThreadState_Terminated) {
if (AMS_LIKELY(thread->Open())) {
cur_child = thread;
break;
}
}
}
}
}
/* If we didn't find any non-terminated children, we're done. */
if (cur_child == nullptr) {
break;
}
/* Terminate and close the thread. */
cur_child->Terminate();
cur_child->Close();
}
}
}
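TerminateChildren() above is a two-phase loop: request termination for every child while holding the list lock, then repeatedly pick one still-live child under the lock and perform the blocking Terminate() with the lock released, holding a reference so the thread cannot vanish. A minimal sketch of that shape, with hypothetical stand-ins (ChildModel, TerminateAll) and a shared_ptr copy playing the role of thread->Open()/Close():

#include <memory>
#include <mutex>
#include <vector>

/* Illustrative-only model, not kernel code. */
struct ChildModel {
    bool terminated = false;
    void RequestTerminate() { /* non-blocking request, elided */ }
    void Terminate()        { terminated = true; /* blocking wait, elided */ }
};

inline void TerminateAll(std::mutex &list_lock, std::vector<std::shared_ptr<ChildModel>> &children) {
    /* Phase 1: request that every child terminate. */
    {
        std::scoped_lock lk(list_lock);
        for (const auto &c : children) {
            c->RequestTerminate();
        }
    }
    /* Phase 2: wait for each child, never blocking while the list lock is held. */
    while (true) {
        std::shared_ptr<ChildModel> cur;
        {
            std::scoped_lock lk(list_lock);
            for (const auto &c : children) {
                if (!c->terminated) {
                    cur = c;   /* Reference copy stands in for thread->Open(). */
                    break;
                }
            }
        }
        if (cur == nullptr) {
            break;             /* No live children remain. */
        }
        cur->Terminate();      /* Reference drops afterwards, like Close(). */
    }
}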
void KProcess::Finalize() {
MESOSPHERE_UNIMPLEMENTED();
/* Ensure we're not executing on any core. */
for (size_t i = 0; i < cpu::NumCores; ++i) {
MESOSPHERE_ASSERT(Kernel::GetCurrentContext(static_cast<s32>(i)).current_process.load(std::memory_order_relaxed) != this);
}
/* Delete the process local region. */
this->DeleteThreadLocalRegion(this->plr_address);
/* Get the used memory size. */
const size_t used_memory_size = this->GetUsedUserPhysicalMemorySize();
/* Finalize the page table. */
this->page_table.Finalize();
/* Free the system resource. */
if (this->system_resource_address != Null<KVirtualAddress>) {
/* Check that we have no outstanding allocations. */
MESOSPHERE_ABORT_UNLESS(this->memory_block_slab_manager.GetUsed() == 0);
MESOSPHERE_ABORT_UNLESS(this->block_info_manager.GetUsed() == 0);
MESOSPHERE_ABORT_UNLESS(this->page_table_manager.GetUsed() == 0);
/* Free the memory. */
KSystemControl::FreeSecureMemory(this->system_resource_address, this->system_resource_num_pages * PageSize, this->memory_pool);
/* Clear our tracking variables. */
this->system_resource_address = Null<KVirtualAddress>;
this->system_resource_num_pages = 0;
}
/* Release memory to the resource limit. */
if (this->resource_limit != nullptr) {
MESOSPHERE_ABORT_UNLESS(used_memory_size >= this->memory_release_hint);
this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, used_memory_size, used_memory_size - this->memory_release_hint);
this->resource_limit->Close();
}
/* Free all shared memory infos. */
{
auto it = this->shared_memory_list.begin();
while (it != this->shared_memory_list.end()) {
KSharedMemoryInfo *info = std::addressof(*it);
KSharedMemory *shmem = info->GetSharedMemory();
while (!info->Close()) {
shmem->Close();
}
shmem->Close();
it = this->shared_memory_list.erase(it);
KSharedMemoryInfo::Free(info);
}
}
/* Our thread local page list must be empty at this point. */
MESOSPHERE_ABORT_UNLESS(this->partially_used_tlp_tree.empty());
MESOSPHERE_ABORT_UNLESS(this->fully_used_tlp_tree.empty());
/* Log that we finalized for debug. */
MESOSPHERE_LOG("KProcess::Finalize() pid=%ld name=%-12s\n", this->process_id, this->name);
/* Perform inherited finalization. */
KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject>::Finalize();
}
Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params) {
@ -270,15 +378,112 @@ namespace ams::kern {
}
void KProcess::DoWorkerTask() {
MESOSPHERE_UNIMPLEMENTED();
/* Terminate child threads. */
TerminateChildren(this, nullptr);
/* Call the debug callback. */
KDebug::OnExitProcess(this);
/* Finish termination. */
this->FinishTermination();
}
void KProcess::StartTermination() {
/* Terminate child threads other than the current one. */
TerminateChildren(this, GetCurrentThreadPointer());
/* Finalize the handle table. */
this->handle_table.Finalize();
}
void KProcess::FinishTermination() {
/* Release resource limit hint. */
if (this->resource_limit != nullptr) {
this->memory_release_hint = this->GetUsedUserPhysicalMemorySize();
this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, 0, this->memory_release_hint);
}
/* Change state. */
{
KScopedSchedulerLock sl;
this->ChangeState(State_Terminated);
}
/* Close. */
this->Close();
}
void KProcess::Exit() {
MESOSPHERE_UNIMPLEMENTED();
MESOSPHERE_ASSERT_THIS();
/* Determine whether we need to start terminating. */
bool needs_terminate = false;
{
KScopedLightLock lk(this->state_lock);
KScopedSchedulerLock sl;
MESOSPHERE_ASSERT(this->state != State_Created);
MESOSPHERE_ASSERT(this->state != State_CreatedAttached);
MESOSPHERE_ASSERT(this->state != State_Crashed);
MESOSPHERE_ASSERT(this->state != State_Terminated);
if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_DebugBreak) {
this->ChangeState(State_Terminating);
needs_terminate = true;
}
}
/* If we need to start termination, do so. */
if (needs_terminate) {
this->StartTermination();
/* Note for debug that we're exiting the process. */
MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", this->process_id, this->name);
/* Register the process as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
}
/* Exit the current thread. */
GetCurrentThread().Exit();
MESOSPHERE_PANIC("Thread survived call to exit");
}
Result KProcess::Terminate() {
MESOSPHERE_UNIMPLEMENTED();
MESOSPHERE_ASSERT_THIS();
/* Determine whether we need to start terminating. */
bool needs_terminate = false;
{
KScopedLightLock lk(this->state_lock);
/* Check whether we're allowed to terminate. */
R_UNLESS(this->state != State_Created, svc::ResultInvalidState());
R_UNLESS(this->state != State_CreatedAttached, svc::ResultInvalidState());
KScopedSchedulerLock sl;
if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_Crashed || this->state == State_DebugBreak) {
this->ChangeState(State_Terminating);
needs_terminate = true;
}
}
/* If we need to terminate, do so. */
if (needs_terminate) {
/* Start termination. */
this->StartTermination();
/* Note for debug that we're terminating the process. */
MESOSPHERE_LOG("KProcess::Terminate() pid=%ld name=%-12s\n", this->process_id, this->name);
/* Call the debug callback. */
KDebug::OnTerminateProcess(this);
/* Finish termination. */
this->FinishTermination();
}
return ResultSuccess();
}
Result KProcess::AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
@ -491,7 +696,7 @@ namespace ams::kern {
MESOSPHERE_ASSERT(this->num_threads > 0);
if (const auto count = --this->num_threads; count == 0) {
MESOSPHERE_TODO("this->Terminate();");
this->Terminate();
}
}

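KProcess::Exit() and KProcess::Terminate() above share a single-terminator gate: under the state lock, the process moves to State_Terminating exactly once, and only the caller that performs that transition runs StartTermination()/FinishTermination(). A minimal sketch of that gate, assuming hypothetical names (ProcState, ProcessStateGate) and a plain mutex in place of the kernel's locks:

#include <mutex>

/* Illustrative-only model of the "only one terminator" state transition. */
enum class ProcState { Created, Running, Terminating, Terminated };

class ProcessStateGate {
    private:
        std::mutex m_lock;
        ProcState  m_state = ProcState::Running;
    public:
        /* Returns true only for the caller that wins the transition. */
        bool BeginTermination() {
            std::scoped_lock lk(m_lock);
            if (m_state == ProcState::Running) {
                m_state = ProcState::Terminating;
                return true;
            }
            return false;
        }
        void FinishTermination() {
            std::scoped_lock lk(m_lock);
            m_state = ProcState::Terminated;
        }
};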
View file

@ -716,7 +716,8 @@ namespace ams::kern {
MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
/* TODO: KDebug::OnExitThread(this); */
/* Call the debug callback. */
KDebug::OnExitThread(this);
/* Release the thread resource hint from parent. */
if (this->parent != nullptr) {
@ -740,6 +741,75 @@ namespace ams::kern {
MESOSPHERE_PANIC("KThread::Exit() would return");
}
void KThread::Terminate() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this != GetCurrentThreadPointer());
/* Request the thread terminate. */
if (const auto new_state = this->RequestTerminate(); new_state != ThreadState_Terminated) {
/* If the thread isn't terminated, wait for it to terminate. */
s32 index;
KSynchronizationObject *objects[] = { this };
Kernel::GetSynchronization().Wait(std::addressof(index), objects, 1, ams::svc::WaitInfinite);
}
}
KThread::ThreadState KThread::RequestTerminate() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this != GetCurrentThreadPointer());
KScopedSchedulerLock sl;
/* Determine if this is the first termination request. */
const bool first_request = [&] ALWAYS_INLINE_LAMBDA () -> bool {
/* Perform an atomic compare-and-swap from false to true. */
bool expected = false;
do {
if (expected) {
return false;
}
} while (!this->termination_requested.compare_exchange_weak(expected, true));
return true;
}();
/* If this is the first request, start termination procedure. */
if (first_request) {
/* If the thread is in initialized state, just change state to terminated. */
if (this->GetState() == ThreadState_Initialized) {
this->thread_state = ThreadState_Terminated;
return ThreadState_Terminated;
}
/* Register the terminating dpc. */
this->RegisterDpc(DpcFlag_Terminating);
/* If the thread is suspended, continue it. */
if (this->IsSuspended()) {
this->suspend_allowed_flags = 0;
this->Continue();
}
/* Change the thread's priority to be higher than any system thread's. */
if (this->GetBasePriority() >= ams::svc::SystemThreadPriorityHighest) {
this->SetBasePriority(ams::svc::SystemThreadPriorityHighest - 1);
}
/* If the thread is runnable, send a termination interrupt to other cores. */
if (this->GetState() == ThreadState_Runnable) {
if (const u64 core_mask = this->affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_ThreadTerminate, core_mask);
}
}
/* Wake up the thread. */
this->SetSyncedObject(nullptr, svc::ResultTerminationRequested());
this->Wakeup();
}
return this->GetState();
}
Result KThread::Sleep(s64 timeout) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread());

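The compare-exchange loop at the top of KThread::RequestTerminate() above ensures only the first requester performs the termination setup (DPC registration, resume, priority bump, IPI, wakeup). A minimal standalone sketch of that "first request wins" check, with a hypothetical class name and std::atomic in place of the kernel's field:

#include <atomic>

/* Illustrative-only model, not kernel code. */
class TerminationFlag {
    private:
        std::atomic<bool> m_requested{false};
    public:
        bool RequestOnce() {
            bool expected = false;
            do {
                if (expected) {
                    return false;   /* Another caller already requested termination. */
                }
            } while (!m_requested.compare_exchange_weak(expected, true));
            return true;            /* This caller made the false -> true transition. */
        }
};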
View file

@ -84,7 +84,7 @@ namespace ams::kern::svc {
void ExitThread() {
GetCurrentThread().Exit();
MESOSPHERE_PANIC("Process survived call to exit");
MESOSPHERE_PANIC("Thread survived call to exit");
}
void SleepThread(int64_t ns) {