Mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-12-22 12:21:18 +00:00)

kern: implement KProcess::Run

parent 28ea0b12a8
commit c568788609
25 changed files with 516 additions and 33 deletions

@@ -75,9 +75,11 @@
#include <mesosphere/kern_select_debug.hpp>
#include <mesosphere/kern_k_process.hpp>
#include <mesosphere/kern_k_resource_limit.hpp>
#include <mesosphere/kern_k_synchronization.hpp>

/* More Miscellaneous objects. */
#include <mesosphere/kern_k_object_name.hpp>
#include <mesosphere/kern_k_scoped_resource_reservation.hpp>

/* Supervisor Calls. */
#include <mesosphere/kern_svc.hpp>
@@ -97,7 +97,7 @@ namespace ams::kern::arch::arm64 {
u8 asid;
protected:
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
virtual void FinalizeUpdate(PageLinkedList *page_list) override;

KPageTableManager &GetPageTableManager() const { return *this->manager; }

@@ -201,6 +201,7 @@ namespace ams::kern::arch::arm64 {
}

Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);
Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll);

bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);
@@ -39,6 +39,14 @@ namespace ams::kern::arch::arm64 {
return this->page_table.SetProcessMemoryPermission(addr, size, perm);
}

Result SetHeapSize(KProcessAddress *out, size_t size) {
return this->page_table.SetHeapSize(out, size);
}

Result SetMaxHeapSize(size_t size) {
return this->page_table.SetMaxHeapSize(size);
}

Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
return this->page_table.MapIo(phys_addr, size, perm);
}

@@ -59,6 +67,10 @@ namespace ams::kern::arch::arm64 {
return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
}

Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
return this->page_table.MapPages(out_addr, num_pages, state, perm);
}

Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
return this->page_table.UnmapPages(addr, num_pages, state);
}
@@ -62,6 +62,8 @@ namespace ams::kern::arch::arm64 {
Result Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main);
Result Finalize();

void SetArguments(uintptr_t arg0, uintptr_t arg1);

static void FpuContextSwitchHandler(KThread *thread);

/* TODO: More methods (especially FPU management) */
@@ -197,6 +197,7 @@ namespace ams::kern {
this->obj->Open();
}
}

ALWAYS_INLINE ~KScopedAutoObject() {
if (this->obj != nullptr) {
this->obj->Close();
@@ -15,6 +15,7 @@
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_select_page_table.hpp>
#include <mesosphere/kern_svc.hpp>

@@ -208,10 +209,22 @@ namespace ams::kern {
util::BitPack32 intended_kernel_version;
u32 program_type{};
private:
static constexpr ALWAYS_INLINE void SetSvcAllowedImpl(u8 *data, u32 id) {
constexpr size_t BitsPerWord = BITSIZEOF(*data);
MESOSPHERE_ASSERT(id < svc::SvcId_Count);
data[id / BitsPerWord] |= (1ul << (id % BitsPerWord));
}

static constexpr ALWAYS_INLINE void ClearSvcAllowedImpl(u8 *data, u32 id) {
constexpr size_t BitsPerWord = BITSIZEOF(*data);
MESOSPHERE_ASSERT(id < svc::SvcId_Count);
data[id / BitsPerWord] &= ~(1ul << (id % BitsPerWord));
}

bool SetSvcAllowed(u32 id) {
constexpr size_t BitsPerWord = BITSIZEOF(this->svc_access_flags[0]);
if (id < BITSIZEOF(this->svc_access_flags)) {
this->svc_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord));
SetSvcAllowedImpl(this->svc_access_flags, id);
return true;
} else {
return false;

@@ -248,6 +261,19 @@ namespace ams::kern {

constexpr u64 GetCoreMask() const { return this->core_mask; }
constexpr u64 GetPriorityMask() const { return this->priority_mask; }
constexpr s32 GetHandleTableSize() const { return this->handle_table_size; }

ALWAYS_INLINE void CopySvcPermissionsTo(KThread::StackParameters &sp) const {
static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));

/* Clear specific SVCs based on our state. */
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);
if (sp.is_preemption_state_pinned) {
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo);
}
}

/* TODO: Member functions. */
};
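
The two *Impl helpers above treat svc_access_flags as a bitmap with one bit per SVC id: byte index id / BITSIZEOF(u8), bit index id % BITSIZEOF(u8). A standalone sketch of the same indexing in plain C++ (illustrative names, not the kernel's types):

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t BitsPerWord = 8;   /* BITSIZEOF(u8) in the kernel */

constexpr void SetSvcAllowed(uint8_t *data, uint32_t id) {
    data[id / BitsPerWord] |= static_cast<uint8_t>(1u << (id % BitsPerWord));
}

constexpr void ClearSvcAllowed(uint8_t *data, uint32_t id) {
    data[id / BitsPerWord] &= static_cast<uint8_t>(~(1u << (id % BitsPerWord)));
}

int main() {
    uint8_t flags[0x80 / BitsPerWord] = {};     /* 0x80 SVC ids -> 16 bytes */
    SetSvcAllowed(flags, 0x24);                 /* id 0x24: byte 4, bit 4 */
    std::printf("0x%02x\n", flags[4]);          /* prints 0x10 */
    ClearSvcAllowed(flags, 0x24);
    std::printf("0x%02x\n", flags[4]);          /* prints 0x00 */
    return 0;
}
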
@@ -18,6 +18,7 @@
#include <mesosphere/kern_k_auto_object.hpp>
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_k_interrupt_event.hpp>

namespace ams::kern {

@@ -98,7 +99,7 @@ namespace ams::kern {

/* Initialize all fields. */
this->table = this->entries;
this->table_size = (size <= 0) ? MaxTableSize : table_size;
this->table_size = (size <= 0) ? MaxTableSize : size;
this->next_linear_id = MinLinearId;
this->count = 0;
this->max_count = 0;

@@ -136,10 +137,10 @@ namespace ams::kern {

template<typename T = KAutoObject>
ALWAYS_INLINE KScopedAutoObject<T> GetObjectForIpc(ams::svc::Handle handle) const {
/* TODO: static_assert(!std::is_base_of<KInterruptEvent, T>::value); */
static_assert(!std::is_base_of<KInterruptEvent, T>::value);

KAutoObject *obj = this->GetObjectImpl(handle);
if (false /* TODO: obj->DynamicCast<KInterruptEvent *>() != nullptr */) {
if (obj->DynamicCast<KInterruptEvent *>() != nullptr) {
return nullptr;
}
if constexpr (std::is_same<T, KAutoObject>::value) {
@@ -73,8 +73,8 @@ namespace ams::kern {
public:
explicit Iterator(BaseIterator it) : base_it(it) { /* ... */ }

T *GetItem() const {
static_cast<pointer>(this->base_it->GetItem());
pointer GetItem() const {
return static_cast<pointer>(this->base_it->GetItem());
}

bool operator==(const Iterator &rhs) const {
@@ -111,12 +111,15 @@ namespace ams::kern {

class KScopedPageGroup {
private:
KPageGroup *group;
const KPageGroup *group;
public:
explicit ALWAYS_INLINE KScopedPageGroup(KPageGroup *gp) : group(gp) { group->Open(); }
explicit ALWAYS_INLINE KScopedPageGroup(KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ... */ }
ALWAYS_INLINE ~KScopedPageGroup() { group->Close(); }
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : group(gp) { if (this->group) { this->group->Open(); } }
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ... */ }
ALWAYS_INLINE ~KScopedPageGroup() { if (this->group) { this->group->Close(); } }

ALWAYS_INLINE void CancelClose() {
this->group = nullptr;
}
};

}
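
The rewritten KScopedPageGroup opens a reference on construction, closes it on destruction unless CancelClose() was called, and now tolerates a null group. A minimal standalone sketch of that cancelable-close pattern, with a toy refcounted stand-in for KPageGroup:

#include <cassert>

struct PageGroup {                  /* toy stand-in for KPageGroup */
    mutable int refs = 0;
    void Open() const  { ++refs; }
    void Close() const { --refs; }
};

class ScopedPageGroup {             /* same shape as KScopedPageGroup above */
    private:
        const PageGroup *group;
    public:
        explicit ScopedPageGroup(const PageGroup *gp) : group(gp) { if (this->group) { this->group->Open(); } }
        explicit ScopedPageGroup(const PageGroup &gp) : ScopedPageGroup(&gp) { /* ... */ }
        ~ScopedPageGroup() { if (this->group) { this->group->Close(); } }

        /* Keep the reference we opened (called after a successful map). */
        void CancelClose() { this->group = nullptr; }
};

int main() {
    PageGroup pg;
    {
        ScopedPageGroup spg(pg);    /* failure path: reference dropped at scope exit */
        assert(pg.refs == 1);
    }
    assert(pg.refs == 0);
    {
        ScopedPageGroup spg(pg);
        spg.CancelClose();          /* success path: keep the reference */
    }
    assert(pg.refs == 1);
    return 0;
}
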
@@ -185,7 +185,7 @@ namespace ams::kern {
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
protected:
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
virtual void FinalizeUpdate(PageLinkedList *page_list) = 0;

KPageTableImpl &GetImpl() { return this->impl; }

@@ -247,6 +247,8 @@ namespace ams::kern {

Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
Result SetHeapSize(KProcessAddress *out, size_t size);
Result SetMaxHeapSize(size_t size);
Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);

@@ -259,6 +261,10 @@ namespace ams::kern {
return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm);
}

Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
return this->MapPages(out_addr, num_pages, PageSize, Null<KPhysicalAddress>, false, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm);
}

Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
Result MapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm);
@@ -132,17 +132,40 @@ namespace ams::kern {

constexpr bool Is64Bit() const { return this->flags & ams::svc::CreateProcessFlag_Is64Bit; }

constexpr KProcessAddress GetEntryPoint() const { return this->code_address; }

constexpr bool IsSuspended() const {
return this->is_suspended;
}

KThread *GetPreemptionStatePinnedThread(s32 core_id) const {
MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
return this->pinned_threads[core_id];
}

void CopySvcPermissionsTo(KThread::StackParameters &sp) {
this->capabilities.CopySvcPermissionsTo(sp);
}

constexpr KResourceLimit *GetResourceLimit() const { return this->resource_limit; }

constexpr KProcessPageTable &GetPageTable() { return this->page_table; }
constexpr const KProcessPageTable &GetPageTable() const { return this->page_table; }

constexpr KHandleTable &GetHandleTable() { return this->handle_table; }
constexpr const KHandleTable &GetHandleTable() const { return this->handle_table; }

Result CreateThreadLocalRegion(KProcessAddress *out);
void *GetThreadLocalRegionPointer(KProcessAddress addr);

void IncrementThreadCount();
void DecrementThreadCount();

void RegisterThread(KThread *thread);
void UnregisterThread(KThread *thread);

Result Run(s32 priority, size_t stack_size);

void SetPreemptionState();
public:
/* Overridden parent functions. */

@@ -161,6 +184,14 @@ namespace ams::kern {
}

virtual void DoWorkerTask() override;
private:
void ChangeState(State new_state) {
if (this->state != new_state) {
this->state = new_state;
this->is_signaled = true;
this->NotifyAvailable();
}
}
};

}
@@ -0,0 +1,64 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_resource_limit.hpp>
#include <mesosphere/kern_k_process.hpp>

namespace ams::kern {

class KScopedResourceReservation {
private:
KResourceLimit *limit;
s64 value;
ams::svc::LimitableResource resource;
bool succeeded;
public:
ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v, s64 timeout) : limit(l), value(v), resource(r) {
if (this->limit && this->value) {
this->succeeded = this->limit->Reserve(this->resource, this->value, timeout);
} else {
this->succeeded = true;
}
}

ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v = 1) : limit(l), value(v), resource(r) {
if (this->limit && this->value) {
this->succeeded = this->limit->Reserve(this->resource, this->value);
} else {
this->succeeded = true;
}
}

ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v, s64 t) : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) { /* ... */ }
ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v = 1) : KScopedResourceReservation(p->GetResourceLimit(), r, v) { /* ... */ }

ALWAYS_INLINE ~KScopedResourceReservation() {
if (this->limit && this->value && this->succeeded) {
this->limit->Release(this->resource, this->value);
}
}

ALWAYS_INLINE void Commit() {
this->limit = nullptr;
}

ALWAYS_INLINE bool Succeeded() const {
return this->succeeded;
}
};

}
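
The intended usage (and the way KProcess::Run uses it later in this commit) is: reserve tentatively, bail out if Succeeded() is false, and call Commit() only once the resource is really consumed, so the destructor releases the reservation on every error path. A standalone sketch with a toy limit type, purely illustrative:

#include <cstdio>

struct ResourceLimit {                    /* toy stand-in for KResourceLimit */
    long used = 0;
    long max  = 0;
    bool Reserve(long v) { if (used + v > max) { return false; } used += v; return true; }
    void Release(long v) { used -= v; }
};

class ScopedResourceReservation {         /* same pattern as the class above */
    private:
        ResourceLimit *limit;
        long value;
        bool succeeded;
    public:
        ScopedResourceReservation(ResourceLimit *l, long v) : limit(l), value(v) {
            this->succeeded = (this->limit && this->value) ? this->limit->Reserve(this->value) : true;
        }
        ~ScopedResourceReservation() {
            if (this->limit && this->value && this->succeeded) { this->limit->Release(this->value); }
        }

        void Commit()          { this->limit = nullptr; }    /* keep the reservation */
        bool Succeeded() const { return this->succeeded; }
};

bool RunLikeCaller(ResourceLimit &thread_limit) {
    ScopedResourceReservation thread_reservation(&thread_limit, 1);
    if (!thread_reservation.Succeeded()) { return false; }   /* svc::ResultLimitReached() in the kernel */

    /* ... create and register the thread; any early return here releases the slot ... */

    thread_reservation.Commit();                             /* the thread now owns the slot */
    return true;
}

int main() {
    ResourceLimit thread_limit;
    thread_limit.max = 1;
    const bool first  = RunLikeCaller(thread_limit);
    const bool second = RunLikeCaller(thread_limit);
    std::printf("%d %d\n", first, second);                   /* prints: 1 0 */
    return 0;
}
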
@@ -0,0 +1,54 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_k_auto_object.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_k_linked_list.hpp>

/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_k_auto_object.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
#include <mesosphere/kern_k_thread.hpp>

namespace ams::kern {

class KSynchronization {
private:
friend class KSynchronizationObject;
public:
constexpr KSynchronization() { /* ... */ }

Result Wait(s32 *out_index, KSynchronizationObject **objects, const s32 num_objects, s64 timeout);
private:
void OnAvailable(KSynchronizationObject *object);
void OnAbort(KSynchronizationObject *object, Result abort_reason);
};

}
@@ -87,7 +87,7 @@ namespace ams::kern {
u8 current_svc_id;
bool is_calling_svc;
bool is_in_exception_handler;
bool has_exception_svc_perms;
bool is_preemption_state_pinned;
s32 disable_count;
KThreadContext *context;
};

@@ -198,7 +198,9 @@ namespace ams::kern {
return InitializeThread(thread, func, arg, Null<KProcessAddress>, 0, GetCurrentCoreId(), nullptr, ThreadType_HighPriority);
}

/* TODO: static Result InitializeUserThread */
static Result InitializeUserThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner) {
return InitializeThread(thread, func, arg, user_stack_top, prio, core, owner, ThreadType_User);
}
private:
StackParameters &GetStackParameters() {
return *(reinterpret_cast<StackParameters *>(this->kernel_stack_top) - 1);

@@ -267,8 +269,8 @@ namespace ams::kern {
public:
constexpr u64 GetThreadId() const { return this->thread_id; }

constexpr KThreadContext *GetContext() { return std::addressof(this->thread_context); }
constexpr const KThreadContext *GetContext() const { return std::addressof(this->thread_context); }
constexpr KThreadContext &GetContext() { return this->thread_context; }
constexpr const KThreadContext &GetContext() const { return this->thread_context; }
constexpr const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; }
constexpr ThreadState GetState() const { return static_cast<ThreadState>(this->thread_state & ThreadState_Mask); }
constexpr ThreadState GetRawState() const { return this->thread_state; }

@@ -304,6 +306,11 @@ namespace ams::kern {
constexpr void SetLockOwner(KThread *owner) { this->lock_owner = owner; }
constexpr KThread *GetLockOwner() const { return this->lock_owner; }

constexpr void SetSyncedObject(KSynchronizationObject *obj, Result wait_res) {
this->synced_object = obj;
this->wait_result = wait_res;
}

bool HasWaiters() const { return !this->waiter_list.empty(); }

constexpr s64 GetLastScheduledTick() const { return this->last_scheduled_tick; }
@@ -34,6 +34,7 @@ namespace ams::kern {
class KPageTableManager;
class KMemoryBlockSlabManager;
class KBlockInfoManager;
class KSynchronization;

@@ -70,6 +71,7 @@ namespace ams::kern {
static KMemoryBlockSlabManager s_sys_memory_block_manager;
static KBlockInfoManager s_block_info_manager;
static KSupervisorPageTable s_supervisor_page_table;
static KSynchronization s_synchronization;
static KWorkerTaskManager s_worker_task_managers[KWorkerTaskManager::WorkerType_Count];
private:
static ALWAYS_INLINE KCoreLocalContext &GetCoreLocalContext() {

@@ -138,6 +140,10 @@ namespace ams::kern {
return s_supervisor_page_table;
}

static ALWAYS_INLINE KSynchronization &GetSynchronization() {
return s_synchronization;
}

static ALWAYS_INLINE KWorkerTaskManager &GetWorkerTaskManager(KWorkerTaskManager::WorkerType type) {
MESOSPHERE_ASSERT(type <= KWorkerTaskManager::WorkerType_Count);
return s_worker_task_managers[type];
@@ -22,6 +22,16 @@ namespace ams::kern::svc {

static constexpr size_t NumSupervisorCalls = 0x80;

#define AMS_KERN_SVC_DECLARE_ENUM_ID(ID, RETURN_TYPE, NAME, ...) \
SvcId_##NAME = ID,

enum SvcId {
AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_ENUM_ID, __invalid)
SvcId_Count = NumSupervisorCalls,
};

#undef AMS_KERN_SVC_DECLARE_ENUM_ID

#define AMS_KERN_SVC_DECLARE_PROTOTYPE_64(ID, RETURN_TYPE, NAME, ...) \
NOINLINE RETURN_TYPE NAME##64(__VA_ARGS__);
#define AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32(ID, RETURN_TYPE, NAME, ...) \
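
SvcId is generated with an X-macro: AMS_SVC_FOREACH_KERN_DEFINITION expands the given handler once per SVC definition, so the same table also drives the prototype declarations below it. A self-contained sketch of the technique, using a cut-down three-entry table rather than the real definition list:

#include <cstdio>

/* A cut-down definition table; the real AMS_SVC_FOREACH_KERN_DEFINITION lists every SVC. */
#define MY_SVC_FOREACH(HANDLER)             \
    HANDLER(0x07, void, ExitProcess)        \
    HANDLER(0x24, long, GetProcessId)       \
    HANDLER(0x26, void, Break)

#define MY_SVC_DECLARE_ENUM_ID(ID, RETURN_TYPE, NAME) SvcId_##NAME = ID,

enum SvcId {
    MY_SVC_FOREACH(MY_SVC_DECLARE_ENUM_ID)
    SvcId_Count = 0x80,
};

#undef MY_SVC_DECLARE_ENUM_ID

int main() {
    std::printf("%d %d %d %d\n", SvcId_ExitProcess, SvcId_GetProcessId, SvcId_Break, SvcId_Count);  /* 7 36 38 128 */
    return 0;
}
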
@@ -184,8 +184,20 @@ namespace ams::kern::arch::arm64 {
}
}

Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) {
MESOSPHERE_TODO_IMPLEMENT();
Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) {
/* Check validity of parameters. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_ASSERT(num_pages > 0);
MESOSPHERE_ASSERT(num_pages == page_group.GetNumPages());

/* Map the page group. */
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_MapGroup:
return this->MapGroup(virt_addr, page_group, num_pages, entry_template, page_list, reuse_ll);
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}

Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {

@@ -510,6 +522,7 @@ namespace ams::kern::arch::arm64 {
}
}

/* We successfully mapped, so cancel our guard. */
map_guard.Cancel();
}
@@ -527,6 +540,50 @@ namespace ams::kern::arch::arm64 {
return ResultSuccess();
}

Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

/* We want to maintain a new reference to every page in the group. */
KScopedPageGroup spg(pg);

/* Cache initial address for use on cleanup. */
const KProcessAddress orig_virt_addr = virt_addr;

size_t mapped_pages = 0;

/* Map the pages, using a guard to ensure we don't leak. */
{
auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };

if (num_pages < ContiguousPageSize / PageSize) {
for (const auto &block : pg) {
const KPhysicalAddress block_phys_addr = GetLinearPhysicalAddress(block.GetAddress());
const size_t cur_pages = block.GetNumPages();
R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, L3BlockSize, page_list, reuse_ll));

virt_addr += cur_pages * PageSize;
mapped_pages += cur_pages;
}
} else {
MESOSPHERE_TODO("Large page group map");
}

/* We successfully mapped, so cancel our guard. */
map_guard.Cancel();
}
MESOSPHERE_ASSERT(mapped_pages == num_pages);

/* Perform what coalescing we can. */
this->MergePages(orig_virt_addr, page_list);
if (num_pages > 1) {
this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
}

/* We succeeded! We want to persist the reference to the pages. */
spg.CancelClose();
return ResultSuccess();
}

bool KPageTable::MergePages(KProcessAddress virt_addr, PageLinkedList *page_list) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
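
MapGroup above walks the group block by block: each block is physically contiguous, so it maps cur_pages in one call, advances the virtual cursor by cur_pages * PageSize, and finally asserts the block sizes add up to num_pages. A standalone sketch of that walk over a toy block list (no real page tables involved):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t PageSize = 0x1000;

struct Block {                      /* stand-in for one KPageGroup block */
    uintptr_t phys_addr;
    size_t num_pages;
};

/* "Map" each physically contiguous block, advancing the virtual cursor as MapGroup does. */
size_t MapGroupLike(uintptr_t virt_addr, const std::vector<Block> &group, size_t num_pages) {
    size_t mapped_pages = 0;
    for (const auto &block : group) {
        /* the kernel calls this->Map(virt_addr, block_phys_addr, cur_pages, ...) here */
        virt_addr    += block.num_pages * PageSize;
        mapped_pages += block.num_pages;
    }
    assert(mapped_pages == num_pages);
    return mapped_pages;
}

int main() {
    const std::vector<Block> group = { {0x80000000, 3}, {0x80100000, 1} };
    assert(MapGroupLike(0x40000000, group, 4) == 4);
    return 0;
}
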
@@ -137,6 +137,12 @@ namespace ams::kern::arch::arm64 {
return ResultSuccess();
}

void KThreadContext::SetArguments(uintptr_t arg0, uintptr_t arg1) {
u64 *stack = reinterpret_cast<u64 *>(this->sp);
stack[0] = arg0;
stack[1] = arg1;
}

void KThreadContext::FpuContextSwitchHandler(KThread *thread) {
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
MESOSPHERE_ASSERT(!IsFpuEnabled());

@@ -148,9 +154,9 @@ namespace ams::kern::arch::arm64 {
KProcess *process = thread->GetOwnerProcess();
MESOSPHERE_ASSERT(process != nullptr);
if (process->Is64Bit()) {
RestoreFpuRegisters64(*thread->GetContext());
RestoreFpuRegisters64(thread->GetContext());
} else {
RestoreFpuRegisters32(*thread->GetContext());
RestoreFpuRegisters32(thread->GetContext());
}
}
@@ -171,7 +171,7 @@ namespace ams::kern {

/* Run the processes. */
for (size_t i = 0; i < g_initial_process_binary_header.num_processes; i++) {
MESOSPHERE_TODO("infos[i].process->Run(infos[i].priority, infos[i].stack_size);");
MESOSPHERE_R_ABORT_UNLESS(infos[i].process->Run(infos[i].priority, infos[i].stack_size));
}
}

@@ -509,7 +509,7 @@ namespace ams::kern {
}

/* Map the pages. */
return this->Operate(page_list, address, num_pages, std::addressof(pg), properties, OperationType_MapGroup, false);
return this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false);
}

Result KPageTableBase::MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll) {
@@ -749,6 +749,22 @@ namespace ams::kern {
return ResultSuccess();
}

Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) {
MESOSPHERE_TODO_IMPLEMENT();
}

Result KPageTableBase::SetMaxHeapSize(size_t size) {
/* Lock the table. */
KScopedLightLock lk(this->general_lock);

/* Only process page tables are allowed to set heap size. */
MESOSPHERE_ASSERT(!this->IsKernel());

this->max_heap_size = size;

return ResultSuccess();
}

Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
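
SetMaxHeapSize only records the budget; the value comes from KProcess::Run below, which passes max_process_memory - (main_thread_stack_size + code_size) so that code, stack, and heap together can never exceed the process memory cap. A quick worked example with made-up sizes:

#include <cstddef>
#include <cstdio>

int main() {
    const size_t max_process_memory     = 64 << 20;    /* made-up numbers, not from the commit */
    const size_t code_size              = 1 << 20;
    const size_t main_thread_stack_size = 1 << 20;

    /* Mirrors: SetMaxHeapSize(max_process_memory - (main_thread_stack_size + code_size)) */
    const size_t max_heap_size = max_process_memory - (main_thread_stack_size + code_size);
    std::printf("%zu MiB\n", max_heap_size >> 20);      /* prints: 62 MiB */
    return 0;
}
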
@@ -222,6 +222,122 @@ namespace ams::kern {
return static_cast<u8 *>(tlp->GetPointer()) + (GetInteger(addr) & (PageSize - 1));
}

void KProcess::IncrementThreadCount() {
MESOSPHERE_ASSERT(this->num_threads >= 0);
++this->num_created_threads;

if (const auto count = ++this->num_threads; count > this->peak_num_threads) {
this->peak_num_threads = count;
}
}

void KProcess::DecrementThreadCount() {
MESOSPHERE_ASSERT(this->num_threads > 0);

if (const auto count = --this->num_threads; count == 0) {
MESOSPHERE_TODO("this->Terminate();");
}
}

void KProcess::RegisterThread(KThread *thread) {
KScopedLightLock lk(this->list_lock);

this->thread_list.push_back(*thread);
}

void KProcess::UnregisterThread(KThread *thread) {
KScopedLightLock lk(this->list_lock);

this->thread_list.erase(this->thread_list.iterator_to(*thread));
}

Result KProcess::Run(s32 priority, size_t stack_size) {
MESOSPHERE_ASSERT_THIS();

/* Lock ourselves, to prevent concurrent access. */
KScopedLightLock lk(this->lock);

/* Validate that we're in a state where we can initialize. */
const auto state = this->state;
R_UNLESS(state == State_Created || state == State_CreatedAttached, svc::ResultInvalidState());

/* Place a tentative reservation of a thread for this process. */
KScopedResourceReservation thread_reservation(this, ams::svc::LimitableResource_ThreadCountMax);
R_UNLESS(thread_reservation.Succeeded(), svc::ResultLimitReached());

/* Ensure that we haven't already allocated stack. */
MESOSPHERE_ABORT_UNLESS(this->main_thread_stack_size == 0);

/* Ensure that we're allocating a valid stack. */
stack_size = util::AlignUp(stack_size, PageSize);
R_UNLESS(stack_size + this->code_size <= this->max_process_memory, svc::ResultOutOfMemory());
R_UNLESS(stack_size + this->code_size >= this->code_size, svc::ResultOutOfMemory());

/* Place a tentative reservation of memory for our new stack. */
KScopedResourceReservation mem_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax);
R_UNLESS(mem_reservation.Succeeded(), svc::ResultLimitReached());

/* Allocate and map our stack. */
KProcessAddress stack_top = Null<KProcessAddress>;
if (stack_size) {
KProcessAddress stack_bottom;
R_TRY(this->page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, KMemoryState_Stack, KMemoryPermission_UserReadWrite));

stack_top = stack_bottom + stack_size;
this->main_thread_stack_size = stack_size;
}

/* Ensure our stack is safe to clean up on exit. */
auto stack_guard = SCOPE_GUARD {
if (this->main_thread_stack_size) {
MESOSPHERE_R_ABORT_UNLESS(this->page_table.UnmapPages(stack_top - this->main_thread_stack_size, this->main_thread_stack_size / PageSize, KMemoryState_Stack));
this->main_thread_stack_size = 0;
}
};

/* Set our maximum heap size. */
R_TRY(this->page_table.SetMaxHeapSize(this->max_process_memory - (this->main_thread_stack_size + this->code_size)));

/* Initialize our handle table. */
R_TRY(this->handle_table.Initialize(this->capabilities.GetHandleTableSize()));
auto ht_guard = SCOPE_GUARD { this->handle_table.Finalize(); };

/* Create a new thread for the process. */
KThread *main_thread = KThread::Create();
R_UNLESS(main_thread != nullptr, svc::ResultOutOfResource());
auto thread_guard = SCOPE_GUARD { main_thread->Close(); };

/* Initialize the thread. */
R_TRY(KThread::InitializeUserThread(main_thread, reinterpret_cast<KThreadFunction>(GetVoidPointer(this->GetEntryPoint())), 0, stack_top, priority, this->ideal_core_id, this));

/* Register the thread, and commit our reservation. */
KThread::Register(main_thread);
thread_reservation.Commit();

/* Add the thread to our handle table. */
ams::svc::Handle thread_handle;
R_TRY(this->handle_table.Add(std::addressof(thread_handle), main_thread));

/* Set the thread arguments. */
main_thread->GetContext().SetArguments(0, thread_handle);

/* Update our state. */
this->ChangeState((state == State_Created) ? State_Running : State_RunningAttached);
auto state_guard = SCOPE_GUARD { this->ChangeState(state); };

/* Run our thread. */
R_TRY(main_thread->Run());

/* We succeeded! Cancel our guards. */
state_guard.Cancel();
thread_guard.Cancel();
ht_guard.Cancel();
stack_guard.Cancel();
mem_reservation.Commit();

return ResultSuccess();
}

void KProcess::SetPreemptionState() {
MESOSPHERE_TODO_IMPLEMENT();
}
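
KProcess::Run builds its state behind a chain of scope guards and cancels them only after main_thread->Run() succeeds, so any failing R_TRY unwinds in reverse order (state change, thread, handle table, stack), and the still-uncommitted reservations release the thread slot and memory in their destructors. A minimal standalone sketch of that cancel-on-success idiom with a generic guard (not the kernel's SCOPE_GUARD macro):

#include <cstdio>
#include <utility>

template<typename F>
class ScopeGuard {                    /* simplified stand-in for SCOPE_GUARD */
    private:
        F f;
        bool active = true;
    public:
        explicit ScopeGuard(F func) : f(std::move(func)) { }
        ~ScopeGuard() { if (active) { f(); } }
        void Cancel() { active = false; }
};

bool RunLike(bool thread_run_fails) {
    std::puts("map stack");
    ScopeGuard stack_guard([] { std::puts("rollback: unmap stack"); });

    std::puts("init handle table");
    ScopeGuard ht_guard([] { std::puts("rollback: finalize handle table"); });

    std::puts("create main thread");
    ScopeGuard thread_guard([] { std::puts("rollback: close thread"); });

    if (thread_run_fails) {
        return false;                 /* guards fire in reverse order of construction */
    }

    /* success: keep everything */
    thread_guard.Cancel();
    ht_guard.Cancel();
    stack_guard.Cancel();
    return true;
}

int main() {
    RunLike(true);                    /* prints the three rollback lines, thread first */
    RunLike(false);                   /* no rollback output */
    return 0;
}
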
libraries/libmesosphere/source/kern_k_synchronization.cpp (new file, 59 lines)
@@ -0,0 +1,59 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>

namespace ams::kern {

Result KSynchronization::Wait(s32 *out_index, KSynchronizationObject **objects, const s32 num_objects, s64 timeout) {
MESOSPHERE_ASSERT_THIS();

MESOSPHERE_TODO_IMPLEMENT();
}

void KSynchronization::OnAvailable(KSynchronizationObject *object) {
MESOSPHERE_ASSERT_THIS();

KScopedSchedulerLock sl;

/* If we're not signaled, we've nothing to notify. */
if (!object->IsSignaled()) {
return;
}

/* Iterate over each thread. */
for (auto &thread : *object) {
if (thread.GetState() == KThread::ThreadState_Waiting) {
thread.SetSyncedObject(object, ResultSuccess());
thread.SetState(KThread::ThreadState_Runnable);
}
}
}

void KSynchronization::OnAbort(KSynchronizationObject *object, Result abort_reason) {
MESOSPHERE_ASSERT_THIS();

KScopedSchedulerLock sl;

/* Iterate over each thread. */
for (auto &thread : *object) {
if (thread.GetState() == KThread::ThreadState_Waiting) {
thread.SetSyncedObject(object, abort_reason);
thread.SetState(KThread::ThreadState_Runnable);
}
}
}

}
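
Both notification paths take the scheduler lock, then walk the object's waiters and make each waiting thread runnable, recording the synced object and result first so the woken thread can see why it woke. A standalone sketch of that wake loop over a toy waiter list (illustrative types, not KThread or KScopedSchedulerLock):

#include <cstdio>
#include <vector>

enum class ThreadState { Waiting, Runnable };

struct Thread {                       /* stand-in for KThread */
    ThreadState state  = ThreadState::Waiting;
    int         result = -1;          /* stand-in for the synced Result */
};

struct SyncObject {                   /* stand-in for KSynchronizationObject */
    bool signaled = false;
    std::vector<Thread *> waiters;
};

void OnAvailableLike(SyncObject &object) {
    /* KScopedSchedulerLock sl; -- taken here in the kernel */
    if (!object.signaled) {
        return;                       /* nothing to notify */
    }
    for (Thread *thread : object.waiters) {
        if (thread->state == ThreadState::Waiting) {
            thread->result = 0;       /* SetSyncedObject(object, ResultSuccess()) */
            thread->state  = ThreadState::Runnable;
        }
    }
}

int main() {
    Thread a, b;
    SyncObject obj;
    obj.waiters = { &a, &b };

    OnAvailableLike(obj);             /* not signaled yet: nobody wakes */
    std::printf("%d\n", a.state == ThreadState::Runnable);   /* 0 */

    obj.signaled = true;
    OnAvailableLike(obj);
    std::printf("%d\n", a.state == ThreadState::Runnable);   /* 1 */
    return 0;
}
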
@@ -17,16 +17,16 @@

namespace ams::kern {

void NotifyAvailable() {
void KSynchronizationObject::NotifyAvailable() {
MESOSPHERE_ASSERT_THIS();

MESOSPHERE_TODO_IMPLEMENT();
Kernel::GetSynchronization().OnAvailable(this);
}

void NotifyAbort(Result abort_reason) {
void KSynchronizationObject::NotifyAbort(Result abort_reason) {
MESOSPHERE_ASSERT_THIS();

MESOSPHERE_TODO_IMPLEMENT();
Kernel::GetSynchronization().OnAbort(this, abort_reason);
}

void KSynchronizationObject::Finalize() {
@@ -154,8 +154,8 @@ namespace ams::kern {

/* Setup the TLS, if needed. */
if (type == ThreadType_User) {
MESOSPHERE_TODO("R_TRY(owner->CreateThreadLocalRegion(&this->tls_address));");
MESOSPHERE_TODO("this->tls_heap_address = owner->GetThreadLocalRegionAddress(this->tls_address);");
R_TRY(owner->CreateThreadLocalRegion(std::addressof(this->tls_address)));
this->tls_heap_address = owner->GetThreadLocalRegionPointer(this->tls_address);
std::memset(this->tls_heap_address, 0, ams::svc::ThreadLocalRegionSize);
}

@@ -163,7 +163,7 @@ namespace ams::kern {
if (owner != nullptr) {
this->parent = owner;
this->parent->Open();
MESOSPHERE_TODO("this->parent->IncrementThreadCount();");
this->parent->IncrementThreadCount();
}

/* Initialize thread context. */

@@ -176,7 +176,7 @@ namespace ams::kern {
/* Setup the stack parameters. */
StackParameters &sp = this->GetStackParameters();
if (this->parent != nullptr) {
MESOSPHERE_TODO("this->parent->CopySvcPermissionTo(sp.svc_permission);");
this->parent->CopySvcPermissionsTo(sp);
}
sp.context = std::addressof(this->thread_context);
sp.disable_count = 1;

@@ -190,8 +190,10 @@ namespace ams::kern {

/* Register ourselves with our parent process. */
if (this->parent != nullptr) {
MESOSPHERE_TODO("this->parent->RegisterThread(this);");
MESOSPHERE_TODO("if (this->parent->IsSuspended()) { this->RequestSuspend(SuspendType_Process);");
this->parent->RegisterThread(this);
if (this->parent->IsSuspended()) {
this->RequestSuspend(SuspendType_Process);
}
}

return ResultSuccess();
@@ -559,7 +561,7 @@ namespace ams::kern {
}

KThreadContext *KThread::GetContextForSchedulerLoop() {
return std::addressof(this->thread_context);
return std::addressof(this->GetContext());
}

}

@@ -26,6 +26,7 @@ namespace ams::kern {
KMemoryBlockSlabManager Kernel::s_sys_memory_block_manager;
KBlockInfoManager Kernel::s_block_info_manager;
KSupervisorPageTable Kernel::s_supervisor_page_table;
KSynchronization Kernel::s_synchronization;
KWorkerTaskManager Kernel::s_worker_task_managers[KWorkerTaskManager::WorkerType_Count];

namespace {