Mirror of https://github.com/Atmosphere-NX/Atmosphere, synced 2024-12-23 04:41:12 +00:00
kern: implement 64-virtual-core interface
parent 72671d39ab
commit b421e3eadb
8 changed files with 224 additions and 96 deletions
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <mesosphere/kern_common.hpp>
+
+namespace ams::kern::board::nintendo::nx::impl::cpu {
+
+    /* Virtual to Physical core map. */
+    constexpr inline const s32 VirtualToPhysicalCoreMap[BITSIZEOF(u64)] = {
+        0, 1, 2, 3, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 3,
+    };
+
+}
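Note: the table above is the heart of the change — virtual cores 0-3 map one-to-one onto the four physical cores, virtual cores 4-62 collapse onto physical core 0, and virtual core 63 maps to physical core 3. The standalone sketch below is not part of the commit; plain <cstdint> types stand in for the kernel's u64/s32, and the map is rebuilt locally so the sketch is self-contained. It shows how a 64-bit virtual affinity mask folds down to a physical one, the same loop KThread::SetCoreMask performs later in this diff.

#include <array>
#include <cstdint>
#include <cstdio>

constexpr std::array<int, 64> kVirtualToPhysicalCoreMap = [] {
    std::array<int, 64> map{};                  /* virtual cores 4..62 -> physical 0 */
    for (int i = 0; i < 4; ++i) { map[i] = i; } /* virtual cores 0..3 are the identity */
    map[63] = 3;                                /* virtual core 63 -> physical 3 */
    return map;
}();

uint64_t VirtualToPhysicalMask(uint64_t v_mask) {
    uint64_t p_mask = 0;
    while (v_mask != 0) {
        /* Clear the lowest set virtual core and OR in its physical core, as in
         * KThread::SetCoreMask below. __builtin_ctzll is GCC/Clang, like the diff. */
        const int next = __builtin_ctzll(v_mask);
        v_mask &= ~(UINT64_C(1) << next);
        p_mask |= UINT64_C(1) << kVirtualToPhysicalCoreMap[next];
    }
    return p_mask;
}

int main() {
    /* Virtual cores {1, 5, 63} map to physical cores {1, 0, 3}: mask 0b1011 = 0xb. */
    const uint64_t v = (UINT64_C(1) << 1) | (UINT64_C(1) << 5) | (UINT64_C(1) << 63);
    std::printf("0x%llx\n", static_cast<unsigned long long>(VirtualToPhysicalMask(v)));
    return 0;
}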
@@ -152,7 +152,8 @@ namespace ams::kern {
             ConditionVariableThreadTree *condvar_tree{};
             uintptr_t condvar_key{};
-            KAffinityMask affinity_mask{};
+            u64 virtual_affinity_mask{};
+            KAffinityMask physical_affinity_mask{};
             u64 thread_id{};
             std::atomic<s64> cpu_time{};
             KSynchronizationObject *synced_object{};
@@ -181,12 +182,13 @@ namespace ams::kern {
             Result wait_result;
             Result debug_exception_result;
             s32 base_priority{};
-            s32 ideal_core_id{};
+            s32 physical_ideal_core_id{};
+            s32 virtual_ideal_core_id{};
             s32 num_kernel_waiters{};
             s32 current_core_id{};
             s32 core_id{};
-            KAffinityMask original_affinity_mask{};
-            s32 original_ideal_core_id{};
+            KAffinityMask original_physical_affinity_mask{};
+            s32 original_physical_ideal_core_id{};
             s32 num_core_migration_disables{};
             ThreadState thread_state{};
             std::atomic<bool> termination_requested{};
@@ -202,21 +204,21 @@ namespace ams::kern {
 
             virtual ~KThread() { /* ... */ }
 
-            Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type);
+            Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type);
 
         private:
-            static Result InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type);
+            static Result InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type);
         public:
-            static Result InitializeKernelThread(KThread *thread, KThreadFunction func, uintptr_t arg, s32 prio, s32 core) {
-                return InitializeThread(thread, func, arg, Null<KProcessAddress>, prio, core, nullptr, ThreadType_Kernel);
+            static Result InitializeKernelThread(KThread *thread, KThreadFunction func, uintptr_t arg, s32 prio, s32 virt_core) {
+                return InitializeThread(thread, func, arg, Null<KProcessAddress>, prio, virt_core, nullptr, ThreadType_Kernel);
             }
 
             static Result InitializeHighPriorityThread(KThread *thread, KThreadFunction func, uintptr_t arg) {
                 return InitializeThread(thread, func, arg, Null<KProcessAddress>, 0, GetCurrentCoreId(), nullptr, ThreadType_HighPriority);
             }
 
-            static Result InitializeUserThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner) {
-                return InitializeThread(thread, func, arg, user_stack_top, prio, core, owner, ThreadType_User);
+            static Result InitializeUserThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner) {
+                return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, ThreadType_User);
             }
 
             static void ResumeThreadsSuspendedForInit();
@@ -323,10 +325,14 @@ namespace ams::kern {
             constexpr KThreadContext &GetContext() { return this->thread_context; }
             constexpr const KThreadContext &GetContext() const { return this->thread_context; }
 
-            constexpr const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; }
+            constexpr const u64 GetVirtualAffinityMask() const { return this->virtual_affinity_mask; }
+            constexpr const KAffinityMask &GetAffinityMask() const { return this->physical_affinity_mask; }
 
             Result GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
             Result SetCoreMask(int32_t ideal_core, u64 affinity_mask);
 
+            Result GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
+
             constexpr ThreadState GetState() const { return static_cast<ThreadState>(this->thread_state & ThreadState_Mask); }
             constexpr ThreadState GetRawState() const { return this->thread_state; }
             NOINLINE void SetState(ThreadState state);
@@ -374,7 +380,9 @@ namespace ams::kern {
                 return this->condvar_tree != nullptr;
             }
 
-            constexpr s32 GetIdealCore() const { return this->ideal_core_id; }
+            constexpr s32 GetIdealVirtualCore() const { return this->virtual_ideal_core_id; }
+            constexpr s32 GetIdealPhysicalCore() const { return this->physical_ideal_core_id; }
 
             constexpr s32 GetActiveCore() const { return this->core_id; }
             constexpr void SetActiveCore(s32 core) { this->core_id = core; }
@@ -28,3 +28,24 @@
 #else
     #error "Unknown architecture for CPU"
 #endif
+
+#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+
+    #include <mesosphere/board/nintendo/nx/kern_cpu_map.hpp>
+
+    namespace ams::kern::cpu {
+
+        using namespace ams::kern::board::nintendo::nx::impl::cpu;
+
+    }
+
+#else
+    #error "Unknown board for CPU Map"
+#endif
+
+namespace ams::kern {
+
+    static_assert(cpu::NumCores <= static_cast<s32>(BITSIZEOF(u64)));
+    static_assert(util::size(cpu::VirtualToPhysicalCoreMap) == BITSIZEOF(u64));
+
+}
@@ -922,7 +922,7 @@ namespace ams::kern {
             mem_reservation.Commit();
 
             /* Note for debug that we're running a new process. */
-            MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", this->process_id, this->name, main_thread->GetId(), main_thread->GetAffinityMask().GetAffinityMask(), main_thread->GetIdealCore(), main_thread->GetActiveCore());
+            MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", this->process_id, this->name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore());
 
             return ResultSuccess();
         }
@@ -38,13 +38,17 @@ namespace ams::kern {
 
     }
 
-    Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
+    Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type) {
         /* Assert parameters are valid. */
         MESOSPHERE_ASSERT_THIS();
         MESOSPHERE_ASSERT(kern_stack_top != nullptr);
         MESOSPHERE_ASSERT((type == ThreadType_Main) || (ams::svc::HighestThreadPriority <= prio && prio <= ams::svc::LowestThreadPriority));
         MESOSPHERE_ASSERT((owner != nullptr) || (type != ThreadType_User));
-        MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
+        MESOSPHERE_ASSERT(0 <= virt_core && virt_core < static_cast<s32>(BITSIZEOF(u64)));
+
+        /* Convert the virtual core to a physical core. */
+        const s32 phys_core = cpu::VirtualToPhysicalCoreMap[virt_core];
+        MESOSPHERE_ASSERT(0 <= phys_core && phys_core < static_cast<s32>(cpu::NumCores));
 
         /* First, clear the TLS address. */
         this->tls_address = Null<KProcessAddress>;
@@ -60,7 +64,7 @@ namespace ams::kern {
                 [[fallthrough]];
             case ThreadType_HighPriority:
                 {
-                    MESOSPHERE_ASSERT(core == GetCurrentCoreId());
+                    MESOSPHERE_ASSERT(phys_core == GetCurrentCoreId());
                 }
                 [[fallthrough]];
             case ThreadType_Kernel:
@@ -71,7 +75,7 @@ namespace ams::kern {
                 [[fallthrough]];
             case ThreadType_User:
                 {
-                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetCoreMask() | (1ul << core)) == owner->GetCoreMask()));
+                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetCoreMask() | (1ul << virt_core)) == owner->GetCoreMask()));
                     MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetPriorityMask() | (1ul << prio)) == owner->GetPriorityMask()));
                 }
                 break;
@@ -81,8 +85,10 @@ namespace ams::kern {
         }
 
         /* Set the ideal core ID and affinity mask. */
-        this->ideal_core_id = core;
-        this->affinity_mask.SetAffinity(core, true);
+        this->virtual_ideal_core_id  = virt_core;
+        this->physical_ideal_core_id = phys_core;
+        this->virtual_affinity_mask  = (static_cast<u64>(1) << virt_core);
+        this->physical_affinity_mask.SetAffinity(phys_core, true);
 
         /* Set the thread state. */
         this->thread_state = (type == ThreadType_Main) ? ThreadState_Runnable : ThreadState_Initialized;
@@ -103,7 +109,7 @@ namespace ams::kern {
         this->cancellable = false;
 
         /* Set core ID and wait result. */
-        this->core_id = this->ideal_core_id;
+        this->core_id = phys_core;
         this->wait_result = svc::ResultNoSynchronizationObject();
 
         /* Set the stack top. */
@@ -141,7 +147,7 @@ namespace ams::kern {
         this->num_kernel_waiters = 0;
 
         /* Set our current core id. */
-        this->current_core_id = core;
+        this->current_core_id = phys_core;
 
         /* We haven't released our resource limit hint, and we've spent no time on the cpu. */
         this->resource_limit_release_hint = 0;
@@ -390,20 +396,19 @@ namespace ams::kern {
             ++this->num_core_migration_disables;
 
             /* Save our ideal state to restore when we're unpinned. */
-            this->original_ideal_core_id = this->ideal_core_id;
-            this->original_affinity_mask = this->affinity_mask;
+            this->original_physical_ideal_core_id = this->physical_ideal_core_id;
+            this->original_physical_affinity_mask = this->physical_affinity_mask;
 
             /* Bind ourselves to this core. */
             const s32 active_core  = this->GetActiveCore();
             const s32 current_core = GetCurrentCoreId();
 
             this->SetActiveCore(current_core);
-            this->ideal_core_id = current_core;
-            this->affinity_mask.SetAffinityMask(1ul << current_core);
+            this->physical_ideal_core_id = current_core;
+            this->physical_affinity_mask.SetAffinityMask(1ul << current_core);
 
-            if (active_core != current_core || this->affinity_mask.GetAffinityMask() != this->original_affinity_mask.GetAffinityMask()) {
-                KScheduler::OnThreadAffinityMaskChanged(this, this->original_affinity_mask, active_core);
+            if (active_core != current_core || this->physical_affinity_mask.GetAffinityMask() != this->original_physical_affinity_mask.GetAffinityMask()) {
+                KScheduler::OnThreadAffinityMaskChanged(this, this->original_physical_affinity_mask, active_core);
             }
         }
@@ -438,19 +443,19 @@ namespace ams::kern {
             --this->num_core_migration_disables;
 
             /* Restore our original state. */
-            const KAffinityMask old_mask = this->affinity_mask;
+            const KAffinityMask old_mask = this->physical_affinity_mask;
 
-            this->ideal_core_id = this->original_ideal_core_id;
-            this->affinity_mask = this->original_affinity_mask;
+            this->physical_ideal_core_id = this->original_physical_ideal_core_id;
+            this->physical_affinity_mask = this->original_physical_affinity_mask;
 
-            if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+            if (this->physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                 const s32 active_core = this->GetActiveCore();
 
-                if (!this->affinity_mask.GetAffinity(active_core)) {
-                    if (this->ideal_core_id >= 0) {
-                        this->SetActiveCore(this->ideal_core_id);
+                if (!this->physical_affinity_mask.GetAffinity(active_core)) {
+                    if (this->physical_ideal_core_id >= 0) {
+                        this->SetActiveCore(this->physical_ideal_core_id);
                     } else {
-                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask()));
+                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->physical_affinity_mask.GetAffinityMask()));
                     }
                 }
                 KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
@@ -492,16 +497,16 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0);
         if ((this->num_core_migration_disables++) == 0) {
             /* Save our ideal state to restore when we can migrate again. */
-            this->original_ideal_core_id = this->ideal_core_id;
-            this->original_affinity_mask = this->affinity_mask;
+            this->original_physical_ideal_core_id = this->physical_ideal_core_id;
+            this->original_physical_affinity_mask = this->physical_affinity_mask;
 
             /* Bind ourselves to this core. */
             const s32 active_core = this->GetActiveCore();
-            this->ideal_core_id = active_core;
-            this->affinity_mask.SetAffinityMask(1ul << active_core);
+            this->physical_ideal_core_id = active_core;
+            this->physical_affinity_mask.SetAffinityMask(1ul << active_core);
 
-            if (this->affinity_mask.GetAffinityMask() != this->original_affinity_mask.GetAffinityMask()) {
-                KScheduler::OnThreadAffinityMaskChanged(this, this->original_affinity_mask, active_core);
+            if (this->physical_affinity_mask.GetAffinityMask() != this->original_physical_affinity_mask.GetAffinityMask()) {
+                KScheduler::OnThreadAffinityMaskChanged(this, this->original_physical_affinity_mask, active_core);
             }
         }
     }
@@ -513,20 +518,20 @@ namespace ams::kern {
         KScopedSchedulerLock sl;
         MESOSPHERE_ASSERT(this->num_core_migration_disables > 0);
         if ((--this->num_core_migration_disables) == 0) {
-            const KAffinityMask old_mask = this->affinity_mask;
+            const KAffinityMask old_mask = this->physical_affinity_mask;
 
             /* Restore our ideals. */
-            this->ideal_core_id = this->original_ideal_core_id;
-            this->affinity_mask = this->original_affinity_mask;
+            this->physical_ideal_core_id = this->original_physical_ideal_core_id;
+            this->physical_affinity_mask = this->original_physical_affinity_mask;
 
-            if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+            if (this->physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                 const s32 active_core = this->GetActiveCore();
 
-                if (!this->affinity_mask.GetAffinity(active_core)) {
-                    if (this->ideal_core_id >= 0) {
-                        this->SetActiveCore(this->ideal_core_id);
+                if (!this->physical_affinity_mask.GetAffinity(active_core)) {
+                    if (this->physical_ideal_core_id >= 0) {
+                        this->SetActiveCore(this->physical_ideal_core_id);
                     } else {
-                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask()));
+                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->physical_affinity_mask.GetAffinityMask()));
                     }
                 }
                 KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
@@ -535,6 +540,19 @@ namespace ams::kern {
         }
     }
 
     Result KThread::GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
+        MESOSPHERE_ASSERT_THIS();
+        {
+            KScopedSchedulerLock sl;
+
+            /* Get the virtual mask. */
+            *out_ideal_core    = this->virtual_ideal_core_id;
+            *out_affinity_mask = this->virtual_affinity_mask;
+        }
+
+        return ResultSuccess();
+    }
+
+    Result KThread::GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
         MESOSPHERE_ASSERT_THIS();
         {
             KScopedSchedulerLock sl;
@@ -542,63 +560,72 @@ namespace ams::kern {
 
             /* Select between core mask and original core mask. */
             if (this->num_core_migration_disables == 0) {
-                *out_ideal_core    = this->ideal_core_id;
-                *out_affinity_mask = this->affinity_mask.GetAffinityMask();
+                *out_ideal_core    = this->physical_ideal_core_id;
+                *out_affinity_mask = this->physical_affinity_mask.GetAffinityMask();
             } else {
-                *out_ideal_core    = this->original_ideal_core_id;
-                *out_affinity_mask = this->original_affinity_mask.GetAffinityMask();
+                *out_ideal_core    = this->original_physical_ideal_core_id;
+                *out_affinity_mask = this->original_physical_affinity_mask.GetAffinityMask();
             }
         }
 
         return ResultSuccess();
     }
 
-    Result KThread::SetCoreMask(int32_t ideal_core, u64 affinity_mask) {
+    Result KThread::SetCoreMask(int32_t core_id, u64 v_affinity_mask) {
         MESOSPHERE_ASSERT_THIS();
         MESOSPHERE_ASSERT(this->parent != nullptr);
-        MESOSPHERE_ASSERT(affinity_mask != 0);
+        MESOSPHERE_ASSERT(v_affinity_mask != 0);
         KScopedLightLock lk(this->activity_pause_lock);
 
         /* Set the core mask. */
+        u64 p_affinity_mask = 0;
         {
             KScopedSchedulerLock sl;
             MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0);
 
             /* If the core id is no-update magic, preserve the ideal core id. */
-            if (ideal_core == ams::svc::IdealCoreNoUpdate) {
-                if (this->num_core_migration_disables == 0) {
-                    ideal_core = this->ideal_core_id;
-                } else {
-                    ideal_core = this->original_ideal_core_id;
-                }
-
-                R_UNLESS(((1ul << ideal_core) & affinity_mask) != 0, svc::ResultInvalidCombination());
-            }
+            if (core_id == ams::svc::IdealCoreNoUpdate) {
+                core_id = this->virtual_ideal_core_id;
+                R_UNLESS(((1ul << core_id) & v_affinity_mask) != 0, svc::ResultInvalidCombination());
+            }
+
+            /* Set the virtual core/affinity mask. */
+            this->virtual_ideal_core_id = core_id;
+            this->virtual_affinity_mask = v_affinity_mask;
+
+            /* Translate the virtual core to a physical core. */
+            if (core_id >= 0) {
+                core_id = cpu::VirtualToPhysicalCoreMap[core_id];
+            }
+
+            /* Translate the virtual affinity mask to a physical one. */
+            while (v_affinity_mask != 0) {
+                const u64 next = __builtin_ctzll(v_affinity_mask);
+                v_affinity_mask &= ~(1ul << next);
+                p_affinity_mask |= (1ul << cpu::VirtualToPhysicalCoreMap[next]);
+            }
 
             /* If we haven't disabled migration, perform an affinity change. */
             if (this->num_core_migration_disables == 0) {
-                const KAffinityMask old_mask = this->affinity_mask;
+                const KAffinityMask old_mask = this->physical_affinity_mask;
 
                 /* Set our new ideals. */
-                this->ideal_core_id = ideal_core;
-                this->affinity_mask.SetAffinityMask(affinity_mask);
+                this->physical_ideal_core_id = core_id;
+                this->physical_affinity_mask.SetAffinityMask(p_affinity_mask);
 
-                if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+                if (this->physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                     const s32 active_core = this->GetActiveCore();
 
-                    if (active_core >= 0) {
-                        if (!this->affinity_mask.GetAffinity(active_core)) {
-                            this->SetActiveCore(this->ideal_core_id);
-                        } else {
-                            this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask()));
-                        }
-                    }
+                    if (active_core >= 0 && !this->physical_affinity_mask.GetAffinity(active_core)) {
+                        const s32 new_core = this->physical_ideal_core_id >= 0 ? this->physical_ideal_core_id : BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->physical_affinity_mask.GetAffinityMask());
+                        this->SetActiveCore(new_core);
+                    }
                     KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
                 }
            } else {
                 /* Otherwise, we edit the original affinity for restoration later. */
-                this->original_ideal_core_id = ideal_core;
-                this->original_affinity_mask.SetAffinityMask(affinity_mask);
+                this->original_physical_ideal_core_id = core_id;
+                this->original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
             }
         }
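Note: the active-core reselection rule above recurs in SetCoreMask, Unpin, and EnableCoreMigration: when the new physical mask no longer allows the active core, migrate to the physical ideal core if one is set, otherwise to the highest core left in the mask. A sketch of that rule as a pure function, not kernel code; it assumes active_core >= 0 and a non-zero mask, and ignores the scheduler-notification side of the kernel logic.

#include <cstdint>
#include <cstdio>

/* Pick where a thread runs after its physical affinity mask changes.
 * A negative ideal core id means "no ideal core". */
int PickActiveCore(int active_core, int physical_ideal_core, uint64_t p_mask) {
    if ((p_mask & (UINT64_C(1) << active_core)) != 0) {
        return active_core;          /* still allowed: no migration */
    }
    if (physical_ideal_core >= 0) {
        return physical_ideal_core;  /* prefer the ideal core */
    }
    /* Highest set bit, i.e. BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(mask). */
    return 63 - __builtin_clzll(p_mask);
}

int main() {
    /* Core 2 removed from the mask, no ideal core: cores {0, 3} remain, pick 3. */
    std::printf("%d\n", PickActiveCore(2, -1, 0b1001));
    return 0;
}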
@@ -627,7 +654,7 @@ namespace ams::kern {
             }
 
             /* If the thread is currently running, check whether it's no longer allowed under the new mask. */
-            if (thread_is_current && ((1ul << thread_core) & affinity_mask) == 0) {
+            if (thread_is_current && ((1ul << thread_core) & p_affinity_mask) == 0) {
                 /* If the thread is pinned, we want to wait until it's not pinned. */
                 if (this->GetStackParameters().is_pinned) {
                     /* Verify that the current thread isn't terminating. */
@@ -1127,7 +1154,7 @@ namespace ams::kern {
 
         /* If the thread is runnable, send a termination interrupt to other cores. */
         if (this->GetState() == ThreadState_Runnable) {
-            if (const u64 core_mask = this->affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
+            if (const u64 core_mask = this->physical_affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
                 cpu::DataSynchronizationBarrier();
                 Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_ThreadTerminate, core_mask);
             }
@@ -363,7 +363,11 @@ namespace ams::kern::svc {
                 case ams::svc::DebugThreadParam_IdealCore:
                     {
                         /* Get the ideal core. */
-                        *out_32 = thread->GetIdealCore();
+                        s32 core_id;
+                        u64 affinity_mask;
+                        thread->GetPhysicalCoreMask(std::addressof(core_id), std::addressof(affinity_mask));
+
+                        *out_32 = core_id;
                     }
                     break;
                 case ams::svc::DebugThreadParam_CurrentCore:
@@ -375,7 +379,11 @@ namespace ams::kern::svc {
                 case ams::svc::DebugThreadParam_AffinityMask:
                     {
                         /* Get the affinity mask. */
-                        *out_32 = thread->GetAffinityMask().GetAffinityMask();
+                        s32 core_id;
+                        u64 affinity_mask;
+                        thread->GetPhysicalCoreMask(std::addressof(core_id), std::addressof(affinity_mask));
+
+                        *out_32 = affinity_mask;
                     }
                     break;
                 default:
@@ -206,15 +206,18 @@ namespace ams::kern::svc {
                 case ams::svc::InfoType_ThreadTickCount:
                     {
                         /* Verify the requested core is valid. */
-                        const bool core_valid = (info_subtype == static_cast<u64>(-1ul)) || (info_subtype < cpu::NumCores);
+                        const bool core_valid = (info_subtype == static_cast<u64>(-1ul)) || (info_subtype < util::size(cpu::VirtualToPhysicalCoreMap));
                         R_UNLESS(core_valid, svc::ResultInvalidCombination());
 
                         /* Get the thread from its handle. */
                         KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject<KThread>(handle);
                         R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
 
-                        /* Get the tick count. */
+                        /* Disable interrupts while we get the tick count. */
                         s64 tick_count;
+                        {
+                            KScopedInterruptDisable di;
 
                         if (info_subtype == static_cast<u64>(-1ul)) {
                             tick_count = thread->GetCpuTime();
                             if (GetCurrentThreadPointer() == thread.GetPointerUnsafe()) {
@@ -223,13 +226,17 @@ namespace ams::kern::svc {
                             tick_count += (cur_tick - prev_switch);
                         }
                     } else {
-                        tick_count = thread->GetCpuTime(static_cast<s32>(info_subtype));
-                        if (GetCurrentThreadPointer() == thread.GetPointerUnsafe() && static_cast<s32>(info_subtype) == GetCurrentCoreId()) {
+                        const s32 phys_core = cpu::VirtualToPhysicalCoreMap[info_subtype];
+                        MESOSPHERE_ABORT_UNLESS(phys_core < static_cast<s32>(cpu::NumCores));
+
+                        tick_count = thread->GetCpuTime(phys_core);
+                        if (GetCurrentThreadPointer() == thread.GetPointerUnsafe() && phys_core == GetCurrentCoreId()) {
                             const s64 cur_tick = KHardwareTimer::GetTick();
                             const s64 prev_switch = Kernel::GetScheduler().GetLastContextSwitchTime();
                             tick_count += (cur_tick - prev_switch);
                         }
                     }
+                }
 
                 /* Set the output. */
                 *out = tick_count;
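Note: InfoType_ThreadTickCount's info_subtype is now a virtual core index: validated against the 64-entry map instead of cpu::NumCores, then translated before the per-core tick count is read. A hedged sketch of just that validation step; kNumPhysicalCores stands in for cpu::NumCores, and the table here is trimmed (entries 4..63 default to 0, whereas the real table also maps entry 63 to 3).

#include <cstdint>

constexpr int kNumPhysicalCores = 4;  /* stand-in for cpu::NumCores */
constexpr int kVirtualToPhysicalCoreMap[64] = { 0, 1, 2, 3 };  /* remaining entries zero-fill */

/* Returns true and sets *out_phys_core when the subtype names a queryable core;
 * -1 in *out_phys_core means "all cores" (the -1ul subtype). */
bool TranslateTickCountSubtype(uint64_t info_subtype, int *out_phys_core) {
    if (info_subtype == static_cast<uint64_t>(-1)) {
        *out_phys_core = -1;
        return true;
    }
    if (info_subtype >= 64) {
        return false;  /* svc::ResultInvalidCombination() in the kernel */
    }
    *out_phys_core = kVirtualToPhysicalCoreMap[info_subtype];
    return *out_phys_core < kNumPhysicalCores;  /* MESOSPHERE_ABORT_UNLESS above */
}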
@@ -22,7 +22,31 @@ namespace ams::kern::svc {
     namespace {
 
         int32_t GetCurrentProcessorNumber() {
-            return GetCurrentCoreId();
+            /* Setup variables to track affinity information. */
+            s32 current_phys_core;
+            u64 v_affinity_mask = 0;
+
+            /* Forever try to get the affinity. */
+            while (true) {
+                /* Update affinity information if we've run out. */
+                while (v_affinity_mask == 0) {
+                    current_phys_core = GetCurrentCoreId();
+                    v_affinity_mask   = GetCurrentThread().GetVirtualAffinityMask();
+                    if ((v_affinity_mask & (1ul << current_phys_core)) != 0) {
+                        return current_phys_core;
+                    }
+                }
+
+                /* Check the next virtual bit. */
+                do {
+                    const s32 next_virt_core = static_cast<s32>(__builtin_ctzll(v_affinity_mask));
+                    if (current_phys_core == cpu::VirtualToPhysicalCoreMap[next_virt_core]) {
+                        return next_virt_core;
+                    }
+
+                    v_affinity_mask &= ~(1ul << next_virt_core);
+                } while (v_affinity_mask != 0);
+            }
         }
 
     }
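Note: with this hunk GetCurrentProcessorNumber reports a virtual core. If the thread's virtual affinity mask contains the current physical core id, that id is returned directly (the identity-mapped cores 0-3); otherwise the lowest virtual core in the mask that maps onto the current physical core is returned, and the outer loop re-reads the affinity because the thread may migrate between reads. Below, a sketch of the same lookup as a pure function over one affinity snapshot, with the kernel's re-read-and-retry loop omitted.

#include <array>
#include <cstdint>

constexpr std::array<int, 64> kVirtualToPhysicalCoreMap = [] {
    std::array<int, 64> map{};                  /* virtual cores 4..62 -> physical 0 */
    for (int i = 0; i < 4; ++i) { map[i] = i; } /* virtual cores 0..3 are the identity */
    map[63] = 3;                                /* virtual core 63 -> physical 3 */
    return map;
}();

/* Virtual core number to report for a thread running on current_phys_core with
 * the given virtual affinity mask; -1 if the snapshot no longer matches. */
int CurrentVirtualCore(int current_phys_core, uint64_t v_affinity_mask) {
    /* Fast path: the mask contains the physical id itself. */
    if ((v_affinity_mask & (UINT64_C(1) << current_phys_core)) != 0) {
        return current_phys_core;
    }
    /* Slow path: lowest virtual core in the mask mapping to this physical core. */
    while (v_affinity_mask != 0) {
        const int next_virt_core = __builtin_ctzll(v_affinity_mask);
        if (kVirtualToPhysicalCoreMap[next_virt_core] == current_phys_core) {
            return next_virt_core;
        }
        v_affinity_mask &= ~(UINT64_C(1) << next_virt_core);
    }
    /* The kernel re-reads the affinity and retries here instead of failing. */
    return -1;
}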