mirror of https://github.com/Atmosphere-NX/Atmosphere (synced 2024-11-09 22:56:35 +00:00)

kern: implement more of KInterruptManager

parent 62de3322ff
commit 5f857cb079

17 changed files with 579 additions and 39 deletions
@@ -26,8 +26,6 @@ namespace ams::kern::arm64 {
}

class KHardwareTimer : public KHardwareTimerBase {
public:
static constexpr s32 InterruptId = 30; /* Nintendo uses the non-secure timer interrupt. */
public:
constexpr KHardwareTimer() : KHardwareTimerBase() { /* ... */ }
public:
@@ -43,9 +43,15 @@ namespace ams::kern::arm64 {
u32 icpendr[32];
u32 isactiver[32];
u32 icactiver[32];
u8 ipriorityr[1020];
union {
u8 bytes[1020];
u32 words[255];
} ipriorityr;
u32 _0x7fc;
u8 itargetsr[1020];
union {
u8 bytes[1020];
u32 words[255];
} itargetsr;
u32 _0xbfc;
u32 icfgr[64];
u32 igrpmodr[32];

@@ -56,11 +62,20 @@ namespace ams::kern::arm64 {
u32 cpendsgir[4];
u32 spendsgir[4];
u32 reserved_0xf30[52];

static constexpr size_t SgirCpuTargetListShift = 16;

enum SgirTargetListFilter : u32 {
SgirTargetListFilter_CpuTargetList = (0 << 24),
SgirTargetListFilter_Others = (1 << 24),
SgirTargetListFilter_Self = (2 << 24),
SgirTargetListFilter_Reserved = (3 << 24),
};
};
static_assert(std::is_pod<GicDistributor>::value);
static_assert(sizeof(GicDistributor) == 0x1000);

struct GicController {
struct GicCpuInterface {
u32 ctlr;
u32 pmr;
u32 bpr;

@@ -83,16 +98,18 @@ namespace ams::kern::arm64 {
u32 dir;
u32 _0x1004[1023];
};
static_assert(std::is_pod<GicController>::value);
static_assert(sizeof(GicController) == 0x2000);
static_assert(std::is_pod<GicCpuInterface>::value);
static_assert(sizeof(GicCpuInterface) == 0x2000);

struct KInterruptController {
NON_COPYABLE(KInterruptController);
NON_MOVEABLE(KInterruptController);
public:
static constexpr size_t NumLocalInterrupts = 32;
static constexpr size_t NumGlobalInterrupts = 988;
static constexpr size_t NumInterrupts = NumLocalInterrupts + NumGlobalInterrupts;
static constexpr s32 NumSoftwareInterrupts = 16;
static constexpr s32 NumLocalInterrupts = NumSoftwareInterrupts + 16;
static constexpr s32 NumGlobalInterrupts = 988;
static constexpr s32 NumInterrupts = NumLocalInterrupts + NumGlobalInterrupts;
static constexpr s32 NumPriorityLevels = 4;
public:
struct LocalState {
u32 local_isenabler[NumLocalInterrupts / 32];

@@ -107,16 +124,135 @@ namespace ams::kern::arm64 {
u32 global_targetsr[NumGlobalInterrupts / 4];
u32 global_icfgr[NumGlobalInterrupts / 16];
};

enum PriorityLevel : u8 {
PriorityLevel_High = 0,
PriorityLevel_Low = NumPriorityLevels - 1,

PriorityLevel_Timer = 1,
PriorityLevel_Scheduler = 2,
};
private:
static inline volatile GicDistributor *s_gicd;
static inline volatile GicController *s_gicc;
static inline u32 s_mask[cpu::NumCores];
private:
volatile GicDistributor *gicd;
volatile GicController *gicc;
volatile GicDistributor *gicd;
volatile GicCpuInterface *gicc;
public:
KInterruptController() { /* Don't initialize anything -- this will be taken care of by ::Initialize() */ }
constexpr KInterruptController() : gicd(nullptr), gicc(nullptr) { /* ... */ }

/* TODO: Actually implement KInterruptController functionality. */
void Initialize(s32 core_id);
void Finalize(s32 core_id);
public:
void Enable(s32 irq) const {
this->gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
}

void Disable(s32 irq) const {
this->gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
}

void Clear(s32 irq) const {
this->gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
}

void SetTarget(s32 irq, s32 core_id) const {
this->gicd->itargetsr.bytes[irq] |= GetGicMask(core_id);
}

void ClearTarget(s32 irq, s32 core_id) const {
this->gicd->itargetsr.bytes[irq] &= ~GetGicMask(core_id);
}

void SetPriorityLevel(s32 irq, s32 level) const {
MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
this->gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level);
}

s32 GetPriorityLevel(s32 irq) const {
return FromGicPriorityValue(this->gicd->ipriorityr.bytes[irq]);
}

void SetPriorityLevel(s32 level) const {
MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
this->gicc->pmr = ToGicPriorityValue(level);
}

void SetEdge(s32 irq) const {
u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
cfg |= (0x2 << (2 * (irq % (BITSIZEOF(u32) / 2))));
this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
}

void SetLevel(s32 irq) const {
u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
cfg |= (0x0 << (2 * (irq % (BITSIZEOF(u32) / 2))));
this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
}

void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
MESOSPHERE_ASSERT(IsSoftware(irq));
this->gicd->sgir = GetCpuTargetListMask(irq, core_mask);
}

void SendInterProcessorInterrupt(s32 irq) {
MESOSPHERE_ASSERT(IsSoftware(irq));
this->gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq;
}

/* TODO: Implement more KInterruptController functionality. */
public:
static constexpr ALWAYS_INLINE bool IsSoftware(s32 id) {
MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts);
return id < NumSoftwareInterrupts;
}

static constexpr ALWAYS_INLINE bool IsLocal(s32 id) {
MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts);
return id < NumLocalInterrupts;
}

static constexpr ALWAYS_INLINE bool IsGlobal(s32 id) {
MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts);
return NumLocalInterrupts <= id;
}

static constexpr size_t GetGlobalInterruptIndex(s32 id) {
MESOSPHERE_ASSERT(IsGlobal(id));
return id - NumLocalInterrupts;
}

static constexpr size_t GetLocalInterruptIndex(s32 id) {
MESOSPHERE_ASSERT(IsLocal(id));
return id;
}
private:
static constexpr size_t PriorityShift = BITSIZEOF(u8) - __builtin_ctz(NumPriorityLevels);
static_assert(PriorityShift < BITSIZEOF(u8));

static constexpr ALWAYS_INLINE u8 ToGicPriorityValue(s32 level) {
return (level << PriorityShift) | ((1 << PriorityShift) - 1);
}

static constexpr ALWAYS_INLINE s32 FromGicPriorityValue(u8 priority) {
return (priority >> PriorityShift) & (NumPriorityLevels - 1);
}

static constexpr ALWAYS_INLINE s32 GetCpuTargetListMask(s32 irq, u64 core_mask) {
MESOSPHERE_ASSERT(IsSoftware(irq));
MESOSPHERE_ASSERT(core_mask < (1ul << cpu::NumCores));
return GicDistributor::SgirTargetListFilter_CpuTargetList | irq | (static_cast<u16>(core_mask) << GicDistributor::SgirCpuTargetListShift);
}

static ALWAYS_INLINE s32 GetGicMask(s32 core_id) {
return s_mask[core_id];
}

ALWAYS_INLINE void SetGicMask(s32 core_id) const {
s_mask[core_id] = this->gicd->itargetsr.bytes[0];
}

NOINLINE void SetupInterruptLines(s32 core_id) const;
};
}
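A note on the priority helpers above: with NumPriorityLevels = 4, PriorityShift works out to BITSIZEOF(u8) - __builtin_ctz(4) = 8 - 2 = 6, so the kernel's four levels occupy the top two bits of each GIC priority byte and the low six bits are saturated. Since the GIC treats numerically lower priority values as more urgent, PriorityLevel_High (0) maps to the most urgent of the four encoded values. Likewise, GetCpuTargetListMask packs the SGI number into the low bits of GICD_SGIR and the per-core target list into bits 16 and up (SgirCpuTargetListShift). A minimal standalone sketch of the value mapping, using the same __builtin_ctz expression as the header (hypothetical program, not part of the commit):

#include <cstdint>
#include <cstdio>

// Mirrors the constants in KInterruptController (sketch only).
constexpr int NumPriorityLevels = 4;
constexpr int PriorityShift     = 8 - __builtin_ctz(NumPriorityLevels); // == 6

constexpr uint8_t ToGicPriorityValue(int level) {
    return static_cast<uint8_t>((level << PriorityShift) | ((1 << PriorityShift) - 1));
}

int main() {
    // PriorityLevel_High (0) -> 0x3F, Timer (1) -> 0x7F, Scheduler (2) -> 0xBF, Low (3) -> 0xFF.
    for (int level = 0; level < NumPriorityLevels; ++level) {
        std::printf("level %d -> 0x%02X\n", level, ToGicPriorityValue(level));
    }
    return 0;
}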
@@ -31,12 +31,20 @@ namespace ams::kern::arm64 {
bool manually_cleared;
bool needs_clear;
u8 priority;

constexpr KCoreLocalInterruptEntry()
: handler(nullptr), manually_cleared(false), needs_clear(false), priority(KInterruptController::PriorityLevel_Low)
{
/* ... */
}
};

struct KGlobalInterruptEntry {
KInterruptHandler *handler;
bool manually_cleared;
bool needs_clear;

constexpr KGlobalInterruptEntry() : handler(nullptr), manually_cleared(false), needs_clear(false) { /* ... */ }
};
private:
static inline KSpinLock s_lock;

@@ -48,23 +56,53 @@ namespace ams::kern::arm64 {
KInterruptController interrupt_controller;
KInterruptController::LocalState local_state;
bool local_state_saved;
private:
static ALWAYS_INLINE KSpinLock &GetLock() { return s_lock; }
static ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return s_global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return this->core_local_interrupts[KInterruptController::GetLocalInterruptIndex(irq)]; }
public:
KInterruptManager() : local_state_saved(false) { /* Leave things mostly uninitalized. We'll call ::Initialize() later. */ }
/* TODO: Actually implement KInterruptManager functionality. */
constexpr KInterruptManager() : core_local_interrupts(), interrupt_controller(), local_state(), local_state_saved(false) { /* ... */ }
NOINLINE void Initialize(s32 core_id);
NOINLINE void Finalize(s32 core_id);

NOINLINE Result BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
NOINLINE Result UnbindHandler(s32 irq, s32 core);

NOINLINE Result ClearInterrupt(s32 irq);
NOINLINE Result ClearInterrupt(s32 irq, s32 core_id);

ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
this->interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
}

ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq) {
this->interrupt_controller.SendInterProcessorInterrupt(irq);
}

/* Implement more KInterruptManager functionality. */
private:
Result BindGlobal(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
Result BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear);
Result UnbindGlobal(s32 irq);
Result UnbindLocal(s32 irq);
Result ClearGlobal(s32 irq);
Result ClearLocal(s32 irq);
public:
static ALWAYS_INLINE u32 DisableInterrupts() {
u64 intr_state;
__asm__ __volatile__("mrs %[intr_state], daif" : [intr_state]"=r"(intr_state));
__asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"(intr_state | 0x80));
__asm__ __volatile__("mrs %[intr_state], daif\n"
"msr daifset, #2"
: [intr_state]"=r"(intr_state)
:: "memory");
return intr_state;
}

static ALWAYS_INLINE u32 EnableInterrupts() {
u64 intr_state;
__asm__ __volatile__("mrs %[intr_state], daif" : [intr_state]"=r"(intr_state));
__asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"(intr_state & ~0x80ul));
__asm__ __volatile__("mrs %[intr_state], daif\n"
"msr daifclr, #2"
: [intr_state]"=r"(intr_state)
:: "memory");
return intr_state;
}
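The rewritten DisableInterrupts/EnableInterrupts read the whole DAIF register, then use the dedicated "msr daifset, #2" / "msr daifclr, #2" forms: bit 1 of that immediate is the I flag (IRQ mask, bit 7 of DAIF, matching the old 0x80 constant), so only IRQ masking changes, the other DAIF bits are untouched, and the previous state is returned for a later restore. A hedged sketch of the save/restore idiom, written with standard types and AArch64-only inline asm; the helper names here are illustrative and not part of the commit:

#include <cstdint>

// Hypothetical illustration of the idiom the new helpers enable.
// "msr daifset, #2" sets the I flag (DAIF bit 7); "msr daif, x" restores the saved state.
inline uint64_t DisableIrqs() {
    uint64_t daif;
    __asm__ __volatile__("mrs %[daif], daif\n"
                         "msr daifset, #2"
                         : [daif]"=r"(daif)
                         :: "memory");
    return daif;
}

inline void RestoreIrqs(uint64_t daif) {
    __asm__ __volatile__("msr daif, %[daif]" :: [daif]"r"(daif) : "memory");
}

void CriticalSection() {
    const uint64_t prev = DisableIrqs();   /* IRQs masked on this core from here on */
    /* ... work that must not observe an IRQ ... */
    RestoreIrqs(prev);                     /* restore the exact previous mask state */
}

In the kernel itself this pattern is presumably wrapped by the KScopedInterruptDisable RAII type used later in this commit.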
@@ -0,0 +1,31 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once

namespace ams::kern::arm64 {

namespace interrupt_name {
enum KInterruptName : s32 {
KInterruptName_Scheduler = 6,
KInterruptName_HardwareTimerEl1 = 30,

#if defined(ATMOSPHERE_BOARD_NINTENDO_SWITCH)
KInterruptName_MemoryController = 109,
#endif
};
};

}
@@ -41,4 +41,8 @@ namespace ams::kern {
virtual void DoTask() = 0;
};

static ALWAYS_INLINE KInterruptTask *GetDummyInterruptTask() {
return reinterpret_cast<KInterruptTask *>(1);
}

}
@@ -40,6 +40,10 @@ namespace ams::kern {
TaskQueue task_queue;
KThread *thread;
public:
constexpr KInterruptTaskManager() : task_queue(), thread(nullptr) { /* ... */ }

constexpr ALWAYS_INLINE KThread *GetThread() const { return this->thread; }

/* TODO: Actually implement KInterruptTaskManager. This is a placeholder. */
};
@@ -57,8 +57,8 @@ namespace ams::kern {
KMemoryRegionType_VirtualDramSystemPool = 0x2B1A,

KMemoryRegionType_Uart = 0x1D,
KMemoryRegionType_InterruptDistributor = 0x4D,
KMemoryRegionType_InterruptController = 0x2D,
KMemoryRegionType_InterruptDistributor = 0x4D | KMemoryRegionAttr_NoUserMap,
KMemoryRegionType_InterruptCpuInterface = 0x2D | KMemoryRegionAttr_NoUserMap,

KMemoryRegionType_MemoryController = 0x55,
KMemoryRegionType_MemoryController0 = 0x95,

@@ -425,6 +425,14 @@ namespace ams::kern {
return GetVirtualMemoryBlockTree().FindFirstBlockByType(KMemoryRegionType_CoreLocal)->GetAddress();
}

static NOINLINE KVirtualAddress GetInterruptDistributorAddress() {
return GetPhysicalMemoryBlockTree().FindFirstDerivedBlock(KMemoryRegionType_InterruptDistributor)->GetPairAddress();
}

static NOINLINE KVirtualAddress GetInterruptCpuInterfaceAddress() {
return GetPhysicalMemoryBlockTree().FindFirstDerivedBlock(KMemoryRegionType_InterruptCpuInterface)->GetPairAddress();
}

static void InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start);
};
@@ -127,12 +127,7 @@ namespace ams::kern {
this->ScheduleImpl();
}

ALWAYS_INLINE void RescheduleOtherCores(u64 cores_needing_scheduling) {
if (const u64 core_mask = cores_needing_scheduling & ~(1ul << this->core_id); core_mask != 0) {
cpu::DataSynchronizationBarrier();
/* TODO: Send scheduler interrupt. */
}
}
void RescheduleOtherCores(u64 cores_needing_scheduling);

ALWAYS_INLINE void RescheduleCurrentCore() {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
@@ -15,6 +15,7 @@
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_interrupt_name.hpp>

#if defined(ATMOSPHERE_ARCH_ARM64)
@@ -15,6 +15,7 @@
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_interrupt_name.hpp>

#if defined(ATMOSPHERE_ARCH_ARM64)
@@ -0,0 +1,30 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>

#if defined(ATMOSPHERE_ARCH_ARM64)

#include <mesosphere/arch/arm64/kern_k_interrupt_name.hpp>
namespace ams::kern {
using namespace ams::kern::arm64::interrupt_name;
}

#else

#error "Unknown architecture for KInterruptName"

#endif
@@ -32,8 +32,16 @@ namespace ams::kern::arm64 {
}
};

}

namespace {

/* One global hardware timer interrupt task per core. */
KHardwareTimerInterruptTask g_hardware_timer_interrupt_tasks[cpu::NumCores];
impl::KHardwareTimerInterruptTask g_hardware_timer_interrupt_tasks[cpu::NumCores];

ALWAYS_INLINE auto *GetHardwareTimerInterruptTask(s32 core_id) {
return std::addressof(g_hardware_timer_interrupt_tasks[core_id]);
}

}

@@ -41,7 +49,8 @@ namespace ams::kern::arm64 {
/* Setup the global timer for the core. */
InitializeGlobalTimer();

/* TODO: Bind the interrupt task for this core to the interrupt manager. */
/* Bind the interrupt task for this core. */
Kernel::GetInterruptManager().BindHandler(GetHardwareTimerInterruptTask(core_id), KInterruptName_HardwareTimerEl1, core_id, KInterruptController::PriorityLevel_Timer, true, true);
}

void KHardwareTimer::Finalize() {

@@ -63,7 +72,9 @@ namespace ams::kern::arm64 {
EnableInterrupt();
}
}
/* TODO: Clear the timer interrupt. */

/* Clear the timer interrupt. */
Kernel::GetInterruptManager().ClearInterrupt(KInterruptName_HardwareTimerEl1, GetCurrentCoreId());
}

}
@@ -0,0 +1,87 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>

namespace ams::kern::arm64 {

void KInterruptController::SetupInterruptLines(s32 core_id) const {
const size_t ITLines = (core_id == 0) ? 32 * ((this->gicd->typer & 0x1F) + 1) : NumLocalInterrupts;

for (size_t i = 0; i < ITLines / 32; i++) {
this->gicd->icenabler[i] = 0xFFFFFFFF;
this->gicd->icpendr[i] = 0xFFFFFFFF;
this->gicd->icactiver[i] = 0xFFFFFFFF;
this->gicd->igroupr[i] = 0;
}

for (size_t i = 0; i < ITLines; i++) {
this->gicd->ipriorityr.bytes[i] = 0xFF;
this->gicd->itargetsr.bytes[i] = 0x00;
}

for (size_t i = 0; i < ITLines / 16; i++) {
this->gicd->icfgr[i] = 0x00000000;
}
}

void KInterruptController::Initialize(s32 core_id) {
/* Setup pointers to ARM mmio. */
this->gicd = GetPointer<volatile GicDistributor>(KMemoryLayout::GetInterruptDistributorAddress());
this->gicc = GetPointer<volatile GicCpuInterface>(KMemoryLayout::GetInterruptCpuInterfaceAddress());

/* Clear CTLRs. */
this->gicc->ctlr = 0;
if (core_id == 0) {
this->gicd->ctlr = 0;
}

this->gicc->pmr = 0;
this->gicc->bpr = 7;

/* Setup all interrupt lines. */
SetupInterruptLines(core_id);

/* Set CTLRs. */
if (core_id == 0) {
this->gicd->ctlr = 1;
}
this->gicc->ctlr = 1;

/* Set the mask for this core. */
SetGicMask(core_id);

/* Set the priority level. */
SetPriorityLevel(PriorityLevel_Low);
}

void KInterruptController::Finalize(s32 core_id) {
/* Clear CTLRs. */
if (core_id == 0) {
this->gicd->ctlr = 0;
}
this->gicc->ctlr = 0;

/* Set the priority level. */
SetPriorityLevel(PriorityLevel_High);

/* Setup all interrupt lines. */
SetupInterruptLines(core_id);

this->gicd = nullptr;
this->gicc = nullptr;
}

}
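In SetupInterruptLines above, the line count is taken from the distributor's TYPER register: its low five bits hold ITLinesNumber, and the GIC architecture defines the number of implemented interrupt IDs as 32 * (ITLinesNumber + 1). Core 0 therefore resets every implemented line, while the other cores only touch their 32 banked local lines. A small worked example of that computation and the resulting iteration granularities (the TYPER value is made up for illustration):

#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical GICD_TYPER value; only bits [4:0] (ITLinesNumber) matter here.
    const uint32_t typer    = 0x0000FC1E;                  // ITLinesNumber == 0x1E
    const unsigned it_lines = 32 * ((typer & 0x1F) + 1);   // 32 * 31 == 992 interrupt IDs
    std::printf("implemented interrupt lines: %u\n", it_lines);

    // The register arrays are then walked at different granularities:
    //   ICENABLER/ICPENDR/ICACTIVER/IGROUPR: 1 bit per line  -> it_lines / 32 words
    //   IPRIORITYR/ITARGETSR:                1 byte per line -> it_lines bytes
    //   ICFGR:                               2 bits per line -> it_lines / 16 words
    return 0;
}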
@@ -18,11 +18,168 @@
namespace ams::kern::arm64 {

void KInterruptManager::Initialize(s32 core_id) {
/* TODO */
this->interrupt_controller.Initialize(core_id);
}

void KInterruptManager::Finalize(s32 core_id) {
/* TODO */
this->interrupt_controller.Finalize(core_id);
}

Result KInterruptManager::BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) {
R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());

KScopedInterruptDisable di;

if (KInterruptController::IsGlobal(irq)) {
KScopedSpinLock lk(GetLock());
return this->BindGlobal(handler, irq, core_id, priority, manual_clear, level);
} else {
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
return this->BindLocal(handler, irq, priority, manual_clear);
}
}

Result KInterruptManager::UnbindHandler(s32 irq, s32 core_id) {
R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());

KScopedInterruptDisable di;

if (KInterruptController::IsGlobal(irq)) {
KScopedSpinLock lk(GetLock());
return this->UnbindGlobal(irq);
} else {
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
return this->UnbindLocal(irq);
}
}

Result KInterruptManager::ClearInterrupt(s32 irq) {
R_UNLESS(KInterruptController::IsGlobal(irq), svc::ResultOutOfRange());

KScopedInterruptDisable di;
KScopedSpinLock lk(GetLock());
return this->ClearGlobal(irq);
}

Result KInterruptManager::ClearInterrupt(s32 irq, s32 core_id) {
R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());

KScopedInterruptDisable di;

if (KInterruptController::IsGlobal(irq)) {
KScopedSpinLock lk(GetLock());
return this->ClearGlobal(irq);
} else {
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
return this->ClearLocal(irq);
}
}

Result KInterruptManager::BindGlobal(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) {
/* Ensure the priority level is valid. */
R_UNLESS(KInterruptController::PriorityLevel_High <= priority, svc::ResultOutOfRange());
R_UNLESS(priority <= KInterruptController::PriorityLevel_Low, svc::ResultOutOfRange());

/* Ensure we aren't already bound. */
auto &entry = GetGlobalInterruptEntry(irq);
R_UNLESS(entry.handler == nullptr, svc::ResultBusy());

/* Set entry fields. */
entry.needs_clear = false;
entry.manually_cleared = manual_clear;
entry.handler = handler;

/* Configure the interrupt as level or edge. */
if (level) {
this->interrupt_controller.SetLevel(irq);
} else {
this->interrupt_controller.SetEdge(irq);
}

/* Configure the interrupt. */
this->interrupt_controller.Clear(irq);
this->interrupt_controller.SetTarget(irq, core_id);
this->interrupt_controller.SetPriorityLevel(irq, priority);
this->interrupt_controller.Enable(irq);

return ResultSuccess();
}

Result KInterruptManager::BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear) {
/* Ensure the priority level is valid. */
R_UNLESS(KInterruptController::PriorityLevel_High <= priority, svc::ResultOutOfRange());
R_UNLESS(priority <= KInterruptController::PriorityLevel_Low, svc::ResultOutOfRange());

/* Ensure we aren't already bound. */
auto &entry = this->GetLocalInterruptEntry(irq);
R_UNLESS(entry.handler == nullptr, svc::ResultBusy());

/* Set entry fields. */
entry.needs_clear = false;
entry.manually_cleared = manual_clear;
entry.handler = handler;
entry.priority = static_cast<u8>(priority);

/* Configure the interrupt. */
this->interrupt_controller.Clear(irq);
this->interrupt_controller.SetPriorityLevel(irq, priority);
this->interrupt_controller.Enable(irq);

return ResultSuccess();
}

Result KInterruptManager::UnbindGlobal(s32 irq) {
for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
this->interrupt_controller.ClearTarget(irq, static_cast<s32>(core_id));
}
this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
this->interrupt_controller.Disable(irq);

GetGlobalInterruptEntry(irq).handler = nullptr;

return ResultSuccess();
}

Result KInterruptManager::UnbindLocal(s32 irq) {
auto &entry = this->GetLocalInterruptEntry(irq);
R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());

this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
this->interrupt_controller.Disable(irq);

entry.handler = nullptr;

return ResultSuccess();
}

Result KInterruptManager::ClearGlobal(s32 irq) {
/* We can't clear an entry with no handler. */
auto &entry = GetGlobalInterruptEntry(irq);
R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());

/* If auto-cleared, we can succeed immediately. */
R_UNLESS(entry.manually_cleared, ResultSuccess());
R_UNLESS(entry.needs_clear, ResultSuccess());

/* Clear and enable. */
entry.needs_clear = false;
this->interrupt_controller.Enable(irq);
return ResultSuccess();
}

Result KInterruptManager::ClearLocal(s32 irq) {
/* We can't clear an entry with no handler. */
auto &entry = this->GetLocalInterruptEntry(irq);
R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());

/* If auto-cleared, we can succeed immediately. */
R_UNLESS(entry.manually_cleared, ResultSuccess());
R_UNLESS(entry.needs_clear, ResultSuccess());

/* Clear and set priority. */
entry.needs_clear = false;
this->interrupt_controller.SetPriorityLevel(irq, entry.priority);
return ResultSuccess();
}

}
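With this file in place the manager's public surface is usable end to end: callers register a KInterruptHandler with BindHandler(handler, irq, core_id, priority, manual_clear, level), exactly as the hardware timer and scheduler changes elsewhere in this commit do. A hedged sketch of what such a caller could look like; the handler class, interrupt id, and queuing behaviour described in the comments are assumptions for illustration, not something this commit defines:

/* Sketch only: assumes the mesosphere environment and the types shown elsewhere in this diff. */
namespace ams::kern {

    class ExampleHandler : public KInterruptTask {   /* hypothetical handler */
        public:
            constexpr ExampleHandler() : KInterruptTask() { /* ... */ }

            virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
                /* Presumably queued for deferred execution by KInterruptTaskManager once
                   interrupt dispatch lands; the scheduler task in this commit instead
                   returns GetDummyInterruptTask() and never runs its DoTask(). */
                return this;
            }

            virtual void DoTask() override {
                /* Deferred work for the bound interrupt would go here. */
            }
    };

    ExampleHandler g_example_handler;

    void BindExample(s32 core_id) {
        /* irq 200 is a made-up global (SPI) interrupt id; level-triggered, auto-cleared. */
        Kernel::GetInterruptManager().BindHandler(std::addressof(g_example_handler), 200, core_id,
                                                  KInterruptController::PriorityLevel_Low,
                                                  false /* manual_clear */, true /* level */);
    }

}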
@@ -35,8 +35,8 @@ namespace ams::kern {
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7000E400, 0xC00, KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50041000, 0x1000, KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap | KMemoryRegionAttr_NoUserMap));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50042000, 0x1000, KMemoryRegionType_InterruptController | KMemoryRegionAttr_ShouldKernelMap | KMemoryRegionAttr_NoUserMap));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50041000, 0x1000, KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50042000, 0x1000, KMemoryRegionType_InterruptCpuInterface | KMemoryRegionAttr_ShouldKernelMap));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
@@ -19,12 +19,31 @@ namespace ams::kern {

namespace {

class KSchedulerInterruptTask : public KInterruptTask {
public:
constexpr KSchedulerInterruptTask() : KInterruptTask() { /* ... */ }

virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
return GetDummyInterruptTask();
}

virtual void DoTask() override {
MESOSPHERE_PANIC("KSchedulerInterruptTask::DoTask was called!");
}
};

ALWAYS_INLINE void IncrementScheduledCount(KThread *thread) {
if (KProcess *parent = thread->GetOwnerProcess(); parent != nullptr) {
/* TODO: parent->IncrementScheduledCount(); */
}
}

KSchedulerInterruptTask g_scheduler_interrupt_task;

ALWAYS_INLINE auto *GetSchedulerInterruptTask() {
return std::addressof(g_scheduler_interrupt_task);
}

}

void KScheduler::Initialize(KThread *idle_thread) {

@@ -40,7 +59,8 @@ namespace ams::kern {
SetSchedulerUpdateNeeded();
}

/* TODO: Bind interrupt handler. */
/* Bind interrupt handler. */
Kernel::GetInterruptManager().BindHandler(GetSchedulerInterruptTask(), KInterruptName_Scheduler, this->core_id, KInterruptController::PriorityLevel_Scheduler, false, false);
}

void KScheduler::Activate() {

@@ -51,6 +71,13 @@ namespace ams::kern {
RescheduleCurrentCore();
}

void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
if (const u64 core_mask = cores_needing_scheduling & ~(1ul << this->core_id); core_mask != 0) {
cpu::DataSynchronizationBarrier();
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_Scheduler, core_mask);
}
}

u64 KScheduler::UpdateHighestPriorityThread(KThread *highest_thread) {
if (KThread *prev_highest_thread = this->state.highest_priority_thread; AMS_LIKELY(prev_highest_thread != highest_thread)) {
if (AMS_LIKELY(prev_highest_thread != nullptr)) {

@@ -171,7 +198,7 @@ namespace ams::kern {
void KScheduler::SetInterruptTaskThreadRunnable() {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);

KThread *task_thread = nullptr /* TODO: GetInterruptTaskManager().GetThread() */;
KThread *task_thread = Kernel::GetInterruptTaskManager().GetThread();
{
KScopedSchedulerLock sl;
if (AMS_LIKELY(task_thread->GetThreadState() == KThread::ThreadState_Waiting)) {
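RescheduleOtherCores now actually raises the scheduler SGI: the current core is stripped from the mask before the IPI is sent, so a core never interrupts itself, and the DataSynchronizationBarrier beforehand makes the scheduling-state writes visible before the other cores take the interrupt. A tiny worked example of the mask arithmetic (standalone illustration, not kernel code):

#include <cstdint>
#include <cstdio>

int main() {
    // Suppose cores 0, 1 and 3 need scheduling and we are running on core 1.
    const uint64_t cores_needing_scheduling = 0b1011;
    const int      core_id                  = 1;

    // Same expression as KScheduler::RescheduleOtherCores.
    const uint64_t core_mask = cores_needing_scheduling & ~(1ul << core_id);

    // Prints 0x9: the SGI targets cores 0 and 3, but not core 1 itself.
    std::printf("IPI target mask: 0x%llx\n", static_cast<unsigned long long>(core_mask));
    return 0;
}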
@@ -202,6 +202,18 @@ namespace ams::kern {
/* TODO */
}

void KThread::SetState(ThreadState state) {
MESOSPHERE_ASSERT_THIS();

KScopedSchedulerLock sl;

const ThreadState old_state = this->thread_state;
this->thread_state = static_cast<ThreadState>((old_state & ~ThreadState_Mask) | (state & ThreadState_Mask));
if (this->thread_state != old_state) {
KScheduler::OnThreadStateChanged(this, old_state);
}
}

KThreadContext *KThread::GetContextForSchedulerLoop() {
return std::addressof(this->thread_context);
}