/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <mesosphere.hpp>
#include <mesosphere.hpp>
|
|
|
|
|
2020-02-15 02:22:55 +00:00
|
|
|
namespace ams::kern::arch::arm64 {
|
2020-02-05 21:02:35 +00:00
|
|
|
|
|
|
|
void KInterruptManager::Initialize(s32 core_id) {
    /* Bring up the interrupt controller for the given core. */
    /* NOTE(review): presumably this initializes both the per-core interface and, */
    /* on the first core, the global distributor — confirm in KInterruptController. */
    m_interrupt_controller.Initialize(core_id);
}
|
|
|
|
|
|
|
|
void KInterruptManager::Finalize(s32 core_id) {
    /* Tear down the interrupt controller state for the given core. */
    m_interrupt_controller.Finalize(core_id);
}
|
|
|
|
|
2020-07-24 10:29:12 +00:00
|
|
|
void KInterruptManager::Save(s32 core_id) {
    /* Save interrupt controller state in preparation for system sleep. */
    /* Every core executes this function; the SynchronizeAllCores() barriers */
    /* enforce a strict phase ordering: global save (core 0) -> per-core local */
    /* save (all cores) -> finalize non-zero cores -> finalize core 0. */

    /* Verify core id. */
    MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());

    /* Ensure all cores get to this point before continuing. */
    cpu::SynchronizeAllCores();

    /* If on core 0, save the global interrupts. */
    if (core_id == 0) {
        /* A double-save indicates a broken sleep sequence; abort rather than corrupt state. */
        MESOSPHERE_ABORT_UNLESS(!m_global_state_saved);
        m_interrupt_controller.SaveGlobal(std::addressof(m_global_state));
        m_global_state_saved = true;
    }

    /* Ensure all cores get to this point before continuing. */
    cpu::SynchronizeAllCores();

    /* Save all local interrupts. */
    MESOSPHERE_ABORT_UNLESS(!m_local_state_saved[core_id]);
    m_interrupt_controller.SaveCoreLocal(std::addressof(m_local_states[core_id]));
    m_local_state_saved[core_id] = true;

    /* Ensure all cores get to this point before continuing. */
    cpu::SynchronizeAllCores();

    /* Finalize all cores other than core 0. */
    /* NOTE(review): core 0 is finalized last, after another barrier — presumably */
    /* because its finalize tears down state shared by the other cores; confirm. */
    if (core_id != 0) {
        this->Finalize(core_id);
    }

    /* Ensure all cores get to this point before continuing. */
    cpu::SynchronizeAllCores();

    /* Finalize core 0. */
    if (core_id == 0) {
        this->Finalize(core_id);
    }
}
|
|
|
|
|
|
|
|
void KInterruptManager::Restore(s32 core_id) {
    /* Restore interrupt controller state after system sleep. */
    /* This is the mirror of Save(): initialize core 0 first, then the other */
    /* cores, then restore per-core local state, then global state on core 0. */
    /* The SynchronizeAllCores() barriers keep every core in the same phase. */

    /* Verify core id. */
    MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());

    /* Ensure all cores get to this point before continuing. */
    cpu::SynchronizeAllCores();

    /* Initialize core 0. */
    if (core_id == 0) {
        this->Initialize(core_id);
    }

    /* Ensure all cores get to this point before continuing. */
    cpu::SynchronizeAllCores();

    /* Initialize all cores other than core 0. */
    if (core_id != 0) {
        this->Initialize(core_id);
    }

    /* Ensure all cores get to this point before continuing. */
    cpu::SynchronizeAllCores();

    /* Restore all local interrupts. */
    /* An un-saved state here means Save() was never run on this core. */
    MESOSPHERE_ASSERT(m_local_state_saved[core_id]);
    m_interrupt_controller.RestoreCoreLocal(std::addressof(m_local_states[core_id]));
    m_local_state_saved[core_id] = false;

    /* Ensure all cores get to this point before continuing. */
    cpu::SynchronizeAllCores();

    /* If on core 0, restore the global interrupts. */
    if (core_id == 0) {
        MESOSPHERE_ASSERT(m_global_state_saved);
        m_interrupt_controller.RestoreGlobal(std::addressof(m_global_state));
        m_global_state_saved = false;
    }

    /* Ensure all cores get to this point before continuing. */
    cpu::SynchronizeAllCores();
}
|
|
|
|
|
2020-02-08 10:49:32 +00:00
|
|
|
bool KInterruptManager::OnHandleInterrupt() {
    /* Acknowledge and dispatch the pending interrupt. */
    /* Returns true if a handler produced a task to run (rescheduling needed). */

    /* Get the interrupt id. */
    const u32 raw_irq = m_interrupt_controller.GetIrq();
    const s32 irq = KInterruptController::ConvertRawIrq(raw_irq);

    /* Trace the interrupt. */
    MESOSPHERE_KTRACE_INTERRUPT(irq);

    /* If the IRQ is spurious, we don't need to reschedule. */
    /* NOTE(review): no EndOfInterrupt is issued on this path — presumably a */
    /* spurious acknowledge requires no EOI; confirm against the GIC spec. */
    if (irq < 0) {
        return false;
    }

    KInterruptTask *task = nullptr;
    if (KInterruptController::IsLocal(irq)) {
        /* Get local interrupt entry. */
        /* Local entries are per-core, so no lock is required here. */
        auto &entry = GetLocalInterruptEntry(irq);
        if (entry.handler != nullptr) {
            /* Set manual clear needed if relevant. */
            /* For manually-cleared local irqs, deprioritize rather than disable, */
            /* deferring re-delivery until ClearLocal restores the bound priority. */
            if (entry.manually_cleared) {
                m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
                entry.needs_clear = true;
            }

            /* Set the handler. */
            task = entry.handler->OnInterrupt(irq);
        } else {
            MESOSPHERE_LOG("Core%d: Unhandled local interrupt %d\n", GetCurrentCoreId(), irq);
        }
    } else if (KInterruptController::IsGlobal(irq)) {
        /* Global entries are shared between cores, so take the global lock. */
        KScopedSpinLock lk(this->GetGlobalInterruptLock());

        /* Get global interrupt entry. */
        auto &entry = GetGlobalInterruptEntry(irq);
        if (entry.handler != nullptr) {
            /* Set manual clear needed if relevant. */
            /* Manually-cleared global irqs are disabled until ClearGlobal re-enables them. */
            if (entry.manually_cleared) {
                m_interrupt_controller.Disable(irq);
                entry.needs_clear = true;
            }

            /* Set the handler. */
            task = entry.handler->OnInterrupt(irq);
        } else {
            MESOSPHERE_LOG("Core%d: Unhandled global interrupt %d\n", GetCurrentCoreId(), irq);
        }
    } else {
        MESOSPHERE_LOG("Invalid interrupt %d\n", irq);
    }

    /* Acknowledge the interrupt. */
    m_interrupt_controller.EndOfInterrupt(raw_irq);

    /* If we found no task, then we don't need to reschedule. */
    if (task == nullptr) {
        return false;
    }

    /* If the task isn't the dummy task, we should add it to the queue. */
    /* (The dummy task signals "reschedule, but nothing to enqueue".) */
    if (task != GetDummyInterruptTask()) {
        Kernel::GetInterruptTaskManager().EnqueueTask(task);
    }

    return true;
}
|
|
|
|
|
|
|
|
void KInterruptManager::HandleInterrupt(bool user_mode) {
    /* Top-level interrupt entry point; user_mode indicates whether the */
    /* interrupted context was executing in EL0. */

    /* On interrupt, call OnHandleInterrupt() to determine if we need rescheduling and handle. */
    const bool needs_scheduling = Kernel::GetInterruptManager().OnHandleInterrupt();

    /* If we need scheduling, */
    if (needs_scheduling) {
        /* If the user disable count is set, we may need to pin the current thread. */
        if (user_mode && GetCurrentThread().GetUserDisableCount() != 0 && GetCurrentProcess().GetPinnedThread(GetCurrentCoreId()) == nullptr) {
            KScopedSchedulerLock sl;

            /* Pin the current thread. */
            GetCurrentProcess().PinCurrentThread();

            /* Set the interrupt flag for the thread. */
            GetCurrentThread().SetInterruptFlag();

            /* Request interrupt scheduling. */
            /* NOTE: This intentionally duplicates the else-branch call — here the */
            /* request is made while the scheduler lock is still held, so that it */
            /* is observed atomically with the pin; do not hoist out of the if/else. */
            Kernel::GetScheduler().RequestScheduleOnInterrupt();
        } else {
            /* Request interrupt scheduling. */
            Kernel::GetScheduler().RequestScheduleOnInterrupt();
        }
    }

    /* If user mode, check if the thread needs termination. */
    /* If it does, we can take advantage of this to terminate it. */
    if (user_mode) {
        KThread *cur_thread = GetCurrentThreadPointer();
        if (cur_thread->IsTerminationRequested()) {
            /* Re-enable interrupts for the duration of the exit. */
            KScopedInterruptEnable ei;
            cur_thread->Exit();
        }
    }
}
|
|
|
|
|
2020-02-05 22:07:51 +00:00
|
|
|
Result KInterruptManager::BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) {
    MESOSPHERE_UNUSED(core_id);

    /* The irq must name either a global or a core-local interrupt. */
    const bool is_global = KInterruptController::IsGlobal(irq);
    R_UNLESS(is_global || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());

    /* Binding is performed with interrupts masked on this core. */
    KScopedInterruptDisable di;

    /* Global entries are shared across cores and require the global lock; */
    /* local entries may only be touched by the owning core. */
    if (is_global) {
        KScopedSpinLock lk(this->GetGlobalInterruptLock());
        return this->BindGlobal(handler, irq, core_id, priority, manual_clear, level);
    }

    MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
    return this->BindLocal(handler, irq, priority, manual_clear);
}
|
|
|
|
|
|
|
|
Result KInterruptManager::UnbindHandler(s32 irq, s32 core_id) {
    MESOSPHERE_UNUSED(core_id);

    /* The irq must name either a global or a core-local interrupt. */
    const bool is_global = KInterruptController::IsGlobal(irq);
    R_UNLESS(is_global || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());

    /* Unbinding is performed with interrupts masked on this core. */
    KScopedInterruptDisable di;

    /* Dispatch to the global (lock-protected) or local (core-owned) path. */
    if (is_global) {
        KScopedSpinLock lk(this->GetGlobalInterruptLock());
        return this->UnbindGlobal(irq);
    }

    MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
    return this->UnbindLocal(irq);
}
|
|
|
|
|
|
|
|
Result KInterruptManager::ClearInterrupt(s32 irq, s32 core_id) {
    MESOSPHERE_UNUSED(core_id);

    /* The irq must name either a global or a core-local interrupt. */
    const bool is_global = KInterruptController::IsGlobal(irq);
    R_UNLESS(is_global || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());

    /* Clearing is performed with interrupts masked on this core. */
    KScopedInterruptDisable di;

    /* Dispatch to the global (lock-protected) or local (core-owned) path. */
    if (is_global) {
        KScopedSpinLock lk(this->GetGlobalInterruptLock());
        return this->ClearGlobal(irq);
    }

    MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
    return this->ClearLocal(irq);
}
|
|
|
|
|
|
|
|
Result KInterruptManager::BindGlobal(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) {
    /* The requested priority must lie within [High, Low]. */
    R_UNLESS(KInterruptController::PriorityLevel_High <= priority && priority <= KInterruptController::PriorityLevel_Low, svc::ResultOutOfRange());

    /* The irq must not already have a handler bound. */
    auto &entry = GetGlobalInterruptEntry(irq);
    R_UNLESS(entry.handler == nullptr, svc::ResultBusy());

    /* Record the binding in the entry. */
    entry.needs_clear      = false;
    entry.manually_cleared = manual_clear;
    entry.handler          = handler;

    /* Program the requested trigger mode. */
    if (level) {
        m_interrupt_controller.SetLevel(irq);
    } else {
        m_interrupt_controller.SetEdge(irq);
    }

    /* Clear pending state, route to the requested core, set priority, and enable. */
    m_interrupt_controller.Clear(irq);
    m_interrupt_controller.SetTarget(irq, core_id);
    m_interrupt_controller.SetPriorityLevel(irq, priority);
    m_interrupt_controller.Enable(irq);

    return ResultSuccess();
}
|
|
|
|
|
|
|
|
Result KInterruptManager::BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear) {
    /* The requested priority must lie within [High, Low]. */
    R_UNLESS(KInterruptController::PriorityLevel_High <= priority && priority <= KInterruptController::PriorityLevel_Low, svc::ResultOutOfRange());

    /* The irq must not already have a handler bound. */
    auto &entry = this->GetLocalInterruptEntry(irq);
    R_UNLESS(entry.handler == nullptr, svc::ResultBusy());

    /* Record the binding; the priority is remembered so that a manual clear */
    /* can restore it after the irq was deprioritized on delivery. */
    entry.needs_clear      = false;
    entry.manually_cleared = manual_clear;
    entry.handler          = handler;
    entry.priority         = static_cast<u8>(priority);

    /* Clear pending state, set priority, and enable delivery. */
    m_interrupt_controller.Clear(irq);
    m_interrupt_controller.SetPriorityLevel(irq, priority);
    m_interrupt_controller.Enable(irq);

    return ResultSuccess();
}
|
|
|
|
|
|
|
|
Result KInterruptManager::UnbindGlobal(s32 irq) {
    /* Stop routing the interrupt to every core. */
    for (s32 core = 0; core < static_cast<s32>(cpu::NumCores); ++core) {
        m_interrupt_controller.ClearTarget(irq, core);
    }

    /* Drop the priority to the lowest level and disable delivery. */
    m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
    m_interrupt_controller.Disable(irq);

    /* Forget the bound handler. */
    GetGlobalInterruptEntry(irq).handler = nullptr;

    return ResultSuccess();
}
|
|
|
|
|
|
|
|
Result KInterruptManager::UnbindLocal(s32 irq) {
    /* There must be a handler bound to unbind. */
    auto &entry = this->GetLocalInterruptEntry(irq);
    R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());

    /* Drop the priority to the lowest level and disable delivery. */
    m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
    m_interrupt_controller.Disable(irq);

    /* Forget the bound handler. */
    entry.handler = nullptr;

    return ResultSuccess();
}
|
|
|
|
|
|
|
|
Result KInterruptManager::ClearGlobal(s32 irq) {
    /* We can't clear an entry with no handler. */
    auto &entry = GetGlobalInterruptEntry(irq);
    R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());

    /* Work is only required for a manually-cleared irq with a pending clear; */
    /* auto-cleared (or already-clear) entries succeed immediately. */
    if (entry.manually_cleared && entry.needs_clear) {
        /* Mark the clear as performed and re-enable delivery, which was */
        /* disabled when the irq fired. */
        entry.needs_clear = false;
        m_interrupt_controller.Enable(irq);
    }

    return ResultSuccess();
}
|
|
|
|
|
|
|
|
Result KInterruptManager::ClearLocal(s32 irq) {
    /* We can't clear an entry with no handler. */
    auto &entry = this->GetLocalInterruptEntry(irq);
    R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());

    /* Work is only required for a manually-cleared irq with a pending clear; */
    /* auto-cleared (or already-clear) entries succeed immediately. */
    if (entry.manually_cleared && entry.needs_clear) {
        /* Mark the clear as performed and restore the bound priority, which */
        /* was lowered when the irq fired. */
        entry.needs_clear = false;
        m_interrupt_controller.SetPriorityLevel(irq, entry.priority);
    }

    return ResultSuccess();
}
|
|
|
|
|
|
|
|
}
|