thermosphere: allow each core to pause itself in a lock-free manner & fix bugs

TuxSH 2020-01-22 20:26:14 +00:00
parent 9ebf3c9580
commit 744491ca33
2 changed files with 13 additions and 10 deletions

@@ -22,8 +22,6 @@
#include "spinlock.h"
#include "single_step.h"
// Reminder: use these functions behind a lock
static Barrier g_debugPauseBarrier;
static atomic_uint g_debugPausePausedCoreList;
static atomic_uint g_debugPauseSingleStepCoreList;
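
For orientation: both atomic_uint variables above are core bitmasks, with bit N standing for core N (the diff tests them with BIT(currentCoreCtx->coreId)). A minimal stand-alone model of that convention, assuming only C11 <stdatomic.h> and a BIT macro like the one thermosphere uses; the names here are stand-ins, not the actual code:

#include <stdatomic.h>
#include <stdio.h>

#define BIT(n) (1u << (n))            /* stand-in for thermosphere's BIT macro */

static atomic_uint g_pausedCoreList;  /* models g_debugPausePausedCoreList */

int main(void)
{
    /* Bit N set == core N is paused; same convention for the single-step list. */
    atomic_store(&g_pausedCoreList, BIT(1) | BIT(3));
    unsigned int thisCore = 3;
    int paused = (atomic_load(&g_pausedCoreList) & BIT(thisCore)) != 0;
    printf("core %u paused: %d\n", thisCore, paused);   /* prints "core 3 paused: 1" */
    return 0;
}
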
@@ -48,7 +46,7 @@ void debugPauseWaitAndUpdateSingleStep(void)
currentCoreCtx->wasPaused = false;
// Single-step: if inactive and requested, start single step; cancel if active and not requested
u32 ssReqd = (atomic_load(&g_debugPauseSingleStepCoreList) & ~BIT(currentCoreCtx->coreId)) != 0;
u32 ssReqd = (atomic_load(&g_debugPauseSingleStepCoreList) & BIT(currentCoreCtx->coreId)) != 0;
SingleStepState singleStepState = singleStepGetNextState(currentCoreCtx->guestFrame);
if (ssReqd && singleStepState == SingleStepState_Inactive) {
singleStepSetNextState(currentCoreCtx->guestFrame, SingleStepState_ActiveNotPending);
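
The one-character change above is the single-step bug fix: the old expression masked with ~BIT(currentCoreCtx->coreId), so it reported whether any other core was in the single-step list, while the new one tests the current core's own bit, which is what the surrounding comment describes. A throwaway check of the two expressions, assuming only a BIT macro like the diff's:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1u << (n))   /* stand-in for thermosphere's BIT macro */

int main(void)
{
    uint32_t ssList = BIT(2);   /* only core 2 is requested to single-step */
    uint32_t coreId = 2;

    /* Old expression: masks out this core's own bit, so core 2 never sees its request. */
    int oldSsReqd = (ssList & ~BIT(coreId)) != 0;
    /* Fixed expression: tests this core's own bit. */
    int newSsReqd = (ssList & BIT(coreId)) != 0;

    assert(oldSsReqd == 0 && newSsReqd == 1);
    return 0;
}
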
@@ -61,18 +59,23 @@ void debugPauseCores(u32 coreList)
{
maskIrq();
// Since we're using a debugger lock, a simple stlr should be fine...
atomic_store(&g_debugPausePausedCoreList, coreList);
u32 desiredList = coreList;
u32 remainingList = coreList;
u32 readList = atomic_load(&g_debugPausePausedCoreList);
do {
desiredList |= readList;
remainingList &= ~readList;
} while (!atomic_compare_exchange_weak(&g_debugPausePausedCoreList, &readList, desiredList));
if (coreList != BIT(currentCoreCtx->coreId)) {
if (remainingList != BIT(currentCoreCtx->coreId)) {
// We need to notify other cores...
u32 otherCores = coreList & ~BIT(currentCoreCtx->coreId);
u32 otherCores = remainingList & ~BIT(currentCoreCtx->coreId);
barrierInit(&g_debugPauseBarrier, otherCores | BIT(currentCoreCtx->coreId));
generateSgiForList(ThermosphereSgi_DebugPause, otherCores);
barrierWait(&g_debugPauseBarrier);
}
if (coreList & BIT(currentCoreCtx->coreId)) {
if (remainingList & BIT(currentCoreCtx->coreId)) {
currentCoreCtx->wasPaused = true;
}
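
This is the lock-free part of the commit: instead of overwriting the paused-core list with atomic_store, the loop merges the requested cores into it with a compare-and-exchange that retries until it succeeds, and remainingList ends up holding only the cores that were not already paused, so only those are sent the SGI and included in the barrier (a core adding just itself skips the notification entirely). A self-contained sketch of that accumulate pattern, using plain C11 atomics; the names mirror the diff but this is a model, not the thermosphere code:

#include <stdatomic.h>
#include <stdio.h>

#define BIT(n) (1u << (n))              /* stand-in for thermosphere's BIT macro */

static atomic_uint g_pausedCoreList;    /* models g_debugPausePausedCoreList */

/* Atomically OR coreList into the paused set and return the cores that were
 * not already paused, i.e. the ones that still need an SGI + barrier. */
static unsigned int pauseAddCores(unsigned int coreList)
{
    unsigned int readList = atomic_load(&g_pausedCoreList);
    unsigned int desiredList, remainingList;
    do {
        desiredList   = coreList | readList;    /* value the shared set should become */
        remainingList = coreList & ~readList;   /* cores this call actually adds      */
    } while (!atomic_compare_exchange_weak(&g_pausedCoreList, &readList, desiredList));
    return remainingList;
}

int main(void)
{
    atomic_store(&g_pausedCoreList, BIT(0));              /* core 0 already paused    */
    unsigned int toNotify = pauseAddCores(BIT(0) | BIT(2));
    printf("cores to notify: 0x%x\n", toNotify);          /* prints 0x4: only core 2  */
    return 0;
}
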
@@ -85,7 +88,7 @@ void debugUnpauseCores(u32 coreList, u32 singleStepList)
// Since we're using a debugger lock, a simple stlr should be fine...
atomic_store(&g_debugPauseSingleStepCoreList, singleStepList);
atomic_store(&g_debugPausePausedCoreList, 0);
atomic_fetch_and(&g_debugPausePausedCoreList, ~coreList);
__sev();
}
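
The unpause side changes in the same spirit: rather than wiping the whole list with atomic_store(..., 0), atomic_fetch_and clears only the bits named in coreList, so a core that was paused independently in the meantime stays paused. A small illustration on a stand-in variable, again just C11 atomics:

#include <stdatomic.h>
#include <assert.h>

#define BIT(n) (1u << (n))              /* stand-in for thermosphere's BIT macro */

static atomic_uint g_pausedCoreList;    /* models g_debugPausePausedCoreList */

int main(void)
{
    atomic_store(&g_pausedCoreList, BIT(1) | BIT(3));   /* cores 1 and 3 are paused */

    unsigned int unpauseList = BIT(1);                  /* unpause only core 1      */
    atomic_fetch_and(&g_pausedCoreList, ~unpauseList);  /* clears just core 1's bit */

    assert(atomic_load(&g_pausedCoreList) == BIT(3));   /* core 3 stays paused      */
    return 0;
}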

@@ -23,7 +23,7 @@ void debugPauseSgiHandler(void);
// Hypervisor interrupts will be serviced during the pause-wait
void debugPauseWaitAndUpdateSingleStep(void);
// Note: these functions are not reentrant! (need a global debug lock...)
// Note: these functions are not reentrant EXCEPT debugPauseCores(1 << currentCoreId)
// "Pause" makes sure all cores reaches the pause function before proceeding.
// "Unpause" doesn't synchronize, it just makes sure the core resumes & that "pause" can be called again.
void debugPauseCores(u32 coreList);
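
Per the updated note above, these functions are not reentrant except for the self-pause call debugPauseCores(1 << currentCoreId), which the CAS-based .c changes make safe without the global debug lock. A hedged sketch of how call sites might read under that contract; the prototypes are the ones declared in this header, while the example functions and their parameters are hypothetical scaffolding (the fragment compiles on its own but of course only links inside thermosphere):

#include <stdint.h>

#define BIT(n) (1u << (n))   /* stand-in for thermosphere's BIT macro */

typedef uint32_t u32;        /* stand-in for thermosphere's u32 typedef */

/* Prototypes as declared in this header. */
void debugPauseCores(u32 coreList);
void debugUnpauseCores(u32 coreList, u32 singleStepList);

/* Scenario 1: a core reports a debug event and pauses only itself.
 * Per the note above, this specific call is the reentrant-safe exception. */
void examplePauseSelf(u32 currentCoreId)
{
    debugPauseCores(BIT(currentCoreId));
}

/* Scenario 2: the debugger stops and later resumes a set of cores.
 * These calls are not reentrant and belong behind the debugger lock. */
void exampleStopAndResume(u32 coresMask)
{
    debugPauseCores(coresMask);
    /* ... inspect guest state here ... */
    debugUnpauseCores(coresMask, 0 /* no single-stepping on resume */);
}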