From 217c1ad0548dbbaa22c1ee008438506c5ba455cd Mon Sep 17 00:00:00 2001
From: TuxSH <1922548+TuxSH@users.noreply.github.com>
Date: Mon, 20 Jan 2020 02:24:02 +0000
Subject: [PATCH] thermosphere: implement reading and writing guest memory
---
thermosphere/src/caches.h | 6 +
thermosphere/src/exception_vectors.s | 25 +-
thermosphere/src/execute_function.c | 4 +-
thermosphere/src/fpu_regs_load_store.s | 4 +-
thermosphere/src/guest_memory.c | 292 ++++++++++++++++++++++
thermosphere/src/guest_memory.h | 31 +++
thermosphere/src/initSystem.c | 2 +-
thermosphere/src/main.c | 9 +-
thermosphere/src/memory_map.c | 6 +-
thermosphere/src/memory_map.h | 1 +
thermosphere/src/mmu.h | 3 +-
thermosphere/src/platform/qemu/devices.c | 2 +-
thermosphere/src/platform/stage2.c | 6 +-
thermosphere/src/platform/tegra/devices.c | 2 +-
thermosphere/src/smc.c | 2 +
thermosphere/src/software_breakpoints.c | 8 +-
thermosphere/src/start.s | 7 +-
thermosphere/src/sysreg.h | 17 +-
thermosphere/src/utils.c | 41 ---
thermosphere/src/utils.h | 42 ++--
thermosphere/src/vgic.c | 71 +++---
thermosphere/src/vgic.h | 4 +
22 files changed, 467 insertions(+), 118 deletions(-)
create mode 100644 thermosphere/src/guest_memory.c
create mode 100644 thermosphere/src/guest_memory.h
diff --git a/thermosphere/src/caches.h b/thermosphere/src/caches.h
index 2590b354c..284ce1f59 100644
--- a/thermosphere/src/caches.h
+++ b/thermosphere/src/caches.h
@@ -19,6 +19,12 @@
#include "utils.h"
#include "sysreg.h"
+static inline u32 cacheGetInstructionCachePolicy(void)
+{
+ u32 ctr = (u32)GET_SYSREG(ctr_el0);
+ return (ctr >> 14) & 3;
+}
+
static inline u32 cacheGetSmallestInstructionCacheLineSize(void)
{
u32 ctr = (u32)GET_SYSREG(ctr_el0);
diff --git a/thermosphere/src/exception_vectors.s b/thermosphere/src/exception_vectors.s
index d595b1b71..b9eb2e39d 100644
--- a/thermosphere/src/exception_vectors.s
+++ b/thermosphere/src/exception_vectors.s
@@ -140,7 +140,7 @@ vector_entry _synchSp0
msr elr_el2, x18
// Note: non-broadcasting TLB maintenance op
tlbi alle2
- dsb ish
+ dsb nsh
isb
eret
check_vector_size _synchSp0
@@ -271,11 +271,30 @@ doSmcIndirectCallImplSize:
/* Current EL, SPx */
-EXCEPTION_HANDLER_START _synchSpx, EXCEPTION_TYPE_HOST_CRASH
+vector_entry _synchSpx
+ // Ignore crash if x18 is 0, when we're copying memory from the guest (w/ irq masked)
+ cbz x18, _synchSpxIgnoreCrash
+
+ PIVOT_STACK_FOR_CRASH
+ SAVE_MOST_REGISTERS
+
+ mov x0, sp
+ mov w1, #0
+
+ bl exceptionEntryPostprocess
+
mov x0, sp
mrs x1, esr_el2
bl handleSameElSyncException
-EXCEPTION_HANDLER_END _synchSpx, EXCEPTION_TYPE_HOST_CRASH
+
+ b .
+
+_synchSpxIgnoreCrash:
+ mrs x18, elr_el2
+ add x18, x18, #4
+ msr elr_el2, x18
+ eret
+check_vector_size _synchSpx
EXCEPTION_HANDLER_START _irqSpx, EXCEPTION_TYPE_HOST
mov x0, sp
diff --git a/thermosphere/src/execute_function.c b/thermosphere/src/execute_function.c
index dcf019ef5..d326c3548 100644
--- a/thermosphere/src/execute_function.c
+++ b/thermosphere/src/execute_function.c
@@ -25,7 +25,7 @@ void executeFunctionOnCores(ExecutedFunction fun, void *args, bool sync, u32 cor
currentCoreCtx->executedFunction = fun;
currentCoreCtx->executedFunctionArgs = args;
currentCoreCtx->executedFunctionSync = sync;
-
+ __compiler_barrier();
generateSgiForList(ThermosphereSgi_ExecuteFunction, coreList);
}
@@ -46,4 +46,4 @@ void executeFunctionInterruptHandler(u32 srcCore)
if (ctx->executedFunctionSync) {
barrierWait(&ctx->executedFunctionBarrier);
}
-}
\ No newline at end of file
+}
diff --git a/thermosphere/src/fpu_regs_load_store.s b/thermosphere/src/fpu_regs_load_store.s
index 52feb0868..0959c1cb9 100644
--- a/thermosphere/src/fpu_regs_load_store.s
+++ b/thermosphere/src/fpu_regs_load_store.s
@@ -42,13 +42,13 @@ FUNCTION fpuLoadRegistersFromStorage
msr fpsr, x1
msr fpcr, x2
dsb ish
- isb ish
+ isb
ret
END_FUNCTION
FUNCTION fpuStoreRegistersToStorage
dsb ish
- isb ish
+ isb
LDSTORE_QREGS stp
mrs x1, fpsr
mrs x2, fpcr
diff --git a/thermosphere/src/guest_memory.c b/thermosphere/src/guest_memory.c
new file mode 100644
index 000000000..69c7d3a17
--- /dev/null
+++ b/thermosphere/src/guest_memory.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <string.h>
+
+#include "guest_memory.h"
+#include "memory_map.h"
+#include "mmu.h"
+#include "spinlock.h"
+#include "core_ctx.h"
+#include "sysreg.h"
+#include "vgic.h"
+#include "irq.h"
+#include "caches.h"
+
+static size_t guestReadWriteGicd(size_t offset, size_t size, void *readBuf, const void *writeBuf)
+{
+    recursiveSpinlockLock(&g_irqManager.lock);
+
+    if (readBuf != NULL) {
+        size_t readOffset = 0;
+        size_t rem = size;
+        while (rem > 0) {
+            if ((offset + readOffset) % 4 == 0 && rem >= 4) {
+                // All accesses of this kind are valid
+                *(u32 *)((uintptr_t)readBuf + readOffset) = vgicReadGicdRegister(offset + readOffset, 4);
+                readOffset += 4;
+                rem -= 4;
+            } else if ((offset + readOffset) % 2 == 0 && rem >= 2) {
+                // All accesses of this kind would be translated to ldrh and are thus invalid. Abort.
+                size = readOffset;
+                goto end;
+            } else if (vgicValidateGicdRegisterAccess(offset + readOffset, 1)) {
+                // Valid byte access
+                *(u8 *)((uintptr_t)readBuf + readOffset) = vgicReadGicdRegister(offset + readOffset, 1);
+                readOffset += 1;
+                rem -= 1;
+            } else {
+                // Invalid byte access
+                size = readOffset;
+                goto end;
+            }
+        }
+    }
+
+    if (writeBuf != NULL) {
+        size_t writeOffset = 0;
+        size_t rem = size;
+        while (rem > 0) {
+            if ((offset + writeOffset) % 4 == 0 && rem >= 4) {
+                // All accesses of this kind are valid
+                vgicWriteGicdRegister(*(u32 *)((uintptr_t)writeBuf + writeOffset), offset + writeOffset, 4);
+                writeOffset += 4;
+                rem -= 4;
+            } else if ((offset + writeOffset) % 2 == 0 && rem >= 2) {
+                // All accesses of this kind would be translated to strh and are thus invalid. Abort.
+                size = writeOffset;
+                goto end;
+            } else if (vgicValidateGicdRegisterAccess(offset + writeOffset, 1)) {
+                // Valid byte access: read exactly one byte from the source buffer (u8, not u32)
+                vgicWriteGicdRegister(*(u8 *)((uintptr_t)writeBuf + writeOffset), offset + writeOffset, 1);
+                writeOffset += 1;
+                rem -= 1;
+            } else {
+                // Invalid byte access
+                size = writeOffset;
+                goto end;
+            }
+        }
+    }
+
+end:
+    recursiveSpinlockUnlock(&g_irqManager.lock);
+    return size;
+}
+
+static size_t guestReadWriteDeviceMemory(void *addr, size_t size, void *readBuf, const void *writeBuf)
+{
+    // We might trigger bus errors... ignore the exception and return early if that's the case
+
+    CoreCtx *curCtxBackup = currentCoreCtx;
+    __compiler_barrier();
+    currentCoreCtx = NULL;
+    __compiler_barrier();
+
+    uintptr_t addri = (uintptr_t)addr;
+
+    if (readBuf != NULL) {
+        size_t readOffset = 0;
+        size_t rem = size;
+        while (rem > 0 && (__compiler_barrier(), currentCoreCtx == NULL)) {
+            if ((addri + readOffset) % 4 == 0 && rem >= 4) {
+                *(vu32 *)((uintptr_t)readBuf + readOffset) = *(vu32 *)(addri + readOffset);
+                readOffset += 4;
+                rem -= 4;
+            } else if ((addri + readOffset) % 2 == 0 && rem >= 2) { // align on the device address, not the buffer offset
+                *(vu16 *)((uintptr_t)readBuf + readOffset) = *(vu16 *)(addri + readOffset);
+                readOffset += 2;
+                rem -= 2;
+            } else {
+                *(vu8 *)((uintptr_t)readBuf + readOffset) = *(vu8 *)(addri + readOffset);
+                readOffset += 1;
+                rem -= 1;
+            }
+        }
+        if (rem != 0) {
+            size = readOffset;
+            goto end;
+        }
+    }
+
+    if (writeBuf != NULL) {
+        size_t writeOffset = 0;
+        size_t rem = size;
+        while (rem > 0 && (__compiler_barrier(), currentCoreCtx == NULL)) {
+            if ((addri + writeOffset) % 4 == 0 && rem >= 4) {
+                *(vu32 *)(addri + writeOffset) = *(vu32 *)((uintptr_t)writeBuf + writeOffset);
+                writeOffset += 4;
+                rem -= 4;
+            } else if ((addri + writeOffset) % 2 == 0 && rem >= 2) { // align on the device address, not the buffer offset
+                *(vu16 *)(addri + writeOffset) = *(vu16 *)((uintptr_t)writeBuf + writeOffset);
+                writeOffset += 2;
+                rem -= 2;
+            } else {
+                *(vu8 *)(addri + writeOffset) = *(vu8 *)((uintptr_t)writeBuf + writeOffset);
+                writeOffset += 1;
+                rem -= 1;
+            }
+        }
+        if (rem != 0) {
+            size = writeOffset;
+            goto end;
+        }
+    }
+
+end:
+    __compiler_barrier();
+    currentCoreCtx = curCtxBackup;
+    __compiler_barrier();
+    return size;
+}
+
+static size_t guestReadWriteNormalMemory(void *addr, size_t size, void *readBuf, const void *writeBuf)
+{
+ if (readBuf != NULL) {
+ memcpy(readBuf, addr, size);
+ }
+
+ if (writeBuf != NULL) {
+ memcpy(addr, writeBuf, size);
+
+ // We may have written to executable memory or to translation tables...
+ // & the page may have various aliases.
+ // We need to ensure cache & TLB coherency.
+ cacheCleanDataCacheRangePoU(addr, size);
+ u32 policy = cacheGetInstructionCachePolicy();
+ if (policy == 1 || policy == 2) {
+ // AVIVT, VIVT
+ cacheInvalidateInstructionCache();
+ } else {
+ // VPIPT, PIPT
+ // Ez coherency, just do range operations...
+ cacheInvalidateInstructionCacheRangePoU(addr, size);
+ }
+ __tlb_invalidate_el1();
+ __dsb();
+ __isb();
+ }
+
+ return size;
+}
+
+static size_t guestReadWriteMemoryPage(uintptr_t addr, size_t size, void *readBuf, const void *writeBuf)
+{
+ u64 irqFlags = maskIrq();
+ size_t offset = addr & 0xFFFull;
+
+ // Translate the VA, stages 1&2
+ __asm__ __volatile__ ("at s12e1r, %0" :: "r"(addr) : "memory");
+ u64 par = GET_SYSREG(par_el1);
+ if (par & PAR_F) {
+ // The translation failed. Why?
+ if (par & PAR_S) {
+ // Stage 2 fault. Could be an attempt to access the GICD, let's see what the IPA is...
+ __asm__ __volatile__ ("at s1e1r, %0" :: "r"(addr) : "memory");
+ par = GET_SYSREG(par_el1);
+ if ((par & PAR_F) != 0 || (par & PAR_PA_MASK) != MEMORY_MAP_VA_GICD) {
+ // The guest doesn't have access to it...
+ // Read as 0, write ignored
+ if (readBuf != NULL) {
+ memset(readBuf, 0, size);
+ }
+ } else {
+ // GICD mmio
+ size = guestReadWriteGicd(offset, size, readBuf, writeBuf);
+ }
+ } else {
+ // Oops, couldn't read/write anything (stage 1 fault)
+ size = 0;
+ }
+ } else {
+ /*
+ Translation didn't fail.
+
+ To avoid "B2.8 Mismatched memory attributes" we must use the same effective
+ attributes & shareability as the guest.
+
+ Note that par_el1 reports the effective shareability of device and noncacheable memory as inner shareable.
+ In fact, the VMSAv8-64 section in the Armv8 ARM reads:
+ "The shareability field is only relevant if the memory is a Normal Cacheable memory type. All Device and Normal
+ Non-cacheable memory regions are always treated as Outer Shareable, regardless of the translation table
+ shareability attributes."
+
+ There's one corner case where we can't avoid it: another core is running,
+ changes the attributes (other than permissions) of the page, and issues
+ a broadcasting TLB maintenance instructions and/or accesses the page with the altered
+ attribute itself. We don't handle this corner case -- just don't read/write that kind of memory...
+ */
+ u64 memAttribs = (par >> PAR_ATTR_SHIFT) & PAR_ATTR_MASK;
+ u32 shrb = (par >> PAR_SH_SHIFT) & PAR_SH_MASK;
+ uintptr_t pa = par & PAR_PA_MASK;
+ uintptr_t va = MEMORY_MAP_VA_GUEST_MEM + 0x2000 * currentCoreCtx->coreId;
+
+ u64 mair = GET_SYSREG(mair_el2);
+ mair |= memAttribs << (8 * MEMORY_MAP_MEMTYPE_NORMAL_GUEST_SLOT);
+ SET_SYSREG(mair_el2, mair);
+ __isb();
+
+ u64 attribs = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_SH(shrb) | MMU_PTE_BLOCK_MEMTYPE(MEMORY_MAP_MEMTYPE_NORMAL_GUEST_SLOT);
+ mmu_map_page((uintptr_t *)MEMORY_MAP_VA_TTBL, va, pa, attribs);
+ // Note: no need to broadcast here
+ __tlb_invalidate_el2_page_local(pa);
+ __dsb_local();
+
+ void *vaddr = (void *)(va + offset);
+ if (memAttribs & 0xF0) {
+ // Normal memory, or unpredictable
+ size = guestReadWriteNormalMemory(vaddr, size, readBuf, writeBuf);
+ } else {
+ // Device memory, or unpredictable
+ size = guestReadWriteDeviceMemory(vaddr, size, readBuf, writeBuf);
+ }
+
+ __dsb_local();
+ __isb();
+ mmu_unmap_page((uintptr_t *)MEMORY_MAP_VA_TTBL, va);
+ // Note: no need to broadcast here
+ __tlb_invalidate_el2_page_local(pa);
+ __dsb_local();
+
+ mair &= ~(0xFFul << (8 * MEMORY_MAP_MEMTYPE_NORMAL_GUEST_SLOT));
+ SET_SYSREG(mair_el2, mair);
+ __isb();
+ }
+
+ restoreInterruptFlags(irqFlags);
+ return size;
+}
+
+size_t guestReadWriteMemory(uintptr_t addr, size_t size, void *readBuf, const void *writeBuf)
+{
+ uintptr_t curAddr = addr;
+ size_t remainingAmount = size;
+ u8 *rb8 = (u8 *)readBuf;
+ const u8 *wb8 = (const u8*)writeBuf;
+ while (remainingAmount > 0) {
+ size_t expectedAmount = ((curAddr & ~0xFFFul) + 0x1000) - curAddr;
+ expectedAmount = expectedAmount > remainingAmount ? remainingAmount : expectedAmount;
+ size_t actualAmount = guestReadWriteMemoryPage(curAddr, expectedAmount, rb8, wb8);
+ curAddr += actualAmount;
+ rb8 += actualAmount;
+ wb8 += actualAmount;
+ remainingAmount -= actualAmount;
+ if (actualAmount != expectedAmount) {
+ break;
+ }
+ }
+ return curAddr - addr;
+}
diff --git a/thermosphere/src/guest_memory.h b/thermosphere/src/guest_memory.h
new file mode 100644
index 000000000..4b1ee34a1
--- /dev/null
+++ b/thermosphere/src/guest_memory.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "utils.h"
+
+size_t guestReadWriteMemory(uintptr_t addr, size_t size, void *readBuf, const void *writeBuf);
+
+static inline size_t guestReadMemory(uintptr_t addr, size_t size, void *buf)
+{
+ return guestReadWriteMemory(addr, size, buf, NULL);
+}
+
+static inline size_t guestWriteMemory(uintptr_t addr, size_t size, const void *buf) // buf is a pure source; forwarded to the const writeBuf parameter
+{
+    return guestReadWriteMemory(addr, size, NULL, buf);
+}
diff --git a/thermosphere/src/initSystem.c b/thermosphere/src/initSystem.c
index ded60bbf9..3ff65a8f2 100644
--- a/thermosphere/src/initSystem.c
+++ b/thermosphere/src/initSystem.c
@@ -41,7 +41,7 @@ static void initSysregs(void)
SET_SYSREG(cntp_ctl_el0, 0x00000000);
SET_SYSREG(cntv_ctl_el0, 0x00000000);
- __dsb();
+ __dsb_local();
__isb();
}
diff --git a/thermosphere/src/main.c b/thermosphere/src/main.c
index a09ca738d..723aeda1f 100644
--- a/thermosphere/src/main.c
+++ b/thermosphere/src/main.c
@@ -14,6 +14,7 @@
#include "timer.h"
#include "irq.h"
#include "transport_interface.h"
+#include "guest_memory.h"
#include "memory_map.h"
#include "mmu.h"
@@ -32,8 +33,8 @@ static void loadKernelViaSemihosting(void)
MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | MMU_PTE_BLOCK_MEMTYPE(MEMORY_MAP_MEMTYPE_NORMAL_UNCACHEABLE)
);
- __tlb_invalidate_el2();
- __dsb();
+ __tlb_invalidate_el2_local();
+ __dsb_local();
DEBUG("Loading kernel via semihosted file I/O... ");
handle = semihosting_file_open("test_kernel.bin", FOPEN_MODE_RB);
@@ -49,8 +50,8 @@ static void loadKernelViaSemihosting(void)
semihosting_file_close(handle);
mmu_unmap_range(1, mmuTable, 0x40000000, 0x40000000);
- __tlb_invalidate_el2();
- __dsb();
+ __tlb_invalidate_el2_local();
+ __dsb_local();
currentCoreCtx->kernelEntrypoint = buf;
}
diff --git a/thermosphere/src/memory_map.c b/thermosphere/src/memory_map.c
index 07cac2407..d69a98b58 100644
--- a/thermosphere/src/memory_map.c
+++ b/thermosphere/src/memory_map.c
@@ -143,20 +143,20 @@ void memoryMapEnableMmu(const LoadImageLayout *layout)
SET_SYSREG(ttbr0_el2, mmuTable);
SET_SYSREG(tcr_el2, tcr);
SET_SYSREG(mair_el2, mair);
- __dsb();
+ __dsb_local();
__isb();
// TLB invalidation
// Whether this does anything before MMU is enabled is impldef, apparently
__tlb_invalidate_el2_local();
- __dsb();
+ __dsb_local();
__isb();
// Enable MMU & enable caching. We will crash.
u64 sctlr = GET_SYSREG(sctlr_el2);
sctlr |= SCTLR_ELx_I | SCTLR_ELx_C | SCTLR_ELx_M;
SET_SYSREG(sctlr_el2, sctlr);
- __dsb();
+ __dsb_local();
__isb();
}
diff --git a/thermosphere/src/memory_map.h b/thermosphere/src/memory_map.h
index ec8c448e7..d61f615a1 100644
--- a/thermosphere/src/memory_map.h
+++ b/thermosphere/src/memory_map.h
@@ -22,6 +22,7 @@
#define MEMORY_MAP_MEMTYPE_NORMAL 1ul
#define MEMORY_MAP_MEMTYPE_DEVICE_NGNRE 2ul
#define MEMORY_MAP_MEMTYPE_NORMAL_UNCACHEABLE 3ul
+#define MEMORY_MAP_MEMTYPE_NORMAL_GUEST_SLOT 4ul
#define MEMORY_MAP_VA_SPACE_SIZE 39ul
diff --git a/thermosphere/src/mmu.h b/thermosphere/src/mmu.h
index 3e8fcc87a..1e1bbcc50 100644
--- a/thermosphere/src/mmu.h
+++ b/thermosphere/src/mmu.h
@@ -72,6 +72,7 @@
*/
#define MMU_PTE_BLOCK_MEMTYPE(x) ((uint64_t)((x) << 2))
#define MMU_PTE_BLOCK_NS BITL(5)
+#define MMU_PTE_BLOCK_SH(x) ((x) << 8)
#define MMU_PTE_BLOCK_NON_SHAREABLE (0ull << 8)
#define MMU_PTE_BLOCK_OUTER_SHAREABLE (2ull << 8)
#define MMU_PTE_BLOCK_INNER_SHAREBLE (3ull << 8)
@@ -194,4 +195,4 @@ static inline void mmu_unmap_range(unsigned int level, uintptr_t *tbl, uintptr_t
for(size_t offset = 0; offset < size; offset += BITL(MMU_Lx_SHIFT(level))) {
mmu_unmap(level, tbl, base_addr + offset);
}
-}
\ No newline at end of file
+}
diff --git a/thermosphere/src/platform/qemu/devices.c b/thermosphere/src/platform/qemu/devices.c
index 20c766507..ff560ed69 100644
--- a/thermosphere/src/platform/qemu/devices.c
+++ b/thermosphere/src/platform/qemu/devices.c
@@ -26,6 +26,6 @@ void devicesMapAllExtra(void)
// Don't broadcast, since it's only ran once per boot by only one core, before the others are started...
__tlb_invalidate_el2_local();
- __dsb();
+ __dsb_local();
__isb();
}
diff --git a/thermosphere/src/platform/stage2.c b/thermosphere/src/platform/stage2.c
index c69c46dcb..8e95d2dad 100644
--- a/thermosphere/src/platform/stage2.c
+++ b/thermosphere/src/platform/stage2.c
@@ -39,18 +39,18 @@ void stage2ConfigureAndEnable(void)
// Stage2 regs config
SET_SYSREG(vttbr_el2, vttbr);
SET_SYSREG(vtcr_el2, vtcr);
- __dsb();
+ __dsb_local();
__isb();
// Enable stage 2
u64 hcr = GET_SYSREG(hcr_el2);
hcr |= HCR_VM;
SET_SYSREG(hcr_el2, hcr);
- __dsb();
+ __dsb_local();
__isb();
// TLB invalidation
__tlb_invalidate_el1_stage12_local();
- __dsb();
+ __dsb_local();
__isb();
}
diff --git a/thermosphere/src/platform/tegra/devices.c b/thermosphere/src/platform/tegra/devices.c
index 96997e61d..228704bb6 100644
--- a/thermosphere/src/platform/tegra/devices.c
+++ b/thermosphere/src/platform/tegra/devices.c
@@ -33,6 +33,6 @@ void devicesMapAllExtra(void)
// Don't broadcast, since it's only ran once per boot by only one core, before the others are started...
__tlb_invalidate_el2_local();
- __dsb();
+ __dsb_local();
__isb();
}
diff --git a/thermosphere/src/smc.c b/thermosphere/src/smc.c
index 152f8b629..9226e52a6 100644
--- a/thermosphere/src/smc.c
+++ b/thermosphere/src/smc.c
@@ -16,6 +16,8 @@ void doSmcIndirectCall(ExceptionStackFrame *frame, u32 smcId)
cacheHandleSelfModifyingCodePoU(codebuf, doSmcIndirectCallImplSize/4);
+ __dsb_sy();
+ __isb();
((void (*)(ExceptionStackFrame *))codebuf)(frame);
}
diff --git a/thermosphere/src/software_breakpoints.c b/thermosphere/src/software_breakpoints.c
index 6c5cc9e6c..191dbe645 100644
--- a/thermosphere/src/software_breakpoints.c
+++ b/thermosphere/src/software_breakpoints.c
@@ -62,10 +62,10 @@ static inline bool doApplySoftwareBreakpoint(size_t id)
u32 brkInst = 0xF2000000 | bp->uid;
- if (readEl1Memory(&bp->savedInstruction, bp->address, 4) && writeEl1Memory(bp->address, &brkInst, 4)) {
+ /*if (readEl1Memory(&bp->savedInstruction, bp->address, 4) && writeEl1Memory(bp->address, &brkInst, 4)) {
bp->applied = true;
return true;
- }
+ }*/
return false;
}
@@ -91,10 +91,10 @@ static inline bool doRevertSoftwareBreakpoint(size_t id)
return true;
}
- if (writeEl1Memory(bp->address, &bp->savedInstruction, 4)) {
+ /*if (writeEl1Memory(bp->address, &bp->savedInstruction, 4)) {
bp->applied = false;
return true;
- }
+ }*/
return false;
}
diff --git a/thermosphere/src/start.s b/thermosphere/src/start.s
index 0751c97fe..ae9249177 100644
--- a/thermosphere/src/start.s
+++ b/thermosphere/src/start.s
@@ -52,6 +52,10 @@ _startCommon:
// "Boot core only" stuff:
bl cacheClearSharedDataCachesOnBoot
+ ic iallu
+ dsb nsh
+ isb
+
// Temporarily use temp end region as stack, then create the translation table
// The stack top is also equal to the mmu table address...
adr x0, g_loadImageLayout
@@ -89,9 +93,6 @@ _postMmuEnableReturnAddr:
stp x18, xzr, [sp, #-0x10]!
sub sp, sp, #EXCEP_STACK_FRAME_SIZE
- dsb sy
- isb
-
mov x0, sp
mov x1, x20
bl thermosphereMain
diff --git a/thermosphere/src/sysreg.h b/thermosphere/src/sysreg.h
index 242e05f21..43617ef8d 100644
--- a/thermosphere/src/sysreg.h
+++ b/thermosphere/src/sysreg.h
@@ -412,13 +412,28 @@
#define MDSCR_SS BITL(0)
// Common CNTHCTL_EL2 flags
-#define CNTHCTL_EVNTI_MASK 0xFll
+#define CNTHCTL_EVNTI_MASK 0xFul
#define CNTHCTL_EVNTI_SHIFT 4
#define CNTHCTL_EVNTDIR BITL(3)
#define CNTHCTL_EVNTEN BITL(2)
#define CNTHCTL_EL1PCEN BITL(1)
#define CNTHCTL_EL1PCTEN BITL(0)
+// PAR_EL1
+#define PAR_F BITL(0)
+// Successful translation:
+#define PAR_ATTR_SHIFT 56
+#define PAR_ATTR_MASK 0xFFul
+#define PAR_PA_MASK MASK2L(51, 12) // bits 51-48 RES0 if not implemented
+#define PAR_NS BITL(9)
+#define PAR_SH_SHIFT 7
+#define PAR_SH_MASK 3ul
+// Faulting translation:
+#define PAR_S BITL(9)
+#define PAR_PTW BITL(8)
+#define PAR_FST_SHIFT 1
+#define PAR_FST_MASK 0x3Ful
+
#define ENCODE_SYSREG_FIELDS_MOV(op0, op1, crn, crm, op2) (((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5))
#define ENCODE_SYSREG_MOV(name) EVAL(ENCODE_SYSREG_FIELDS_MOV CAT(TUP_, name))
#define MAKE_MSR(name, Rt) (0xD5000000 | ENCODE_SYSREG_MOV(name) | ((Rt) & 0x1F))
diff --git a/thermosphere/src/utils.c b/thermosphere/src/utils.c
index ebdfd45f0..823e1b6b9 100644
--- a/thermosphere/src/utils.c
+++ b/thermosphere/src/utils.c
@@ -27,44 +27,3 @@ __attribute__((noinline)) bool overlaps(u64 as, u64 ae, u64 bs, u64 be)
return true;
return false;
}
-
-// TODO: put that elsewhere
-bool readEl1Memory(void *dst, uintptr_t addr, size_t size)
-{
- // Note: what if we read uncached regions/not shared?
- bool valid;
-
- u64 flags = maskIrq();
- uintptr_t pa = get_physical_address_el1_stage12(&valid, addr);
- restoreInterruptFlags(flags);
-
- if (!valid) {
- return false;
- }
-
- memcpy(dst, (const void *)pa, size);
-
- return true;
-}
-
-bool writeEl1Memory(uintptr_t addr, const void *src, size_t size)
-{
- bool valid;
-
- u64 flags = maskIrq();
- uintptr_t pa = get_physical_address_el1_stage12(&valid, addr);
- restoreInterruptFlags(flags);
-
- if (!valid) {
- return false;
- }
-
- memcpy((void *)pa, src, size);
- cacheHandleSelfModifyingCodePoU((const void *)pa, size);
-
- __tlb_invalidate_el1_stage12_local(); //FIXME FIXME FIXME
- __dsb_sy();
- __isb();
-
- return true;
-}
diff --git a/thermosphere/src/utils.h b/thermosphere/src/utils.h
index 01803ad9e..7ea9f4bf0 100644
--- a/thermosphere/src/utils.h
+++ b/thermosphere/src/utils.h
@@ -62,6 +62,11 @@ typedef enum ReadWriteDirection {
DIRECTION_READWRITE = DIRECTION_READ | DIRECTION_WRITE,
} ReadWriteDirection;
+static inline void __compiler_barrier(void)
+{
+ __asm__ __volatile__ ("" ::: "memory");
+}
+
static inline void __wfi(void)
{
__asm__ __volatile__ ("wfi" ::: "memory");
@@ -100,6 +105,11 @@ static inline void __dsb(void)
__asm__ __volatile__ ("dsb ish" ::: "memory");
}
+static inline void __dsb_local(void)
+{
+ __asm__ __volatile__ ("dsb nsh" ::: "memory");
+}
+
static inline void __dmb_sy(void)
{
__asm__ __volatile__ ("dmb sy" ::: "memory");
@@ -125,11 +135,27 @@ static inline void __tlb_invalidate_el2_local(void)
__asm__ __volatile__ ("tlbi alle2" ::: "memory");
}
+static inline void __tlb_invalidate_el2_page(uintptr_t addr)
+{
+ __asm__ __volatile__ ("tlbi vae2is, %0" :: "r"(addr) : "memory");
+}
+
+static inline void __tlb_invalidate_el2_page_local(uintptr_t addr)
+{
+    __asm__ __volatile__ ("tlbi vae2, %0" :: "r"(addr) : "memory"); // non-broadcasting variant (vae2, not vae2is), matching alle2 in __tlb_invalidate_el2_local
+}
+
+
static inline void __tlb_invalidate_el1_stage12_local(void)
{
__asm__ __volatile__ ("tlbi alle1" ::: "memory");
}
+static inline void __tlb_invalidate_el1(void)
+{
+ __asm__ __volatile__ ("tlbi vmalle1is" ::: "memory");
+}
+
bool overlaps(u64 as, u64 ae, u64 bs, u64 be);
// Assumes addr is valid, must be called with interrupts masked
@@ -138,23 +164,11 @@ static inline uintptr_t va2pa(const void *el2_vaddr) {
// For debug purposes only
uintptr_t PAR;
uintptr_t va = (uintptr_t)el2_vaddr;
- __asm__ __volatile__ ("at s1e2r, %0" :: "r"(va));
- __asm__ __volatile__ ("mrs %0, par_el1" : "=r"(PAR));
+ __asm__ __volatile__ ("at s1e2r, %0" :: "r"(va) : "memory");
+ __asm__ __volatile__ ("mrs %0, par_el1" : "=r"(PAR) :: "memory");
return (PAR & MASK2L(47, 12)) | (va & MASKL(12));
}
-static inline uintptr_t get_physical_address_el1_stage12(bool *valid, uintptr_t el1_vaddr) {
- // NOTE: interrupt must be disabled when calling this func
- uintptr_t PAR;
- __asm__ __volatile__ ("at s12e1r, %0" :: "r"(el1_vaddr)); // note: we don't care whether it's writable in EL1&0 translation regime
- __asm__ __volatile__ ("mrs %0, par_el1" : "=r"(PAR));
- *valid = (PAR & 1) == 0ull;
- return (PAR & 1) ? 0ull : (PAR & MASK2L(47, 12)) | ((uintptr_t)el1_vaddr & MASKL(12));
-}
-
-bool readEl1Memory(void *dst, uintptr_t addr, size_t size);
-bool writeEl1Memory(uintptr_t addr, const void *src, size_t size);
-
static inline void panic(void) {
#ifndef PLATFORM_QEMU
__builtin_trap();
diff --git a/thermosphere/src/vgic.c b/thermosphere/src/vgic.c
index f203abcab..6be16bf46 100644
--- a/thermosphere/src/vgic.c
+++ b/thermosphere/src/vgic.c
@@ -526,12 +526,23 @@ static inline u32 vgicGetPeripheralId2Register(void)
return 2 << 4;
}
-static void handleVgicMmioWrite(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t offset)
+bool vgicValidateGicdRegisterAccess(size_t offset, size_t sz)
{
- size_t sz = BITL(dabtIss.sas);
- u32 val = (u32)(readFrameRegisterZ(frame, dabtIss.srt) & MASKL(8 * sz));
- uintptr_t addr = (uintptr_t)gicd + offset;
+ // ipriorityr, itargetsr, *pendsgir are byte-accessible
+ if (
+ !(offset >= GICDOFF(ipriorityr) && offset < GICDOFF(ipriorityr) + GIC_IRQID_MAX) &&
+ !(offset >= GICDOFF(itargetsr) && offset < GICDOFF(itargetsr) + GIC_IRQID_MAX) &&
+ !(offset >= GICDOFF(cpendsgir) && offset < GICDOFF(cpendsgir) + 16) &&
+ !(offset >= GICDOFF(spendsgir) && offset < GICDOFF(spendsgir) + 16)
+ ) {
+ return (offset & 3) == 0 && sz == 4;
+ } else {
+ return sz == 1 || (sz == 4 && ((offset & 3) != 0));
+ }
+}
+void vgicWriteGicdRegister(u32 val, size_t offset, size_t sz)
+{
//DEBUG("gicd write off 0x%03llx sz %lx val %x w%d\n", offset, sz, val, (int)dabtIss.srt);
switch (offset) {
@@ -607,19 +618,15 @@ static void handleVgicMmioWrite(ExceptionStackFrame *frame, DataAbortIss dabtIss
break;
default:
- dumpUnhandledDataAbort(dabtIss, addr, "GICD reserved/implementation-defined register");
+ DEBUG("Write to GICD reserved/implementation-defined register offset=0x%03lx value=0x%08lx", offset, val);
break;
}
}
-static void handleVgicMmioRead(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t offset)
+u32 vgicReadGicdRegister(size_t offset, size_t sz)
{
- size_t sz = BITL(dabtIss.sas);
- uintptr_t addr = (uintptr_t)gicd + offset;
-
- u32 val = 0;
-
//DEBUG("gicd read off 0x%03llx sz %lx\n", offset, sz);
+ u32 val = 0;
switch (offset) {
case GICDOFF(icfgr) ... GICDOFF(icfgr) + 31/4:
@@ -682,7 +689,7 @@ static void handleVgicMmioRead(ExceptionStackFrame *frame, DataAbortIss dabtIss,
case GICDOFF(sgir):
// Write-only register
- dumpUnhandledDataAbort(dabtIss, addr, "GICD read to write-only register GCID_SGIR");
+ DEBUG("Read from write-only register GCID_SGIR\n");
break;
case GICDOFF(icpidr2):
@@ -690,10 +697,24 @@ static void handleVgicMmioRead(ExceptionStackFrame *frame, DataAbortIss dabtIss,
break;
default:
- dumpUnhandledDataAbort(dabtIss, addr, "GICD reserved/implementation-defined register");
+ DEBUG("Read from GICD reserved/implementation-defined register offset=0x%03lx\n", offset);
break;
}
+ return val;
+}
+
+static void handleVgicMmioWrite(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t offset)
+{
+ size_t sz = BITL(dabtIss.sas);
+ u32 val = (u32)(readFrameRegisterZ(frame, dabtIss.srt) & MASKL(8 * sz));
+ vgicWriteGicdRegister(val, offset, sz);
+}
+
+static void handleVgicMmioRead(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t offset)
+{
+ size_t sz = BITL(dabtIss.sas);
+ u32 val = vgicReadGicdRegister(offset, sz);
writeFrameRegisterZ(frame, dabtIss.srt, val);
}
@@ -993,28 +1014,10 @@ void handleVgicdMmio(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t of
{
size_t sz = BITL(dabtIss.sas);
uintptr_t addr = (uintptr_t)gicd + offset;
- bool oops = true;
+ bool oops = !vgicValidateGicdRegisterAccess(offset, sz);
- // ipriorityr, itargetsr, *pendsgir are byte-accessible
- if (
- !(offset >= GICDOFF(ipriorityr) && offset < GICDOFF(ipriorityr) + GIC_IRQID_MAX) &&
- !(offset >= GICDOFF(itargetsr) && offset < GICDOFF(itargetsr) + GIC_IRQID_MAX) &&
- !(offset >= GICDOFF(cpendsgir) && offset < GICDOFF(cpendsgir) + 16) &&
- !(offset >= GICDOFF(spendsgir) && offset < GICDOFF(spendsgir) + 16)
- ) {
- if ((offset & 3) != 0 || sz != 4) {
- dumpUnhandledDataAbort(dabtIss, addr, "GICD non-word aligned MMIO");
- } else {
- oops = false;
- }
- } else {
- if (sz != 1 && sz != 4) {
- dumpUnhandledDataAbort(dabtIss, addr, "GICD 16 or 64-bit access");
- } else if (sz == 4 && (offset & 3) != 0) {
- dumpUnhandledDataAbort(dabtIss, addr, "GICD misaligned MMIO");
- } else {
- oops = false;
- }
+ if (oops) {
+ dumpUnhandledDataAbort(dabtIss, addr, "invalid GICD register access");
}
recursiveSpinlockLock(&g_irqManager.lock);
diff --git a/thermosphere/src/vgic.h b/thermosphere/src/vgic.h
index f816ac74e..57402e25c 100644
--- a/thermosphere/src/vgic.h
+++ b/thermosphere/src/vgic.h
@@ -19,6 +19,10 @@
#include "types.h"
#include "data_abort.h"
+bool vgicValidateGicdRegisterAccess(size_t offset, size_t sz);
+void vgicWriteGicdRegister(u32 val, size_t offset, size_t sz);
+u32 vgicReadGicdRegister(size_t offset, size_t sz);
+
void handleVgicdMmio(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t offset);
void vgicInit(void);