thermosphere: major refactor of memory map

- use recursive stage 1 page table (thanks @fincs for this idea); see the sketch after this list
- NULL now unmapped
- no identity mapping
- image + GICv2 now mapped at the same address for every platform
- tempbss mapped just after "real" bss, can now steal unused mem from
the latter
- no hardcoded VAs for other MMIO devices (usage sketch below, after the commit stats)
- tegra: remove timers, use the generic timer instead
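
The recursive-mapping bullet boils down to index arithmetic. A minimal standalone sketch (not code from this commit; it only reproduces the constants that the new memory_map.h derives, assuming the 39-bit VA space and 4 KB granule used here): with the L1 table mapped into itself at index 511, the walker reuses that same table as its own L2 and L3 table for the top of the address space.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        // Ranges the recursive L1 entry at index 511 makes the table cover/appear at.
        uint64_t selfL2Range = 511ull << 30;                                     // 0x7FC0000000
        uint64_t selfL3Range = (511ull << 30) | (511ull << 21);                  // 0x7FFFE00000
        uint64_t tableVa     = (511ull << 30) | (511ull << 21) | (511ull << 12); // 0x7FFFFFF000 (MEMORY_MAP_VA_TTBL)
        printf("%#llx %#llx %#llx\n", (unsigned long long)selfL2Range,
               (unsigned long long)selfL3Range, (unsigned long long)tableVa);
        return 0;
    }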
TuxSH 2020-01-17 22:10:26 +00:00
parent 92a291cd41
commit 626f0ecb98
47 changed files with 795 additions and 469 deletions
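
The "no hardcoded VAs" bullet translates into the pattern below for platform drivers (a hedged sketch mirroring the new qemu/devices.c further down; mapPlatformUart is a hypothetical wrapper, the two prototypes are the ones this commit adds):

    #include <stddef.h>
    #include <stdint.h>

    // Added by this commit: returns a fresh VA mapping for a device's physical range.
    uintptr_t memoryMapPlatformMmio(uintptr_t pa, size_t size);
    // Added by this commit: the PL011 driver now receives its register base at runtime.
    void uartSetRegisterBase(uintptr_t regBase);

    // Hypothetical helper: map the QEMU virt PL011 (PA 0x09000000) and hand the VA to the driver.
    static void mapPlatformUart(void)
    {
        uartSetRegisterBase(memoryMapPlatformMmio(0x09000000ull, 0x1000));
    }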


@@ -69,7 +69,6 @@ CFLAGS := \
     -fstrict-volatile-bitfields \
     -fno-unwind-tables \
     -std=gnu11 \
-    -Werror \
     -Wall \
     -Wno-main \
     $(ARCH) $(DEFINES)
@@ -143,10 +142,11 @@ all: $(BUILD)
 ifeq ($(PLATFORM), qemu)
 export QEMU := qemu-system-aarch64
+#export QEMU := ~/qemu/aarch64-softmmu/qemu-system-aarch64
 QEMUFLAGS := -nographic -machine virt,virtualization=on,accel=tcg,gic-version=2 -cpu cortex-a57 -smp 4 -m 1024\
     -kernel thermosphere.elf -d unimp,guest_errors -semihosting-config enable,target=native\
-    -chardev stdio,id=uart -serial chardev:uart -monitor none
+    -chardev stdio,id=uart -serial chardev:uart -monitor tcp:localhost:3333,server,nowait
 qemu: all
     @$(QEMU) $(QEMUFLAGS)


@@ -6,14 +6,22 @@ PHDRS
     main PT_LOAD;
 }
+MEMORY
+{
+    mainVa : ORIGIN = 0x7FFFE10000, LENGTH = 2M - 64K
+}
 SECTIONS
 {
-    PROVIDE(__start__ = ORIGIN(main));
-    . = __start__;
+    __start_pa__ = ABSOLUTE(ORIGIN(main));
+    __temp_pa__ = ABSOLUTE(ORIGIN(temp));
+    __max_image_size__ = ABSOLUTE(LENGTH(main));
+    __max_temp_size__ = ABSOLUTE(LENGTH(temp) - 0x1000);
     .text :
     {
         . = ALIGN(8);
+        __start__ = ABSOLUTE(.);
         KEEP(*(.crt0*));
         *(.text.unlikely .text.*_unlikely .text.unlikely.*)
         *(.text.exit .text.exit.*)
@@ -26,37 +34,37 @@ SECTIONS
         __vectors_end__ = ABSOLUTE(.);
         ASSERT(__vectors_end__ - __vectors_start__ <= 0x800, "Exception vectors section should be max 0x800 in size!");
         . = ALIGN(8);
-    } >main :main
+    } >mainVa AT>main :main
     .init :
     {
         KEEP( *(.init) )
         . = ALIGN(8);
-    } >main :main
+    } >mainVa AT>main :main
     .plt :
     {
         *(.plt)
         *(.iplt)
         . = ALIGN(8);
-    } >main :main
+    } >mainVa AT>main :main
     .fini :
     {
         KEEP( *(.fini) )
         . = ALIGN(8);
-    } >main :main
+    } >mainVa AT>main :main
     .rodata :
     {
         *(.rodata .rodata.* .gnu.linkonce.r.*)
         SORT(CONSTRUCTORS)
         . = ALIGN(8);
-    } >main :main
+    } >mainVa AT>main :main
-    .got : { __got_start__ = ABSOLUTE(.); *(.got) *(.igot) } >main :main
+    .got : { __got_start__ = ABSOLUTE(.); *(.got) *(.igot) } >mainVa AT>main :main
-    .got.plt : { *(.got.plt) *(.igot.plt) __got_end__ = ABSOLUTE(.);} >main :main
+    .got.plt : { *(.got.plt) *(.igot.plt) __got_end__ = ABSOLUTE(.);} >mainVa AT>main :main
     .preinit_array :
     {
@@ -64,8 +72,9 @@ SECTIONS
         PROVIDE (__preinit_array_start = ABSOLUTE(.));
         KEEP (*(.preinit_array))
         PROVIDE (__preinit_array_end = ABSOLUTE(.));
+        ASSERT(__preinit_array_end == __preinit_array_start, ".preinit_array not empty!");
         . = ALIGN(8);
-    } >main :main
+    } >mainVa AT>main :main
     .init_array :
     {
@@ -73,7 +82,8 @@ SECTIONS
         KEEP (*(SORT(.init_array.*)))
         KEEP (*(.init_array))
         PROVIDE (__init_array_end = ABSOLUTE(.));
-    } >main :main
+        ASSERT(__init_array_end == __init_array_start, ".init_array not empty!");
+    } >mainVa AT>main :main
     .fini_array :
     {
@@ -83,7 +93,8 @@ SECTIONS
         KEEP (*(SORT(.fini_array.*)))
         PROVIDE (__fini_array_end = ABSOLUTE(.));
         . = ALIGN(8);
-    } >main :main
+        ASSERT(__fini_array_end == __fini_array_start, ".fini_array not empty!");
+    } >mainVa AT>main :main
     .ctors :
     {
@@ -93,7 +104,7 @@ SECTIONS
         KEEP (*(SORT(.ctors.*)))
         KEEP (*(.ctors))
         . = ALIGN(8);
-    } >main :main
+    } >mainVa AT>main :main
     .dtors ALIGN(8) :
     {
@@ -103,56 +114,54 @@ SECTIONS
         KEEP (*(SORT(.dtors.*)))
         KEEP (*(.dtors))
         . = ALIGN(8);
-    } >main :main
+    } >mainVa AT>main :main
     .data ALIGN(8) :
     {
         *(.data .data.* .gnu.linkonce.d.*)
         CONSTRUCTORS
         . = ALIGN(8);
-    } >main :main
+    } >mainVa AT>main :main
-    .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) } >main :main
+    .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) } >mainVa AT>main :main
-    .eh_frame : { KEEP (*(.eh_frame)) *(.eh_frame.*) } >main :main
+    .eh_frame : { KEEP (*(.eh_frame)) *(.eh_frame.*) } >mainVa AT>main :main
-    .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } >main :main
+    .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } >mainVa AT>main :main
-    .gnu_extab : { *(.gnu_extab*) } >main :main
+    .gnu_extab : { *(.gnu_extab*) } >mainVa AT>main :main
-    .exception_ranges : { *(.exception_ranges .exception_ranges*) } >main :main
+    .exception_ranges : { *(.exception_ranges .exception_ranges*) } >mainVa AT>main :main
-    .dynamic : { *(.dynamic) } >main :main
+    .dynamic : { *(.dynamic) } >mainVa AT>main :main
-    .interp : { *(.interp) } >main :main
+    .interp : { *(.interp) } >mainVa AT>main :main
-    .note.gnu.build-id : { *(.note.gnu.build-id) } >main :main
+    .note.gnu.build-id : { *(.note.gnu.build-id) } >mainVa AT>main :main
-    .hash : { *(.hash) } >main :main
+    .hash : { *(.hash) } >mainVa AT>main :main
-    .gnu.hash : { *(.gnu.hash) } >main :main
+    .gnu.hash : { *(.gnu.hash) } >mainVa AT>main :main
-    .gnu.version : { *(.gnu.version) } >main :main
+    .gnu.version : { *(.gnu.version) } >mainVa AT>main :main
-    .gnu.version_d : { *(.gnu.version_d) } >main :main
+    .gnu.version_d : { *(.gnu.version_d) } >mainVa AT>main :main
-    .gnu.version_r : { *(.gnu.version_r) } >main :main
+    .gnu.version_r : { *(.gnu.version_r) } >mainVa AT>main :main
-    .dynsym : { *(.dynsym) } >main :main
+    .dynsym : { *(.dynsym) } >mainVa AT>main :main
-    .dynstr : { *(.dynstr) } >main :main
+    .dynstr : { *(.dynstr) } >mainVa AT>main :main
-    .rela.dyn : { *(.rela.*); __main_end__ = ABSOLUTE(.);} >main :main
+    .rela.dyn : { *(.rela.*); __main_end__ = ABSOLUTE(.);} >mainVa AT>main :main
     .bss (NOLOAD) :
     {
-        . = ALIGN(8);
+        . = ALIGN(0x1000);
         __bss_start__ = ABSOLUTE(.);
         *(.dynbss)
         *(.bss .bss.* .gnu.linkonce.b.*)
         *(COMMON)
         . = ALIGN(8);
-        __end__ = ABSOLUTE(.);
-    } >main :NONE
+        __real_bss_end__ = ABSOLUTE(.);
+        __image_size__ = ABSOLUTE(__real_bss_end__ - __start__);
+        ASSERT(__image_size__ <= __max_image_size__, "Image too big!");
-    .temp (NOLOAD) :
-    {
-        . = ALIGN(0x1000);
-        __stacks_top__ = ABSOLUTE(. + 0x2000);
-        __crash_stacks_top__ = ABSOLUTE(. + 0x3000);
-        . += 0x3000;
-        __temp_bss_start__ = ABSOLUTE(.);
+        /*
+            The logic here: tempbss *additional pages* are at a very different PA, but
+            we can allow .tempbss to use unused "non-temporary" BSS space. Their VAs are
+            contiguous.
+        */
         *(.tempbss .tempbss.*)
-        __temp_bss_end__ = ABSOLUTE(.);
-        . = ALIGN(0x1000);
-    } >temp :NONE
+        __bss_end__ = ABSOLUTE(.);
+        __temp_size__ = ABSOLUTE(__bss_end__ - __real_bss_end__);
+    } >mainVa :NONE


@@ -15,13 +15,11 @@
  */
 #include "core_ctx.h"
-#include "utils.h"
+#include "memory_map.h"
 // start.s
 extern uintptr_t g_initialKernelEntrypoint;
-extern u8 __stacks_top__[], __crash_stacks_top__[];
 static atomic_uint g_activeCoreMask = 0;
 // Prevents it from being put in BSS
@@ -34,11 +32,11 @@ CoreCtx g_coreCtxs[4] = {
 void coreCtxInit(u32 coreId, bool isBootCore, u64 argument)
 {
-    size_t crashStackSize = (__crash_stacks_top__ - __stacks_top__) / 4;
+    size_t crashStackSize = 0x1000 / 4;
     currentCoreCtx = &g_coreCtxs[coreId];
     currentCoreCtx->isBootCore = isBootCore;
     currentCoreCtx->kernelArgument = argument;
-    currentCoreCtx->crashStack = __crash_stacks_top__ - crashStackSize * coreId;
+    currentCoreCtx->crashStack = (u8 *)(MEMORY_MAP_VA_CRASH_STACKS_TOP - crashStackSize * coreId);
     if (isBootCore && currentCoreCtx->kernelEntrypoint == 0) {
         currentCoreCtx->kernelEntrypoint = g_initialKernelEntrypoint;
     }


@@ -28,7 +28,7 @@ void dumpUnhandledDataAbort(DataAbortIss dabtIss, u64 far, const char *msg)
 {
     char s1[64], s2[32], s3[64] = "";
     (void)s1; (void)s2; (void)s3;
-    sprintf(s1, "Unhandled %s %s", msg , dabtIss.wnr ? "write" : "read");
+    sprintf(s1, "Unhandled%s %s", msg , dabtIss.wnr ? "write" : "read");
     if (dabtIss.fnv) {
         sprintf(s2, "<unk>");
     } else {
@@ -55,14 +55,14 @@ void handleLowerElDataAbortException(ExceptionStackFrame *frame, ExceptionSyndro
         dumpUnhandledDataAbort(dabtIss, far, "");
     }
-    if (farpg == (uintptr_t)g_irqManager.gic.gicd) {
+    if (farpg == MEMORY_MAP_PA_GICD) {
         handleVgicdMmio(frame, dabtIss, far & 0xFFF);
-    } else if (farpg == (uintptr_t)g_irqManager.gic.gich) {
+    } else if (farpg == MEMORY_MAP_PA_GICH) {
-        dumpUnhandledDataAbort(dabtIss, far, "GICH");
+        dumpUnhandledDataAbort(dabtIss, far, " GICH");
     } else {
-        dumpUnhandledDataAbort(dabtIss, far, "(fallback)");
+        dumpUnhandledDataAbort(dabtIss, far, "");
     }
-    // Skip instruction anyway?
+    // Skip instruction anyway
     skipFaultingInstruction(frame, esr.il == 0 ? 2 : 4);
 }


@@ -88,9 +88,9 @@
     ldr x16, [x18, #CORECTX_SCRATCH_OFFSET]
 .endm
-.equ EXCEPTION_TYPE_HOST, 0
-.equ EXCEPTION_TYPE_GUEST, 1
-.equ EXCEPTION_TYPE_HOST_CRASH, 2
+#define EXCEPTION_TYPE_HOST 0
+#define EXCEPTION_TYPE_GUEST 1
+#define EXCEPTION_TYPE_HOST_CRASH 2
 .macro EXCEPTION_HANDLER_START name, type
 vector_entry \name
@@ -135,8 +135,15 @@ check_vector_size \name
 vector_base g_thermosphereVectors
 /* Current EL, SP0 */
-/* Those are unused by us, except on same-EL double-faults. */
-UNKNOWN_EXCEPTION _synchSp0
+vector_entry _synchSp0
+    // Used when we enable the MMU
+    msr elr_el2, x18
+    // Note: non-broadcasting TLB maintenance op
+    tlbi alle2
+    dsb ish
+    isb
+    eret
+check_vector_size _synchSp0
 _unknownException:
     pivot_stack_for_crash


@@ -61,7 +61,7 @@ void dumpStackFrame(const ExceptionStackFrame *frame, bool sameEl)
     DEBUG("x30\t\t%016llx\n\n", frame->x[30]);
     DEBUG("elr_el2\t\t%016llx\n", frame->elr_el2);
     DEBUG("spsr_el2\t%016llx\n", frame->spsr_el2);
-    DEBUG("far_el2\t%016llx\n", frame->far_el2);
+    DEBUG("far_el2\t\t%016llx\n", frame->far_el2);
     if (sameEl) {
         DEBUG("sp_el2\t\t%016llx\n", frame->sp_el2);
     } else {
@@ -96,9 +96,8 @@ void skipFaultingInstruction(ExceptionStackFrame *frame, u32 size)
     frame->elr_el2 += size;
 }
-void exceptionEnterInterruptibleHypervisorCode(ExceptionStackFrame *frame)
+void exceptionEnterInterruptibleHypervisorCode(void)
 {
-    (void)frame;
     // We don't want the guest to spam us with its timer interrupts. Disable the timers.
     SET_SYSREG(cntp_ctl_el0, 0);
     SET_SYSREG(cntv_ctl_el0, 0);
@@ -107,8 +106,10 @@ void exceptionEnterInterruptibleHypervisorCode(ExceptionStackFrame *frame)
 // Called on exception entry (avoids overflowing a vector section)
 void exceptionEntryPostprocess(ExceptionStackFrame *frame, bool isLowerEl)
 {
+    if (frame == currentCoreCtx->guestFrame) {
         frame->cntp_ctl_el0 = GET_SYSREG(cntp_ctl_el0);
         frame->cntv_ctl_el0 = GET_SYSREG(cntv_ctl_el0);
+    }
 }
 // Called on exception return (avoids overflowing a vector section)
@@ -116,7 +117,7 @@ void exceptionReturnPreprocess(ExceptionStackFrame *frame)
 {
     if (currentCoreCtx->wasPaused && frame == currentCoreCtx->guestFrame) {
         // Were we paused & are we about to return to the guest?
-        exceptionEnterInterruptibleHypervisorCode(frame);
+        exceptionEnterInterruptibleHypervisorCode();
         debugPauseWaitAndUpdateSingleStep();
     }
@@ -124,9 +125,11 @@ void exceptionReturnPreprocess(ExceptionStackFrame *frame)
     currentCoreCtx->totalTimeInHypervisor += timerGetSystemTick() - frame->cntpct_el0;
     SET_SYSREG(cntvoff_el2, currentCoreCtx->totalTimeInHypervisor);
+    if (frame == currentCoreCtx->guestFrame) {
         // Restore interrupt mask
         SET_SYSREG(cntp_ctl_el0, frame->cntp_ctl_el0);
         SET_SYSREG(cntv_ctl_el0, frame->cntv_ctl_el0);
+    }
 }
 void handleLowerElSyncException(ExceptionStackFrame *frame, ExceptionSyndromeRegister esr)


@@ -138,7 +138,7 @@ bool spsrEvaluateConditionCode(u64 spsr, u32 conditionCode);
 void skipFaultingInstruction(ExceptionStackFrame *frame, u32 size);
 void dumpStackFrame(const ExceptionStackFrame *frame, bool sameEl);
-void exceptionEnterInterruptibleHypervisorCode(ExceptionStackFrame *frame);
+void exceptionEnterInterruptibleHypervisorCode(void);
 void handleLowerElSyncException(ExceptionStackFrame *frame, ExceptionSyndromeRegister esr);
 void handleSameElSyncException(ExceptionStackFrame *frame, ExceptionSyndromeRegister esr);


@@ -143,11 +143,3 @@ typedef struct ArmGicV2VirtualInterfaceController {
     u8 _0xf4[0x100 - 0xF4];
     ArmGicV2ListRegister lr[64];
 } ArmGicV2VirtualInterfaceController;
-typedef struct ArmGicV2 {
-    volatile ArmGicV2Distributor *gicd;
-    volatile ArmGicV2Controller *gicc;
-    volatile ArmGicV2VirtualInterfaceController *gich;
-    volatile ArmGicV2Controller *gicv;
-} ArmGicV2;


@@ -16,18 +16,16 @@
 #include <string.h>
 #include "core_ctx.h"
-#include "platform/memory_map_mmu_cfg.h"
+#include "platform/stage2.h"
+#include "platform/devices.h"
 #include "sysreg.h"
 #include "utils.h"
-extern u8 __bss_start__[], __end__[], __temp_bss_start__[], __temp_bss_end__[];
-extern const u32 __vectors_start__[];
+// BSS includes real bss and tmp bss
+extern u8 __bss_start__[], __real_bss_end__[], __bss_end__[];
 static void initSysregs(void)
 {
-    // Set VBAR
-    SET_SYSREG(vbar_el2, (uintptr_t)__vectors_start__);
     // Set system to sane defaults, aarch64 for el1, mmu&caches initially disabled for EL1, etc.
     SET_SYSREG(hcr_el2, 0x80000000);
     SET_SYSREG(dacr32_el2, 0xFFFFFFFF); // unused
@@ -54,12 +52,14 @@ void initSystem(u32 coreId, bool isBootCore, u64 argument)
     if (isBootCore) {
         if (!currentCoreCtx->warmboot) {
-            memset(__bss_start__, 0, __end__ - __bss_start__);
+            memset(__bss_start__, 0, __real_bss_end__ - __bss_start__);
         }
-        memset(__temp_bss_start__, 0, __temp_bss_end__ - __temp_bss_start__);
+        memset(__real_bss_end__, 0, __bss_end__ - __real_bss_end__);
     }
-    configureMemoryMapEnableMmu();
-    configureMemoryMapEnableStage2();
+    stage2ConfigureAndEnable();
+    if (isBootCore) {
+        devicesMapAllExtra();
+    }
 }


@@ -29,26 +29,21 @@ static void initGic(void)
 {
     // Reinits the GICD and GICC (for non-secure mode, obviously)
     if (currentCoreCtx->isBootCore && !currentCoreCtx->warmboot) {
-        initGicV2Pointers(&g_irqManager.gic);
         // Disable interrupt handling & global interrupt distribution
-        g_irqManager.gic.gicd->ctlr = 0;
+        gicd->ctlr = 0;
         // Get some info
-        g_irqManager.numSharedInterrupts = 32 * (g_irqManager.gic.gicd->typer & 0x1F); // number of interrupt lines / 32
+        g_irqManager.numSharedInterrupts = 32 * (gicd->typer & 0x1F); // number of interrupt lines / 32
         // unimplemented priority bits (lowest significant) are RAZ/WI
-        g_irqManager.gic.gicd->ipriorityr[0] = 0xFF;
-        g_irqManager.priorityShift = 8 - __builtin_popcount(g_irqManager.gic.gicd->ipriorityr[0]);
-        g_irqManager.numPriorityLevels = (u8)BIT(__builtin_popcount(g_irqManager.gic.gicd->ipriorityr[0]));
-        g_irqManager.numCpuInterfaces = (u8)(1 + ((g_irqManager.gic.gicd->typer >> 5) & 7));
-        g_irqManager.numListRegisters = (u8)(1 + (g_irqManager.gic.gich->vtr & 0x3F));
+        gicd->ipriorityr[0] = 0xFF;
+        g_irqManager.priorityShift = 8 - __builtin_popcount(gicd->ipriorityr[0]);
+        g_irqManager.numPriorityLevels = (u8)BIT(__builtin_popcount(gicd->ipriorityr[0]));
+        g_irqManager.numCpuInterfaces = (u8)(1 + ((gicd->typer >> 5) & 7));
+        g_irqManager.numListRegisters = (u8)(1 + (gich->vtr & 0x3F));
     }
-    volatile ArmGicV2Controller *gicc = g_irqManager.gic.gicc;
-    volatile ArmGicV2Distributor *gicd = g_irqManager.gic.gicd;
     // Only one core will reset the GIC state for the shared peripheral interrupts
     u32 numInterrupts = 32;
@@ -135,7 +130,6 @@ static inline bool checkGuestTimerInterrupts(ExceptionStackFrame *frame, u16 irq
 static void doConfigureInterrupt(u16 id, u8 prio, bool isLevelSensitive)
 {
-    volatile ArmGicV2Distributor *gicd = g_irqManager.gic.gicd;
     gicd->icenabler[id / 32] = BIT(id % 32);
     if (id >= 32) {
@@ -177,7 +171,7 @@ void configureInterrupt(u16 id, u8 prio, bool isLevelSensitive)
 void irqSetAffinity(u16 id, u8 affinity)
 {
     u64 flags = recursiveSpinlockLockMaskIrq(&g_irqManager.lock);
-    g_irqManager.gic.gicd->itargetsr[id] = affinity;
+    gicd->itargetsr[id] = affinity;
     recursiveSpinlockUnlockRestoreIrq(&g_irqManager.lock, flags);
 }
@@ -206,7 +200,6 @@ void handleIrqException(ExceptionStackFrame *frame, bool isLowerEl, bool isA32)
 {
     (void)isLowerEl;
     (void)isA32;
-    volatile ArmGicV2Controller *gicc = g_irqManager.gic.gicc;
     // Acknowledge the interrupt. Interrupt goes from pending to active.
     u32 iar = gicc->iar;
@@ -277,7 +270,7 @@ void handleIrqException(ExceptionStackFrame *frame, bool isLowerEl, bool isA32)
     // Bottom half part
     if (hasBottomHalf) {
-        exceptionEnterInterruptibleHypervisorCode(frame);
+        exceptionEnterInterruptibleHypervisorCode();
         unmaskIrq();
         if (transportIface != NULL) {
             transportInterfaceIrqHandlerBottomHalf(transportIface);


@@ -21,6 +21,7 @@
 #include "exceptions.h"
 #include "utils.h"
 #include "platform/interrupt_config.h"
+#include "memory_map.h"
 #define IRQ_PRIORITY_HOST 0
 #define IRQ_PRIORITY_GUEST 1
@@ -29,13 +30,11 @@
 typedef struct IrqManager {
     RecursiveSpinlock lock;
-    ArmGicV2 gic;
     u16 numSharedInterrupts;
     u8 priorityShift;
     u8 numPriorityLevels;
     u8 numCpuInterfaces;
     u8 numListRegisters;
-    // Note: we don't store interrupt handlers since we will handle some SGI + uart interrupt(s)...
 } IrqManager;
 typedef enum ThermosphereSgi {
@@ -46,6 +45,10 @@ typedef enum ThermosphereSgi {
     ThermosphereSgi_Max,
 } ThermosphereSgi;
+static volatile ArmGicV2Distributor *const gicd = (volatile ArmGicV2Distributor *)MEMORY_MAP_VA_GICD;
+static volatile ArmGicV2Controller *const gicc = (volatile ArmGicV2Controller *)MEMORY_MAP_VA_GICC;
+static volatile ArmGicV2VirtualInterfaceController *const gich = (volatile ArmGicV2VirtualInterfaceController *)MEMORY_MAP_VA_GICH;
 extern IrqManager g_irqManager;
 void initIrq(void);
@@ -56,17 +59,17 @@ void handleIrqException(ExceptionStackFrame *frame, bool isLowerEl, bool isA32);
 static inline void generateSgiForAllOthers(ThermosphereSgi id)
 {
-    g_irqManager.gic.gicd->sgir = (1 << 24) | ((u32)id & 0xF);
+    gicd->sgir = (1 << 24) | ((u32)id & 0xF);
 }
 static inline void generateSgiForSelf(ThermosphereSgi id)
 {
-    g_irqManager.gic.gicd->sgir = (2 << 24) | ((u32)id & 0xF);
+    gicd->sgir = (2 << 24) | ((u32)id & 0xF);
 }
 static inline void generateSgiForList(ThermosphereSgi id, u32 list)
 {
-    g_irqManager.gic.gicd->sgir = (0 << 24) | (list << 16) | ((u32)id & 0xF);
+    gicd->sgir = (0 << 24) | (list << 16) | ((u32)id & 0xF);
 }
 static inline void generateSgiForAll(ThermosphereSgi id)


@@ -15,14 +15,26 @@
 #include "irq.h"
 #include "transport_interface.h"
-extern const u8 __start__[];
+#include "memory_map.h"
+#include "mmu.h"
 static void loadKernelViaSemihosting(void)
 {
+    // Note: !! hardcoded addresses !!
     size_t len = 1<<20; // max len
-    uintptr_t buf = (uintptr_t)__start__ + (1<<20);
+    uintptr_t buf = 0x60000000 + (1<<20);
     long handle = -1, ret;
+    u64 *mmuTable = (u64 *)MEMORY_MAP_VA_TTBL;
+    mmu_map_block_range(
+        1, mmuTable, 0x40000000, 0x40000000, 0x40000000,
+        MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | MMU_PTE_BLOCK_MEMTYPE(MEMORY_MAP_MEMTYPE_NORMAL_UNCACHEABLE)
+    );
+    __tlb_invalidate_el2();
+    __dsb();
     DEBUG("Loading kernel via semihosted file I/O... ");
     handle = semihosting_file_open("test_kernel.bin", FOPEN_MODE_RB);
     if (handle < 0) {
@@ -35,6 +47,11 @@ static void loadKernelViaSemihosting(void)
     DEBUG("OK!\n");
     semihosting_file_close(handle);
+    mmu_unmap_range(1, mmuTable, 0x40000000, 0x40000000);
+    __tlb_invalidate_el2();
+    __dsb();
     currentCoreCtx->kernelEntrypoint = buf;
 }


@@ -0,0 +1,180 @@
/*
* Copyright (c) 2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "memory_map.h"
#include "mmu.h"
#include "sysreg.h"
#include "platform/interrupt_config.h"
#define ATTRIB_MEMTYPE_NORMAL MMU_PTE_BLOCK_MEMTYPE(MEMORY_MAP_MEMTYPE_NORMAL)
#define ATTRIB_MEMTYPE_DEVICE MMU_PTE_BLOCK_MEMTYPE(MEMORY_MAP_MEMTYPE_DEVICE_NGNRE)
static uintptr_t g_currentPlatformMmioPage = MEMORY_MAP_VA_MMIO_PLAT_BASE;
void memoryMapSetupMmu(const LoadImageLayout *layout, u64 *mmuTable)
{
static const u64 normalAttribs = MMU_PTE_BLOCK_INNER_SHAREBLE | ATTRIB_MEMTYPE_NORMAL;
static const u64 deviceAttribs = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | ATTRIB_MEMTYPE_DEVICE;
// mmuTable is currently a PA
mmu_init_table(mmuTable, 0x200);
/*
Map the table into itself at the entry which index has all bits set.
This is called "recursive page tables" and means (assuming 39-bit addr space) that:
- the table will reuse itself as L2 table for the 0x7FC0000000+ range
- the table will reuse itself as L3 table for the 0x7FFFE00000+ range
- the table itself will be accessible at 0x7FFFFFF000
*/
mmuTable[0x1FF] = (uintptr_t)mmuTable | MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_AF | MMU_PTE_TYPE_TABLE;
/*
Layout in physmem:
Location1
Image (code and data incl. BSS)
Part of "temp" (tempbss, stacks) if there's enough space left
Location2
Remaining of "temp" (note: we don't and can't check if there's enough mem left!)
MMU table (taken from temp physmem)
Layout in vmem:
Location1
Image
tempbss
Location2
Crash stacks
{guard page, stack} * numCores
Location3 (all L1, L2, L3 bits set):
MMU table
*/
// Map our code & data (.text/other code, .rodata, .data, .bss) at the bottom of our L3 range, all RWX
// Note that BSS is page-aligned
// See LD script for more details
uintptr_t curVa = MEMORY_MAP_VA_IMAGE;
uintptr_t curPa = layout->startPa;
size_t tempInImageRegionMaxSize = layout->maxImageSize - layout->imageSize;
size_t tempInImageRegionSize;
size_t tempExtraSize;
if (layout->tempSize <= tempInImageRegionMaxSize) {
tempInImageRegionSize = layout->tempSize;
tempExtraSize = 0;
} else {
// We need extra data
tempInImageRegionSize = tempInImageRegionMaxSize;
tempExtraSize = layout->tempSize - tempInImageRegionSize;
}
size_t imageRegionMapSize = (layout->imageSize + tempInImageRegionSize + 0xFFF) & ~0xFFFul;
size_t tempExtraMapSize = (tempExtraSize + 0xFFF) & ~0xFFFul;
// Do not map the MMU table in that mapping:
mmu_map_page_range(mmuTable, curVa, curPa, imageRegionMapSize, normalAttribs);
curVa += imageRegionMapSize;
curPa = layout->tempPa;
mmu_map_page_range(mmuTable, curVa, curPa, tempExtraMapSize, normalAttribs);
curPa += tempExtraMapSize;
// Map the remaining temporary data as stacks, aligned 0x1000
// Crash stacks, total size is fixed:
curVa = MEMORY_MAP_VA_CRASH_STACKS_BOTTOM;
mmu_map_page_range(mmuTable, curVa, curPa, MEMORY_MAP_VA_CRASH_STACKS_SIZE, normalAttribs);
curPa += MEMORY_MAP_VA_CRASH_STACKS_SIZE;
// Regular stacks
size_t sizePerStack = 0x1000;
curVa = MEMORY_MAP_VA_STACKS_TOP - sizePerStack;
for (u32 i = 0; i < 4; i++) {
mmu_map_page_range(mmuTable, curVa, curPa, sizePerStack, normalAttribs);
curVa -= 2 * sizePerStack;
curPa += sizePerStack;
}
// MMIO
mmu_map_page(mmuTable, MEMORY_MAP_VA_GICD, MEMORY_MAP_PA_GICD, deviceAttribs);
mmu_map_page_range(mmuTable, MEMORY_MAP_VA_GICC, MEMORY_MAP_PA_GICC, 0x2000, deviceAttribs);
mmu_map_page(mmuTable, MEMORY_MAP_VA_GICH, MEMORY_MAP_PA_GICH, deviceAttribs);
}
void memoryMapEnableMmu(const LoadImageLayout *layout)
{
uintptr_t mmuTable = layout->tempPa + layout->maxTempSize;
u32 ps = GET_SYSREG(id_aa64mmfr0_el1) & 0xF;
/*
- PA size: from ID_AA64MMFR0_EL1
- Granule size: 4KB
- Shareability attribute for memory associated with translation table walks using TTBR0_EL2: Inner Shareable
- Outer cacheability attribute for memory associated with translation table walks using TTBR0_EL2: Normal memory, Outer Write-Back Read-Allocate Write-Allocate Cacheable
- Inner cacheability attribute for memory associated with translation table walks using TTBR0_EL2: Normal memory, Inner Write-Back Read-Allocate Write-Allocate Cacheable
- T0SZ = MEMORY_MAP_VA_SPACE_SIZE = 39
*/
u64 tcr = TCR_EL2_RSVD | TCR_PS(ps) | TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA | TCR_T0SZ(MEMORY_MAP_VA_SPACE_SIZE);
/*
- Attribute 0: Device-nGnRnE memory
- Attribute 1: Normal memory, Inner and Outer Write-Back Read-Allocate Write-Allocate Non-transient
- Attribute 2: Device-nGnRE memory
- Attribute 3: Normal memory, Inner and Outer Noncacheable
- Other attributes: Device-nGnRnE memory
*/
u64 mair = 0x44FF0400;
// Set VBAR because we *will* crash (instruction abort because of the value of pc) when enabling the MMU
SET_SYSREG(vbar_el2, layout->vbar);
// MMU regs config
SET_SYSREG(ttbr0_el2, mmuTable);
SET_SYSREG(tcr_el2, tcr);
SET_SYSREG(mair_el2, mair);
__dsb();
__isb();
// TLB invalidation
// Whether this does anything before MMU is enabled is impldef, apparently
__tlb_invalidate_el2_local();
__dsb();
__isb();
// Enable MMU & enable caching. We will crash.
u64 sctlr = GET_SYSREG(sctlr_el2);
sctlr |= SCTLR_ELx_I | SCTLR_ELx_C | SCTLR_ELx_M;
SET_SYSREG(sctlr_el2, sctlr);
__dsb();
__isb();
}
uintptr_t memoryMapGetStackTop(u32 coreId)
{
return MEMORY_MAP_VA_STACKS_TOP - 0x2000 * coreId;
}
uintptr_t memoryMapPlatformMmio(uintptr_t pa, size_t size)
{
uintptr_t va = g_currentPlatformMmioPage;
static const u64 deviceAttribs = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | ATTRIB_MEMTYPE_DEVICE;
u64 *mmuTable = (u64 *)MEMORY_MAP_VA_TTBL;
size = (size + 0xFFF) & ~0xFFFul;
mmu_map_page_range(mmuTable, va, pa, size, deviceAttribs);
g_currentPlatformMmioPage += size;
return va;
}


@@ -0,0 +1,64 @@
/*
* Copyright (c) 2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "utils.h"
#define MEMORY_MAP_MEMTYPE_DEVICE_NGNRNE 0ul
#define MEMORY_MAP_MEMTYPE_NORMAL 1ul
#define MEMORY_MAP_MEMTYPE_DEVICE_NGNRE 2ul
#define MEMORY_MAP_MEMTYPE_NORMAL_UNCACHEABLE 3ul
#define MEMORY_MAP_VA_SPACE_SIZE 39ul
// The following few definitions depend on the value of MEMORY_MAP_VA_SPACE_SIZE:
#define MEMORY_MAP_SELF_L2_VA_RANGE 0x7FC0000000ul // = 511 << 30
#define MEMORY_MAP_SELF_L3_VA_RANGE 0x7FFFE00000ul // = 511 << 30 | 511 << 21
#define MEMORY_MAP_VA_TTBL 0x7FFFFFF000ul // = 511 << 30 | 511 << 21 | 511 << 12
#define MEMORY_MAP_VA_MAX 0x7FFFFFFFFFul // = all 39 bits set
#define MEMORY_MAP_VA_CRASH_STACKS_SIZE 0x1000ul
#define MEMORY_MAP_VA_IMAGE (MEMORY_MAP_SELF_L3_VA_RANGE + 0x10000)
#define MEMORY_MAP_VA_CRASH_STACKS_BOTTOM (MEMORY_MAP_SELF_L3_VA_RANGE + 0x40000)
#define MEMORY_MAP_VA_CRASH_STACKS_TOP (MEMORY_MAP_SELF_L3_VA_RANGE + 0x41000)
#define MEMORY_MAP_VA_GUEST_MEM (MEMORY_MAP_SELF_L3_VA_RANGE + 0x50000)
#define MEMORY_MAP_VA_STACKS_TOP (MEMORY_MAP_SELF_L3_VA_RANGE + 0x80000)
#define MEMORY_MAP_VA_MMIO_BASE MEMORY_MAP_VA_STACKS_TOP
#define MEMORY_MAP_VA_GICD MEMORY_MAP_VA_MMIO_BASE
#define MEMORY_MAP_VA_GICC (MEMORY_MAP_VA_MMIO_BASE + 0x1000)
#define MEMORY_MAP_VA_GICH (MEMORY_MAP_VA_MMIO_BASE + 0x3000)
#define MEMORY_MAP_VA_MMIO_PLAT_BASE (MEMORY_MAP_VA_MMIO_BASE + 0x90000)
typedef struct LoadImageLayout {
uintptr_t startPa;
size_t maxImageSize;
size_t imageSize; // "image" includes "real" BSS but not tempbss
uintptr_t tempPa;
size_t maxTempSize;
size_t tempSize;
uintptr_t vbar;
} LoadImageLayout;
extern LoadImageLayout g_loadImageLayout;
// Non-reentrant
uintptr_t memoryMapPlatformMmio(uintptr_t pa, size_t size);


@@ -140,10 +140,6 @@
 #define VTCR_EL2_RSVD BIT(31)
 #define TCR_EL3_RSVD (BIT(31) | BIT(23))
-// We define those:
-#define ATTRIB_MEMTYPE_NORMAL MMU_PTE_BLOCK_MEMTYPE(MMU_MT_NORMAL)
-#define ATTRIB_MEMTYPE_DEVICE MMU_PTE_BLOCK_MEMTYPE(MMU_MT_DEVICE_NGNRE)
 static inline void mmu_init_table(uintptr_t *tbl, size_t num_entries) {
     for(size_t i = 0; i < num_entries; i++) {
         tbl[i] = MMU_PTE_TYPE_FAULT;


@@ -0,0 +1,27 @@
/*
* Copyright (c) 2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#ifdef PLATFORM_TEGRA
#include "tegra/devices.h"
#elif defined(PLATFORM_QEMU)
#include "qemu/devices.h"
#endif


@@ -0,0 +1,31 @@
/*
* Copyright (c) 2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "devices.h"
#include "../../memory_map.h"
#include "../../utils.h"
#include "uart.h"
void devicesMapAllExtra(void)
{
uartSetRegisterBase(memoryMapPlatformMmio(MEMORY_MAP_PA_UART, 0x1000));
// Don't broadcast, since it's only ran once per boot by only one core, before the others are started...
__tlb_invalidate_el2_local();
__dsb();
__isb();
}


@@ -0,0 +1,19 @@
/*
* Copyright (c) 2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
void devicesMapAllExtra(void);


@@ -18,17 +18,17 @@
 #include "../../gicv2.h"
-// For both guest and host
-#define MAX_NUM_REGISTERED_INTERRUPTS 512
+#define MEMORY_MAP_PA_GICD 0x08000000ull
+#define MEMORY_MAP_PA_GICC 0x08010000ull
+#define MEMORY_MAP_PA_GICH 0x08030000ull
+#define MEMORY_MAP_PA_GICV 0x08040000ull
 #define GIC_IRQID_PMU 23
 #define GIC_IRQID_MAINTENANCE 25
 #define GIC_IRQID_NS_PHYS_HYP_TIMER 26
 #define GIC_IRQID_NS_VIRT_TIMER 27
-//#define GIC_IRQID_LEGACY_NFIQ 28 not defined?
 #define GIC_IRQID_SEC_PHYS_TIMER 29
 #define GIC_IRQID_NS_PHYS_TIMER 30
-//#define GIC_IRQID_LEGACY_NIRQ 31 not defined?
 #define GIC_IRQID_NS_VIRT_HYP_TIMER GIC_IRQID_SPURIOUS // SBSA: 28. Unimplemented
@@ -36,11 +36,3 @@
 #define GIC_IRQID_SEC_VIRT_HYP_TIMER GIC_IRQID_SPURIOUS // SBSA: 19. Unimplemented
 #define GIC_IRQID_UART (32 + 1)
-static inline void initGicV2Pointers(ArmGicV2 *gic)
-{
-    gic->gicd = (volatile ArmGicV2Distributor *)0x08000000ull;
-    gic->gicc = (volatile ArmGicV2Controller *)0x08010000ull;
-    gic->gich = (volatile ArmGicV2VirtualInterfaceController *)0x08030000ull;
-    gic->gicv = (volatile ArmGicV2Controller *)0x08040000ull;
-}


@@ -14,7 +14,9 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
-#include "memory_map.h"
+#include "stage2_config.h"
+#include "interrupt_config.h"
+#include "../../memory_map.h"
 #include "../../utils.h"
 #include "../../mmu.h"
 #include "../../core_ctx.h"
@@ -23,11 +25,10 @@
 #define ADDRSPACESZ 39
 #define ADDRSPACESZ2 ADDRSPACESZ
-static ALIGN(0x1000) u64 g_ttbl[BIT(ADDRSPACESZ - 30)] = {0};
-static ALIGN(0x1000) u64 g_vttbl[BIT(ADDRSPACESZ2 - 30)] = {0};
+static TEMPORARY ALIGN(0x1000) u64 g_vttbl[BIT(ADDRSPACESZ2 - 30)] = {0};
 static TEMPORARY ALIGN(0x1000) u64 g_vttbl_l2_mmio_0_0[512] = {0};
 static TEMPORARY ALIGN(0x1000) u64 g_vttbl_l3_0[512] = {0};
+static TEMPORARY uintptr_t g_vttblPaddr;
 static inline void identityMapL1(u64 *tbl, uintptr_t addr, size_t size, u64 attribs)
 {
@@ -44,39 +45,31 @@ static inline void identityMapL3(u64 *tbl, uintptr_t addr, size_t size, u64 attr
     mmu_map_block_range(3, tbl, addr, addr, size, attribs | MMU_PTE_BLOCK_INNER_SHAREBLE);
 }
-uintptr_t configureMemoryMap(u32 *addrSpaceSize)
-{
-    // QEMU virt RAM address space starts at 0x40000000
-    *addrSpaceSize = ADDRSPACESZ;
-    if (currentCoreCtx->isBootCore && !currentCoreCtx->warmboot) {
-        identityMapL1(g_ttbl, 0x00000000ull, BITL(30), ATTRIB_MEMTYPE_DEVICE);
-        identityMapL1(g_ttbl, 0x40000000ull, (BITL(ADDRSPACESZ - 30) - 1ull) << 30, ATTRIB_MEMTYPE_NORMAL);
-    }
-    return (uintptr_t)g_ttbl;
-}
-uintptr_t configureStage2MemoryMap(u32 *addrSpaceSize)
+uintptr_t stage2Configure(u32 *addrSpaceSize)
 {
     *addrSpaceSize = ADDRSPACESZ2;
-    static const u64 devattrs = MMU_S2AP_RW | MMU_MEMATTR_DEVICE_NGNRE;
+    static const u64 devattrs = 0 | MMU_S2AP_RW | MMU_MEMATTR_DEVICE_NGNRE;
     static const u64 unchanged = MMU_S2AP_RW | MMU_MEMATTR_NORMAL_CACHEABLE_OR_UNCHANGED;
     if (currentCoreCtx->isBootCore) {
+        g_vttblPaddr = va2pa(g_vttbl);
+        uintptr_t *l2pa = (uintptr_t *)va2pa(g_vttbl_l2_mmio_0_0);
+        uintptr_t *l3pa = (uintptr_t *)va2pa(g_vttbl_l3_0);
         identityMapL1(g_vttbl, 0, 4ull << 30, unchanged);
         identityMapL1(g_vttbl, 0x40000000ull, (BITL(ADDRSPACESZ2 - 30) - 1ull) << 30, unchanged);
-        mmu_map_table(1, g_vttbl, 0x00000000ull, g_vttbl_l2_mmio_0_0, 0);
+        mmu_map_table(1, g_vttbl, 0x00000000ull, l2pa, 0);
         identityMapL2(g_vttbl_l2_mmio_0_0, 0x08000000ull, BITL(30), unchanged);
-        mmu_map_table(2, g_vttbl_l2_mmio_0_0, 0x08000000ull, g_vttbl_l3_0, 0);
+        mmu_map_table(2, g_vttbl_l2_mmio_0_0, 0x08000000ull, l3pa, 0);
         identityMapL3(g_vttbl_l3_0, 0x08000000ull, BITL(21), unchanged);
         // GICD -> trapped, GICv2 CPU -> vCPU interface, GICH -> trapped (deny access)
-        mmu_unmap_range(3, g_vttbl_l3_0, 0x08000000ull, 0x10000ull);
-        mmu_unmap_range(3, g_vttbl_l3_0, 0x08030000ull, 0x10000ull);
-        mmu_map_page_range(g_vttbl_l3_0, 0x08010000ull, 0x08040000ull, 0x10000ull, devattrs);
+        mmu_unmap_range(3, g_vttbl_l3_0, MEMORY_MAP_PA_GICD, 0x10000ull);
+        mmu_unmap_range(3, g_vttbl_l3_0, MEMORY_MAP_PA_GICH, 0x10000ull);
+        mmu_map_page_range(g_vttbl_l3_0, MEMORY_MAP_PA_GICC, MEMORY_MAP_PA_GICV, 0x10000ull, devattrs);
     }
-    return (uintptr_t)g_vttbl;
+    return g_vttblPaddr;
 }


@@ -18,5 +18,4 @@
 #include "../../types.h"
-uintptr_t configureMemoryMap(u32 *addrSpaceSize);
-uintptr_t configureStage2MemoryMap(u32 *addrSpaceSize);
+uintptr_t stage2Configure(u32 *addrSpaceSize);


@@ -28,16 +28,23 @@
 //115200
+static uintptr_t g_uartRegBase;
 static inline volatile PL011UartRegisters *uartGetRegisters(UartDevice dev)
 {
     switch (dev) {
         case UART_A:
-            return (volatile PL011UartRegisters *)0x09000000;
+            return (volatile PL011UartRegisters *)g_uartRegBase;
         default:
             return NULL;
     }
 }
+void uartSetRegisterBase(uintptr_t regBase)
+{
+    g_uartRegBase = regBase;
+}
 void uartInit(UartDevice dev, u32 baudRate, u32 flags)
 {
     /* The TRM (DDI0183) reads:


@@ -19,6 +19,8 @@
 #include "../../utils.h"
 #include "interrupt_config.h"
+#define MEMORY_MAP_PA_UART 0x09000000ull
 // AMBA PL011 driver
 // Originally from
 /*
@@ -56,10 +58,10 @@ typedef struct PL011UartRegisters {
 } PL011UartRegisters;
 // Data status bits
-#define UART_DATA_ERROR_MASK 0x0F00
+#define PL011_DATA_ERROR_MASK 0x0F00
 // Status reg bits
-#define UART_STATUS_ERROR_MASK 0x0F
+#define PL011_STATUS_ERROR_MASK 0x0F
 // Errors
 #define PL011_OE BIT(3) // Overrun error
@@ -128,6 +130,7 @@ typedef struct PL011UartRegisters {
 #define UART_CLK_IN_HZ 1
+void uartSetRegisterBase(uintptr_t regBase);
 void uartInit(UartDevice dev, u32 baudRate, u32 flags);
 void uartWriteData(UartDevice dev, const void *buffer, size_t size);
 void uartReadData(UartDevice dev, void *buffer, size_t size);


@@ -17,56 +17,12 @@
 #include "../utils.h"
 #include "../sysreg.h"
 #include "../mmu.h"
-#include "memory_map_mmu_cfg.h"
+#include "stage2.h"
-void configureMemoryMapEnableMmu(void)
+void stage2ConfigureAndEnable(void)
 {
     u32 addrSpaceSize;
-    uintptr_t ttbr0 = configureMemoryMap(&addrSpaceSize);
-    u32 ps = GET_SYSREG(id_aa64mmfr0_el1) & 0xF;
-    /*
-        - PA size: from ID_AA64MMFR0_EL1
-        - Granule size: 4KB
-        - Shareability attribute for memory associated with translation table walks using TTBR0_EL2: Inner Shareable
-        - Outer cacheability attribute for memory associated with translation table walks using TTBR0_EL2: Normal memory, Outer Write-Back Read-Allocate Write-Allocate Cacheable
-        - Inner cacheability attribute for memory associated with translation table walks using TTBR0_EL2: Normal memory, Inner Write-Back Read-Allocate Write-Allocate Cacheable
-        - T0SZ = from configureMemoryMap
-    */
-    u64 tcr = TCR_EL2_RSVD | TCR_PS(ps) | TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA | TCR_T0SZ(addrSpaceSize);
-    /*
-        - Attribute 0: Normal memory, Inner and Outer Write-Back Read-Allocate Write-Allocate Non-transient
-        - Attribute 1: Device-nGnRE memory
-        - Other attributes: Device-nGnRnE memory
-    */
-    u64 mair = 0x4FFull;
-    // MMU regs config
-    SET_SYSREG(ttbr0_el2, ttbr0);
-    SET_SYSREG(tcr_el2, tcr);
-    SET_SYSREG(mair_el2, mair);
-    __dsb();
-    __isb();
-    // TLB invalidation
-    __tlb_invalidate_el2();
-    __dsb();
-    __isb();
-    // Enable MMU & enable caching
-    u64 sctlr = GET_SYSREG(sctlr_el2);
-    sctlr |= SCTLR_ELx_I | SCTLR_ELx_C | SCTLR_ELx_M;
-    SET_SYSREG(sctlr_el2, sctlr);
-    __dsb();
-    __isb();
-}
-void configureMemoryMapEnableStage2(void)
-{
-    u32 addrSpaceSize;
-    uintptr_t vttbr = configureStage2MemoryMap(&addrSpaceSize);
+    uintptr_t vttbr = stage2Configure(&addrSpaceSize);
     u32 ps = GET_SYSREG(id_aa64mmfr0_el1) & 0xF;
     /*
@@ -86,15 +42,15 @@ void configureMemoryMapEnableStage2(void)
     __dsb();
     __isb();
-    // TLB invalidation
-    __tlb_invalidate_el1_stage12();
-    __dsb();
-    __isb();
     // Enable stage 2
     u64 hcr = GET_SYSREG(hcr_el2);
     hcr |= HCR_VM;
     SET_SYSREG(hcr_el2, hcr);
     __dsb();
     __isb();
+    // TLB invalidation
+    __tlb_invalidate_el1_stage12_local();
+    __dsb();
+    __isb();
 }


@@ -18,13 +18,12 @@
 #ifdef PLATFORM_TEGRA
-#include "tegra/memory_map.h"
+#include "tegra/stage2_config.h"
 #elif defined(PLATFORM_QEMU)
-#include "qemu/memory_map.h"
+#include "qemu/stage2_config.h"
 #endif
-void configureMemoryMapEnableMmu(void);
-void configureMemoryMapEnableStage2(void);
+void stage2ConfigureAndEnable(void);


@@ -15,9 +15,20 @@
  */
 #include "car.h"
-#include "timers.h"
 #include "../../utils.h"
+static uintptr_t g_carRegs;
+static inline volatile tegra_car_t *car_get_regs(void)
+{
+    return (volatile tegra_car_t *)g_carRegs;
+}
+static inline volatile uint32_t *car_reg_at(uint32_t offset)
+{
+    return (volatile uint32_t *)(g_carRegs + offset);
+}
 static inline uint32_t get_clk_source_reg(CarDevice dev) {
     switch (dev) {
         case CARDEVICE_UARTA: return 0x178;
@@ -93,24 +104,29 @@ static inline uint32_t get_clk_source_div(CarDevice dev) {
 static uint32_t g_clk_reg_offsets[NUM_CAR_BANKS] = {0x010, 0x014, 0x018, 0x360, 0x364, 0x280, 0x298};
 static uint32_t g_rst_reg_offsets[NUM_CAR_BANKS] = {0x004, 0x008, 0x00C, 0x358, 0x35C, 0x28C, 0x2A4};
+void car_set_regs(uintptr_t regs)
+{
+    g_carRegs = regs;
+}
 void clk_enable(CarDevice dev) {
     uint32_t clk_source_reg;
     if ((clk_source_reg = get_clk_source_reg(dev))) {
-        MAKE_CAR_REG(clk_source_reg) = (get_clk_source_val(dev) << 29) | get_clk_source_div(dev);
+        *car_reg_at(clk_source_reg) = (get_clk_source_val(dev) << 29) | get_clk_source_div(dev);
     }
-    MAKE_CAR_REG(g_clk_reg_offsets[dev >> 5]) |= BIT(dev & 0x1F);
+    *car_reg_at(g_clk_reg_offsets[dev >> 5]) |= BIT(dev & 0x1F);
 }
 void clk_disable(CarDevice dev) {
-    MAKE_CAR_REG(g_clk_reg_offsets[dev >> 5]) &= ~(BIT(dev & 0x1F));
+    *car_reg_at(g_clk_reg_offsets[dev >> 5]) &= ~(BIT(dev & 0x1F));
 }
 void rst_enable(CarDevice dev) {
-    MAKE_CAR_REG(g_rst_reg_offsets[dev >> 5]) |= BIT(dev & 0x1F);
+    *car_reg_at(g_rst_reg_offsets[dev >> 5]) |= BIT(dev & 0x1F);
 }
 void rst_disable(CarDevice dev) {
-    MAKE_CAR_REG(g_rst_reg_offsets[dev >> 5]) &= ~(BIT(dev & 0x1F));
+    *car_reg_at(g_rst_reg_offsets[dev >> 5]) &= ~(BIT(dev & 0x1F));
 }
 void clkrst_enable(CarDevice dev) {
@@ -127,10 +143,10 @@ void clkrst_reboot(CarDevice dev) {
     clkrst_disable(dev);
     if (dev == CARDEVICE_KFUSE) {
         /* Workaround for KFUSE clock. */
-        clk_enable(dev);
+        /*clk_enable(dev);
         udelay(100);
         rst_disable(dev);
-        udelay(200);
+        udelay(200);*/
     } else {
         clkrst_enable(dev);
     }


@@ -17,8 +17,7 @@
 #pragma once
 #include "../../utils.h"
-#define CAR_BASE 0x60006000
-#define MAKE_CAR_REG(n) MAKE_REG32(CAR_BASE + n)
+#define MEMORY_MAP_PA_CAR 0x60006000ul
 #define CLK_L_SDMMC1 (1 << 14)
 #define CLK_L_SDMMC2 (1 << 9)
@@ -485,9 +484,7 @@ typedef struct {
     uint32_t sdmmc4_div_clk_shaper_ctrl; /* _SDMMC4_DIV_CLK_SHAPER_CTRL_0, 0x744 */
 } tegra_car_t;
-static inline volatile tegra_car_t *car_get_regs(void) {
-    return (volatile tegra_car_t *)CAR_BASE;
-}
+void car_set_regs(uintptr_t regs);
 void clk_enable(CarDevice dev);
 void clk_disable(CarDevice dev);


@@ -0,0 +1,38 @@
/*
* Copyright (c) 2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "devices.h"
#include "../../memory_map.h"
#include "../../utils.h"
#include "uart.h"
#include "car.h"
#include "gpio.h"
#include "pinmux.h"
void devicesMapAllExtra(void)
{
uartSetRegisterBase(memoryMapPlatformMmio(MEMORY_MAP_PA_UART, 0x1000));
car_set_regs(memoryMapPlatformMmio(MEMORY_MAP_PA_CAR, 0x1000));
gpio_set_regs(memoryMapPlatformMmio(MEMORY_MAP_PA_GPIO, 0x1000));
pinmux_set_regs(memoryMapPlatformMmio(MEMORY_MAP_PA_PINMUX, 0x1000));
// Don't broadcast, since it's only ran once per boot by only one core, before the others are started...
__tlb_invalidate_el2_local();
__dsb();
__isb();
}


@@ -0,0 +1,19 @@
/*
* Copyright (c) 2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
void devicesMapAllExtra(void);


@@ -19,6 +19,13 @@
 #include "gpio.h"
 #include "../../utils.h"
+static uintptr_t g_gpioRegs;
+static inline volatile tegra_gpio_t *gpio_get_regs(void)
+{
+    return (volatile tegra_gpio_t *)g_gpioRegs;
+}
 static volatile tegra_gpio_bank_t *gpio_get_bank(uint32_t pin) {
     volatile tegra_gpio_t *gpio = gpio_get_regs();
     uint32_t bank_number = (pin >> GPIO_BANK_SHIFT);
@@ -67,6 +74,11 @@ static bool gpio_simple_register_get(uint32_t pin, uint32_t offset) {
     return !!(cluster[port] & mask);
 }
+void gpio_set_regs(uintptr_t regs)
+{
+    g_gpioRegs = regs;
+}
 void gpio_configure_mode(uint32_t pin, uint32_t mode) {
     gpio_simple_register_set(pin, mode == GPIO_MODE_GPIO, offsetof(tegra_gpio_bank_t, config));
 }


@@ -17,8 +17,7 @@
 #pragma once
 #include <stdint.h>
-#define GPIO_BASE 0x6000D000
-#define MAKE_GPIO_REG(n) MAKE_REG32(GPIO_BASE + n)
+#define MEMORY_MAP_PA_GPIO 0x6000D000ul
 #define TEGRA_GPIO_PORTS 4
 #define TEGRA_GPIO_BANKS 8
@@ -85,11 +84,6 @@ typedef struct {
     tegra_gpio_bank_t bank[TEGRA_GPIO_BANKS];
 } tegra_gpio_t;
-static inline volatile tegra_gpio_t *gpio_get_regs(void)
-{
-    return (volatile tegra_gpio_t *)GPIO_BASE;
-}
 #define TEGRA_GPIO(port, offset) \
     ((TEGRA_GPIO_PORT_##port * 8) + offset)
@@ -117,6 +111,7 @@ static inline volatile tegra_gpio_t *gpio_get_regs(void)
 #define GPIO_LCD_BL_EN TEGRA_GPIO(V, 1)
 #define GPIO_LCD_BL_RST TEGRA_GPIO(V, 2)
+void gpio_set_regs(uintptr_t regs);
 void gpio_configure_mode(uint32_t pin, uint32_t mode);
 void gpio_configure_direction(uint32_t pin, uint32_t dir);
 void gpio_write(uint32_t pin, uint32_t value);


@@ -18,8 +18,10 @@
 #include "../../gicv2.h"
-// For both guest and host
-#define MAX_NUM_REGISTERED_INTERRUPTS 512
+#define MEMORY_MAP_PA_GICD 0x50041000ull
+#define MEMORY_MAP_PA_GICC 0x50042000ull
+#define MEMORY_MAP_PA_GICH 0x50044000ull
+#define MEMORY_MAP_PA_GICV 0x50046000ull
 #define GIC_IRQID_MAINTENANCE 25
 #define GIC_IRQID_NS_PHYS_HYP_TIMER 26
@@ -37,11 +39,3 @@
 #define GIC_IRQID_UARTB (32 + 37)
 #define GIC_IRQID_UARTC (32 + 46)
 #define GIC_IRQID_UARTD (32 + 90)
-static inline void initGicV2Pointers(ArmGicV2 *gic)
-{
-    gic->gicd = (volatile ArmGicV2Distributor *)0x50041000ull;
-    gic->gicc = (volatile ArmGicV2Controller *)0x50042000ull;
-    gic->gich = (volatile ArmGicV2VirtualInterfaceController *)0x50044000ull;
-    gic->gicv = (volatile ArmGicV2Controller *)0x50046000ull;
-}


@@ -1,36 +0,0 @@
/*
* Copyright (c) 2018-2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "../../utils.h"
#define MISC_BASE 0x70000000ull
#define MAKE_MISC_REG(n) MAKE_REG32(MISC_BASE + n)
#define APB_MISC_SECURE_REGS_APB_SLAVE_SECURITY_ENABLE_REG0_0 MAKE_MISC_REG(0x0C00)
#define APB_MISC_SECURE_REGS_APB_SLAVE_SECURITY_ENABLE_REG1_0 MAKE_MISC_REG(0x0C04)
#define APB_MISC_SECURE_REGS_APB_SLAVE_SECURITY_ENABLE_REG2_0 MAKE_MISC_REG(0x0C08)
#define PINMUX_AUX_GEN1_I2C_SCL_0 MAKE_MISC_REG(0x30BC)
#define PINMUX_AUX_GEN1_I2C_SDA_0 MAKE_MISC_REG(0x30C0)
#define PINMUX_AUX_UARTn_TX_0(n) MAKE_MISC_REG(0x30E4 + 0x10 * (n))
#define PINMUX_AUX_UARTn_RX_0(n) MAKE_MISC_REG(0x30E8 + 0x10 * (n))
#define PINMUX_AUX_UARTn_RTS_0(n) MAKE_MISC_REG(0x30EC + 0x10 * (n))
#define PINMUX_AUX_UARTn_CTS_0(n) MAKE_MISC_REG(0x30F0 + 0x10 * (n))


@@ -0,0 +1,19 @@
/*
* Copyright (c) 2018-2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "pinmux.h"
uintptr_t g_pinmuxRegs;


@@ -18,8 +18,7 @@
#include <stdint.h> #include <stdint.h>
#define PINMUX_BASE 0x70003000 #define MEMORY_MAP_PA_PINMUX 0x70003000ul
#define MAKE_PINMUX_REG(n) MAKE_REG32(PINMUX_BASE + n)
#define PINMUX_TRISTATE (1 << 4) #define PINMUX_TRISTATE (1 << 4)
#define PINMUX_PARKED (1 << 5) #define PINMUX_PARKED (1 << 5)
@@ -204,7 +203,14 @@ typedef struct {
uint32_t pz5; uint32_t pz5;
} tegra_pinmux_t; } tegra_pinmux_t;
extern uintptr_t g_pinmuxRegs;
static inline void pinmux_set_regs(uintptr_t regs)
{
g_pinmuxRegs = regs;
}
static inline volatile tegra_pinmux_t *pinmux_get_regs(void) static inline volatile tegra_pinmux_t *pinmux_get_regs(void)
{ {
return (volatile tegra_pinmux_t *)PINMUX_BASE; return (volatile tegra_pinmux_t *)g_pinmuxRegs;
} }


@@ -14,21 +14,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include "memory_map.h" #include "stage2_config.h"
#include "interrupt_config.h"
#include "../../utils.h" #include "../../utils.h"
#include "../../mmu.h" #include "../../mmu.h"
#include "../../core_ctx.h" #include "../../core_ctx.h"
// Tegra PA size is 36-bit... should we limit ourselves to 34?
// i.e. 14GB of dram max
#define ADDRSPACESZ 36 #define ADDRSPACESZ 36
#define ADDRSPACESZ2 ADDRSPACESZ #define ADDRSPACESZ2 ADDRSPACESZ
static ALIGN(0x1000) u64 g_ttbl[BIT(ADDRSPACESZ - 30)] = {0}; static TEMPORARY ALIGN(0x1000) u64 g_vttbl[BIT(ADDRSPACESZ2 - 30)] = {0};
static ALIGN(0x1000) u64 g_vttbl[BIT(ADDRSPACESZ2 - 30)] = {0};
static TEMPORARY ALIGN(0x1000) u64 g_vttbl_l2_mmio_0[512] = {0}; static TEMPORARY ALIGN(0x1000) u64 g_vttbl_l2_mmio_0[512] = {0};
static TEMPORARY ALIGN(0x1000) u64 g_vttbl_l3_0[512] = {0}; static TEMPORARY ALIGN(0x1000) u64 g_vttbl_l3_0[512] = {0};
static TEMPORARY uintptr_t g_vttblPaddr;
static inline void identityMapL1(u64 *tbl, uintptr_t addr, size_t size, u64 attribs) static inline void identityMapL1(u64 *tbl, uintptr_t addr, size_t size, u64 attribs)
{ {
@@ -45,37 +43,30 @@ static inline void identityMapL3(u64 *tbl, uintptr_t addr, size_t size, u64 attr
mmu_map_block_range(3, tbl, addr, addr, size, attribs | MMU_PTE_BLOCK_INNER_SHAREBLE); mmu_map_block_range(3, tbl, addr, addr, size, attribs | MMU_PTE_BLOCK_INNER_SHAREBLE);
} }
uintptr_t configureMemoryMap(u32 *addrSpaceSize) uintptr_t stage2Configure(u32 *addrSpaceSize)
{
*addrSpaceSize = ADDRSPACESZ;
if (currentCoreCtx->isBootCore && !currentCoreCtx->warmboot) {
identityMapL1(g_ttbl, 0x00000000ull, 2 * BITL(30), ATTRIB_MEMTYPE_DEVICE);
identityMapL1(g_ttbl, 0x80000000ull, (BITL(ADDRSPACESZ - 30) - 2ull) << 30, ATTRIB_MEMTYPE_NORMAL);
}
return (uintptr_t)g_ttbl;
}
uintptr_t configureStage2MemoryMap(u32 *addrSpaceSize)
{ {
*addrSpaceSize = ADDRSPACESZ2; *addrSpaceSize = ADDRSPACESZ2;
static const u64 devattrs = MMU_S2AP_RW | MMU_MEMATTR_DEVICE_NGNRE; static const u64 devattrs = MMU_PTE_BLOCK_XN | MMU_S2AP_RW | MMU_MEMATTR_DEVICE_NGNRE;
static const u64 unchanged = MMU_S2AP_RW | MMU_MEMATTR_NORMAL_CACHEABLE_OR_UNCHANGED; static const u64 unchanged = MMU_S2AP_RW | MMU_MEMATTR_NORMAL_CACHEABLE_OR_UNCHANGED;
if (currentCoreCtx->isBootCore) { if (currentCoreCtx->isBootCore) {
g_vttblPaddr = va2pa(g_vttbl);
uintptr_t *l2pa = (uintptr_t *)va2pa(g_vttbl_l2_mmio_0);
uintptr_t *l3pa = (uintptr_t *)va2pa(g_vttbl_l3_0);
identityMapL1(g_vttbl, 0x00000000ull, BITL(30), unchanged); identityMapL1(g_vttbl, 0x00000000ull, BITL(30), unchanged);
identityMapL1(g_vttbl, 0x80000000ull, (BITL(ADDRSPACESZ2 - 30) - 2ull) << 30, unchanged); identityMapL1(g_vttbl, 0x80000000ull, (BITL(ADDRSPACESZ2 - 30) - 2ull) << 30, unchanged);
mmu_map_table(1, g_vttbl, 0x40000000ull, g_vttbl_l2_mmio_0, 0); mmu_map_table(1, g_vttbl, 0x40000000ull, l2pa, 0);
identityMapL2(g_vttbl_l2_mmio_0, 0x40000000ull, BITL(30), unchanged); identityMapL2(g_vttbl_l2_mmio_0, 0x40000000ull, BITL(30), unchanged);
mmu_map_table(2, g_vttbl_l2_mmio_0, 0x50000000ull, g_vttbl_l3_0, 0); mmu_map_table(2, g_vttbl_l2_mmio_0, 0x50000000ull, l3pa, 0);
identityMapL3(g_vttbl_l3_0, 0x00000000ull, BITL(21), unchanged); identityMapL3(g_vttbl_l3_0, 0x00000000ull, BITL(21), unchanged);
// GICD -> trapped, GICv2 CPU -> vCPU interface, GICH -> trapped (access denied including for the unused view) // GICD -> trapped, GICv2 CPU -> vCPU interface, GICH -> trapped (access denied including for the unused view)
mmu_unmap_page(g_vttbl_l3_0, 0x50041000ull); mmu_unmap_page(g_vttbl_l3_0, MEMORY_MAP_PA_GICD);
mmu_unmap_page(g_vttbl_l3_0, 0x50044000ull); mmu_unmap_page(g_vttbl_l3_0, MEMORY_MAP_PA_GICH);
mmu_unmap_page(g_vttbl_l3_0, 0x50045000ull); mmu_unmap_page(g_vttbl_l3_0, MEMORY_MAP_PA_GICH + 0x1000ull);
mmu_map_page_range(g_vttbl_l3_0, 0x50042000ull, 0x50046000ull, 0x2000ull, devattrs); mmu_map_page_range(g_vttbl_l3_0, MEMORY_MAP_PA_GICC, MEMORY_MAP_PA_GICV, 0x2000ull, devattrs);
} }
return (uintptr_t)g_vttbl; return (uintptr_t)g_vttbl;
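stage2Configure() still returns the EL2 VA of the stage-2 L1 table, but the sub-table descriptors are now built from va2pa() results (l2pa/l3pa above), since translation table walks use physical addresses. A hedged sketch of a caller programming VTTBR_EL2 with the result — everything except stage2Configure, va2pa and SET_SYSREG is an assumption, and interrupts are taken to be masked as they are on the boot path:

    u32 addrSpaceSize;
    uintptr_t vttblVa = stage2Configure(&addrSpaceSize);
    u64 vttbr = (u64)va2pa((const void *)vttblVa);   // VMID left at 0, so the PA is the whole value
    SET_SYSREG(vttbr_el2, vttbr);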


@@ -18,6 +18,5 @@
#include "../../types.h" #include "../../types.h"
uintptr_t configureMemoryMap(u32 *addrSpaceSize); uintptr_t stage2Configure(u32 *addrSpaceSize);
uintptr_t configureStage2MemoryMap(u32 *addrSpaceSize);


@@ -1,95 +0,0 @@
/*
* Copyright (c) 2018-2019 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "../../utils.h"
#define TIMERS_BASE 0x60005000
#define MAKE_TIMERS_REG(n) MAKE_REG32(TIMERS_BASE + n)
#define TIMERUS_CNTR_1US_0 MAKE_TIMERS_REG(0x10)
#define TIMERUS_USEC_CFG_0 MAKE_TIMERS_REG(0x14)
#define SHARED_INTR_STATUS_0 MAKE_TIMERS_REG(0x1A0)
#define SHARED_TIMER_SECURE_CFG_0 MAKE_TIMERS_REG(0x1A4)
#define RTC_BASE 0x7000E000
#define MAKE_RTC_REG(n) MAKE_REG32(RTC_BASE + n)
#define RTC_SECONDS MAKE_RTC_REG(0x08)
#define RTC_SHADOW_SECONDS MAKE_RTC_REG(0x0C)
#define RTC_MILLI_SECONDS MAKE_RTC_REG(0x10)
typedef struct {
uint32_t CONFIG;
uint32_t STATUS;
uint32_t COMMAND;
uint32_t PATTERN;
} watchdog_timers_t;
#define GET_WDT(n) ((volatile watchdog_timers_t *)(TIMERS_BASE + 0x100 + 0x20 * n))
#define WDT_REBOOT_PATTERN 0xC45A
#define GET_WDT_REBOOT_CFG_REG(n) MAKE_REG32(TIMERS_BASE + 0x60 + 0x8 * n)
void wait(uint32_t microseconds);
static inline uint32_t get_time_s(void) {
return RTC_SECONDS;
}
static inline uint32_t get_time_ms(void) {
return (RTC_MILLI_SECONDS | (RTC_SHADOW_SECONDS << 10));
}
static inline uint32_t get_time_us(void) {
return TIMERUS_CNTR_1US_0;
}
/**
* Returns the time in microseconds.
*/
static inline uint32_t get_time(void) {
return get_time_us();
}
/**
* Returns the number of microseconds that have passed since a given get_time().
*/
static inline uint32_t get_time_since(uint32_t base) {
return get_time_us() - base;
}
/**
* Delays for a given number of microseconds.
*/
static inline void udelay(uint32_t usecs) {
uint32_t start = get_time_us();
while (get_time_us() - start < usecs);
}
/**
* Delays until a number of usecs have passed since an absolute start time.
*/
static inline void udelay_absolute(uint32_t start, uint32_t usecs) {
while (get_time_us() - start < usecs);
}
/**
* Delays for a given number of milliseconds.
*/
static inline void mdelay(uint32_t msecs) {
uint32_t start = get_time_ms();
while (get_time_ms() - start < msecs);
}


@@ -16,28 +16,28 @@
*/ */
#include "uart.h" #include "uart.h"
#include "timers.h"
#include "pinmux.h" #include "pinmux.h"
#include "gpio.h" #include "gpio.h"
#include "car.h" #include "car.h"
#include "../../irq.h" #include "../../irq.h"
#include "../../timer.h"
#define UART_BASE 0x70006000 static uintptr_t g_uartRegBase;
static inline volatile tegra_uart_t *uartGetRegisters(UartDevice dev) static inline volatile tegra_uart_t *uartGetRegisters(UartDevice dev)
{ {
static const size_t offsets[] = { 0, 0x40, 0x200, 0x300, 0x400 }; static const size_t offsets[] = { 0, 0x40, 0x200, 0x300, 0x400 };
return (volatile tegra_uart_t *)(UART_BASE + offsets[dev]); return (volatile tegra_uart_t *)(g_uartRegBase + offsets[dev]);
} }
static inline void uartWaitCycles(u32 baud, u32 num) static inline void uartWaitCycles(u32 baud, u32 num)
{ {
udelay((num * 1000000 + 16 * baud - 1) / (16 * baud)); timerWaitUsecs((num * 1000000 + 16 * baud - 1) / (16 * baud));
} }
static inline void uartWaitSyms(u32 baud, u32 num) static inline void uartWaitSyms(u32 baud, u32 num)
{ {
udelay((num * 1000000 + baud - 1) / baud); timerWaitUsecs((num * 1000000 + baud - 1) / baud);
} }
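As a worked example of the new waits: at 115200 baud, uartWaitSyms(115200, 3) computes (3 * 1000000 + 115199) / 115200 = 27 µs (rounded up from ~26.04 µs of symbol time), which timerWaitUsecs() now realizes with the generic timer instead of the Tegra µs counter.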
static void uartSetPinmuxConfig(UartDevice dev) { static void uartSetPinmuxConfig(UartDevice dev) {
@@ -96,7 +96,7 @@ static void uartReset(UartDevice dev)
// This function blocks until the UART device is in the desired state. // This function blocks until the UART device is in the desired state.
void uartWaitIdle(UartDevice dev, UartVendorStatus status) static void uartWaitIdle(UartDevice dev, UartVendorStatus status)
{ {
volatile tegra_uart_t *uart = uartGetRegisters(dev); volatile tegra_uart_t *uart = uartGetRegisters(dev);
@@ -109,6 +109,11 @@ void uartWaitIdle(UartDevice dev, UartVendorStatus status)
} }
} }
void uartSetRegisterBase(uintptr_t regBase)
{
g_uartRegBase = regBase;
}
void uartInit(UartDevice dev, u32 baud, u32 flags) void uartInit(UartDevice dev, u32 baud, u32 flags)
{ {
volatile tegra_uart_t *uart = uartGetRegisters(dev); volatile tegra_uart_t *uart = uartGetRegisters(dev);


@@ -20,6 +20,8 @@
#include "../../utils.h" #include "../../utils.h"
#include "interrupt_config.h" #include "interrupt_config.h"
#define MEMORY_MAP_PA_UART 0x70006000ul
/* UART devices */ /* UART devices */
typedef enum UartDevice { typedef enum UartDevice {
UART_A = 0, UART_A = 0,
@@ -191,6 +193,7 @@ typedef struct {
u32 asr; u32 asr;
} tegra_uart_t; } tegra_uart_t;
void uartSetRegisterBase(uintptr_t regBase);
void uartInit(UartDevice dev, u32 baud, u32 flags); void uartInit(UartDevice dev, u32 baud, u32 flags);
void uartWriteData(UartDevice dev, const void *buffer, size_t size); void uartWriteData(UartDevice dev, const void *buffer, size_t size);
void uartReadData(UartDevice dev, void *buffer, size_t size); void uartReadData(UartDevice dev, void *buffer, size_t size);


@@ -2,14 +2,12 @@
#include "smc.h" #include "smc.h"
#include "core_ctx.h" #include "core_ctx.h"
#include "caches.h" #include "caches.h"
#include "memory_map.h"
// Currently in exception_vectors.s: // Currently in exception_vectors.s:
extern const u32 doSmcIndirectCallImpl[]; extern const u32 doSmcIndirectCallImpl[];
extern const u32 doSmcIndirectCallImplSmcInstructionOffset, doSmcIndirectCallImplSize; extern const u32 doSmcIndirectCallImplSmcInstructionOffset, doSmcIndirectCallImplSize;
// start.s
void start2(u64 contextId);
void doSmcIndirectCall(ExceptionStackFrame *frame, u32 smcId) void doSmcIndirectCall(ExceptionStackFrame *frame, u32 smcId)
{ {
u32 codebuf[doSmcIndirectCallImplSize / 4]; // note: potential VLA u32 codebuf[doSmcIndirectCallImplSize / 4]; // note: potential VLA
@@ -29,7 +27,7 @@ static void doCpuOnHook(ExceptionStackFrame *frame, u32 smcId)
// frame->x[3] is contextId // frame->x[3] is contextId
if (cpuId < 4) { if (cpuId < 4) {
g_coreCtxs[cpuId].kernelEntrypoint = ep; g_coreCtxs[cpuId].kernelEntrypoint = ep;
frame->x[2] = (uintptr_t)start2; frame->x[2] = g_loadImageLayout.startPa + 4;
} }
} }


@@ -32,16 +32,13 @@ g_initialKernelEntrypoint:
start: start:
mov x19, #1 mov x19, #1
b _startCommon b _startCommon
.global start2
.type start2, %function
start2: start2:
mov x19, xzr mov x19, xzr
_startCommon: _startCommon:
// Disable interrupts, select sp_el2 // Disable interrupts, select sp_el0 before mmu is enabled
msr daifset, 0b1111
msr spsel, #1
mrs x20, cntpct_el0 mrs x20, cntpct_el0
msr daifset, 0b1111
msr spsel, #0
// Set sctlr_el2 ASAP to disable mmu/caching if not already done. // Set sctlr_el2 ASAP to disable mmu/caching if not already done.
mov x1, #0x0838 mov x1, #0x0838
@@ -52,25 +49,39 @@ _startCommon:
bl cacheClearLocalDataCacheOnBoot bl cacheClearLocalDataCacheOnBoot
cbz x19, 1f cbz x19, 1f
// "Boot core only" stuff:
bl cacheClearSharedDataCachesOnBoot bl cacheClearSharedDataCachesOnBoot
// Temporarily use temp end region as stack, then create the translation table
// The stack top is also equal to the mmu table address...
adr x0, g_loadImageLayout
ldp x2, x3, [x0, #0x18]
add x1, x2, x3
mov sp, x1
bl memoryMapSetupMmu
1: 1:
// Enable MMU, note that the function is not allowed to use any stack
adr x0, g_loadImageLayout
ldr x18, =_postMmuEnableReturnAddr
bl memoryMapEnableMmu
// This is where we will land on exception return after enabling the MMU:
_postMmuEnableReturnAddr:
// Select sp_el2
msr spsel, #1
// Get core ID // Get core ID
// Ensure Aff0 is 4-1 at most (4 cores), and that Aff1, 2 and 3 are 0 (1 cluster only) mrs x8, mpidr_el1
mrs x0, mpidr_el1 and x8, x8, #0xFF
tst x0, #(0xFF << 32)
bne .
and x0, x0, #0x00FFFFFF // Aff0 to 2
cmp x0, #4
bhs .
// Set stack pointer mov w0, w8
adrp x8, __stacks_top__ bl memoryMapGetStackTop
lsl x9, x0, #10 mov sp, x0
sub sp, x8, x9
// Set up x18, other sysregs, BSS, MMU, etc. // Set up x18, other sysregs, BSS, etc.
// Don't call init array to save space? // Don't call init array to save space?
mov w0, w8
mov w1, w19 mov w1, w19
bl initSystem bl initSystem
@@ -92,3 +103,26 @@ _startCommon:
b _restoreAllRegisters b _restoreAllRegisters
.pool .pool
/*
typedef struct LoadImageLayout {
uintptr_t startPa;
size_t imageSize; // "image" includes "real" BSS but not tempbss
size_t maxImageSize;
uintptr_t tempPa;
size_t maxTempSize;
size_t tempSize;
uintptr_t vbar;
} LoadImageLayout;
*/
.global g_loadImageLayout
g_loadImageLayout:
.quad __start_pa__
.quad __max_image_size__
.quad __image_size__
.quad __temp_pa__
.quad __max_temp_size__
.quad __temp_size__
.quad __vectors_start__
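The new boot path calls three C helpers that are not part of this excerpt; their shapes can be inferred from the assembly above, so the prototypes below are assumptions rather than code from this commit:

    // Both are called with x0 = &g_loadImageLayout. The first runs on a temporary
    // stack placed at the end of the temp region; the second must not use the
    // stack at all and "returns" to the address preloaded into x18.
    void memoryMapSetupMmu(const LoadImageLayout *layout);
    void memoryMapEnableMmu(const LoadImageLayout *layout);

    // Called with the core ID once the MMU is on; the result becomes sp_el2.
    uintptr_t memoryMapGetStackTop(u32 coreId);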


@@ -16,6 +16,7 @@
#include "timer.h" #include "timer.h"
#include "irq.h" #include "irq.h"
#include "exceptions.h"
u64 g_timerFreq = 0; u64 g_timerFreq = 0;
@@ -29,10 +30,17 @@ void timerInit(void)
void timerInterruptHandler(void) void timerInterruptHandler(void)
{ {
// Disable timer programming until reprogrammed // Mask the timer interrupt until reprogrammed
timerConfigure(false, false); timerConfigure(false, false);
}
// For fun
DEBUG("EL2 [core %d]: Timer interrupt at %lums\n", (int)currentCoreCtx->coreId, timerGetSystemTimeMs()); void timerWaitUsecs(u64 us)
timerSetTimeoutMs(1000); {
exceptionEnterInterruptibleHypervisorCode();
u64 mask = unmaskIrq();
timerSetTimeoutUs(us);
do {
__wfi();
} while (!timerGetInterruptStatus());
restoreInterruptFlags(mask);
} }
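timerWaitUsecs() replaces the old Tegra µs-counter busy loops: it briefly unmasks IRQs, arms the generic timer, and WFIs until ISTATUS is set. A hedged compatibility sketch for any remaining udelay() callers — the wrapper itself is hypothetical; uart.c above simply calls timerWaitUsecs() directly:

    static inline void udelay(u32 usecs)
    {
        timerWaitUsecs(usecs);   // generic-timer CVAL + WFI instead of polling TIMERUS_CNTR_1US_0
    }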


@@ -21,6 +21,7 @@
#include "platform/interrupt_config.h" #include "platform/interrupt_config.h"
#define SECTONSECS 1000000000ull #define SECTONSECS 1000000000ull
#define SECTOUSECS 1000000ull
#define SECTOMSECS 1000ull #define SECTOMSECS 1000ull
// All generic timers possibly defined in the Arm architecture: // All generic timers possibly defined in the Arm architecture:
@@ -61,6 +62,11 @@ static inline u64 timerGetSystemTick(void)
return GET_SYSREG(TIMER_COUNTER_REG(CURRENT_TIMER)); return GET_SYSREG(TIMER_COUNTER_REG(CURRENT_TIMER));
} }
static inline bool timerGetInterruptStatus(void)
{
return (GET_SYSREG(TIMER_CTL_REG(CURRENT_TIMER)) & TIMER_CTL_ISTATUS) != 0;
}
static inline u64 timerGetSystemTimeNs(void) static inline u64 timerGetSystemTimeNs(void)
{ {
return timerGetSystemTick() * SECTONSECS / g_timerFreq; return timerGetSystemTick() * SECTONSECS / g_timerFreq;
@@ -80,7 +86,6 @@ static inline void timerConfigure(bool enabled, bool interruptMasked)
static inline void timerSetTimeoutTicks(u64 ticks) static inline void timerSetTimeoutTicks(u64 ticks)
{ {
timerConfigure(true, true);
SET_SYSREG(TIMER_CVAL_REG(CURRENT_TIMER), timerGetSystemTick() + ticks); SET_SYSREG(TIMER_CVAL_REG(CURRENT_TIMER), timerGetSystemTick() + ticks);
timerConfigure(true, false); timerConfigure(true, false);
} }
@@ -94,3 +99,10 @@ static inline void timerSetTimeoutMs(u64 ms)
{ {
timerSetTimeoutTicks(ms * g_timerFreq / SECTOMSECS); timerSetTimeoutTicks(ms * g_timerFreq / SECTOMSECS);
} }
static inline void timerSetTimeoutUs(u64 us)
{
timerSetTimeoutTicks(us * g_timerFreq / SECTOUSECS);
}
void timerWaitUsecs(u64 us);
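As a worked example of the new conversion helpers: the Tegra X1 system counter typically runs at 19.2 MHz, so with g_timerFreq == 19200000, timerSetTimeoutUs(100) sets CVAL to the current tick plus 100 * 19200000 / 1000000 = 1920 ticks (the frequency figure is background knowledge, not taken from this commit).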


@@ -62,7 +62,7 @@ bool writeEl1Memory(uintptr_t addr, const void *src, size_t size)
memcpy((void *)pa, src, size); memcpy((void *)pa, src, size);
cacheHandleSelfModifyingCodePoU((const void *)pa, size); cacheHandleSelfModifyingCodePoU((const void *)pa, size);
__tlb_invalidate_el1_stage12(); //FIXME FIXME FIXME __tlb_invalidate_el1_stage12_local(); //FIXME FIXME FIXME
__dsb_sy(); __dsb_sy();
__isb(); __isb();


@@ -62,6 +62,11 @@ typedef enum ReadWriteDirection {
DIRECTION_READWRITE = DIRECTION_READ | DIRECTION_WRITE, DIRECTION_READWRITE = DIRECTION_READ | DIRECTION_WRITE,
} ReadWriteDirection; } ReadWriteDirection;
static inline void __wfi(void)
{
__asm__ __volatile__ ("wfi" ::: "memory");
}
static inline void __wfe(void) static inline void __wfe(void)
{ {
__asm__ __volatile__ ("wfe" ::: "memory"); __asm__ __volatile__ ("wfe" ::: "memory");
@@ -111,24 +116,40 @@
} }
static inline void __tlb_invalidate_el2(void) static inline void __tlb_invalidate_el2(void)
{
__asm__ __volatile__ ("tlbi alle2is" ::: "memory");
}
static inline void __tlb_invalidate_el2_local(void)
{ {
__asm__ __volatile__ ("tlbi alle2" ::: "memory"); __asm__ __volatile__ ("tlbi alle2" ::: "memory");
} }
static inline void __tlb_invalidate_el1_stage12(void) static inline void __tlb_invalidate_el1_stage12_local(void)
{ {
__asm__ __volatile__ ("tlbi alle1" ::: "memory"); __asm__ __volatile__ ("tlbi alle1" ::: "memory");
} }
bool overlaps(u64 as, u64 ae, u64 bs, u64 be); bool overlaps(u64 as, u64 ae, u64 bs, u64 be);
static inline uintptr_t get_physical_address_el1_stage12(bool *valid, const uintptr_t el1_vaddr) { // Assumes addr is valid, must be called with interrupts masked
static inline uintptr_t va2pa(const void *el2_vaddr) {
// NOTE: interrupt must be disabled when calling this func
// For debug purposes only
uintptr_t PAR;
uintptr_t va = (uintptr_t)el2_vaddr;
__asm__ __volatile__ ("at s1e2r, %0" :: "r"(va));
__asm__ __volatile__ ("mrs %0, par_el1" : "=r"(PAR));
return (PAR & MASK2L(47, 12)) | (va & MASKL(12));
}
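va2pa() pairs an AT S1E2R with a PAR_EL1 read, so the two must not be separated by an interrupt that could itself use AT/PAR. A hedged usage sketch — maskIrq() is an assumed counterpart of the unmaskIrq()/restoreInterruptFlags() helpers seen in timer.c above:

    static u8 someEl2Object[64];

    u64 flags = maskIrq();                    // assumed helper; keeps the AT/PAR pair atomic w.r.t. IRQs
    uintptr_t pa = va2pa(someEl2Object);
    restoreInterruptFlags(flags);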
static inline uintptr_t get_physical_address_el1_stage12(bool *valid, uintptr_t el1_vaddr) {
// NOTE: interrupt must be disabled when calling this func // NOTE: interrupt must be disabled when calling this func
uintptr_t PAR; uintptr_t PAR;
__asm__ __volatile__ ("at s12e1r, %0" :: "r"(el1_vaddr)); // note: we don't care whether it's writable in EL1&0 translation regime __asm__ __volatile__ ("at s12e1r, %0" :: "r"(el1_vaddr)); // note: we don't care whether it's writable in EL1&0 translation regime
__asm__ __volatile__ ("mrs %0, par_el1" : "=r"(PAR)); __asm__ __volatile__ ("mrs %0, par_el1" : "=r"(PAR));
*valid = (PAR & 1) == 0ull; *valid = (PAR & 1) == 0ull;
return (PAR & 1) ? 0ull : (PAR & MASK2L(40, 12)) | ((uintptr_t)el1_vaddr & MASKL(12)); return (PAR & 1) ? 0ull : (PAR & MASK2L(47, 12)) | ((uintptr_t)el1_vaddr & MASKL(12));
} }
bool readEl1Memory(void *dst, uintptr_t addr, size_t size); bool readEl1Memory(void *dst, uintptr_t addr, size_t size);


@@ -138,7 +138,7 @@ void vgicDebugPrintLrList(void)
DEBUG("core %u lr [", currentCoreCtx->coreId); DEBUG("core %u lr [", currentCoreCtx->coreId);
for (u32 i = 0; i < g_irqManager.numListRegisters; i++) { for (u32 i = 0; i < g_irqManager.numListRegisters; i++) {
if (g_vgicUsedLrMap[currentCoreCtx->coreId] & BITL(i)) { if (g_vgicUsedLrMap[currentCoreCtx->coreId] & BITL(i)) {
DEBUG("%u,", g_irqManager.gic.gich->lr[i].virtualId); DEBUG("%u,", gich->lr[i].virtualId);
} else { } else {
DEBUG("-,"); DEBUG("-,");
} }
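These vgic.c hunks read bare gicd/gich rather than going through g_irqManager.gic each time, and the local gich variables in vgicUpdateState()/vgicMaintenanceInterruptHandler() are dropped. The shortcuts themselves are outside this excerpt; they are presumably file-scope aliases along these lines (declaration form and initialization point are assumptions):

    static volatile ArmGicV2Distributor *gicd;                   // set from g_irqManager.gic.gicd at init
    static volatile ArmGicV2VirtualInterfaceController *gich;    // set from g_irqManager.gic.gich at init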
@@ -328,7 +328,7 @@ static inline u32 vgicGetDistributorTypeRegister(void)
{ {
// See above comment. // See above comment.
// Therefore, LSPI = 0, SecurityExtn = 0, rest = from physical distributor // Therefore, LSPI = 0, SecurityExtn = 0, rest = from physical distributor
return g_irqManager.gic.gicd->typer & 0x7F; return gicd->typer & 0x7F;
} }
static inline u32 vgicGetDistributorImplementerIdentificationRegister(void) static inline u32 vgicGetDistributorImplementerIdentificationRegister(void)
@@ -356,7 +356,7 @@ static void vgicSetInterruptEnabledState(u16 id)
} }
state->enabled = true; state->enabled = true;
g_irqManager.gic.gicd->isenabler[id / 32] = BIT(id % 32); gicd->isenabler[id / 32] = BIT(id % 32);
} }
static void vgicClearInterruptEnabledState(u16 id) static void vgicClearInterruptEnabledState(u16 id)
@@ -376,7 +376,7 @@ static void vgicClearInterruptEnabledState(u16 id)
} }
state->enabled = false; state->enabled = false;
g_irqManager.gic.gicd->icenabler[id / 32] = BIT(id % 32); gicd->icenabler[id / 32] = BIT(id % 32);
} }
static inline bool vgicGetInterruptEnabledState(u16 id) static inline bool vgicGetInterruptEnabledState(u16 id)
@@ -397,7 +397,7 @@ static void vgicSetInterruptPriorityByte(u16 id, u8 priority)
if (id >= 16) { if (id >= 16) {
// Ensure we have the correct priority on the physical distributor... // Ensure we have the correct priority on the physical distributor...
g_irqManager.gic.gicd->ipriorityr[id] = IRQ_PRIORITY_GUEST << g_irqManager.priorityShift; gicd->ipriorityr[id] = IRQ_PRIORITY_GUEST << g_irqManager.priorityShift;
} }
VirqState *state = vgicGetVirqState(currentCoreCtx->coreId, id); VirqState *state = vgicGetVirqState(currentCoreCtx->coreId, id);
@@ -442,7 +442,7 @@ static void vgicSetInterruptTargets(u16 id, u8 coreList)
} }
state->targetList = coreList; state->targetList = coreList;
g_irqManager.gic.gicd->itargetsr[id] = state->targetList; gicd->itargetsr[id] = state->targetList;
} }
static inline u8 vgicGetInterruptTargets(u16 id) static inline u8 vgicGetInterruptTargets(u16 id)
@@ -464,10 +464,10 @@ static inline void vgicSetInterruptConfigBits(u16 id, u32 config)
bool newLvl = ((config & 2) << IRQ_CFGR_SHIFT(id)) == 0; bool newLvl = ((config & 2) << IRQ_CFGR_SHIFT(id)) == 0;
if (state->levelSensitive != newLvl) { if (state->levelSensitive != newLvl) {
u32 cfg = g_irqManager.gic.gicd->icfgr[id / 16]; u32 cfg = gicd->icfgr[id / 16];
cfg &= ~(3 << IRQ_CFGR_SHIFT(id)); cfg &= ~(3 << IRQ_CFGR_SHIFT(id));
cfg |= (!newLvl ? 3 : 1) << IRQ_CFGR_SHIFT(id); cfg |= (!newLvl ? 3 : 1) << IRQ_CFGR_SHIFT(id);
g_irqManager.gic.gicd->icfgr[id / 16] = cfg; gicd->icfgr[id / 16] = cfg;
state->levelSensitive = newLvl; state->levelSensitive = newLvl;
} }
@@ -530,7 +530,7 @@ static void handleVgicMmioWrite(ExceptionStackFrame *frame, DataAbortIss dabtIss
{ {
size_t sz = BITL(dabtIss.sas); size_t sz = BITL(dabtIss.sas);
u32 val = (u32)(readFrameRegisterZ(frame, dabtIss.srt) & MASKL(8 * sz)); u32 val = (u32)(readFrameRegisterZ(frame, dabtIss.srt) & MASKL(8 * sz));
uintptr_t addr = (uintptr_t)g_irqManager.gic.gicd + offset; uintptr_t addr = (uintptr_t)gicd + offset;
//DEBUG("gicd write off 0x%03llx sz %lx val %x w%d\n", offset, sz, val, (int)dabtIss.srt); //DEBUG("gicd write off 0x%03llx sz %lx val %x w%d\n", offset, sz, val, (int)dabtIss.srt);
@@ -615,7 +615,7 @@ static void handleVgicMmioWrite(ExceptionStackFrame *frame, DataAbortIss dabtIss
static void handleVgicMmioRead(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t offset) static void handleVgicMmioRead(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t offset)
{ {
size_t sz = BITL(dabtIss.sas); size_t sz = BITL(dabtIss.sas);
uintptr_t addr = (uintptr_t)g_irqManager.gic.gicd + offset; uintptr_t addr = (uintptr_t)gicd + offset;
u32 val = 0; u32 val = 0;
@@ -720,9 +720,9 @@ static void vgicCleanupPendingList(void)
// Note: we can't touch PPIs for other cores... but each core will call this function anyway. // Note: we can't touch PPIs for other cores... but each core will call this function anyway.
if (id >= 32 || coreId == currentCoreCtx->coreId) { if (id >= 32 || coreId == currentCoreCtx->coreId) {
u32 mask = g_irqManager.gic.gicd->ispendr[id / 32] & BIT(id % 32); u32 mask = gicd->ispendr[id / 32] & BIT(id % 32);
if (mask == 0) { if (mask == 0) {
g_irqManager.gic.gicd->icactiver[id / 32] = BIT(id % 32); gicd->icactiver[id / 32] = BIT(id % 32);
pending = false; pending = false;
} else { } else {
pending = true; pending = true;
@@ -774,7 +774,7 @@ static void vgicChoosePendingInterrupts(size_t *outNumChosen, VirqState *chosen[
static inline u64 vgicGetElrsrRegister(void) static inline u64 vgicGetElrsrRegister(void)
{ {
return (u64)g_irqManager.gic.gich->elsr0 | (((u64)g_irqManager.gic.gich->elsr1) << 32); return (u64)gich->elsr0 | (((u64)gich->elsr1) << 32);
} }
static inline bool vgicIsListRegisterAvailable(u32 id) static inline bool vgicIsListRegisterAvailable(u32 id)
@@ -794,7 +794,7 @@ static inline volatile ArmGicV2ListRegister *vgicAllocateListRegister(void)
return NULL; return NULL;
} else { } else {
g_vgicUsedLrMap[currentCoreCtx->coreId] |= BITL(ff - 1); g_vgicUsedLrMap[currentCoreCtx->coreId] |= BITL(ff - 1);
return &g_irqManager.gic.gich->lr[ff - 1]; return &gich->lr[ff - 1];
} }
} }
@@ -906,7 +906,6 @@ static bool vgicUpdateListRegister(volatile ArmGicV2ListRegister *lr)
void vgicUpdateState(void) void vgicUpdateState(void)
{ {
volatile ArmGicV2VirtualInterfaceController *gich = g_irqManager.gic.gich;
u32 coreId = currentCoreCtx->coreId; u32 coreId = currentCoreCtx->coreId;
// First, put back inactive interrupts into the queue, handle some SGI stuff // First, put back inactive interrupts into the queue, handle some SGI stuff
@@ -944,14 +943,17 @@ void vgicUpdateState(void)
void vgicMaintenanceInterruptHandler(void) void vgicMaintenanceInterruptHandler(void)
{ {
volatile ArmGicV2VirtualInterfaceController *gich = g_irqManager.gic.gich; ArmGicV2MaintenanceIntStatRegister misr = gich->misr;
ArmGicV2MaintenanceIntStatRegister misr = g_irqManager.gic.gich->misr;
// Force GICV_CTRL to behave like ns-GICC_CTLR, with group 1 being replaced by group 0 // Force GICV_CTRL to behave like ns-GICC_CTLR, with group 1 being replaced by group 0
// Ensure we aren't spammed by maintenance interrupts, either. // Ensure we aren't spammed by maintenance interrupts, either.
if (misr.vgrp0e || misr.vgrp0d || misr.vgrp1e || misr.vgrp1d) { if (misr.vgrp0e || misr.vgrp0d || misr.vgrp1e || misr.vgrp1d) {
g_irqManager.gic.gicv->ctlr &= BIT(9) | BIT(0); ArmGicV2VmControlRegister vmcr = gich->vmcr;
vmcr.cbpr = 0;
vmcr.fiqEn = 0;
vmcr.ackCtl = 0;
vmcr.enableGrp1 = 0;
gich->vmcr = vmcr;
} }
if (misr.vgrp0e) { if (misr.vgrp0e) {
@@ -990,7 +992,7 @@ void vgicMaintenanceInterruptHandler(void)
void handleVgicdMmio(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t offset) void handleVgicdMmio(ExceptionStackFrame *frame, DataAbortIss dabtIss, size_t offset)
{ {
size_t sz = BITL(dabtIss.sas); size_t sz = BITL(dabtIss.sas);
uintptr_t addr = (uintptr_t)g_irqManager.gic.gicd + offset; uintptr_t addr = (uintptr_t)gicd + offset;
bool oops = true; bool oops = true;
// ipriorityr, itargetsr, *pendsgir are byte-accessible // ipriorityr, itargetsr, *pendsgir are byte-accessible
@@ -1056,7 +1058,7 @@ void vgicInit(void)
if (j < 16) { if (j < 16) {
state->enabled = true; state->enabled = true;
} else { } else {
state->levelSensitive = (g_irqManager.gic.gicd->icfgr[j / 16] & (2 << IRQ_CFGR_SHIFT(j % 16))) == 0; state->levelSensitive = (gicd->icfgr[j / 16] & (2 << IRQ_CFGR_SHIFT(j % 16))) == 0;
} }
} }
} }
@@ -1073,5 +1075,5 @@ void vgicInit(void)
.lrenpie = true, .lrenpie = true,
.en = true, .en = true,
}; };
g_irqManager.gic.gich->hcr = hcr; gich->hcr = hcr;
} }