Add spsr_el3 to cpu_context

TuxSH 2018-03-11 12:53:52 +01:00
parent f624cccd0d
commit 52f7adfc50
5 changed files with 27 additions and 16 deletions

@@ -177,12 +177,12 @@ void setup_4x_mmio(void) {
/* TODO */
}
#define SET_SYSREG(reg, val) do { temp_reg = val; __asm__ __volatile__ ("msr " #reg ", %0" :: "r"(temp_reg) : "memory"); } while(false)
void setup_current_core_state(void) {
uint64_t temp_reg;
/* Setup system registers. */
SET_SYSREG(spsr_el3, 0b1111 << 6 | 0b1001); /* use EL2h+DAIF set initially, may be overwritten later. Not in official code */
SET_SYSREG(actlr_el3, 0x73ull);
SET_SYSREG(actlr_el2, 0x73ull);
SET_SYSREG(hcr_el2, 0x80000000ull);
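For reference, the spsr_el3 value written above combines the four DAIF mask bits with the AArch64 mode field. A decoded sketch follows; the macro names are illustrative and not part of the commit.
/* Illustrative breakdown of (0b1111 << 6 | 0b1001) -- names not from the commit. */
#define SPSR_MODE_EL2H  0b1001u         /* M[3:0] = 0b1001: AArch64 EL2 using SP_EL2 (EL2h) */
#define SPSR_DAIF_ALL   (0b1111u << 6)  /* D, A, I, F mask bits occupy SPSR bits 9:6 */
/* SPSR_DAIF_ALL | SPSR_MODE_EL2H == 0x3C9, the value passed to SET_SYSREG above. */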

@@ -21,7 +21,7 @@
#define RESTORE_WP_REG(i, _) RESTORE_SYSREG64(DBGBVR##i##_EL1); RESTORE_SYSREG64(DBGBCR##i##_EL1);
/* start.s */
void __attribute__((noreturn)) __jump_to_lower_el(uint64_t arg, uintptr_t ep, unsigned int el);
void __attribute__((noreturn)) __jump_to_lower_el(uint64_t arg, uintptr_t ep, uint32_t spsr);
/* See notes in start.s */
critical_section_t g_boot_critical_section = {{{.ticket_number = 1}}};
@@ -51,14 +51,15 @@ void __attribute__((noreturn)) core_jump_to_lower_el(void) {
uintptr_t ep;
uint64_t arg;
unsigned int core_id = get_core_id();
uint32_t spsr = get_spsr();
use_core_entrypoint_and_argument(core_id, &ep, &arg);
critical_section_leave(&g_boot_critical_section);
flush_dcache_range(&g_boot_critical_section, (uint8_t *)&g_boot_critical_section + sizeof(g_boot_critical_section)); /* already does a dsb sy */
__sev();
/* Nintendo jumps to EL1, we jump to EL2. Both are supported with all current packages2. */
__jump_to_lower_el(arg, ep, 2);
/* Nintendo hardcodes EL1, but we can boot fine using other EL1/EL2 modes as well */
__jump_to_lower_el(arg, ep, 0b1111 << 6 | (spsr & 0b1101)); /* only keep EL, SPSel, set DAIF */
}
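The expression passed to __jump_to_lower_el above can be read as follows; the helper below is a hypothetical restatement of the same bit manipulation, not code from the commit.
/* Hypothetical restatement of the spsr argument built above. */
static inline uint32_t make_lower_el_spsr(uint32_t current_spsr) {
    /* 0b1101 keeps M[3:2] (target EL) and M[0] (SPSel), and forces M[1], which is reserved, to 0. */
    uint32_t el_and_spsel = current_spsr & 0b1101;
    /* Set all four DAIF mask bits (SPSR bits 9:6) so the lower EL starts with interrupts masked. */
    return (0b1111u << 6) | el_and_spsel;
}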
uint32_t cpu_on(uint32_t core, uintptr_t entrypoint_addr, uint64_t argument) {
@@ -66,7 +67,7 @@ uint32_t cpu_on(uint32_t core, uintptr_t entrypoint_addr, uint64_t argument) {
if (core >= NUM_CPU_CORES) {
return 0xFFFFFFFE;
}
/* Is core already on? */
if (g_cpu_contexts[core].is_active) {
return 0xFFFFFFFC;
@@ -76,11 +77,11 @@ uint32_t cpu_on(uint32_t core, uintptr_t entrypoint_addr, uint64_t argument) {
const uint32_t status_masks[NUM_CPU_CORES] = {0x4000, 0x200, 0x400, 0x800};
const uint32_t toggle_vals[NUM_CPU_CORES] = {0xE, 0x9, 0xA, 0xB};
/* Check if we're already in the correct state. */
if ((APBDEV_PMC_PWRGATE_STATUS_0 & status_masks[core]) != status_masks[core]) {
uint32_t counter = 5001;
/* Poll the start bit until 0 */
while (APBDEV_PMC_PWRGATE_TOGGLE_0 & 0x100) {
wait(1);
@@ -89,10 +90,10 @@ uint32_t cpu_on(uint32_t core, uintptr_t entrypoint_addr, uint64_t argument) {
return 0;
}
}
/* Program PWRGATE_TOGGLE with the START bit set to 1, selecting CE[N] */
APBDEV_PMC_PWRGATE_TOGGLE_0 = toggle_vals[core] | 0x100;
/* Poll until we're in the correct state. */
counter = 5001;
while (counter > 0) {
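Condensed, the power-up sequence described by the comments in this hunk is: wait for any in-flight PWRGATE operation, program PWRGATE_TOGGLE with START plus the core's partition id, then poll PWRGATE_STATUS. A rough sketch using the register macros and wait() from the diff (not the exact upstream control flow):
/* Rough sketch of the poll / program / poll sequence above; error handling simplified. */
static bool power_up_cpu_partition(uint32_t toggle_val, uint32_t status_mask) {
    /* Wait for any pending PWRGATE operation (START bit, 0x100) to clear. */
    for (uint32_t i = 0; i < 5001 && (APBDEV_PMC_PWRGATE_TOGGLE_0 & 0x100); i++) {
        wait(1);
    }
    /* Program PWRGATE_TOGGLE: partition id with the START bit set. */
    APBDEV_PMC_PWRGATE_TOGGLE_0 = toggle_val | 0x100;
    /* Poll PWRGATE_STATUS until the partition reports powered on, with a timeout. */
    for (uint32_t i = 0; i < 5001; i++) {
        if ((APBDEV_PMC_PWRGATE_STATUS_0 & status_mask) == status_mask) {
            return true;
        }
        wait(1);
    }
    return false;
}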
@@ -148,6 +149,7 @@ void save_current_core_context(void) {
SAVE_SYSREG32(SDER32_EL3);
SAVE_SYSREG32(MDCR_EL2);
SAVE_SYSREG32(MDCR_EL3);
SAVE_SYSREG32(SPSR_EL3);
EVAL(REPEAT(6, SAVE_BP_REG, ~));
EVAL(REPEAT(4, SAVE_WP_REG, ~));
@@ -170,6 +172,7 @@ void restore_current_core_context(void) {
RESTORE_SYSREG32(SDER32_EL3);
RESTORE_SYSREG32(MDCR_EL2);
RESTORE_SYSREG32(MDCR_EL3);
RESTORE_SYSREG32(SPSR_EL3);
EVAL(REPEAT(6, RESTORE_BP_REG, ~));
EVAL(REPEAT(4, RESTORE_WP_REG, ~));
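The SAVE_SYSREG32/RESTORE_SYSREG32 macros themselves are not shown in this excerpt. Assuming they mirror the SET_SYSREG pattern above and store into the current core's cpu_context (the g_current_context pointer below is hypothetical), they plausibly look like:
/* Plausible shape of the macros used above; g_current_context is a hypothetical
   pointer to the current core's cpu_context and is not shown in this diff. */
#define SAVE_SYSREG32(reg)    do { uint64_t t; __asm__ __volatile__ ("mrs %0, " #reg : "=r"(t)); g_current_context->reg = (uint32_t)t; } while (false)
#define RESTORE_SYSREG32(reg) do { uint64_t t = g_current_context->reg; __asm__ __volatile__ ("msr " #reg ", %0" :: "r"(t) : "memory"); } while (false)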

@@ -23,6 +23,7 @@ typedef struct {
uint32_t SDER32_EL3;
uint32_t MDCR_EL2;
uint32_t MDCR_EL3;
uint32_t SPSR_EL3; /* not in official code */
uint64_t DBGBVR0_EL1;
uint64_t DBGBCR0_EL1;
uint64_t DBGBVR1_EL1;

@@ -108,7 +108,7 @@
/* FWIW this function doesn't use a stack atm, with latest GCC, but that might change. */
bl get_warmboot_crt0_stack_address_critsec_enter
mov sp, x0
/* PA(__main_start__) = __warmboot_crt0_start__ + 0x800 (refer to the linker script) */
ldr x0, =g_boot_critical_section
ldr x1, =__main_start__
@@ -210,12 +210,11 @@ __set_exception_entry_stack_pointer:
.global __jump_to_lower_el
.type __jump_to_lower_el, %function
__jump_to_lower_el:
/* x0: arg (context ID), x1: entrypoint, w2: exception level */
msr elr_el3, x1
/* x0: arg (context ID), x1: entrypoint, w2: spsr */
mov w2, w2
mov w1, #(0b1111 << 6 | 1) /* DAIF set and SP = SP_ELx*/
orr w1, w1, w2, lsl#2
msr spsr_el3, x1
msr elr_el3, x1
msr spsr_el3, x2
bl __set_exception_entry_stack_pointer

@@ -21,6 +21,8 @@
#define ALINLINE __attribute__((always_inline))
#define SET_SYSREG(reg, val) do { temp_reg = (val); __asm__ __volatile__ ("msr " #reg ", %0" :: "r"(temp_reg) : "memory"); } while(false)
/* Custom stuff below */
/* For warmboot (and coldboot crt0) */
@@ -110,6 +112,12 @@ static inline uint64_t get_debug_authentication_status(void) {
return debug_auth;
}
static inline uint32_t get_spsr(void) {
uint32_t spsr;
__asm__ __volatile__ ("mrs %0, spsr_el3" : "=r"(spsr));
return spsr;
}
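get_spsr() is the read-side counterpart of SET_SYSREG(spsr_el3, ...) used elsewhere in this commit; a small illustrative round trip (not from the commit):
/* Illustrative round trip: write EL2h + DAIF masked, then read it back. */
uint64_t temp_reg;
SET_SYSREG(spsr_el3, 0b1111 << 6 | 0b1001);  /* EL2h, DAIF masked (0x3C9) */
uint32_t spsr = get_spsr();                  /* reads back the bits just written */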
static inline bool check_32bit_additive_overflow(uint32_t a, uint32_t b) {
return __builtin_add_overflow_p(a, b, (uint32_t)0);
}