From adc496b6a7d109b82dab5efa9f1e5ab6853fd48a Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 2 Aug 2018 21:33:55 -0700
Subject: [PATCH] Exosphere: Change physical segment maps depending on
 firmware version

---
 exosphere/src/bootup.c        |  7 ++++-
 exosphere/src/coldboot_init.c | 48 +++++++++++++++++++++++++++--------
 exosphere/src/memory_map.h    | 11 ++++++++
 exosphere/src/start.s         |  6 ++++-
 exosphere/src/warmboot_init.c | 34 ++++++++++++++++++-------
 5 files changed, 84 insertions(+), 22 deletions(-)

diff --git a/exosphere/src/bootup.c b/exosphere/src/bootup.c
index 57e18367b..90f590841 100644
--- a/exosphere/src/bootup.c
+++ b/exosphere/src/bootup.c
@@ -147,7 +147,12 @@ void bootup_misc_mmio(void) {
     (void)(MAKE_MC_REG(0x014));
 
     /* Clear RESET Vector, setup CPU Secure Boot RESET Vectors. */
-    uint32_t reset_vec = TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_WARMBOOT_CRT0_AND_MAIN);
+    uint32_t reset_vec;
+    if (exosphere_get_target_firmware() >= EXOSPHERE_TARGET_FIRMWARE_500) {
+        reset_vec = TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGMENT_ID_WARMBOOT_CRT0_AND_MAIN);
+    } else {
+        reset_vec = TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_WARMBOOT_CRT0_AND_MAIN);
+    }
     EVP_CPU_RESET_VECTOR_0 = 0;
     SB_AA64_RESET_LOW_0 = reset_vec | 1;
     SB_AA64_RESET_HIGH_0 = 0;
diff --git a/exosphere/src/coldboot_init.c b/exosphere/src/coldboot_init.c
index 5cc2d43e5..65952539e 100644
--- a/exosphere/src/coldboot_init.c
+++ b/exosphere/src/coldboot_init.c
@@ -18,7 +18,8 @@ extern const uint8_t __start_cold[];
 /* warboot_init.c */
 extern unsigned int g_exosphere_target_firmware_for_init;
 void init_dma_controllers(unsigned int target_firmware);
-void set_memory_registers_enable_mmu(void);
+void set_memory_registers_enable_mmu_1x_ttbr0(void);
+void set_memory_registers_enable_mmu_5x_ttbr0(void);
 
 static void identity_map_all_mappings(uintptr_t *mmu_l1_tbl, uintptr_t *mmu_l3_tbl) {
     static const uintptr_t addrs[] = { TUPLE_FOLD_LEFT_0(EVAL(IDENTIY_MAPPING_ID_MAX), _MMAPID, COMMA) };
@@ -65,22 +66,34 @@ static void warmboot_map_all_ram_segments(uintptr_t *mmu_l3_tbl) {
     }
 }
 
-static void tzram_map_all_segments(uintptr_t *mmu_l3_tbl) {
+static void tzram_map_all_segments(uintptr_t *mmu_l3_tbl, unsigned int target_firmware) {
     static const uintptr_t offs[] = { TUPLE_FOLD_LEFT_0(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZS, COMMA) };
     static const size_t sizes[] = { TUPLE_FOLD_LEFT_1(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZS, COMMA) };
     static const size_t increments[] = { TUPLE_FOLD_LEFT_2(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZS, COMMA) };
     static const bool is_executable[] = { TUPLE_FOLD_LEFT_3(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZS, COMMA) };
+    static const uintptr_t offs_5x[] = { TUPLE_FOLD_LEFT_0(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZ5XS, COMMA) };
+
     for(size_t i = 0, offset = 0; i < TZRAM_SEGMENT_ID_MAX; i++) {
-        tzram_map_segment(mmu_l3_tbl, TZRAM_SEGMENT_BASE + offset, 0x7C010000ull + offs[i], sizes[i], is_executable[i]);
+        uintptr_t off = (target_firmware < EXOSPHERE_TARGET_FIRMWARE_500) ? offs[i] : offs_5x[i];
+        tzram_map_segment(mmu_l3_tbl, TZRAM_SEGMENT_BASE + offset, 0x7C010000ull + off, sizes[i], is_executable[i]);
         offset += increments[i];
     }
 }
 
-static void configure_ttbls(void) {
-    uintptr_t *mmu_l1_tbl = (uintptr_t *)(TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
-    uintptr_t *mmu_l2_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
-    uintptr_t *mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+static void configure_ttbls(unsigned int target_firmware) {
+    uintptr_t *mmu_l1_tbl;
+    uintptr_t *mmu_l2_tbl;
+    uintptr_t *mmu_l3_tbl;
+    if (target_firmware < EXOSPHERE_TARGET_FIRMWARE_500) {
+        mmu_l1_tbl = (uintptr_t *)(TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
+        mmu_l2_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
+        mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    } else {
+        mmu_l1_tbl = (uintptr_t *)(TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
+        mmu_l2_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
+        mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    }
 
     mmu_init_table(mmu_l1_tbl, 64); /* 33-bit address space */
     mmu_init_table(mmu_l2_tbl, 4096);
@@ -101,7 +114,7 @@ static void configure_ttbls(void) {
     mmio_map_all_devices(mmu_l3_tbl);
     lp0_entry_map_all_ram_segments(mmu_l3_tbl);
     warmboot_map_all_ram_segments(mmu_l3_tbl);
-    tzram_map_all_segments(mmu_l3_tbl);
+    tzram_map_all_segments(mmu_l3_tbl, target_firmware);
 }
 
 static void do_relocation(const coldboot_crt0_reloc_list_t *reloc_list, size_t index) {
@@ -117,8 +130,17 @@ static void do_relocation(const coldboot_crt0_reloc_list_t *reloc_list, size_t i
     }
 }
 
+uintptr_t get_coldboot_crt0_temp_stack_address(void) {
+    return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x800;
+}
+
 uintptr_t get_coldboot_crt0_stack_address(void) {
-    return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x800;
+    if (exosphere_get_target_firmware_for_init() < EXOSPHERE_TARGET_FIRMWARE_500) {
+        return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x800;
+    } else {
+        return TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x800;
+    }
+
 }
 
 void coldboot_init(coldboot_crt0_reloc_list_t *reloc_list, uintptr_t start_cold) {
@@ -154,8 +176,12 @@ void coldboot_init(coldboot_crt0_reloc_list_t *reloc_list, uintptr_t start_cold)
     /* TZRAM accesses should work normally after this point. */
     init_dma_controllers(g_exosphere_target_firmware_for_init);
 
-    configure_ttbls();
-    set_memory_registers_enable_mmu();
+    configure_ttbls(g_exosphere_target_firmware_for_init);
+    if (g_exosphere_target_firmware_for_init < EXOSPHERE_TARGET_FIRMWARE_500) {
+        set_memory_registers_enable_mmu_1x_ttbr0();
+    } else {
+        set_memory_registers_enable_mmu_5x_ttbr0();
+    }
 
     /* Copy or clear the remaining sections */
     for(size_t i = 0; i < reloc_list->nb_relocs_post_mmu_init; i++) {
diff --git a/exosphere/src/memory_map.h b/exosphere/src/memory_map.h
index 723fc758a..41a4b8f6b 100644
--- a/exosphere/src/memory_map.h
+++ b/exosphere/src/memory_map.h
@@ -53,6 +53,16 @@
 #define _MMAPTZS6 ( 0x1000ull, 0x1000ull, 0x02000ull, false ) /* L2 translation table */
 #define _MMAPTZS7 ( 0x2000ull, 0x1000ull, 0x02000ull, false ) /* L3 translation table */
 
+/* TZRAM segments for 5.0.0+. (offset). */
+#define _MMAPTZ5XS0 ( 0x3000ull ) /* Warmboot crt0 sections and main code segment */
+#define _MMAPTZ5XS1 ( 0ull ) /* pk2ldr segment */
+#define _MMAPTZ5XS2 ( 0ull ) /* SPL .bss buffer, NOT mapped at startup */
+#define _MMAPTZ5XS3 ( 0ull ) /* Core 0,1,2 stack */
+#define _MMAPTZ5XS4 ( 0x1000ull ) /* Core 3 stack */
+#define _MMAPTZ5XS5 ( 0x2000ull ) /* Secure Monitor exception vectors, some init stacks */
+#define _MMAPTZ5XS6 ( 0x10000 - 0x2000ull ) /* L2 translation table */
+#define _MMAPTZ5XS7 ( 0x10000 - 0x1000ull ) /* L3 translation table */
+
 #define MMIO_BASE 0x1F0080000ull
 #define LP0_ENTRY_RAM_SEGMENT_BASE (MMIO_BASE + 0x000100000ull)
 #define WARMBOOT_RAM_SEGMENT_BASE (LP0_ENTRY_RAM_SEGMENT_BASE + 0x000047000ull) /* increment seems to be arbitrary ? */
@@ -129,6 +139,7 @@
 #define WARMBOOT_GET_RAM_SEGMENT_ATTRIBS(segment_id) (TUPLE_ELEM_2(CAT(_MMAPWBS, EVAL(segment_id))))
 
 #define TZRAM_GET_SEGMENT_PA(segment_id) (0x7C010000ull + (TUPLE_ELEM_0(CAT(_MMAPTZS, EVAL(segment_id)))))
+#define TZRAM_GET_SEGMENT_5X_PA(segment_id) (0x7C010000ull + (TUPLE_ELEM_0(CAT(_MMAPTZ5XS, EVAL(segment_id)))))
 #define TZRAM_GET_SEGMENT_ADDRESS(segment_id) (TUPLE_FOLD_LEFT_2(EVAL(segment_id), _MMAPTZS, PLUS) EVAL(TZRAM_SEGMENT_BASE))
 #define TZRAM_GET_SEGMENT_SIZE(segment_id) (TUPLE_ELEM_1(CAT(_MMAPTZS, EVAL(segment_id))))
 #define TZRAM_IS_SEGMENT_EXECUTABLE(segment_id) (TUPLE_ELEM_3(CAT(_MMAPTZS, EVAL(segment_id))))
diff --git a/exosphere/src/start.s b/exosphere/src/start.s
index 7730bda95..38eb4ce34 100644
--- a/exosphere/src/start.s
+++ b/exosphere/src/start.s
@@ -101,8 +101,11 @@ __start_cold:
     br x16
 
 _post_cold_crt0_reloc:
-
+    /* Setup stack for coldboot crt0. */
     msr spsel, #0
+    bl get_coldboot_crt0_temp_stack_address
+    mov sp, x0
+    mov fp, #0
     bl get_coldboot_crt0_stack_address
     mov sp, x0
     mov fp, #0
@@ -128,6 +131,7 @@ _post_cold_crt0_reloc:
     ldr x1, =0x80010000
     /* Set size in coldboot relocation list. */
     str x21, [x0, #0x8]
+
     bl coldboot_init
 
     ldr x16, =__jump_to_main_cold
diff --git a/exosphere/src/warmboot_init.c b/exosphere/src/warmboot_init.c
index 1dd7d1445..91897f3ca 100644
--- a/exosphere/src/warmboot_init.c
+++ b/exosphere/src/warmboot_init.c
@@ -8,6 +8,9 @@
 #undef MC_BASE
 #define MC_BASE (MMIO_GET_DEVICE_PA(MMIO_DEVID_MC))
 
+#define WARMBOOT_GET_TZRAM_SEGMENT_PA(x) ((g_exosphere_target_firmware_for_init < EXOSPHERE_TARGET_FIRMWARE_500) \
+    ? TZRAM_GET_SEGMENT_PA(x) : TZRAM_GET_SEGMENT_5X_PA(x))
+
 /* start.s */
 void __set_memory_registers(uintptr_t ttbr0, uintptr_t vbar, uint64_t cpuectlr, uint32_t scr,
                             uint32_t tcr, uint32_t cptr, uint64_t mair, uint32_t sctlr);
@@ -15,16 +18,16 @@ void __set_memory_registers(uintptr_t ttbr0, uintptr_t vbar, uint64_t cpuectlr,
 unsigned int g_exosphere_target_firmware_for_init = 0;
 
 uintptr_t get_warmboot_crt0_stack_address(void) {
-    return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE012_STACK) + 0x800;
+    return WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE012_STACK) + 0x800;
 }
 
 uintptr_t get_warmboot_crt0_stack_address_critsec_enter(void) {
     unsigned int core_id = get_core_id();
     if (core_id) {
-        return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x1000;
+        return WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x1000;
     } else {
-        return TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x80 * (core_id + 1);
+        return WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x80 * (core_id + 1);
     }
 }
@@ -89,9 +92,8 @@ void init_dma_controllers(unsigned int target_firmware) {
     }
 }
 
-void set_memory_registers_enable_mmu(void) {
+void _set_memory_registers_enable_mmu(const uintptr_t ttbr0) {
     static const uintptr_t vbar = TZRAM_GET_SEGMENT_ADDRESS(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800;
-    static const uintptr_t ttbr0 = TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64;
 
     /*
         - Disable table walk descriptor access prefetch.
@@ -142,12 +144,22 @@ void set_memory_registers_enable_mmu(void) {
     __set_memory_registers(ttbr0, vbar, cpuectlr, scr, tcr, cptr, mair, sctlr);
 }
 
+void set_memory_registers_enable_mmu_1x_ttbr0(void) {
+    static const uintptr_t ttbr0 = TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64;
+    _set_memory_registers_enable_mmu(ttbr0);
+}
+
+void set_memory_registers_enable_mmu_5x_ttbr0(void) {
+    static const uintptr_t ttbr0 = TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64;
+    _set_memory_registers_enable_mmu(ttbr0);
+}
+
 #if 0
 /* Since we decided not to identity-unmap TZRAM */
 static void identity_remap_tzram(void) {
     /* See also: configure_ttbls (in coldboot_init.c). */
-    uintptr_t *mmu_l1_tbl = (uintptr_t *)(TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
-    uintptr_t *mmu_l2_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
-    uintptr_t *mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    uintptr_t *mmu_l1_tbl = (uintptr_t *)(WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
+    uintptr_t *mmu_l2_tbl = (uintptr_t *)WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
+    uintptr_t *mmu_l3_tbl = (uintptr_t *)WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
 
     mmu_map_table(1, mmu_l1_tbl, 0x40000000, mmu_l2_tbl, 0);
     mmu_map_table(2, mmu_l2_tbl, 0x7C000000, mmu_l3_tbl, 0);
@@ -176,5 +188,9 @@ void warmboot_init(void) {
     /*identity_remap_tzram();*/
     /* Nintendo pointlessly fully invalidate the TLB & invalidate the data cache on the modified ranges here */
 
-    set_memory_registers_enable_mmu();
+    if (g_exosphere_target_firmware_for_init < EXOSPHERE_TARGET_FIRMWARE_500) {
+        set_memory_registers_enable_mmu_1x_ttbr0();
+    } else {
+        set_memory_registers_enable_mmu_5x_ttbr0();
+    }
 }