Write a significant chunk of coldboot init...

...merge start.cold.s and start.warm.s, other changes, etc.
TuxSH 2018-02-25 03:34:15 +01:00
parent be67169390
commit a00e68e94a
14 changed files with 394 additions and 291 deletions


@ -8,9 +8,9 @@ tlb_invalidate_all:
isb
ret
.section .text.tlb_invalidate_inner_shareable, "ax", %progbits
.type tlb_invalidate_inner_shareable, %function
.global tlb_invalidate_inner_shareable
.section .text.tlb_invalidate_all_inner_shareable, "ax", %progbits
.type tlb_invalidate_all_inner_shareable, %function
.global tlb_invalidate_all_inner_shareable
tlb_invalidate_all_inner_shareable:
dsb ish
tlbi alle3is

exosphere/coldboot_init.c (new file, 85 lines)

@ -0,0 +1,85 @@
#include "utils.h"
#include "mmu.h"
#include "memory_map.h"
extern void (*__preinit_array_start[])(void);
extern void (*__preinit_array_end[])(void);
extern void (*__init_array_start[])(void);
extern void (*__init_array_end[])(void);
extern void _init(void);
extern uint8_t __warmboot_crt0_start__[], __warmboot_crt0_end__[], __warmboot_crt0_lma__[];
extern uint8_t __main_start__[], __main_end__[], __main_lma__[];
extern uint8_t __pk2ldr_start__[], __pk2ldr_end__[], __pk2ldr_lma__[];
extern uint8_t __vectors_start__[], __vectors_end__[], __vectors_lma__[];
extern void flush_dcache_all_tzram_pa(void);
extern void invalidate_icache_all_tzram_pa(void);
uintptr_t get_coldboot_crt0_stack_address(void);
static void configure_ttbls(void) {
uintptr_t *mmu_l1_tbl = (uintptr_t *)(tzram_get_segment_pa(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
uintptr_t *mmu_l2_tbl = (uintptr_t *)tzram_get_segment_pa(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
uintptr_t *mmu_l3_tbl = (uintptr_t *)tzram_get_segment_pa(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
mmu_init_table(mmu_l1_tbl, 64); /* 33-bit address space */
mmu_init_table(mmu_l2_tbl, 4096);
/*
Nintendo uses the same L3 table for everything, but they make sure
nothing clashes.
*/
mmu_init_table(mmu_l3_tbl, 4096);
mmu_map_table(1, mmu_l1_tbl, 0x40000000, mmu_l2_tbl, 0);
mmu_map_table(1, mmu_l1_tbl, 0x1C0000000, mmu_l2_tbl, 0);
mmu_map_table(2, mmu_l2_tbl, 0x40000000, mmu_l3_tbl, 0);
mmu_map_table(2, mmu_l2_tbl, 0x7C000000, mmu_l3_tbl, 0);
mmu_map_table(2, mmu_l2_tbl, 0x1F0000000ull, mmu_l3_tbl, 0);
identity_map_all_mappings(mmu_l1_tbl, mmu_l3_tbl);
mmio_map_all_devices(mmu_l3_tbl);
lp0_map_all_plaintext_ram_segments(mmu_l3_tbl);
lp0_map_all_ciphertext_ram_segments(mmu_l3_tbl);
tzram_map_all_segments(mmu_l3_tbl);
}
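
A quick sanity check on the sizes passed to mmu_init_table above — a sketch built only from the literals in this function, not from real mmu.h constants: 64 bytes of L1 descriptors is 8 entries, each covering 1 GiB with a 4 KiB granule, so the L1 table spans exactly 2^33 bytes (hence the "33-bit address space" comment), while the 4096-byte L2/L3 tables hold 512 descriptors each.

/* Sketch: arithmetic behind the table sizes in configure_ttbls(). Constants are
   the literals used above; none of these macro names exist in the real headers. */
#define DESCRIPTOR_SIZE    8u             /* one AArch64 translation table descriptor */
#define L1_TABLE_BYTES     64u            /* mmu_init_table(mmu_l1_tbl, 64) */
#define L2_L3_TABLE_BYTES  4096u          /* mmu_init_table(mmu_l2_tbl / mmu_l3_tbl, 4096) */
#define L1_ENTRY_SPAN      (1ull << 30)   /* 1 GiB per L1 entry at a 4 KiB granule */
_Static_assert(L1_TABLE_BYTES / DESCRIPTOR_SIZE == 8, "8 L1 entries");
_Static_assert((L1_TABLE_BYTES / DESCRIPTOR_SIZE) * L1_ENTRY_SPAN == (1ull << 33),
               "8 entries x 1 GiB = 2^33 bytes, i.e. a 33-bit address space");
_Static_assert(L2_L3_TABLE_BYTES / DESCRIPTOR_SIZE == 512, "512 entries per L2/L3 table");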
static void copy_lma_to_vma(unsigned int segment_id, void *lma, size_t size, bool vma_is_pa) {
uintptr_t vma = vma_is_pa ? tzram_get_segment_pa(segment_id) : tzram_get_segment_address(segment_id);
uintptr_t vma_offset = (uintptr_t)lma & 0xFFF;
uint64_t *p_vma = (uint64_t *)vma;
uint64_t *p_lma = (uint64_t *)lma;
for (size_t i = 0; i < size / 8; i++) {
p_vma[vma_offset / 8 + i] = p_lma[i];
}
}
static void __libc_init_array(void) {
for (size_t i = 0; i < __preinit_array_end - __preinit_array_start; i++)
__preinit_array_start[i]();
_init(); /* FIXME: do we have this gcc-provided symbol if we build with -nostartfiles? */
for (size_t i = 0; i < __init_array_end - __init_array_start; i++)
__init_array_start[i]();
}
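
One possible answer to the FIXME above — a hedged sketch, not what this commit does: declare _init as a weak symbol so the reference resolves to NULL when crti.o/crtn.o are not linked in (as with -nostartfiles), and only call it when it is actually present.

/* Sketch: tolerate a missing _init when linking with -nostartfiles.
   Relies only on standard GCC weak-symbol semantics. */
extern void _init(void) __attribute__((weak));

static void call_init_if_present(void) {
    if (_init) {   /* weak symbol resolves to 0 if no crti.o/crtn.o provided it */
        _init();
    }
}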
uintptr_t get_coldboot_crt0_stack_address(void) {
return tzram_get_segment_pa(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x800;
}
void coldboot_init(void) {
/* TODO: Set NX BOOTLOADER clock time field */
copy_lma_to_vma(TZRAM_SEGMENT_ID_WARMBOOT_CRT0_AND_MAIN, __warmboot_crt0_lma__, __warmboot_crt0_end__ - __warmboot_crt0_start__, true);
/* TODO: set some mmio regs, etc. */
/* TODO: initialize DMA controllers */
configure_ttbls();
copy_lma_to_vma(TZRAM_SEGMENT_ID_WARMBOOT_CRT0_AND_MAIN, __main_lma__, __main_end__ - __main_start__, false);
copy_lma_to_vma(TZRAM_SEGMENT_ID_PK2LDR, __pk2ldr_lma__, __pk2ldr_end__ - __pk2ldr_start__, false);
copy_lma_to_vma(TZRAM_SEGEMENT_ID_SECMON_EVT, __vectors_lma__, __vectors_end__ - __vectors_start__, false);
/* TODO: set the MMU regs & tlbi & enable MMU */
flush_dcache_all_tzram_pa();
invalidate_icache_all_tzram_pa();
/* TODO: zero-initialize the cpu context */
/* Nintendo clears pk2ldr's (empty) BSS section, but we embed it 0-filled in the binary */
__libc_init_array(); /* construct global objects */
}

exosphere/coldboot_main.c (new file, 22 lines)

@ -0,0 +1,22 @@
#include "utils.h"
#include "mmu.h"
#include "memory_map.h"
extern void (*__fini_array_start[])(void);
extern void (*__fini_array_end[])(void);
extern void _fini(void);
extern void __jump_to_lower_el(uint64_t arg, uintptr_t ep, unsigned int el);
void coldboot_main(void);
/* Needs to be called for EL3->EL3 chainloading (and only in that case). TODO: use it */
static void __libc_fini_array(void) __attribute__((used)) {
for (size_t i = __fini_array_end - __fini_array_start; i > 0; i--)
__fini_array_start[i - 1]();
_fini(); /* FIXME: do we have this gcc-provided symbol if we build with -nostartfiles? */
}
void coldboot_main(void) {
/* TODO */
}


@ -1,6 +1,6 @@
#include <stdint.h>
#include "mmu.h"
#include "memory_map.h"
#include "mc.h"
volatile security_carveout_t *get_carveout_by_id(unsigned int carveout) {


@ -46,8 +46,8 @@ static const struct {
uintptr_t pa;
size_t size;
uint64_t attributes;
} g_lp0_plaintext_ram_segments[] = {
{ 0x40020000, 0x10000, MMU_PTE_TABLE_NS | ATTRIB_MEMTYPE_DEVICE }, /* TZRAM decrypted by warmboot.bin */
} g_lp0_entry_ram_segments[] = {
{ 0x40020000, 0x10000, MMU_PTE_TABLE_NS | ATTRIB_MEMTYPE_DEVICE }, /* Encrypted TZRAM */
{ 0x40003000, 0x01000, MMU_PTE_TABLE_NS | ATTRIB_MEMTYPE_DEVICE }, /* LP0 entry code */
{ 0x7C010000, 0x10000, MMU_AP_PRIV_RO | ATTRIB_MEMTYPE_NORMAL }, /* TZRAM to encrypt */
};
@ -56,9 +56,9 @@ static const struct {
uintptr_t pa;
size_t size;
uint64_t attributes;
} g_lp0_ciphertext_ram_segments[] = {
{ 0x8000F000, 0x01000, MMU_PTE_TABLE_NS | ATTRIB_MEMTYPE_DEVICE }, /* Encrypted SE state */
{ 0x80010000, 0x10000, MMU_PTE_TABLE_NS | ATTRIB_MEMTYPE_DEVICE }, /* Encrypted TZRAM */
} g_warmboot_ram_segments[] = {
{ 0x8000F000, 0x01000, MMU_PTE_TABLE_NS | ATTRIB_MEMTYPE_DEVICE }, /* Encrypted SE state for bootROM */
{ 0x80010000, 0x10000, MMU_PTE_TABLE_NS | ATTRIB_MEMTYPE_DEVICE }, /* Encrypted TZRAM for warmboot.bin */
};
static const struct {
@ -78,8 +78,8 @@ static const struct {
};
#define MMIO_BASE 0x1F0080000ull
#define LP0_PLAINTEXT_RAM_SEGMENT_BASE (MMIO_BASE + 0x000100000)
#define LP0_CIPHERTEXT_RAM_SEGMENT_BASE (LP0_PLAINTEXT_RAM_SEGMENT_BASE + 0x000047000) /* increment seems to be arbitrary ? */
#define LP0_ENTRY_RAM_SEGMENT_BASE (MMIO_BASE + 0x000100000)
#define WARMBOOT_RAM_SEGMENT_BASE (LP0_ENTRY_RAM_SEGMENT_BASE + 0x000047000) /* increment seems to be arbitrary? */
#define TZRAM_SEGMENT_BASE (MMIO_BASE + 0x0001E0000)
#define MMIO_DEVID_GICD 0
@ -101,12 +101,12 @@ static const struct {
#define MMIO_DEVID_DTV_I2C234 16
#define MMIO_DEVID_EXCEPTION_VECTORS 17
#define LP0_PLAINTEXT_RAM_SEGMENT_ID_DECRYPTED_TZRAM 0
#define LP0_PLAINTEXT_RAM_SEGMENT_ID_LP0_ENTRY_CODE 1
#define LP0_PLAINTEXT_RAM_SEGMENT_ID_CURRENT_TZRAM 2
#define LP0_ENTRY_RAM_SEGMENT_ID_DECRYPTED_TZRAM 0
#define LP0_ENTRY_RAM_SEGMENT_ID_LP0_ENTRY_CODE 1
#define LP0_ENTRY_RAM_SEGMENT_ID_CURRENT_TZRAM 2
#define LP0_CIPHERTEXT_RAM_SEGMENT_ID_SE_STATE 0
#define LP0_CIPHERTEXT_RAM_SEGMENT_ID_TZRAM 1
#define WARMBOOT_RAM_SEGMENT_ID_SE_STATE 0
#define WARMBOOT_RAM_SEGMENT_ID_TZRAM 1
#define TZRAM_SEGMENT_ID_WARMBOOT_CRT0_MAIN_CODE 0
#define TZRAM_SEGMENT_ID_PK2LDR 1
@ -119,7 +119,7 @@ static const struct {
/**********************************************************************************************/
static inline uintptr_t identity_map_all(uintptr_t *mmu_l1_tbl, uintptr_t *mmu_l3_tbl) {
static inline uintptr_t identity_map_all_mappings(uintptr_t *mmu_l1_tbl, uintptr_t *mmu_l3_tbl) {
static uint64_t base_attributes = MMU_PTE_BLOCK_INNER_SHAREBLE | ATTRIB_MEMTYPE_NORMAL;
for(size_t i = 0; i < sizeof(g_identity_mappings) / sizeof(g_identity_mappings[0]); i++) {
uint64_t attributes = base_attributes | g_identity_mappings[i].attributes;
@ -134,7 +134,7 @@ static inline uintptr_t identity_map_all(uintptr_t *mmu_l1_tbl, uintptr_t *mmu_l
}
}
static inline uintptr_t identity_unmap_all(uintptr_t *mmu_l1_tbl, uintptr_t *mmu_l3_tbl) {
static inline uintptr_t identity_unmap_all_mappings(uintptr_t *mmu_l1_tbl, uintptr_t *mmu_l3_tbl) {
for(size_t i = 0; i < sizeof(g_identity_mappings) / sizeof(g_identity_mappings[0]); i++) {
if(g_identity_mappings[i].is_block_range) {
mmu_unmap_block_range(mmu_l1_tbl, g_identity_mappings[i].address, g_identity_mappings[i].size);
@ -193,7 +193,7 @@ static inline void mmio_unmap_all_devices(uintptr_t *mmu_l3_tbl) {
/**********************************************************************************************/
static inline uintptr_t lp0_get_plaintext_ram_segment_pa(unsigned int segment_id) {
return g_lp0_plaintext_ram_segments[segment_id].pa;
return g_lp0_entry_ram_segments[segment_id].pa;
}
#ifndef MEMORY_MAP_USE_IDENTIY_MAPPING
@ -207,17 +207,17 @@ static inline uintptr_t lp0_get_plaintext_ram_segment_address(unsigned int segme
#endif
static inline void lp0_map_all_plaintext_ram_segments(uintptr_t *mmu_l3_tbl) {
for(size_t i = 0, offset = 0; i < sizeof(g_lp0_plaintext_ram_segments) / sizeof(g_lp0_plaintext_ram_segments[0]); i++) {
uint64_t attributes = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | g_lp0_plaintext_ram_segments[i].attributes;
mmu_map_page_range(mmu_l3_tbl, LP0_PLAINTEXT_RAM_SEGMENT_BASE + offset, g_lp0_plaintext_ram_segments[i].pa,
g_lp0_plaintext_ram_segments[i].size, attributes);
for(size_t i = 0, offset = 0; i < sizeof(g_lp0_entry_ram_segments) / sizeof(g_lp0_entry_ram_segments[0]); i++) {
uint64_t attributes = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | g_lp0_entry_ram_segments[i].attributes;
mmu_map_page_range(mmu_l3_tbl, LP0_PLAINTEXT_RAM_SEGMENT_BASE + offset, g_lp0_entry_ram_segments[i].pa,
g_lp0_entry_ram_segments[i].size, attributes);
offset += 0x10000;
}
}
static inline void lp0_unmap_all_plaintext_ram_segments(uintptr_t *mmu_l3_tbl) {
for(size_t i = 0, offset = 0; i < sizeof(g_lp0_plaintext_ram_segments) / sizeof(g_lp0_plaintext_ram_segments[0]); i++) {
mmu_unmap_range(3, mmu_l3_tbl, LP0_PLAINTEXT_RAM_SEGMENT_BASE + offset, g_lp0_plaintext_ram_segments[i].size);
for(size_t i = 0, offset = 0; i < sizeof(g_lp0_entry_ram_segments) / sizeof(g_lp0_entry_ram_segments[0]); i++) {
mmu_unmap_range(3, mmu_l3_tbl, LP0_PLAINTEXT_RAM_SEGMENT_BASE + offset, g_lp0_entry_ram_segments[i].size);
offset += 0x10000;
}
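
For concreteness, the mapping loop above places the three g_lp0_entry_ram_segments 0x10000 apart starting at LP0_ENTRY_RAM_SEGMENT_BASE; with the constants defined earlier in this header the virtual layout works out as follows (a worked example, derived only from values shown in this diff):

/* LP0_ENTRY_RAM_SEGMENT_BASE = MMIO_BASE + 0x100000 = 0x1F0080000 + 0x100000 = 0x1F0180000
 *   segment 0 (encrypted TZRAM,  PA 0x40020000) -> VA 0x1F0180000
 *   segment 1 (LP0 entry code,   PA 0x40003000) -> VA 0x1F0190000
 *   segment 2 (TZRAM to encrypt, PA 0x7C010000) -> VA 0x1F01A0000
 * WARMBOOT_RAM_SEGMENT_BASE = 0x1F0180000 + 0x47000 = 0x1F01C7000 (warmboot segments follow) */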
@ -226,14 +226,14 @@ static inline void lp0_unmap_all_plaintext_ram_segments(uintptr_t *mmu_l3_tbl) {
/**********************************************************************************************/
static inline uintptr_t lp0_get_ciphertext_ram_segment_pa(unsigned int segment_id) {
return g_lp0_ciphertext_ram_segments[segment_id].pa;
return g_warmboot_ram_segments[segment_id].pa;
}
#ifndef MEMORY_MAP_USE_IDENTIY_MAPPING
static inline uintptr_t lp0_get_ciphertext_ram_segment_address(unsigned int segment_id) {
size_t offset = 0;
for(unsigned int i = 0; i < segment_id; i++) {
offset += g_lp0_ciphertext_ram_segments[i].size;
offset += g_warmboot_ram_segments[i].size;
}
return LP0_CIPHERTEXT_RAM_SEGMENT_BASE + offset;
@ -245,19 +245,19 @@ static inline uintptr_t lp0_get_ciphertext_ram_segment_address(unsigned int segm
#endif
static inline void lp0_map_all_ciphertext_ram_segments(uintptr_t *mmu_l3_tbl) {
for(size_t i = 0, offset = 0; i < sizeof(g_lp0_ciphertext_ram_segments) / sizeof(g_lp0_ciphertext_ram_segments[0]); i++) {
uint64_t attributes = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | g_lp0_ciphertext_ram_segments[i].attributes;
mmu_map_page_range(mmu_l3_tbl, LP0_CIPHERTEXT_RAM_SEGMENT_BASE + offset, g_lp0_ciphertext_ram_segments[i].pa,
g_lp0_ciphertext_ram_segments[i].size, attributes);
offset += g_lp0_ciphertext_ram_segments[i].size;
for(size_t i = 0, offset = 0; i < sizeof(g_warmboot_ram_segments) / sizeof(g_warmboot_ram_segments[0]); i++) {
uint64_t attributes = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | g_warmboot_ram_segments[i].attributes;
mmu_map_page_range(mmu_l3_tbl, LP0_CIPHERTEXT_RAM_SEGMENT_BASE + offset, g_warmboot_ram_segments[i].pa,
g_warmboot_ram_segments[i].size, attributes);
offset += g_warmboot_ram_segments[i].size;
}
}
static inline void lp0_unmap_all_ciphertext_ram_segments(uintptr_t *mmu_l3_tbl) {
for(size_t i = 0, offset = 0; i < sizeof(g_lp0_ciphertext_ram_segments) / sizeof(g_lp0_ciphertext_ram_segments[0]); i++) {
mmu_unmap_range(3, mmu_l3_tbl, LP0_CIPHERTEXT_RAM_SEGMENT_BASE + offset, g_lp0_ciphertext_ram_segments[i].size);
for(size_t i = 0, offset = 0; i < sizeof(g_warmboot_ram_segments) / sizeof(g_warmboot_ram_segments[0]); i++) {
mmu_unmap_range(3, mmu_l3_tbl, LP0_CIPHERTEXT_RAM_SEGMENT_BASE + offset, g_warmboot_ram_segments[i].size);
offset += g_lp0_ciphertext_ram_segments[i].size;
offset += g_warmboot_ram_segments[i].size;
}
}
@ -297,7 +297,7 @@ static inline void tzram_map_all_segments(uintptr_t *mmu_l3_tbl) {
static inline void tzram_unmap_all_segments(uintptr_t *mmu_l3_tbl) {
/* Except the SPL userpage */
for(size_t i = 0, offset = 0; i < sizeof(g_lp0_ciphertext_ram_segments) / sizeof(g_lp0_ciphertext_ram_segments[0]); i++) {
for(size_t i = 0, offset = 0; i < sizeof(g_warmboot_ram_segments) / sizeof(g_warmboot_ram_segments[0]); i++) {
if(g_tzram_segments[i].map_size == 0) {
continue;
}


@ -3,6 +3,7 @@
#include <string.h>
#include "utils.h"
#include "memory_map.h"
#include "package2.h"
#include "configitem.h"
@ -12,52 +13,37 @@
#include "randomcache.h"
#include "timers.h"
void setup_mmio_virtual_addresses(void) {
/* TODO: Set Timers address to 0x1F008B000. */
/* TODO: Set Security Engine address to 0x1F008F000. */
/* TODO: Set CAR address to 0x1F0087000. */
/* TODO: Set PMC address to 0x1F0089400. */
/* TODO: Set Fuse address to 0x1F0096800. */
/* TODO: Set Interrupt addresses to 0x1F0080000, 0x1F0082000. */
/* TODO: Set Flow Controller address to 0x1F009D000. */
/* TODO: Set UART-A address to 0x1F0085000. */
/* TODO: Set I2C-0 address to 0x1F00A5000. */
/* TODO: Set I2C-4 address to 0x1F00A1000. */
/* TODO: Set MISC address to 0x1F0098000. */
/* TODO: Set GPIO-1 address to 0x1F00A3000. */
}
/* Hardware init, sets up the RNG and SESSION keyslots, derives new DEVICE key. */
void setup_se(void) {
static void setup_se(void) {
uint8_t work_buffer[0x10];
/* Sanity check the Security Engine. */
se_verify_flags_cleared();
se_clear_interrupts();
/* Perform some sanity initialization. */
security_engine_t *p_security_engine = get_security_engine_address();
p_security_engine->_0x4 = 0;
p_security_engine->AES_KEY_READ_DISABLE_REG = 0;
p_security_engine->RSA_KEY_READ_DISABLE_REG = 0;
p_security_engine->_0x0 &= 0xFFFFFFFB;
/* Currently unknown what each flag does. */
for (unsigned int i = 0; i < KEYSLOT_AES_MAX; i++) {
set_aes_keyslot_flags(i, 0x15);
}
for (unsigned int i = 4; i < KEYSLOT_AES_MAX; i++) {
set_aes_keyslot_flags(i, 0x40);
}
for (unsigned int i = 0; i < KEYSLOT_RSA_MAX; i++) {
set_rsa_keyslot_flags(i, 0x41);
}
/* Detect Master Key revision. */
mkey_detect_revision();
/* Setup new device key, if necessary. */
if (mkey_get_revision() >= MASTERKEY_REVISION_400_CURRENT) {
const uint8_t new_devicekey_source_4x[0x10] = {0x8B, 0x4E, 0x1C, 0x22, 0x42, 0x07, 0xC8, 0x73, 0x56, 0x94, 0x08, 0x8B, 0xCC, 0x47, 0x0F, 0x5D};
@ -66,23 +52,23 @@ void setup_se(void) {
clear_aes_keyslot(KEYSLOT_SWITCH_4XNEWCONSOLEKEYGENKEY);
set_aes_keyslot_flags(KEYSLOT_SWITCH_DEVICEKEY, 0xFF);
}
se_initialize_rng(KEYSLOT_SWITCH_DEVICEKEY);
/* Generate random data, transform with device key to get RNG key. */
se_generate_random(KEYSLOT_SWITCH_DEVICEKEY, work_buffer, 0x10);
decrypt_data_into_keyslot(KEYSLOT_SWITCH_RNGKEY, KEYSLOT_SWITCH_DEVICEKEY, work_buffer, 0x10);
set_aes_keyslot_flags(KEYSLOT_SWITCH_RNGKEY, 0xFF);
/* Repeat for Session key. */
se_generate_random(KEYSLOT_SWITCH_DEVICEKEY, work_buffer, 0x10);
decrypt_data_into_keyslot(KEYSLOT_SWITCH_SESSIONKEY, KEYSLOT_SWITCH_DEVICEKEY, work_buffer, 0x10);
set_aes_keyslot_flags(KEYSLOT_SWITCH_SESSIONKEY, 0xFF);
/* TODO: Create Test Vector, to validate keyslot data is unchanged post warmboot. */
}
void setup_boot_config(void) {
static void setup_boot_config(void) {
/* Load boot config only if dev unit. */
if (configitem_is_retail()) {
bootconfig_clear();
@ -92,25 +78,24 @@ void setup_boot_config(void) {
}
}
bool rsa2048_pss_verify(const void *signature, size_t signature_size, const void *modulus, size_t modulus_size, const void *data, size_t data_size) {
static bool rsa2048_pss_verify(const void *signature, size_t signature_size, const void *modulus, size_t modulus_size, const void *data, size_t data_size) {
uint8_t message[RSA_2048_BYTES];
uint8_t h_buf[0x24];
/* Hardcode RSA with keyslot 0. */
const uint8_t public_exponent[4] = {0x00, 0x01, 0x00, 0x01};
set_rsa_keyslot(0, modulus, modulus_size, public_exponent, sizeof(public_exponent));
se_synchronous_exp_mod(0, message, sizeof(message), signature, signature_size);
/* Validate sanity byte. */
if (message[RSA_2048_BYTES - 1] != 0xBC) {
return false;
}
/* Copy Salt into MGF1 Hash Buffer. */
memset(h_buf, 0, sizeof(h_buf));
memcpy(h_buf, message + RSA_2048_BYTES - 0x20 - 0x1, 0x20);
/* Decrypt maskedDB (via inline MGF1). */
uint8_t seed = 0;
uint8_t mgf1_buf[0x20];
@ -122,7 +107,7 @@ bool rsa2048_pss_verify(const void *signature, size_t signature_size, const void
message[i] ^= mgf1_buf[i - ofs];
}
}
/* Constant lmask for rsa-2048-pss. */
message[0] &= 0x7F;
@ -139,7 +124,7 @@ bool rsa2048_pss_verify(const void *signature, size_t signature_size, const void
/* Check hash correctness. */
uint8_t validate_buf[8 + 0x20 + 0x20];
uint8_t validate_hash[0x20];
memset(validate_buf, 0, sizeof(validate_buf));
flush_dcache_range((uint8_t *)data, (uint8_t *)data + data_size);
se_calculate_sha256(&validate_buf[8], data, data_size);
@ -149,20 +134,20 @@ bool rsa2048_pss_verify(const void *signature, size_t signature_size, const void
return memcmp(h_buf, validate_hash, 0x20) == 0;
}
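
For reference, the "inline MGF1" step above is the standard MGF1 mask generation from RSA-PSS; the sketch below spells out the same construction as a standalone helper. It reuses the se_calculate_sha256(dst, src, size) call seen later in this file (signature assumed from its use there), assumes a seed of at most 0x20 bytes, and relies on the <stdint.h>/<string.h> includes already present in this file.

/* Sketch: MGF1-SHA256 as used by rsa2048_pss_verify above.
   mask = SHA256(seed || counter_be32) for counter = 0, 1, ..., truncated to mask_len. */
static void mgf1_sha256_sketch(uint8_t *mask, size_t mask_len, const uint8_t *seed, size_t seed_len) {
    uint8_t buf[0x20 + 4];   /* seed (<= 0x20 bytes) || 32-bit big-endian counter */
    uint8_t block[0x20];
    memcpy(buf, seed, seed_len);
    for (uint32_t counter = 0; mask_len > 0; counter++) {
        buf[seed_len + 0] = (uint8_t)(counter >> 24);
        buf[seed_len + 1] = (uint8_t)(counter >> 16);
        buf[seed_len + 2] = (uint8_t)(counter >> 8);
        buf[seed_len + 3] = (uint8_t)(counter >> 0);
        se_calculate_sha256(block, buf, seed_len + 4);   /* assumed (dst, src, size) signature */
        size_t chunk = mask_len < sizeof(block) ? mask_len : sizeof(block);
        memcpy(mask, block, chunk);
        mask += chunk;
        mask_len -= chunk;
    }
}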
void package2_crypt_ctr(unsigned int master_key_rev, void *dst, size_t dst_size, const void *src, size_t src_size, const void *ctr, size_t ctr_size) {
static void package2_crypt_ctr(unsigned int master_key_rev, void *dst, size_t dst_size, const void *src, size_t src_size, const void *ctr, size_t ctr_size) {
/* Derive package2 key. */
const uint8_t package2_key_source[0x10] = {0xFB, 0x8B, 0x6A, 0x9C, 0x79, 0x00, 0xC8, 0x49, 0xEF, 0xD2, 0x4D, 0x85, 0x4D, 0x30, 0xA0, 0xC7};
flush_dcache_range((uint8_t *)dst, (uint8_t *)dst + dst_size);
flush_dcache_range((uint8_t *)src, (uint8_t *)src + src_size);
unsigned int keyslot = mkey_get_keyslot(master_key_rev);
decrypt_data_into_keyslot(KEYSLOT_SWITCH_PACKAGE2KEY, keyslot, package2_key_source, 0x10);
/* Perform Encryption. */
se_aes_ctr_crypt(KEYSLOT_SWITCH_PACKAGE2KEY, dst, dst_size, src, src_size, ctr, ctr_size);
}
void verify_header_signature(package2_header_t *header) {
static void verify_header_signature(package2_header_t *header) {
const uint8_t *modulus;
if (configitem_is_retail()) {
@ -206,14 +191,14 @@ void verify_header_signature(package2_header_t *header) {
};
modulus = package2_modulus_dev;
}
/* This is normally only allowed on dev units, but we'll allow it anywhere. */
if (bootconfig_is_package2_unsigned() == 0 && rsa2048_pss_verify(header->signature, 0x100, modulus, 0x100, header->encrypted_header, 0x100) == 0) {
generic_panic();
}
}
bool validate_package2_metadata(package2_meta_t *metadata) {
static bool validate_package2_metadata(package2_meta_t *metadata) {
if (metadata->magic != MAGIC_PK21) {
return false;
}
@ -295,12 +280,12 @@ bool validate_package2_metadata(package2_meta_t *metadata) {
}
/* Decrypts package2 header, and returns the master key revision required. */
uint32_t decrypt_and_validate_header(package2_header_t *header) {
static uint32_t decrypt_and_validate_header(package2_header_t *header) {
package2_meta_t metadata;
if (bootconfig_is_package2_plaintext() == 0) {
uint32_t mkey_rev;
/* Try to decrypt for all possible master keys. */
for (mkey_rev = 0; mkey_rev < MASTERKEY_REVISION_MAX; mkey_rev++) {
package2_crypt_ctr(mkey_rev, &metadata, sizeof(package2_meta_t), &header->metadata, sizeof(package2_meta_t), header->metadata.ctr, sizeof(header->metadata.ctr));
@ -312,17 +297,17 @@ uint32_t decrypt_and_validate_header(package2_header_t *header) {
break;
}
}
/* Ensure we successfully decrypted the header. */
generic_panic();
}
return 0;
}
void load_package2_sections(package2_meta_t *metadata, uint32_t master_key_rev) {
static void load_package2_sections(package2_meta_t *metadata, uint32_t master_key_rev) {
/* By default, copy data directly from where NX_BOOTLOADER puts it. */
void *load_buf = NX_BOOTLOADER_PACKAGE2_LOAD_ADDRESS;
/* Check whether any of our sections overlap this region. If they do, we must relocate and copy from elsewhere. */
bool needs_relocation = false;
for (unsigned int section = 0; section < PACKAGE2_SECTION_MAX; section++) {
@ -365,13 +350,13 @@ void load_package2_sections(package2_meta_t *metadata, uint32_t master_key_rev)
memset(load_buf, 0, PACKAGE2_SIZE_MAX);
load_buf = (void *)potential_base_start;
}
/* Copy each section to its appropriate location, decrypting if necessary. */
for (unsigned int section = 0; section < PACKAGE2_SECTION_MAX; section++) {
if (metadata->section_sizes[section] == 0) {
continue;
}
void *dst_start = (void *)(DRAM_BASE_PHYSICAL + (uint64_t)metadata->section_offsets[section]);
void *src_start = load_buf + sizeof(package2_header_t) + metadata->section_offsets[section];
size_t size = (size_t)metadata->section_sizes[section];
@ -382,17 +367,18 @@ void load_package2_sections(package2_meta_t *metadata, uint32_t master_key_rev)
package2_crypt_ctr(master_key_rev, dst_start, size, src_start, size, metadata->section_ctrs[section], 0x10);
}
}
/* Clear the encrypted package2 from memory. */
memset(load_buf, 0, PACKAGE2_SIZE_MAX);
}
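
The relocation decision above reduces to an interval-overlap test per section against the region NX_BOOTLOADER loads package2 into. A minimal sketch of that test, reusing names that appear elsewhere in this file and the overlaps() helper visible in the utils.h hunk further down (the exact bounds of the load region are an assumption here):

/* Sketch: does a section's destination range intersect the package2 load buffer? */
static bool section_overlaps_load_region_sketch(const package2_meta_t *metadata, unsigned int section) {
    uint64_t dst_start  = DRAM_BASE_PHYSICAL + (uint64_t)metadata->section_offsets[section];
    uint64_t dst_end    = dst_start + (uint64_t)metadata->section_sizes[section];
    uint64_t load_start = (uint64_t)(uintptr_t)NX_BOOTLOADER_PACKAGE2_LOAD_ADDRESS;
    uint64_t load_end   = load_start + PACKAGE2_SIZE_MAX;
    return overlaps(dst_start, dst_end, load_start, load_end);
}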
/* This function is called during coldboot crt0, and validates a package2. */
uintptr_t get_pk2ldr_stack_address(void) {
return tzram_get_segment_address(TZRAM_SEGMENT_ID_PK2LDR) + 0x2000;
}
/* This function is called during coldboot init, and validates a package2. */
/* This package2 is read into memory by a concurrent BPMP bootloader. */
void load_package2(void) {
/* Setup MMIO virtual pointers. */
setup_mmio_virtual_addresses();
/* Setup the Security Engine. */
setup_se();
@ -408,12 +394,12 @@ void load_package2(void) {
/* Initialize cache'd random bytes for kernel. */
randomcache_init();
/* TODO: memclear the initial copy of Exosphere running in IRAM (relocated to TZRAM by earlier code). */
/* Let NX Bootloader know that we're running. */
MAILBOX_NX_BOOTLOADER_IS_SECMON_AWAKE = 1;
/* Synchronize with NX BOOTLOADER. */
if (MAILBOX_NX_BOOTLOADER_SETUP_STATE == NX_BOOTLOADER_STATE_INIT) {
while (MAILBOX_NX_BOOTLOADER_SETUP_STATE < NX_BOOTLOADER_STATE_MOVED_BOOTCONFIG) {
@ -423,47 +409,44 @@ void load_package2(void) {
/* Load Boot Config into global. */
setup_boot_config();
/* Synchronize with NX BOOTLOADER. */
if (MAILBOX_NX_BOOTLOADER_SETUP_STATE == NX_BOOTLOADER_STATE_MOVED_BOOTCONFIG) {
while (MAILBOX_NX_BOOTLOADER_SETUP_STATE < NX_BOOTLOADER_STATE_LOADED_PACKAGE2) {
wait(1);
}
}
/* Load header from NX_BOOTLOADER-initialized DRAM. */
package2_header_t header;
flush_dcache_range((uint8_t *)NX_BOOTLOADER_PACKAGE2_LOAD_ADDRESS, (uint8_t *)NX_BOOTLOADER_PACKAGE2_LOAD_ADDRESS + sizeof(header));
memcpy(&header, NX_BOOTLOADER_PACKAGE2_LOAD_ADDRESS, sizeof(header));
flush_dcache_range((uint8_t *)&header, (uint8_t *)&header + sizeof(header));
/* Perform signature checks. */
verify_header_signature(&header);
/* Decrypt header, get key revision required. */
uint32_t package2_mkey_rev = decrypt_and_validate_header(&header);
/* Load Package2 Sections. */
load_package2_sections(&header.metadata, package2_mkey_rev);
/* Clean up cache. */
flush_dcache_all();
invalidate_icache_inner_shareable();
/* Set CORE0 entrypoint for Package2. */
set_core_entrypoint_and_context_id(0, DRAM_BASE_PHYSICAL + header.metadata.entrypoint, 0);
/* TODO: Nintendo clears 0x1F01FA7D0 to 0x1F01FA7E8. What does this do? Does it remove the identity mapping page tables? */
tlb_invalidate_all();
/* Synchronize with NX BOOTLOADER. */
if (MAILBOX_NX_BOOTLOADER_SETUP_STATE == NX_BOOTLOADER_STATE_LOADED_PACKAGE2) {
while (MAILBOX_NX_BOOTLOADER_SETUP_STATE < NX_BOOTLOADER_STATE_FINISHED) {
wait(1);
}
}
/* TODO: MISC register 0x1F0098C00 |= 0x2000; */
/* TODO: Update SCR_EL3 depending on value in Bootconfig. */
}
/* TODO: Update SCR_EL3 depending on value in Bootconfig. */
}


@ -1,6 +1,8 @@
#include <stdint.h>
#include "utils.h"
#include "memory_map.h"
#include "configitem.h"
#include "cpu_context.h"
#include "lock.h"
@ -107,6 +109,20 @@ smc_table_t g_smc_tables[2] = {
bool g_is_user_smc_in_progress = false;
bool g_is_priv_smc_in_progress = false;
uintptr_t get_smc_core012_stack_address(void) {
return tzram_get_segment_address(TZRAM_SEGMENT_ID_CORE012_STACK) + 0x1000;
}
uintptr_t get_exception_entry_core3_stack_address(unsigned int core_id) {
/* For core3, this is also the smc stack */
if (core_id == 3) {
return tzram_get_segment_address(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x1000;
}
else {
return tzram_get_segment_address(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x80 * (core_id + 1);
}
}
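
Spelled out, the stack addresses the function above hands back are (a worked example derived only from the expression shown; segment bases kept symbolic):

/*   core 0 -> tzram_get_segment_address(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x080
 *   core 1 -> tzram_get_segment_address(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x100
 *   core 2 -> tzram_get_segment_address(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x180
 *   core 3 -> tzram_get_segment_address(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x1000 (shared with the SMC stack) */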
/* Privileged SMC lock must be available to exceptions.s. */
void set_priv_smc_in_progress(void) {
lock_acquire(&g_is_priv_smc_in_progress);


@ -13,8 +13,9 @@ typedef struct {
void set_priv_smc_in_progress(void);
void clear_priv_smc_in_progress(void);
void get_smc_core012_stack_address(void);
uintptr_t get_smc_core012_stack_address(void);
uintptr_t get_exception_entry_core3_stack_address(unsigned int core_id);
void call_smc_handler(unsigned int handler_id, smc_args_t *args);
#endif
#endif


@ -1,100 +0,0 @@
.align 6
.section .text.cold.start, "ax", %progbits
.global __start_cold
__start_cold:
/* Nintendo copy-pasted https://github.com/ARM-software/arm-trusted-firmware/blob/master/plat/nvidia/tegra/common/aarch64/tegra_helpers.S#L312 */
/*
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/* The following comments are mine. */
/* mask all interrupts */
msr daifset, 0b1111
/*
Enable invalidates of branch target buffer, then flush
the entire instruction cache at the local level, and
with the reg change, the branch target buffer, then disable
invalidates of the branch target buffer again.
*/
mrs x0, cpuactlr_el1
orr x0, x0, #1
msr cpuactlr_el1, x0
dsb sy
isb
ic iallu
dsb sy
isb
mrs x0, cpuactlr_el1
bic x0, x0, #1
msr cpuactlr_el1, x0
.rept 7
nop /* wait long enough for the write to cpuactlr_el1 to have completed */
.endr
/* if the OS lock is set, disable it and request a warm reset */
mrs x0, oslsr_el1
ands x0, x0, #2
b.eq _set_lock_and_sp
mov x0, xzr
msr oslar_el1, x0
mov x0, #(1 << 63)
msr cpuactlr_el1, x0 /* disable regional clock gating */
isb
mov x0, #3
msr rmr_el3, x0
isb
dsb sy
/* Nintendo forgot to copy-paste the branch instruction below. */
_reset_wfi:
wfi
b _reset_wfi
.rept 65
nop /* guard against speculative execution */
.endr
_set_lock_and_sp:
/* set the OS lock */
mov x0, #1
msr oslar_el1, x0
/* set SP = SP_EL0 (temporary stack) */
msr spsel, #0
ldr x20, =__cold_crt0_stack_top__
mov sp, x20
bl configure_memory
ldr x16, =__init_cold
br x16
.section .text.cold, "ax", %progbits
__init_cold:
/* set SP = SP_EL3 (exception stack) */
msr spsel, #1
ldr x20, =__main_stack_top__
mov sp, x20
/* set SP = SP_EL0 (temporary stack) */
msr spsel, #0
ldr x20, =__pk2_load_stack_top__
mov sp, x20
bl load_package2
ldr x20, =__cold_init_stack_top__
mov sp, x20
b coldboot_main
.global __set_sp_el0_and_jump_to_el1
.type __set_sp_el0_and_jump_to_el1, %function
__set_sp_el0_and_jump_to_el1:
/* the official handler does some weird stuff with SP_EL0 */
msr elr_el3, x1
mov sp, x2
mov x1, #0x3c5 /* EL1, all interrupts masked */
msr spsr_el3, x1
isb
eret

exosphere/start.s (new file, 148 lines)

@ -0,0 +1,148 @@
.macro ERRATUM_INVALIDATE_BTB_AT_BOOT
/* Nintendo copy-pasted https://github.com/ARM-software/arm-trusted-firmware/blob/master/plat/nvidia/tegra/common/aarch64/tegra_helpers.S#L312 */
/*
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/* The following comments are mine. */
/* mask all interrupts */
msr daifset, 0b1111
/*
Enable invalidates of branch target buffer, then flush
the entire instruction cache at the local level, and
with the reg change, the branch target buffer, then disable
invalidates of the branch target buffer again.
*/
mrs x0, cpuactlr_el1
orr x0, x0, #1
msr cpuactlr_el1, x0
dsb sy
isb
ic iallu
dsb sy
isb
mrs x0, cpuactlr_el1
bic x0, x0, #1
msr cpuactlr_el1, x0
.rept 7
nop /* wait long enough for the write to cpuactlr_el1 to have completed */
.endr
/* if the OS lock is set, disable it and request a warm reset */
mrs x0, oslsr_el1
ands x0, x0, #2
b.eq _set_lock_and_sp
mov x0, xzr
msr oslar_el1, x0
mov x0, #(1 << 63)
msr cpuactlr_el1, x0 /* disable regional clock gating */
isb
mov x0, #3
msr rmr_el3, x0
isb
dsb sy
/* Nintendo forgot to copy-paste the branch instruction below. */
_reset_wfi:
wfi
b _reset_wfi
.rept 65
nop /* guard against speculative execution */
.endr
_set_lock_and_sp:
/* set the OS lock */
mov x0, #1
msr oslar_el1, x0
.endm
.align 6
.section .text.cold.start, "ax", %progbits
.global __start_cold
__start_cold:
ERRATUM_INVALIDATE_BTB_AT_BOOT
msr spsel, #0
bl get_coldboot_crt0_stack_address /* should be optimized so it doesn't make function calls */
mov sp, x0
bl coldboot_init
ldr x16, =__jump_to_main_cold
br x16
.align 6
.section .text.warm.start, "ax", %progbits
.global __start_warm
__start_warm:
ERRATUM_INVALIDATE_BTB_AT_BOOT
/* For some reason, Nintendo uses spsel, #1 here, causing issues if an exception occurs */
msr spsel, #0
bl get_warmboot_crt0_stack_address /* should be optimized so it doesn't make function calls */
mov sp, x0
bl warmboot_init
ldr x16, =__jump_to_main_warm
br x16
.section .text.__jump_to_main_cold, "ax", %progbits
__jump_to_main_cold:
bl __set_exception_entry_stack_pointer
bl get_pk2ldr_stack_address
mov sp, x0
bl load_package2
mov w0, #3 /* use core3 stack temporarily */
bl get_exception_entry_stack_address
mov sp, x0
b coldboot_main
.section .text.__jump_to_main_warm, "ax", %progbits
__jump_to_main_warm:
/* Nintendo doesn't do that here, causing issues if an exception occurs */
bl __set_exception_entry_stack_pointer
bl get_pk2ldr_stack_address
mov sp, x0
bl load_package2
mov w0, #3 /* use core0,1,2 stack bottom + 0x800 (VA of warmboot crt0 sp) temporarily */
bl get_exception_entry_stack_address
add sp, x0, #0x800
b warmboot_main
.section .text.__set_exception_entry_stack_pointer, "ax", %progbits
.type __set_exception_entry_stack_pointer, %function
.global __set_exception_entry_stack_pointer
__set_exception_entry_stack_pointer:
/* If SPSel == 1 on entry, make sure your function doesn't use stack variables! */
mov x16, lr
mrs x17, spsel
mrs x0, mpidr_el1
and w0, w0, #3
bl get_exception_entry_stack_address /* should be optimized so it doesn't make function calls */
msr spsel, #1
mov sp, x0
msr spsel, x17
mov lr, x16
ret
.section .text.__jump_to_lower_el, "ax", %progbits
.global __jump_to_lower_el
.type __jump_to_lower_el, %function
__jump_to_lower_el:
/* x0: arg (context ID), x1: entrypoint, w2: exception level */
msr elr_el3, x1
mov x1, #((0b1111 << 6) | 1) /* DAIF set and SP = SP_ELx */
orr x1, x1, x2, lsl #2 /* target EL in bits 3:2 */
msr spsr_el3, x1
bl __set_exception_entry_stack_pointer
isb
eret
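
The spsr_el3 value composed in __jump_to_lower_el is worth spelling out: DAIF all masked (bits 9:6), the target EL in bits 3:2, and bit 0 selecting SP_ELx. A small sketch of the same composition in C — for el == 1 it evaluates to 0x3C5, the literal the removed start.cold.s loaded directly:

/* Sketch: the SPSR_EL3 value built by __jump_to_lower_el. */
static inline unsigned int make_spsr_el3_sketch(unsigned int el) {
    return (0xFu << 6)   /* DAIF masked */
         | (el << 2)     /* target exception level */
         | 1u;           /* return to SP_ELx rather than SP_EL0 */
}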


@ -1,79 +0,0 @@
.align 6
.section .text.warm.start, "ax", %progbits
.global __start_warm
__start_warm:
/* Nintendo copy-pasted https://github.com/ARM-software/arm-trusted-firmware/blob/master/plat/nvidia/tegra/common/aarch64/tegra_helpers.S#L312 */
/*
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/* The following comments are mine. */
/* mask all interrupts */
msr daifset, 0b1111
/*
Enable invalidates of branch target buffer, then flush
the entire instruction cache at the local level, and
with the reg change, the branch target buffer, then disable
invalidates of the branch target buffer again.
*/
mrs x0, cpuactlr_el1
orr x0, x0, #1
msr cpuactlr_el1, x0
dsb sy
isb
ic iallu
dsb sy
isb
mrs x0, cpuactlr_el1
bic x0, x0, #1
msr cpuactlr_el1, x0
.rept 7
nop /* wait long enough for the write to cpuactlr_el1 to have completed */
.endr
/* if the OS lock is set, disable it and request a warm reset */
mrs x0, oslsr_el1
ands x0, x0, #2
b.eq _set_lock_and_sp
mov x0, xzr
msr oslar_el1, x0
mov x0, #(1 << 63)
msr cpuactlr_el1, x0 /* disable regional clock gating */
isb
mov x0, #3
msr rmr_el3, x0
isb
dsb sy
/* Nintendo forgot to copy-paste the branch instruction below. */
_reset_wfi:
wfi
b _reset_wfi
.rept 65
nop /* guard against speculative execution */
.endr
_set_lock_and_sp:
/* set the OS lock */
mov x0, #1
msr oslar_el1, x0
bl __synchronize_cores
/* set SP = SP_EL3 (handler stack) */
msr spsel, #1
ldr x20, =__warm_crt0_stack_top__
mov sp, x20
bl reconfigure_memory
ldr x16, =__init_warm
br x16
.section .text.warm, "ax", %progbits
__init_warm:
ldr x20, =__warm_init_stack_top__
mov sp, x20
b warmboot_main


@ -37,9 +37,9 @@ static __attribute__((noinline)) bool overlaps(uint64_t as, uint64_t ae, uint64_
static inline unsigned int get_core_id(void) {
unsigned int core_id;
uint64_t core_id;
__asm__ __volatile__ ("mrs %0, MPIDR_EL1" : "=r"(core_id));
return core_id & 3;
return (unsigned int)core_id & 3;
}
#endif

exosphere/warmboot_init.c (new file, 16 lines)

@ -0,0 +1,16 @@
#include "utils.h"
#include "memory_map.h"
uintptr_t get_warmboot_crt0_stack_address(void);
void flush_dcache_all_tzram_pa(void) {
/* TODO */
}
void invalidate_icache_all_tzram_pa(void) {
/* TODO */
}
uintptr_t get_warmboot_crt0_stack_address(void) {
return tzram_get_segment_pa(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x800;
}

exosphere/warmboot_main.c (new file, 11 lines)

@ -0,0 +1,11 @@
#include "utils.h"
#include "mmu.h"
#include "memory_map.h"
extern void __jump_to_lower_el(uint64_t arg, uintptr_t ep, unsigned int el);
void warmboot_main(void);
void warmboot_main(void) {
/* TODO */
}