diff --git a/exosphere/src/memory_map.h b/exosphere/src/memory_map.h
index c40eadbab..50bd32c7b 100644
--- a/exosphere/src/memory_map.h
+++ b/exosphere/src/memory_map.h
@@ -48,7 +48,9 @@
 #define _MMAPDEV15 ( 0x6000D000ull, 0x1000ull, true ) /* GPIO-1 - GPIO-8 */
 #define _MMAPDEV16 ( 0x7000C000ull, 0x1000ull, true ) /* I2C-I2C4 */
 #define _MMAPDEV17 ( 0x6000F000ull, 0x1000ull, true ) /* Exception vectors */
-#define _MMAPDEV18 ( 0x40038000ull, 0x8000ull, true ) /* DEBUG: IRAM */
+#define _MMAPDEV18 ( 0x00000000ull, 0x1000ull, true ) /* AMS irampage, NOT mapped at startup */
+#define _MMAPDEV19 ( 0x00000000ull, 0x1000ull, true ) /* AMS userpage, NOT mapped at startup */
+#define _MMAPDEV20 ( 0x40038000ull, 0x5000ull, true ) /* DEBUG: IRAM */
 
 /* LP0 entry ram segments (addr, size, additional attributes) */
 #define _MMAPLP0ES0 ( 0x40020000ull, 0x10000ull, MMU_PTE_BLOCK_NS | ATTRIB_MEMTYPE_DEVICE ) /* Encrypted TZRAM */
@@ -108,8 +110,10 @@
 #define MMIO_DEVID_GPIO 15
 #define MMIO_DEVID_DTV_I2C234 16
 #define MMIO_DEVID_EXCEPTION_VECTORS 17
-#define MMIO_DEVID_DEBUG_IRAM 18
-#define MMIO_DEVID_MAX 19
+#define MMIO_DEVID_AMS_IRAM_PAGE 18
+#define MMIO_DEVID_AMS_USER_PAGE 19
+#define MMIO_DEVID_DEBUG_IRAM 20
+#define MMIO_DEVID_MAX 21
 
 #define LP0_ENTRY_RAM_SEGMENT_ID_ENCRYPTED_TZRAM 0
 #define LP0_ENTRY_RAM_SEGMENT_ID_LP0_ENTRY_CODE 1
diff --git a/exosphere/src/mmu.h b/exosphere/src/mmu.h
index 2b41db321..a85569308 100644
--- a/exosphere/src/mmu.h
+++ b/exosphere/src/mmu.h
@@ -165,6 +165,10 @@ static inline void mmu_unmap(unsigned int level, uintptr_t *tbl, uintptr_t base_
     tbl[mmu_compute_index(level, base_addr)] = MMU_PTE_TYPE_FAULT;
 }
 
+static inline void mmu_unmap_page(uintptr_t *tbl, uintptr_t base_addr) {
+    tbl[mmu_compute_index(3, base_addr)] = MMU_PTE_TYPE_FAULT;
+}
+
 static inline void mmu_map_block_range(unsigned int level, uintptr_t *tbl, uintptr_t base_addr, uintptr_t phys_addr, size_t size, uint64_t attrs) {
     size = ((size + (BITL(MMU_Lx_SHIFT(level)) - 1)) >> MMU_Lx_SHIFT(level)) << MMU_Lx_SHIFT(level);
     for(size_t offset = 0; offset < size; offset += BITL(MMU_Lx_SHIFT(level))) {
diff --git a/exosphere/src/smc_ams.c b/exosphere/src/smc_ams.c
index 98e10146b..08e220f3d 100644
--- a/exosphere/src/smc_ams.c
+++ b/exosphere/src/smc_ams.c
@@ -14,20 +14,144 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
+
+#include <stdatomic.h>
 #include <stdint.h>
 #include <stdbool.h>
 #include <stddef.h>
 
 #include "utils.h"
-#include "smc_api.h"
 #include "smc_ams.h"
+#include "arm.h"
+#include "synchronization.h"
+#include "memory_map.h"
+#include "mmu.h"
+
+static atomic_flag g_ams_userpage_mapped = ATOMIC_FLAG_INIT;
+static atomic_flag g_ams_iram_page_mapped = ATOMIC_FLAG_INIT;
+
+static inline uintptr_t get_ams_user_page_secure_monitor_addr(void) {
+    return MMIO_GET_DEVICE_ADDRESS(MMIO_DEVID_AMS_USER_PAGE);
+}
+
+static inline uintptr_t get_ams_iram_page_secure_monitor_addr(void) {
+    return MMIO_GET_DEVICE_ADDRESS(MMIO_DEVID_AMS_IRAM_PAGE);
+}
+
+#define AMS_USER_PAGE_SECURE_MONITOR_ADDR (get_ams_user_page_secure_monitor_addr())
+#define AMS_IRAM_PAGE_SECURE_MONITOR_ADDR (get_ams_iram_page_secure_monitor_addr())
+
+
+static inline uintptr_t get_page_for_address(void *address) {
+    return ((uintptr_t)(address)) & ~0xFFFULL;
+}
+
+static bool ams_is_user_addr_valid(uintptr_t user_address) {
+    /* Check that the address is in dram. */
+    uintptr_t page_address = get_page_for_address((void *)user_address);
+    return (page_address - 0x80000000ull) < (6ull << 30);
+}
+
+static bool ams_is_iram_addr_valid(uintptr_t iram_address) {
+    /* Check that the address is in iram. */
+    return 0x40000000ULL <= iram_address && iram_address <= 0x4003FFFFULL;
+}
+
+static void ams_map_userpage(uintptr_t user_address) {
+    lock_acquire(&g_ams_userpage_mapped);
+    static const uint64_t userpage_attributes = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | MMU_PTE_BLOCK_NS | ATTRIB_MEMTYPE_NORMAL;
+    uintptr_t *mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_ADDRESS(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    mmu_map_page(mmu_l3_tbl, AMS_USER_PAGE_SECURE_MONITOR_ADDR, get_page_for_address((void *)user_address), userpage_attributes);
+    tlb_invalidate_page_inner_shareable((void *)AMS_USER_PAGE_SECURE_MONITOR_ADDR);
+}
+
+static void ams_unmap_userpage(void) {
+    uintptr_t *mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_ADDRESS(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    mmu_unmap_page(mmu_l3_tbl, AMS_USER_PAGE_SECURE_MONITOR_ADDR);
+    tlb_invalidate_page_inner_shareable((void *)AMS_USER_PAGE_SECURE_MONITOR_ADDR);
+    lock_release(&g_ams_userpage_mapped);
+}
+
+static void ams_map_irampage(uintptr_t iram_address) {
+    lock_acquire(&g_ams_iram_page_mapped);
+    static const uint64_t irampage_attributes = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | ATTRIB_MEMTYPE_DEVICE;
+    uintptr_t *mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_ADDRESS(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    mmu_map_page(mmu_l3_tbl, AMS_IRAM_PAGE_SECURE_MONITOR_ADDR, get_page_for_address((void *)iram_address), irampage_attributes);
+    tlb_invalidate_page_inner_shareable((void *)AMS_IRAM_PAGE_SECURE_MONITOR_ADDR);
+}
+
+static void ams_unmap_irampage(void) {
+    uintptr_t *mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_ADDRESS(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    mmu_unmap_page(mmu_l3_tbl, AMS_IRAM_PAGE_SECURE_MONITOR_ADDR);
+    tlb_invalidate_page_inner_shareable((void *)AMS_IRAM_PAGE_SECURE_MONITOR_ADDR);
+    lock_release(&g_ams_iram_page_mapped);
+}
 
 uint32_t ams_iram_copy(smc_args_t *args) {
-    /* TODO: Implement a DRAM <-> IRAM copy of up to one page here. */
+    /* Implements a DRAM <-> IRAM copy of up to one page. */
     /* This operation is necessary to implement reboot-to-payload. */
-    /* args->X[1] = DRAM address (translated by kernel). */
-    /* args->X[2] = IRAM address. */
-    /* args->X[3] = size (must be <= 0x1000). */
+    /* args->X[1] = DRAM address (translated by kernel), must be 4-byte aligned. */
+    /* args->X[2] = IRAM address, must be 4-byte aligned. */
+    /* args->X[3] = size (must be <= 0x1000 and 4-byte aligned). */
     /* args->X[4] = 0 for read, 1 for write. */
-    return 2;
+
+    const uintptr_t dram_address = (uintptr_t)args->X[1];
+    const uintptr_t iram_address = (uintptr_t)args->X[2];
+    const uintptr_t dram_page_offset = (dram_address & 0xFFFULL);
+    const uintptr_t iram_page_offset = (iram_address & 0xFFFULL);
+    const size_t size = args->X[3];
+    const uint32_t option = (uint32_t)args->X[4];
+
+    /* Validate addresses. */
+    if (!ams_is_user_addr_valid(dram_address) || !ams_is_iram_addr_valid(iram_address)) {
+        return 2;
+    }
+
+    /* Validate size. */
+    if (size > 0x1000 || (size + dram_page_offset) > 0x1000 || (size + iram_page_offset) > 0x1000) {
+        return 2;
+    }
+
+    /* Validate alignment. */
+    if (size % sizeof(uint32_t) || dram_page_offset % sizeof(uint32_t) || iram_page_offset % sizeof(uint32_t)) {
+        return 2;
+    }
+
+    /* Validate argument. */
+    if (option != 0 && option != 1) {
+        return 2;
+    }
+
+    /* Map pages. */
+    ams_map_userpage(dram_address);
+    ams_map_irampage(iram_address);
+
+    /* Set source/destination for copy. */
+    volatile uint32_t *dram_ptr = (volatile uint32_t *)(AMS_USER_PAGE_SECURE_MONITOR_ADDR + dram_page_offset);
+    volatile uint32_t *iram_ptr = (volatile uint32_t *)(AMS_IRAM_PAGE_SECURE_MONITOR_ADDR + iram_page_offset);
+
+    volatile uint32_t *dst;
+    volatile uint32_t *src;
+    const size_t num_dwords = size / sizeof(uint32_t);
+    if (option == 0) {
+        dst = dram_ptr;
+        src = iram_ptr;
+    } else {
+        dst = iram_ptr;
+        src = dram_ptr;
+    }
+
+    /* Actually copy data. */
+    for (size_t i = 0; i < num_dwords; i++) {
+        dst[i] = src[i];
+    }
+
+    /* Flush! */
+    flush_dcache_range((void *)dst, (void *)(dst + num_dwords));
+
+    /* Unmap pages. */
+    ams_unmap_irampage();
+    ams_unmap_userpage();
+
+    return 0;
 }
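
One subtlety in the patch worth calling out: ams_is_user_addr_valid folds the lower-bound and upper-bound checks into a single unsigned comparison. A page address below 0x80000000 wraps around on the subtraction and lands far above the 6 GiB window, so one compare rejects both out-of-range directions. A standalone host-side sketch of the same arithmetic (uint64_t stands in for the secure monitor's 64-bit uintptr_t; is_dram_page is a hypothetical name for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same check as ams_is_user_addr_valid: the page must lie in the
 * 6 GiB DRAM window [0x80000000, 0x200000000). Addresses below the
 * base wrap on subtraction and compare far above 6ull << 30. */
static bool is_dram_page(uint64_t page_address) {
    return (page_address - 0x80000000ull) < (6ull << 30);
}

int main(void) {
    printf("%d\n", is_dram_page(0x080000000ull)); /* 1: first DRAM page */
    printf("%d\n", is_dram_page(0x1FFFFF000ull)); /* 1: last page in the window */
    printf("%d\n", is_dram_page(0x200000000ull)); /* 0: first page past the window */
    printf("%d\n", is_dram_page(0x07FFFF000ull)); /* 0: below DRAM; subtraction wraps */
    return 0;
}

Note that the handler validates the page address, not the raw user address; the separate size + page_offset <= 0x1000 checks are what keep the copy inside the single page that ams_map_userpage actually maps.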
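
For context on how the non-secure world is expected to drive this handler, here is a minimal homebrew-side sketch using libnx's svcCallSecureMonitor. The SMC function ID 0xF0000201, the ams_iram_write helper, and g_payload_chunk are assumptions, not part of this diff (the ID matches what Atmosphère's reboot-to-payload example uses for this call); only the X[1]..X[4] layout is confirmed by the patch above, and the calling process must be permitted to use svcCallSecureMonitor.

#include <switch.h>    /* libnx: SecmonArgs, svcCallSecureMonitor */
#include <stdalign.h>

/* Keep the source buffer inside a single page: the handler rejects any
 * copy where size + page offset would exceed 0x1000. g_payload_chunk is
 * a hypothetical example buffer. */
static alignas(0x1000) u8 g_payload_chunk[0x1000];

/* Returns the handler's result from X[0]: 0 on success, 2 on bad arguments. */
static u64 ams_iram_write(uintptr_t iram_addr, const void *dram_src, u64 size) {
    SecmonArgs args = {0};
    args.X[0] = 0xF0000201;    /* assumed AMS iram-copy SMC ID; not in this diff */
    args.X[1] = (u64)dram_src; /* DRAM address, translated by the kernel */
    args.X[2] = iram_addr;     /* IRAM address */
    args.X[3] = size;          /* <= 0x1000 and 4-byte aligned */
    args.X[4] = 1;             /* 0 = read from IRAM, 1 = write to IRAM */
    svcCallSecureMonitor(&args);
    return args.X[0];
}

Writing one page of payload to IRAM at 0x40010000 would then be ams_iram_write(0x40010000ull, g_payload_chunk, sizeof(g_payload_chunk)); that address passes the handler's ams_is_iram_addr_valid range check (0x40000000 through 0x4003FFFF).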