diff --git a/bdk/memory_map.h b/bdk/memory_map.h
index 385723b..848fc1c 100644
--- a/bdk/memory_map.h
+++ b/bdk/memory_map.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 CTCaer
+ * Copyright (c) 2019-2021 CTCaer
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -50,6 +50,13 @@
 // Virtual disk / Chainloader buffers.
 #define RAM_DISK_ADDR 0xA4000000
 #define RAM_DISK_SZ   0x41000000 // 1040MB.
+#define RAM_DISK2_SZ  0x21000000 // 528MB.
+
+// NX BIS driver sector cache.
+#define NX_BIS_CACHE_ADDR  0xC5000000
+#define NX_BIS_CACHE_SZ    0x10020000 // 256MB.
+#define NX_BIS_LOOKUP_ADDR 0xD6000000
+#define NX_BIS_LOOKUP_SZ   0xF000000  // 240MB.
 
 // L4T Kernel Panic Storage (PSTORE).
 #define PSTORE_ADDR 0xB0000000
@@ -93,10 +100,6 @@
 /* --- Hole: 129MB 0xF6A00000 - 0xFEB3FFFF --- */
 #define DRAM_START2 0xFEB40000
 
-// NX BIS driver sector cache.
-#define NX_BIS_CACHE_ADDR 0xFEE00000
-#define NX_BIS_CACHE_SZ   0x100000
-
 // USB buffers.
 #define USBD_ADDR 0xFEF00000
 #define USB_DESCRIPTOR_ADDR 0xFEF40000
diff --git a/nyx/nyx_gui/frontend/gui_info.c b/nyx/nyx_gui/frontend/gui_info.c
index 2979eff..a2605dc 100644
--- a/nyx/nyx_gui/frontend/gui_info.c
+++ b/nyx/nyx_gui/frontend/gui_info.c
@@ -413,8 +413,10 @@ t210b01:;
 	LIST_INIT(gpt);
 	nx_emmc_gpt_parse(&gpt, &emmc_storage);
 	emmc_part_t *cal0_part = nx_emmc_part_find(&gpt, "PRODINFO"); // check if null
-	nx_emmc_bis_init(cal0_part);
+	nx_emmc_bis_init(cal0_part, false, 0);
 	nx_emmc_bis_read(0, 0x40, cal0_buf);
+	nx_emmc_bis_end();
+	nx_emmc_gpt_free(&gpt);
 
 	// Clear BIS keys slots and reinstate SBK.
 	hos_bis_keys_clear();
diff --git a/nyx/nyx_gui/storage/nx_emmc_bis.c b/nyx/nyx_gui/storage/nx_emmc_bis.c
index f88a644..b7134f6 100644
--- a/nyx/nyx_gui/storage/nx_emmc_bis.c
+++ b/nyx/nyx_gui/storage/nx_emmc_bis.c
@@ -1,8 +1,8 @@
 /*
  * eMMC BIS driver for Nintendo Switch
  *
- * Copyright (c) 2019 shchmue
- * Copyright (c) 2019-2020 CTCaer
+ * Copyright (c) 2019-2020 shchmue
+ * Copyright (c) 2019-2021 CTCaer
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -21,48 +21,64 @@
 #include
+#include
 #include
+#include
 #include "../storage/nx_emmc.h"
+#include
 #include
 #include
 
-#define MAX_SEC_CACHE_ENTRIES 1500
+#define BIS_CLUSTER_SECTORS 32
+#define BIS_CLUSTER_SIZE    16384
+#define BIS_CACHE_MAX_ENTRIES 16384
+#define BIS_CACHE_LOOKUP_TBL_EMPTY_ENTRY -1
 
-typedef struct _sector_cache_t
+typedef struct _cluster_cache_t
 {
-	u32 sector;
-	u32 visit_cnt;
-	u8  tweak[0x10];
-	u8  data[0x200];
-	u8  align[8];
-} sector_cache_t;
+	u32  cluster_idx;            // Index of the cluster in the partition.
+	bool dirty;                  // Has been modified without write-back flag.
+	u8   data[BIS_CLUSTER_SIZE]; // The cached cluster itself. Aligned to 8 bytes for DMA engine.
+} cluster_cache_t;
 
-static u8 ks_crypt = 0;
-static u8 ks_tweak = 0;
-static u32 sector_cache_cnt = 0;
+typedef struct _bis_cache_t
+{
+	bool full;
+	bool enabled;
+	u32  dirty_cnt;
+	u32  top_idx;
+	u8   dma_buff[BIS_CLUSTER_SIZE]; // Aligned to 8 bytes for DMA engine.
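+	// Cluster cache entries. Flexible array of up to BIS_CACHE_MAX_ENTRIES clusters, held in the NX_BIS_CACHE_ADDR carveout.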
+	cluster_cache_t clusters[];
+} bis_cache_t;
+
+static u8 ks_crypt = 0;
+static u8 ks_tweak = 0;
+static u32 emu_offset = 0;
 static emmc_part_t *system_part = NULL;
-static sector_cache_t *sector_cache = (sector_cache_t *)NX_BIS_CACHE_ADDR;
+static u32 *cache_lookup_tbl = (u32 *)NX_BIS_LOOKUP_ADDR;
+static bis_cache_t *bis_cache = (bis_cache_t *)NX_BIS_CACHE_ADDR;
 
-static void _gf256_mul_x_le(u8 *block)
+static void _gf256_mul_x_le(void *block)
 {
-	u8 *pdata = (u8 *)block;
+	u32 *pdata = (u32 *)block;
 	u32 carry = 0;
 
-	for (u32 i = 0; i < 0x10; i++)
+	for (u32 i = 0; i < 4; i++)
 	{
-		u8 b = pdata[i];
+		u32 b = pdata[i];
 		pdata[i] = (b << 1) | carry;
-		carry = b >> 7;
+		carry = b >> 31;
 	}
 
 	if (carry)
 		pdata[0x0] ^= 0x87;
 }
 
-static int _nx_aes_xts_crypt_sec(u32 ks1, u32 ks2, u32 enc, u8 *tweak, bool regen_tweak, u32 tweak_exp, u64 sec, void *dst, void *src, u32 sec_size)
+static int _nx_aes_xts_crypt_sec(u32 tweak_ks, u32 crypt_ks, u32 enc, u8 *tweak, bool regen_tweak, u32 tweak_exp, u64 sec, void *dst, void *src, u32 sec_size)
 {
-	u8 *pdst = (u8 *)dst;
-	u8 *psrc = (u8 *)src;
+	u32 *pdst = (u32 *)dst;
+	u32 *psrc = (u32 *)src;
+	u32 *ptweak = (u32 *)tweak;
 
 	if (regen_tweak)
 	{
@@ -71,155 +87,298 @@ static int _nx_aes_xts_crypt_sec(u32 ks1, u32 ks2, u32 enc, u8 *tweak, bool rege
 			tweak[i] = sec & 0xFF;
 			sec >>= 8;
 		}
-		if (!se_aes_crypt_block_ecb(ks1, 1, tweak, tweak))
+		if (!se_aes_crypt_block_ecb(tweak_ks, 1, tweak, tweak))
 			return 0;
 	}
 
+	// tweak_exp allows us to use a saved tweak to reduce _gf256_mul_x_le calls.
 	for (u32 i = 0; i < (tweak_exp << 5); i++)
 		_gf256_mul_x_le(tweak);
 
-	u8 tmp_tweak[0x10];
-	memcpy(tmp_tweak, tweak, 0x10);
+	u8 orig_tweak[SE_KEY_128_SIZE] __attribute__((aligned(4)));
+	memcpy(orig_tweak, tweak, SE_KEY_128_SIZE);
 
-	// We are assuming a 0x10-aligned sector size in this implementation.
+	// We are assuming a 16-byte aligned sector size in this implementation.
 	for (u32 i = 0; i < (sec_size >> 4); i++)
 	{
-		for (u32 j = 0; j < 0x10; j++)
-			pdst[j] = psrc[j] ^ tweak[j];
+		for (u32 j = 0; j < 4; j++)
+			pdst[j] = psrc[j] ^ ptweak[j];
 
 		_gf256_mul_x_le(tweak);
 
-		psrc += 0x10;
-		pdst += 0x10;
+		psrc += 4;
+		pdst += 4;
 	}
 
-	se_aes_crypt_ecb(ks2, enc, dst, sec_size, src, sec_size);
+	if (!se_aes_crypt_ecb(crypt_ks, enc, dst, sec_size, dst, sec_size))
+		return 0;
 
-	memcpy(tweak, tmp_tweak, 0x10);
-
-	pdst = (u8 *)dst;
+	pdst = (u32 *)dst;
+	ptweak = (u32 *)orig_tweak;
 
 	for (u32 i = 0; i < (sec_size >> 4); i++)
 	{
-		for (u32 j = 0; j < 0x10; j++)
-			pdst[j] = pdst[j] ^ tweak[j];
+		for (u32 j = 0; j < 4; j++)
+			pdst[j] = pdst[j] ^ ptweak[j];
 
-		_gf256_mul_x_le(tweak);
-		pdst += 0x10;
+		_gf256_mul_x_le(orig_tweak);
+		pdst += 4;
 	}
 
 	return 1;
 }
 
+static int nx_emmc_bis_write_block(u32 sector, u32 count, void *buff, bool flush)
+{
+	if (!system_part)
+		return 3; // Not ready.
+
+	int res;
+	u8 tweak[SE_KEY_128_SIZE] __attribute__((aligned(4)));
+	u32 cluster = sector / BIS_CLUSTER_SECTORS;
+	u32 aligned_sector = cluster * BIS_CLUSTER_SECTORS;
+	u32 sector_in_cluster = sector % BIS_CLUSTER_SECTORS;
+	u32 lookup_idx = cache_lookup_tbl[cluster];
+	bool is_cached = lookup_idx != BIS_CACHE_LOOKUP_TBL_EMPTY_ENTRY;
+
+	// Write to cached cluster.
+	if (is_cached)
+	{
+		if (buff)
+			memcpy(bis_cache->clusters[lookup_idx].data + sector_in_cluster * NX_EMMC_BLOCKSIZE, buff, count * NX_EMMC_BLOCKSIZE);
+		else
+			buff = bis_cache->clusters[lookup_idx].data;
+		if (!bis_cache->clusters[lookup_idx].dirty)
+			bis_cache->dirty_cnt++;
+		bis_cache->clusters[lookup_idx].dirty = true;
+
+		if (!flush)
+			return 0; // Success.
+
+		// Reset args to trigger a full cluster flush to emmc.
+		sector_in_cluster = 0;
+		sector = aligned_sector;
+		count = BIS_CLUSTER_SECTORS;
+	}
+
+	// Encrypt cluster.
+	if (!_nx_aes_xts_crypt_sec(ks_tweak, ks_crypt, 1, tweak, true, sector_in_cluster, cluster, bis_cache->dma_buff, buff, count * NX_EMMC_BLOCKSIZE))
+		return 1; // Encryption error.
+
+	// Write to eMMC or emuMMC (SD).
+	if (!emu_offset)
+		res = nx_emmc_part_write(&emmc_storage, system_part, sector, count, bis_cache->dma_buff);
+	else
+		res = sdmmc_storage_write(&sd_storage, emu_offset + system_part->lba_start + sector, count, bis_cache->dma_buff);
+	if (!res)
+		return 1; // R/W error.
+
+	// Mark cache entry not dirty if write succeeds.
+	if (is_cached)
+	{
+		bis_cache->clusters[lookup_idx].dirty = false;
+		bis_cache->dirty_cnt--;
+	}
+
+	return 0; // Success.
+}
+
+static void _nx_emmc_bis_cluster_cache_init(bool enable_cache)
+{
+	u32 cache_lookup_tbl_size = (system_part->lba_end - system_part->lba_start + 1) / BIS_CLUSTER_SECTORS * sizeof(*cache_lookup_tbl);
+
+	// Clear cache header.
+	memset(bis_cache, 0, sizeof(bis_cache_t));
+
+	// Clear cluster lookup table.
+	memset(cache_lookup_tbl, BIS_CACHE_LOOKUP_TBL_EMPTY_ENTRY, cache_lookup_tbl_size);
+
+	// Enable cache.
+	bis_cache->enabled = enable_cache;
+}
+
+static void _nx_emmc_bis_flush_cache()
+{
+	if (!bis_cache->enabled || !bis_cache->dirty_cnt)
+		return;
+
+	for (u32 i = 0; i < bis_cache->top_idx && bis_cache->dirty_cnt; i++)
+	{
+		if (!bis_cache->clusters[i].dirty)
+			continue;
+
+		nx_emmc_bis_write_block(bis_cache->clusters[i].cluster_idx * BIS_CLUSTER_SECTORS, BIS_CLUSTER_SECTORS, NULL, true);
+	}
+
+	_nx_emmc_bis_cluster_cache_init(true);
+}
+
+static int nx_emmc_bis_read_block_normal(u32 sector, u32 count, void *buff)
+{
+	static u32 prev_cluster = -1;
+	static u32 prev_sector = 0;
+	static u8 tweak[SE_KEY_128_SIZE] __attribute__((aligned(4)));
+
+	int res;
+	bool regen_tweak = true;
+	u32 tweak_exp = 0;
+	u32 cluster = sector / BIS_CLUSTER_SECTORS;
+	u32 sector_in_cluster = sector % BIS_CLUSTER_SECTORS;
+
+	// If not reading from cache, do a regular read and decrypt.
+	if (!emu_offset)
+		res = nx_emmc_part_read(&emmc_storage, system_part, sector, count, bis_cache->dma_buff);
+	else
+		res = sdmmc_storage_read(&sd_storage, emu_offset + system_part->lba_start + sector, count, bis_cache->dma_buff);
+	if (!res)
+		return 1; // R/W error.
+
+	if (prev_cluster != cluster) // Sector in different cluster than last read.
+	{
+		prev_cluster = cluster;
+		tweak_exp = sector_in_cluster;
+	}
+	else if (sector > prev_sector) // Sector in same cluster and past last sector.
+	{
+		// Calculates the new tweak using the saved one, reducing expensive _gf256_mul_x_le calls.
+		tweak_exp = sector - prev_sector - 1;
+		regen_tweak = false;
+	}
+	else // Sector in same cluster and before or same as last sector.
+		tweak_exp = sector_in_cluster;
+
+	// Maximum one cluster (1 XTS crypto block 16KB).
+	if (!_nx_aes_xts_crypt_sec(ks_tweak, ks_crypt, 0, tweak, regen_tweak, tweak_exp, prev_cluster, buff, bis_cache->dma_buff, count * NX_EMMC_BLOCKSIZE))
+		return 1; // Decryption error.
+
+	prev_sector = sector + count - 1;
+
+	return 0; // Success.
+}
+
+static int nx_emmc_bis_read_block_cached(u32 sector, u32 count, void *buff)
+{
+	int res;
+	u8 cache_tweak[SE_KEY_128_SIZE] __attribute__((aligned(4)));
+	u32 cluster = sector / BIS_CLUSTER_SECTORS;
+	u32 cluster_sector = cluster * BIS_CLUSTER_SECTORS;
+	u32 sector_in_cluster = sector % BIS_CLUSTER_SECTORS;
+	u32 lookup_idx = cache_lookup_tbl[cluster];
+
+	// Read from cached cluster.
+	if (lookup_idx != BIS_CACHE_LOOKUP_TBL_EMPTY_ENTRY)
+	{
+		memcpy(buff, bis_cache->clusters[lookup_idx].data + sector_in_cluster * NX_EMMC_BLOCKSIZE, count * NX_EMMC_BLOCKSIZE);
+
+		return 0; // Success.
+	}
+
+	// Flush cache if full.
+	if (bis_cache->top_idx >= BIS_CACHE_MAX_ENTRIES)
+		_nx_emmc_bis_flush_cache();
+
+	// Set new cached cluster parameters.
+	bis_cache->clusters[bis_cache->top_idx].cluster_idx = cluster;
+	bis_cache->clusters[bis_cache->top_idx].dirty = false;
+	cache_lookup_tbl[cluster] = bis_cache->top_idx;
+
+	// Read the whole cluster the sector resides in.
+	if (!emu_offset)
+		res = nx_emmc_part_read(&emmc_storage, system_part, cluster_sector, BIS_CLUSTER_SECTORS, bis_cache->dma_buff);
+	else
+		res = sdmmc_storage_read(&sd_storage, emu_offset + system_part->lba_start + cluster_sector, BIS_CLUSTER_SECTORS, bis_cache->dma_buff);
+	if (!res)
+		return 1; // R/W error.
+
+	// Decrypt cluster.
+	if (!_nx_aes_xts_crypt_sec(ks_tweak, ks_crypt, 0, cache_tweak, true, 0, cluster, bis_cache->dma_buff, bis_cache->dma_buff, BIS_CLUSTER_SIZE))
+		return 1; // Decryption error.
+
+	// Copy to cluster cache.
+	memcpy(bis_cache->clusters[bis_cache->top_idx].data, bis_cache->dma_buff, BIS_CLUSTER_SIZE);
+	memcpy(buff, bis_cache->dma_buff + sector_in_cluster * NX_EMMC_BLOCKSIZE, count * NX_EMMC_BLOCKSIZE);
+
+	// Increment cache count.
+	bis_cache->top_idx++;
+
+	return 0; // Success.
+}
+
 static int nx_emmc_bis_read_block(u32 sector, u32 count, void *buff)
 {
 	if (!system_part)
 		return 3; // Not ready.
 
-	static u32 prev_cluster = -1;
-	static u32 prev_sector = 0;
-	static u8 tweak[0x10];
-
-	u32 cache_idx = 0;
-	u32 tweak_exp = 0;
-	bool regen_tweak = true;
-	bool cache_sector = false;
-
-	if (count == 1)
-	{
-		for ( ; cache_idx < sector_cache_cnt; cache_idx++)
-		{
-			if (sector_cache[cache_idx].sector == sector)
-			{
-				sector_cache[cache_idx].visit_cnt++;
-				memcpy(buff, sector_cache[cache_idx].data, 0x200);
-				memcpy(tweak, sector_cache[cache_idx].tweak, 0x10);
-				prev_sector = sector;
-				prev_cluster = sector >> 5;
-
-				return 0;
-			}
-		}
-		// add to cache
-		if (cache_idx == sector_cache_cnt && cache_idx < MAX_SEC_CACHE_ENTRIES)
-		{
-			sector_cache[cache_idx].sector = sector;
-			sector_cache[cache_idx].visit_cnt++;
-			cache_sector = true;
-			sector_cache_cnt++;
-		}
-	}
-
-	if (nx_emmc_part_read(&emmc_storage, system_part, sector, count, buff))
-	{
-		if (prev_cluster != sector >> 5) // Sector in different cluster than last read.
-		{
-			prev_cluster = sector >> 5;
-			tweak_exp = sector % 0x20;
-		}
-		else if (sector > prev_sector) // Sector in same cluster and past last sector.
-		{
-			tweak_exp = sector - prev_sector - 1;
-			regen_tweak = false;
-		}
-		else // Sector in same cluster and before or same as last sector.
-			tweak_exp = sector % 0x20;
-
-		// Maximum one cluster (1 XTS crypto block 16KB).
-		_nx_aes_xts_crypt_sec(ks_tweak, ks_crypt, 0, tweak, regen_tweak, tweak_exp, prev_cluster, buff, buff, count << 9);
-		if (cache_sector)
-		{
-			memcpy(sector_cache[cache_idx].data, buff, 0x200);
-			memcpy(sector_cache[cache_idx].tweak, tweak, 0x10);
-		}
-		prev_sector = sector + count - 1;
-
-		return 0;
-	}
-
-	// Error occurred.
-	return 1;
+	if (bis_cache->enabled)
+		return nx_emmc_bis_read_block_cached(sector, count, buff);
+	else
+		return nx_emmc_bis_read_block_normal(sector, count, buff);
 }
 
 int nx_emmc_bis_read(u32 sector, u32 count, void *buff)
 {
-	int res = 1;
 	u8 *buf = (u8 *)buff;
 	u32 curr_sct = sector;
 
 	while (count)
 	{
-		u32 sct_cnt = MIN(count, 0x20);
-		res = nx_emmc_bis_read_block(curr_sct, sct_cnt, buf);
-		if (res)
-			return 1;
+		u32 sct_cnt = MIN(count, BIS_CLUSTER_SECTORS);
+		if (nx_emmc_bis_read_block(curr_sct, sct_cnt, buf))
+			return 1;
 
-		count -= sct_cnt;
+		count    -= sct_cnt;
 		curr_sct += sct_cnt;
-		buf += 512 * sct_cnt;
+		buf += sct_cnt * NX_EMMC_BLOCKSIZE;
 	}
 
-	return res;
+	return 0;
 }
 
-void nx_emmc_bis_init(emmc_part_t *part)
+int nx_emmc_bis_write(u32 sector, u32 count, void *buff)
+{
+	u8 *buf = (u8 *)buff;
+	u32 curr_sct = sector;
+
+	while (count)
+	{
+		u32 sct_cnt = MIN(count, BIS_CLUSTER_SECTORS);
+		if (nx_emmc_bis_write_block(curr_sct, sct_cnt, buf, false))
+			return 1;
+
+		count    -= sct_cnt;
+		curr_sct += sct_cnt;
+		buf += sct_cnt * NX_EMMC_BLOCKSIZE;
+	}
+
+	return 0;
+}
+
+void nx_emmc_bis_init(emmc_part_t *part, bool enable_cache, u32 emummc_offset)
 {
 	system_part = part;
-	sector_cache_cnt = 0;
+	emu_offset = emummc_offset;
 
-	switch (part->index)
+	_nx_emmc_bis_cluster_cache_init(enable_cache);
+
+	if (!strcmp(part->name, "PRODINFO") || !strcmp(part->name, "PRODINFOF"))
 	{
-	case 0: // PRODINFO.
-	case 1: // PRODINFOF.
 		ks_crypt = 0;
 		ks_tweak = 1;
-		break;
-	case 8: // SAFE.
+	}
+	else if (!strcmp(part->name, "SAFE"))
+	{
 		ks_crypt = 2;
 		ks_tweak = 3;
-		break;
-	case 9: // SYSTEM.
-	case 10: // USER.
+	}
+	else if (!strcmp(part->name, "SYSTEM") || !strcmp(part->name, "USER"))
+	{
 		ks_crypt = 4;
 		ks_tweak = 5;
-		break;
 	}
+	else
+		system_part = NULL;
+}
+
+void nx_emmc_bis_end()
+{
+	_nx_emmc_bis_flush_cache();
+	system_part = NULL;
 }
diff --git a/nyx/nyx_gui/storage/nx_emmc_bis.h b/nyx/nyx_gui/storage/nx_emmc_bis.h
index 70ec895..f1783b3 100644
--- a/nyx/nyx_gui/storage/nx_emmc_bis.h
+++ b/nyx/nyx_gui/storage/nx_emmc_bis.h
@@ -223,7 +223,8 @@ typedef struct _nx_emmc_cal0_t
 	u8 console_6axis_sensor_mount_type;
 } __attribute__((packed)) nx_emmc_cal0_t;
 
-int nx_emmc_bis_read(u32 sector, u32 count, void *buff);
-void nx_emmc_bis_init(emmc_part_t *part);
+int  nx_emmc_bis_read(u32 sector, u32 count, void *buff);
+void nx_emmc_bis_init(emmc_part_t *part, bool enable_cache, u32 emummc_offset);
+void nx_emmc_bis_end();
 
 #endif
\ No newline at end of file