Fix smcComputeCmac, generally improve userpage cache usage

Michael Scire 2018-03-08 04:59:00 -08:00
parent ec8f27f1be
commit 8e5228866c
3 changed files with 24 additions and 11 deletions


@@ -216,7 +216,7 @@ void clear_aes_keyslot_iv(unsigned int keyslot) {
}
for (size_t i = 0; i < (0x10 >> 2); i++) {
- SECURITY_ENGINE->AES_KEYTABLE_ADDR = (keyslot << 4) | 8;
+ SECURITY_ENGINE->AES_KEYTABLE_ADDR = (keyslot << 4) | 8 | i;
SECURITY_ENGINE->AES_KEYTABLE_DATA = 0;
}
}
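
Context for the "| i" fix above: AES_KEYTABLE_ADDR selects a 32-bit word within the keyslot, and the IV occupies four consecutive words starting at offset 8, so without the index the loop wrote the same word four times and left the remaining IV words holding stale data. A minimal host-side sketch of the corrected addressing, with the memory-mapped register access replaced by a stand-in array; the word layout described in the comments is a reading of the code's offsets, not quoted from a datasheet:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the memory-mapped SE keytable; the real code writes      */
    /* SECURITY_ENGINE->AES_KEYTABLE_ADDR and ->AES_KEYTABLE_DATA instead.   */
    static uint32_t fake_keytable[0x100];

    static void keytable_write(uint32_t addr, uint32_t data) {
        fake_keytable[addr] = data; /* hypothetical stand-in for the MMIO pair */
    }

    /* Clear the four 32-bit IV words of a keyslot. The address encodes      */
    /* (keyslot << 4) | word; the IV starts at word 8, so the missing "| i"  */
    /* meant word 8 was written four times while words 9-11 kept stale data. */
    static void clear_keyslot_iv_sketch(unsigned int keyslot) {
        for (uint32_t i = 0; i < (0x10 >> 2); i++) {
            keytable_write((keyslot << 4) | 8 | i, 0);
        }
    }

    int main(void) {
        clear_keyslot_iv_sketch(3);
        for (unsigned int w = 8; w < 12; w++) {
            printf("slot 3, word %u = %u\n", w, (unsigned int)fake_keytable[(3u << 4) | w]);
        }
        return 0;
    }
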
@@ -585,10 +585,13 @@ void se_compute_aes_cmac(unsigned int keyslot, void *cmac, size_t cmac_size, con
generic_panic();
}
+ if (data_size) {
+     flush_dcache_range((uint8_t *)data, (uint8_t *)data + data_size);
+ }
/* Generate the derived key, to be XOR'd with final output block. */
- uint8_t derived_key[0x10];
- memset(derived_key, 0, sizeof(derived_key));
- se_aes_128_ecb_encrypt_block(keyslot, derived_key, sizeof(derived_key), derived_key, sizeof(derived_key));
+ uint8_t derived_key[0x10] = {0};
+ se_aes_ecb_encrypt_block(keyslot, derived_key, sizeof(derived_key), derived_key, sizeof(derived_key), config_high);
shift_left_xor_rb(derived_key);
if (data_size & 0xF) {
shift_left_xor_rb(derived_key);
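
The derived key computed above is the standard CMAC subkey: encrypt an all-zero block under the keyslot's key, double it once in GF(2^128) via shift_left_xor_rb, and double it a second time when the final message block will need 10...0 padding. A self-contained sketch of that derivation in the style of RFC 4493, with the SE ECB encryption replaced by a caller-supplied callback; the helper names are illustrative, not the exosphere API:

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    /* One doubling in GF(2^128): shift the 16-byte block left by one bit and */
    /* XOR in Rb = 0x87 if the bit shifted out of the MSB was set (RFC 4493). */
    static void cmac_shift_left_xor_rb(uint8_t block[16]) {
        uint8_t carry = 0;
        for (int i = 15; i >= 0; i--) {
            uint8_t next_carry = (uint8_t)(block[i] >> 7);
            block[i] = (uint8_t)((block[i] << 1) | carry);
            carry = next_carry;
        }
        if (carry) {
            block[15] ^= 0x87;
        }
    }

    /* encrypt_block stands in for the keyslot ECB-encrypt call used above. */
    typedef void (*ecb_encrypt_fn)(uint8_t block[16]);

    /* Derive the CMAC subkey for a message of data_size bytes:              */
    /*   L  = E_K(0^128)                                                      */
    /*   K1 = L * x   (used when the final block is a full 16 bytes)          */
    /*   K2 = K1 * x  (used when the final block needs 10...0 padding)        */
    static void derive_cmac_subkey(ecb_encrypt_fn encrypt_block, size_t data_size,
                                   uint8_t subkey[16]) {
        memset(subkey, 0, 16);
        encrypt_block(subkey);
        cmac_shift_left_xor_rb(subkey);
        if ((data_size & 0xF) != 0) {
            cmac_shift_left_xor_rb(subkey);
        }
    }
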
@@ -598,6 +601,7 @@ void se_compute_aes_cmac(unsigned int keyslot, void *cmac, size_t cmac_size, con
SECURITY_ENGINE->CRYPTO_REG = (keyslot << 24) | (0x145);
clear_aes_keyslot_iv(keyslot);
unsigned int num_blocks = (data_size + 0xF) >> 4;
/* Handle aligned blocks. */
if (num_blocks > 1) {
@@ -607,8 +611,7 @@ void se_compute_aes_cmac(unsigned int keyslot, void *cmac, size_t cmac_size, con
}
/* Create final block. */
- uint8_t last_block[0x10];
- memset(last_block, 0, sizeof(last_block));
+ uint8_t last_block[0x10] = {0};
if (data_size & 0xF) {
memcpy(last_block, data + (data_size & ~0xF), data_size & 0xF);
last_block[data_size & 0xF] = 0x80; /* Last block = data || 100...0 */
@@ -621,6 +624,7 @@ void se_compute_aes_cmac(unsigned int keyslot, void *cmac, size_t cmac_size, con
}
/* Perform last operation. */
SECURITY_ENGINE->BLOCK_COUNT_REG = 0;
+ flush_dcache_range(last_block, last_block + sizeof(last_block));
trigger_se_blocking_op(OP_START, NULL, 0, last_block, sizeof(last_block));
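
The final block assembled above is the last (possibly partial) 16 bytes of the message, padded with a single 0x80 byte and zeros when it is short, then XOR'd with the derived subkey before the closing encryption; the new flush_dcache_range call is what guarantees the SE, which reads last_block by physical address, sees the CPU's writes. A small pure-software sketch of that final-block construction; the full-block branch is assumed, since the hunk only shows the padded case:

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    /* Build the final CMAC block: the last (possibly partial) chunk of data, */
    /* padded with 0x80 and zeros when it is short, then XOR'd with the       */
    /* derived subkey (K1 or K2).                                             */
    static void build_cmac_last_block(const uint8_t *data, size_t data_size,
                                      const uint8_t subkey[16], uint8_t out[16]) {
        memset(out, 0, 16);
        size_t rem = data_size & 0xF;
        if (rem != 0) {
            memcpy(out, data + (data_size & ~(size_t)0xF), rem);
            out[rem] = 0x80; /* last block = data || 10...0 */
        } else if (data_size != 0) {
            /* Assumed full-block case: use the last 16 bytes as-is. */
            memcpy(out, data + data_size - 16, 16);
        }
        for (size_t i = 0; i < 16; i++) {
            out[i] ^= subkey[i];
        }
    }
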


@@ -357,6 +357,7 @@ uint32_t user_compute_cmac(smc_args_t *args) {
}
+ flush_dcache_range(user_data, user_data + size);
se_compute_aes_128_cmac(keyslot, result_cmac, 0x10, user_data, size);
/* Copy CMAC out. */
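
Every flush_dcache_range added in this file serves the same purpose: the buffer was just filled by ordinary cached CPU writes (copied in from the mapped user page), and the security engine then reads it by physical address, bypassing the CPU data cache, so dirty lines must be cleaned to memory before the operation starts. A hedged sketch of that pattern; flush_dcache_range is declared with an assumed prototype and the SE call is a placeholder, not a real exosphere function:

    #include <stddef.h>
    #include <stdint.h>

    /* Assumed prototype: clean (write back) the CPU data cache for [start, end). */
    extern void flush_dcache_range(const void *start, const void *end);

    /* Placeholder for any SE operation that consumes the buffer via DMA. */
    extern void se_consume_buffer(const void *buf, size_t size);

    /* The recurring shape of the fixed call sites: make the CPU's writes     */
    /* visible in memory before the SE reads the buffer by physical address.  */
    static void run_se_op_coherently(const void *buf, size_t size) {
        flush_dcache_range(buf, (const uint8_t *)buf + size);
        se_consume_buffer(buf, size);
    }
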
@@ -400,6 +401,8 @@ uint32_t user_load_rsa_oaep_key(smc_args_t *args) {
return 2;
}
+ flush_dcache_range(user_data, user_data + size);
/* Ensure that our private key is 0x100 bytes. */
if (gcm_decrypt_key(user_data, size, user_data, size, sealed_kek, 0x10, wrapped_key, 0x10, CRYPTOUSECASE_RSAOAEP, is_personalized) < 0x100) {
return 2;
@@ -447,6 +450,8 @@ uint32_t user_decrypt_rsa_private_key(smc_args_t *args) {
return 2;
}
+ flush_dcache_range(user_data, user_data + size);
size_t out_size;
if ((out_size = gcm_decrypt_key(user_data, size, user_data, size, sealed_kek, 0x10, wrapped_key, 0x10, CRYPTOUSECASE_RSAPRIVATE, is_personalized)) == 0) {
@@ -495,6 +500,8 @@ uint32_t user_load_secure_exp_mod_key(smc_args_t *args) {
return 2;
}
+ flush_dcache_range(user_data, user_data + size);
size_t out_size;
/* Ensure that our key is non-zero bytes. */
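
Besides the new cache flushes, the three key-loading handlers above share the same decrypt-and-validate shape: the wrapped key is decrypted in place with gcm_decrypt_key and the result is rejected if it is too small (at least 0x100 bytes for the RSA-OAEP private key, non-zero for the others). A hypothetical consolidation of that shape; the gcm_decrypt_key prototype is inferred from the call sites, not taken from the header:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Assumed prototype, inferred from the call sites above: unwraps a key  */
    /* in place and returns the plaintext size, with 0 indicating failure.   */
    extern size_t gcm_decrypt_key(void *dst, size_t dst_size,
                                  const void *src, size_t src_size,
                                  const void *sealed_kek, size_t kek_size,
                                  const void *wrapped_key, size_t key_size,
                                  unsigned int usecase, bool is_personalized);

    extern void flush_dcache_range(const void *start, const void *end);

    /* Hypothetical helper showing the shared shape of these handlers: clean */
    /* the cache for the freshly copied user data, decrypt the wrapped key   */
    /* in place, and reject results below the caller's minimum (min_size >= 1). */
    static bool unwrap_user_key(void *user_data, size_t size,
                                const void *sealed_kek, const void *wrapped_key,
                                unsigned int usecase, bool is_personalized,
                                size_t min_size, size_t *out_size) {
        flush_dcache_range(user_data, (const uint8_t *)user_data + size);
        size_t decrypted = gcm_decrypt_key(user_data, size, user_data, size,
                                           sealed_kek, 0x10, wrapped_key, 0x10,
                                           usecase, is_personalized);
        if (decrypted < min_size) {
            return false;
        }
        *out_size = decrypted;
        return true;
    }
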


@@ -32,6 +32,8 @@ bool upage_init(upage_ref_t *upage, void *user_address) {
mmu_map_page(mmu_l3_tbl, USER_PAGE_SECURE_MONITOR_ADDR, upage->user_address, userpage_attributes);
tlb_invalidate_page_inner_shareable((void *)USER_PAGE_SECURE_MONITOR_ADDR);
upage->secure_monitor_address = USER_PAGE_SECURE_MONITOR_ADDR;
} else {
generic_panic();
}
}
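
For context on the userpage path the SMC handlers depend on: upage_init maps the page backing the caller's buffer at the fixed USER_PAGE_SECURE_MONITOR_ADDR, invalidates any stale TLB entry for that address, and records the mapping in the upage_ref_t; the else branch shown here panics when its (elided) guarding condition is not met, instead of silently leaving the page unusable. A hedged sketch of how a caller might use such a reference; the field types and the page-offset handling are assumptions, only the names come from the hunk:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Field types are assumed here; only the member names appear in the hunk. */
    typedef struct {
        uintptr_t user_address;
        uintptr_t secure_monitor_address;
    } upage_ref_t;

    extern bool upage_init(upage_ref_t *upage, void *user_address);

    /* Illustrative caller: establish the secure-monitor alias for the page   */
    /* backing a user pointer, then read through it (ignoring values that     */
    /* would straddle the 4 KiB page boundary). Cache maintenance on anything */
    /* handed to the SE afterwards is exactly what this commit's flushes add. */
    static bool read_user_u32(void *user_address, uint32_t *out) {
        upage_ref_t page = {0};
        if (!upage_init(&page, user_address)) {
            return false;
        }
        size_t offset = (uintptr_t)user_address & 0xFFF;
        memcpy(out, (const void *)(page.secure_monitor_address + offset), sizeof(*out));
        return true;
    }
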