Mirror of https://github.com/Atmosphere-NX/Atmosphere

exo2: minor fixes, now completes main and receives SMCs on hw
commit 8c4c1db506 (parent 27843314a4)
11 changed files with 134 additions and 47 deletions

@@ -125,7 +125,7 @@ namespace ams::secmon::boot {
         SetL3BlockEntry(l3, MemoryRegionVirtualTzramConfigurationData.GetAddress(), MemoryRegionPhysicalTzramConfigurationData.GetAddress(), MemoryRegionVirtualTzramConfigurationData.GetSize(), MappingAttributesEl3SecureRwData);

         /* Map the page tables. */
-        SetL3BlockEntry(l3, util::AlignDown(MemoryRegionVirtualTzramL1PageTable.GetAddress(), PageSize), util::AlignDown(MemoryRegionPhysicalTzramL1PageTable.GetAddress(), PageSize), PageSize, MappingAttributesEl3SecureDevice);
+        SetL3BlockEntry(l3, util::AlignDown(MemoryRegionVirtualTzramL1PageTable.GetAddress(), PageSize), util::AlignDown(MemoryRegionPhysicalTzramL1PageTable.GetAddress(), PageSize), PageSize, MappingAttributesEl3SecureRwData);
         SetL3BlockEntry(l3, MemoryRegionVirtualTzramL2L3PageTable.GetAddress(), MemoryRegionPhysicalTzramL2L3PageTable.GetAddress(), MemoryRegionVirtualTzramL2L3PageTable.GetSize(), MappingAttributesEl3SecureRwData);
     }
 }

@@ -19,6 +19,26 @@
 namespace ams::diag {

     void AbortImpl() {
+        /* TODO: This is here for debugging. Remove this when exo2 is working. */
+        #if 1
+        {
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x00) = 0xDDDDDDDD;
+
+            u64 temp_reg;
+            __asm__ __volatile__("mov %0, lr" : "=r"(temp_reg) :: "memory");
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x10) = static_cast<u32>(temp_reg >> 0);
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x14) = static_cast<u32>(temp_reg >> 32);
+
+
+            __asm__ __volatile__("mov %0, sp" : "=r"(temp_reg) :: "memory");
+            for (int i = 0; i < 0x100; i += 4) {
+                *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x20 + i) = *(volatile u32 *)(temp_reg + i);
+            }
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDevicePmc.GetAddress() + 0x50) = 0x02;
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDevicePmc.GetAddress() + 0x00) = 0x10;
+        }
+        #endif
+
         secmon::SetError(pkg1::ErrorInfo_UnknownAbort);
         secmon::ErrorReboot();
     }

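The block added to AbortImpl above is a bring-up aid: it drops a recognizable marker, the link register, and a stack snapshot into an always-mapped debug window, then forces a reset through the PMC. A minimal sketch of that pattern is below; the helper names and the symbolic meaning of the two PMC offsets (scratch flag at +0x50, reset request at +0x00) are assumptions for illustration, not Atmosphere APIs.

#include <cstdint>

namespace sketch {

    /* Store one breadcrumb word into a fixed, always-mapped debug window. */
    inline void WriteDebugWord(std::uintptr_t debug_base, std::uintptr_t offset, std::uint32_t value) {
        *reinterpret_cast<volatile std::uint32_t *>(debug_base + offset) = value;
    }

    /* Request a reset through the PMC. The raw offsets and values mirror the diff
       above (+0x50 <- 0x02, +0x00 <- 0x10); their register names are assumed. */
    [[noreturn]] inline void RequestPmcReset(std::uintptr_t pmc_base) {
        *reinterpret_cast<volatile std::uint32_t *>(pmc_base + 0x50) = 0x02;
        *reinterpret_cast<volatile std::uint32_t *>(pmc_base + 0x00) = 0x10;
        for (;;) { /* Spin until the reset takes effect. */ }
    }

}
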
@@ -36,6 +56,42 @@ namespace ams::secmon {
     }

     NORETURN void ErrorReboot() {
+        /* TODO: This is here for debugging. Remove this when exo2 is working. */
+        #if 1
+        {
+            u64 temp_reg;
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x00) = 0x5A5A5A5A;
+
+            __asm__ __volatile__("mrs %0, esr_el3" : "=r"(temp_reg) :: "memory");
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x08) = static_cast<u32>(temp_reg >> 0);
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x0C) = static_cast<u32>(temp_reg >> 32);
+
+            __asm__ __volatile__("mrs %0, elr_el3" : "=r"(temp_reg) :: "memory");
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x18) = static_cast<u32>(temp_reg >> 0);
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x1C) = static_cast<u32>(temp_reg >> 32);
+
+            __asm__ __volatile__("mrs %0, far_el3" : "=r"(temp_reg) :: "memory");
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x18) = static_cast<u32>(temp_reg >> 0);
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x1C) = static_cast<u32>(temp_reg >> 32);
+
+            __asm__ __volatile__("mov %0, lr" : "=r"(temp_reg) :: "memory");
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x20) = static_cast<u32>(temp_reg >> 0);
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x24) = static_cast<u32>(temp_reg >> 32);
+
+            __asm__ __volatile__("mov %0, sp" : "=r"(temp_reg) :: "memory");
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x30) = static_cast<u32>(temp_reg >> 0);
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x34) = static_cast<u32>(temp_reg >> 32);
+
+            for (int i = 0; i < 0x100; i += 4) {
+                *(volatile u32 *)(secmon::MemoryRegionVirtualDebug.GetAddress() + 0x40 + i) = *(volatile u32 *)(temp_reg + i);
+            }
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDevicePmc.GetAddress() + 0x50) = 0x02;
+            *(volatile u32 *)(secmon::MemoryRegionVirtualDevicePmc.GetAddress() + 0x00) = 0x10;
+
+            util::WaitMicroSeconds(1000);
+        }
+        #endif
+
         /* Lockout the security engine. */
         se::Lockout();

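ErrorReboot's added block captures the EL3 fault state (ESR_EL3, ELR_EL3, FAR_EL3, plus lr and sp) with inline mrs/mov reads before resetting. A condensed, AArch64-only sketch of the capture step, using a hypothetical FaultRecord holder rather than raw debug-window offsets:

#include <cstdint>

struct FaultRecord {
    std::uint64_t esr;  /* Exception Syndrome Register, EL3 */
    std::uint64_t elr;  /* Exception Link Register, EL3     */
    std::uint64_t far;  /* Fault Address Register, EL3      */
};

/* Only meaningful when executing at EL3 on AArch64. */
inline FaultRecord CaptureEl3FaultState() {
    FaultRecord record{};
    __asm__ __volatile__("mrs %0, esr_el3" : "=r"(record.esr) :: "memory");
    __asm__ __volatile__("mrs %0, elr_el3" : "=r"(record.elr) :: "memory");
    __asm__ __volatile__("mrs %0, far_el3" : "=r"(record.far) :: "memory");
    return record;
}
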
@@ -21,6 +21,9 @@ namespace ams::secmon {

     namespace {

+        constexpr inline const uintptr_t BootCodeAddress = MemoryRegionVirtualTzramBootCode.GetAddress();
+        constexpr inline const size_t BootCodeSize = MemoryRegionVirtualTzramBootCode.GetSize();
+
         using namespace ams::mmu;

         constexpr void UnmapBootCodeImpl(u64 *l1, u64 *l2, u64 *l3, uintptr_t boot_code, size_t boot_code_size) {

@@ -39,6 +42,20 @@ namespace ams::secmon {
             InvalidateL1Entries(l1, MemoryRegionPhysical.GetAddress(), MemoryRegionPhysical.GetSize());
         }

+        void ClearLow(uintptr_t address, size_t size) {
+            /* Clear the low part. */
+            util::ClearMemory(reinterpret_cast<void *>(address), size / 2);
+        }
+
+        void ClearHigh(uintptr_t address, size_t size) {
+            /* Clear the high part. */
+            util::ClearMemory(reinterpret_cast<void *>(address + size / 2), size / 2);
+        }
+
+    }
+
+    void ClearBootCodeHigh() {
+        ClearHigh(BootCodeAddress, BootCodeSize);
     }

     void UnmapBootCode() {

@@ -46,15 +63,11 @@
         u64 * const l1 = MemoryRegionVirtualTzramL1PageTable.GetPointer<u64>();
         u64 * const l2_l3 = MemoryRegionVirtualTzramL2L3PageTable.GetPointer<u64>();

-        /* Get the boot code region. */
-        const uintptr_t boot_code = MemoryRegionVirtualTzramBootCode.GetAddress();
-        const size_t boot_code_size = MemoryRegionVirtualTzramBootCode.GetSize();
-
-        /* Clear the boot code. */
-        util::ClearMemory(reinterpret_cast<void *>(boot_code), boot_code_size);
+        /* Clear the low boot code region; high was already cleared by a previous call. */
+        ClearLow(BootCodeAddress, BootCodeSize);

         /* Unmap. */
-        UnmapBootCodeImpl(l1, l2_l3, l2_l3, boot_code, boot_code_size);
+        UnmapBootCodeImpl(l1, l2_l3, l2_l3, BootCodeAddress, BootCodeSize);

         /* Ensure the mappings are consistent. */
         secmon::EnsureMappingConsistency();

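Splitting the wipe into ClearHigh (called right after Main, see the start.s hunk further down) and ClearLow (called here, just before the region is unmapped) lets the boot code be scrubbed in two stages instead of one big ClearMemory. A generic sketch of the split-clear idea, with hypothetical names:

#include <cstddef>
#include <cstring>

/* Zero the first half of a region. */
inline void ClearLowHalf(void *region, std::size_t size) {
    std::memset(region, 0, size / 2);
}

/* Zero the second half of a region. */
inline void ClearHighHalf(void *region, std::size_t size) {
    std::memset(static_cast<unsigned char *>(region) + size / 2, 0, size / 2);
}

/* Usage mirroring the new flow: high half early, low half later, right before unmapping. */
inline void TwoStageWipe(void *boot_code, std::size_t boot_code_size) {
    ClearHighHalf(boot_code, boot_code_size);  /* stage 1, earlier in boot */
    /* ... other work runs in between ... */
    ClearLowHalf(boot_code, boot_code_size);   /* stage 2, just before unmap */
}
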
@@ -52,7 +52,7 @@ namespace ams::secmon {
         constinit const se::StickyBits ExpectedSeStickyBits = {
             .se_security = (1 << 0), /* SE_HARD_SETTING */
             .tzram_security = 0,
-            .crypto_security_perkey = 0,
+            .crypto_security_perkey = (1 << pkg1::AesKeySlot_UserEnd) - 1,
             .crypto_keytable_access = {
                 (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 0: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */
                 (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 1: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */
@@ -60,15 +60,15 @@ namespace ams::secmon {
                 (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 3: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */
                 (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 4: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */
                 (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 5: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */
-                (0 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 6: Unused keyslot. UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. KEYUSE, UIVREAD, OIVREAD, KEYREAD disabled. */
-                (0 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 7: Unused keyslot. UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. KEYUSE, UIVREAD, OIVREAD, KEYREAD disabled. */
+                (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 6: Unused keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */
+                (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 7: Unused keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */
                 (0 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 8: Temp keyslot. UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. KEYUSE, UIVREAD, OIVREAD, KEYREAD disabled. */
                 (0 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 9: SmcTemp keyslot. UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. KEYUSE, UIVREAD, OIVREAD, KEYREAD disabled. */
                 (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 10: Wrap1 keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */
                 (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 11: Wrap2 keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */
                 (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 12: DMaster keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */
                 (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 13: Master keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */
-                (0 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 14: Unused keyslot. UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. KEYUSE, UIVREAD, OIVREAD, KEYREAD disabled. */
+                (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 14: Unused keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */
                 (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 13: Device keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */
             },
             .rsa_security_perkey = 0,

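Each crypto_keytable_access entry above packs seven per-keyslot permissions into one value; reading the comments, bit 0 is KEYREAD, bit 1 KEYUPDATE, bit 2 OIVREAD, bit 3 OIVUPDATE, bit 4 UIVREAD, bit 5 UIVUPDATE, and bit 6 KEYUSE. A small illustrative helper that builds the same masks (not an Atmosphere API, and the user_end value used in the check is hypothetical):

#include <cstdint>

enum KeySlotAccessBit : std::uint32_t {
    KeySlotAccess_KeyRead   = (1u << 0),
    KeySlotAccess_KeyUpdate = (1u << 1),
    KeySlotAccess_OivRead   = (1u << 2),
    KeySlotAccess_OivUpdate = (1u << 3),
    KeySlotAccess_UivRead   = (1u << 4),
    KeySlotAccess_UivUpdate = (1u << 5),
    KeySlotAccess_KeyUse    = (1u << 6),
};

/* The "User keyslot" pattern from the table: usable and updatable, never readable. */
constexpr std::uint32_t UserKeySlotAccess = KeySlotAccess_KeyUse | KeySlotAccess_UivUpdate |
                                            KeySlotAccess_OivUpdate | KeySlotAccess_KeyUpdate;
static_assert(UserKeySlotAccess == ((1 << 6) | (1 << 5) | (1 << 3) | (1 << 1)));

/* Shape of the new crypto_security_perkey value: one bit per user-accessible keyslot. */
constexpr std::uint32_t MakePerKeyMask(unsigned user_end) {
    return (std::uint32_t{1} << user_end) - 1;
}
static_assert(MakePerKeyMask(6) == 0b111111); /* assuming a user_end of 6, for illustration only */
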
@@ -24,7 +24,7 @@ _ZN3ams6secmon5StartEv:
     mov sp, x20

     /* Set SPSEL 0 stack pointer to a temporary location in volatile memory. */
-    msr spsel, #1
+    msr spsel, #0
     ldr x20, =0x1F01C0800
     mov sp, x20

@@ -34,17 +34,20 @@ _ZN3ams6secmon5StartEv:
     /* Invoke main. */
     bl _ZN3ams6secmon4MainEv

+    /* Clear boot code high. */
+    bl _ZN3ams6secmon17ClearBootCodeHighEv
+
     /* Set the stack pointer to the core 3 exception stack address. */
     ldr x20, =0x1F01F9000
     mov sp, x20

+    /* Unmap the boot code region (and clear the low part). */
+    bl _ZN3ams6secmon13UnmapBootCodeEv
+
     /* Initialize the random cache. */
     /* NOTE: Nintendo does this much earlier, but we reuse volatile space. */
     bl _ZN3ams6secmon3smc15FillRandomCacheEv

-    /* Unmap the boot code region. */
-    bl _ZN3ams6secmon13UnmapBootCodeEv
-
     /* Jump to lower exception level. */
     b _ZN3ams6secmon25JumpToLowerExceptionLevelEv

@@ -169,7 +172,6 @@ _ZN3ams6secmon25ReleaseCommonSmcStackLockEv:

     /* Return. */
     ret
-    ret

 .section .text._ZN3ams6secmon26ReleaseCommonWarmbootStackEv, "ax", %progbits
 .align 4

@@ -228,6 +228,21 @@ namespace ams::secmon::smc {

             /* Set the invocation result. */
             args.r[0] = static_cast<u64>(InvokeSmcHandler(info, args));
+
+            /* TODO: For debugging. Remove this when exo2 is complete. */
+            #if 1
+            if (args.r[0] == static_cast<u64>(SmcResult::NotImplemented)) {
+                *(volatile u32 *)(MemoryRegionVirtualDebug.GetAddress()) = 0xBBBBBBBB;
+                *(volatile u32 *)(MemoryRegionVirtualDebug.GetAddress() + 0x10) = static_cast<u32>(info.function_id);
+                for (size_t i = 0; i < sizeof(args) / sizeof(u32); ++i) {
+                    ((volatile u32 *)(MemoryRegionVirtualDebug.GetAddress() + 0x20))[i] = reinterpret_cast<u32 *>(std::addressof(args))[i];
+                }
+                *(volatile u32 *)(MemoryRegionVirtualDevicePmc.GetAddress() + 0x50) = 0x02;
+                *(volatile u32 *)(MemoryRegionVirtualDevicePmc.GetAddress() + 0x00) = 0x10;
+
+                util::WaitMicroSeconds(1000);
+            }
+            #endif
         }

     }

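The SMC-handler block above dumps the raw argument words whenever a call returns NotImplemented, which makes it easy to see on hardware which function IDs the kernel is actually issuing. A generic sketch of dumping a POD argument block word-by-word into a debug window (the SmcArgumentsExample layout and function names are hypothetical):

#include <cstddef>
#include <cstdint>
#include <cstring>

struct SmcArgumentsExample {
    std::uint64_t r[8];  /* x0-x7 as passed to the secure monitor (illustrative layout) */
};

/* Copy an argument block into a debug window as 32-bit words. */
inline void DumpArguments(std::uintptr_t debug_base, const SmcArgumentsExample &args) {
    volatile std::uint32_t *out = reinterpret_cast<volatile std::uint32_t *>(debug_base);
    std::uint32_t words[sizeof(args) / sizeof(std::uint32_t)];
    std::memcpy(words, &args, sizeof(args));  /* copy through memcpy to sidestep strict aliasing */
    for (std::size_t i = 0; i < sizeof(words) / sizeof(words[0]); ++i) {
        out[i] = words[i];
    }
}
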
@@ -201,22 +201,29 @@ namespace ams::mmu::arch::arm64 {
         return ((address >> L3EntryShift) & TableEntryIndexMask);
     }

-    constexpr ALWAYS_INLINE void SetTableImpl(u64 *table, u64 index, u64 value) {
-        /* Ensure (for constexpr validation purposes) that the entry we set is clear. */
-        if (table[index]) {
-            __builtin_unreachable();
-        }
-
-        /* Set the value. */
+    constexpr ALWAYS_INLINE void SetTableEntryImpl(volatile u64 *table, u64 index, u64 value) {
+        /* Write the value. */
         table[index] = value;
     }

+    constexpr ALWAYS_INLINE void SetTableEntry(u64 *table, u64 index, u64 value) {
+        /* Ensure (for constexpr validation purposes) that the entry we set is clear. */
+        if (std::is_constant_evaluated()) {
+            if (table[index]) {
+                __builtin_unreachable();
+            }
+        }
+
+        /* Set the value. */
+        SetTableEntryImpl(table, index, value);
+    }
+
     constexpr void SetL1TableEntry(u64 *table, uintptr_t virt_addr, uintptr_t phys_addr, PageTableTableAttribute attr) {
-        SetTableImpl(table, GetL1EntryIndex(virt_addr), MakeTableEntry(phys_addr & TableEntryMask, attr));
+        SetTableEntry(table, GetL1EntryIndex(virt_addr), MakeTableEntry(phys_addr & TableEntryMask, attr));
     }

     constexpr void SetL2TableEntry(u64 *table, uintptr_t virt_addr, uintptr_t phys_addr, PageTableTableAttribute attr) {
-        SetTableImpl(table, GetL2EntryIndex(virt_addr), MakeTableEntry(phys_addr & TableEntryMask, attr));
+        SetTableEntry(table, GetL2EntryIndex(virt_addr), MakeTableEntry(phys_addr & TableEntryMask, attr));
     }

     constexpr void SetL1BlockEntry(u64 *table, uintptr_t virt_addr, uintptr_t phys_addr, size_t size, PageTableMappingAttribute attr) {

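The split into SetTableEntryImpl and SetTableEntry above keeps the overwrite check, but only while a table is being built at compile time; at runtime the store goes straight through (and the impl takes a volatile pointer, presumably so the hardware-visible write is never elided). A reduced sketch of the C++20 std::is_constant_evaluated pattern, independent of the Atmosphere types:

#include <cstdint>
#include <type_traits>

constexpr void SetEntryChecked(std::uint64_t *table, std::uint64_t index, std::uint64_t value) {
    if (std::is_constant_evaluated()) {
        /* Compile-time only: refuse to overwrite an already-populated entry.
           Hitting __builtin_unreachable() during constant evaluation is a hard error,
           so a bad constexpr table fails the build instead of silently clobbering. */
        if (table[index] != 0) {
            __builtin_unreachable();
        }
    }
    /* Runtime (and compile-time) path: just write the value. */
    table[index] = value;
}

/* Compile-time use: building a table in a constexpr context exercises the check. */
constexpr std::uint64_t TestEntry = [] {
    std::uint64_t table[4] = {};
    SetEntryChecked(table, 2, 0xABCD);
    return table[2];
}();
static_assert(TestEntry == 0xABCD);
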
@@ -224,7 +231,7 @@ namespace ams::mmu::arch::arm64 {
        const u64 count = (size >> L1EntryShift);

        for (u64 i = 0; i < count; ++i) {
-           SetTableImpl(table, start + i, MakeL1BlockEntry((phys_addr & L1EntryMask) + (i << L1EntryShift), attr));
+           SetTableEntry(table, start + i, MakeL1BlockEntry((phys_addr & L1EntryMask) + (i << L1EntryShift), attr));
        }
    }

@@ -233,7 +240,7 @@
        const u64 count = (size >> L2EntryShift);

        for (u64 i = 0; i < count; ++i) {
-           SetTableImpl(table, start + i, MakeL2BlockEntry((phys_addr & L2EntryMask) + (i << L2EntryShift), attr));
+           SetTableEntry(table, start + i, MakeL2BlockEntry((phys_addr & L2EntryMask) + (i << L2EntryShift), attr));
        }
    }

@@ -242,11 +249,11 @@
        const u64 count = (size >> L3EntryShift);

        for (u64 i = 0; i < count; ++i) {
-           SetTableImpl(table, start + i, MakeL3BlockEntry((phys_addr & L3EntryMask) + (i << L3EntryShift), attr));
+           SetTableEntry(table, start + i, MakeL3BlockEntry((phys_addr & L3EntryMask) + (i << L3EntryShift), attr));
        }
    }

-   constexpr void InvalidateL1Entries(u64 *table, uintptr_t virt_addr, size_t size) {
+   constexpr void InvalidateL1Entries(volatile u64 *table, uintptr_t virt_addr, size_t size) {
        const u64 start = GetL1EntryIndex(virt_addr);
        const u64 count = (size >> L1EntryShift);
        const u64 end = start + count;

@@ -256,7 +263,7 @@
        }
    }

-   constexpr void InvalidateL2Entries(u64 *table, uintptr_t virt_addr, size_t size) {
+   constexpr void InvalidateL2Entries(volatile u64 *table, uintptr_t virt_addr, size_t size) {
        const u64 start = GetL2EntryIndex(virt_addr);
        const u64 count = (size >> L2EntryShift);
        const u64 end = start + count;

@@ -266,7 +273,7 @@
        }
    }

-   constexpr void InvalidateL3Entries(u64 *table, uintptr_t virt_addr, size_t size) {
+   constexpr void InvalidateL3Entries(volatile u64 *table, uintptr_t virt_addr, size_t size) {
        const u64 start = GetL3EntryIndex(virt_addr);
        const u64 count = (size >> L3EntryShift);
        const u64 end = start + count;

@@ -30,17 +30,17 @@ namespace ams::secmon {
            EmummcConfiguration emummc_cfg;
            u8 _raw_emummc_config[0x120];
        };
-       union {
-           u8 _misc_data[0x400 - sizeof(_raw_exosphere_config) - sizeof(_raw_emummc_config)];
-       };
        u8 sealed_device_keys[pkg1::KeyGeneration_Max][se::AesBlockSize];
        u8 sealed_master_keys[pkg1::KeyGeneration_Max][se::AesBlockSize];
        pkg1::BootConfig boot_config;
        u8 rsa_private_exponents[4][se::RsaSize];
+       union {
+           u8 _misc_data[0xFC0 - sizeof(_raw_exosphere_config) - sizeof(_raw_emummc_config) - sizeof(sealed_device_keys) - sizeof(sealed_master_keys) - sizeof(boot_config) - sizeof(rsa_private_exponents)];
+       };
+       /* u8 l1_page_table[0x40]; */
    };
-   static_assert(sizeof(ConfigurationContext) == 0x1000);
+   static_assert(sizeof(ConfigurationContext) == 0xFC0);
    static_assert(util::is_pod<ConfigurationContext>::value);
-   static_assert(offsetof(ConfigurationContext, sealed_device_keys) == 0x400);

    namespace impl {

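The reworked ConfigurationContext keeps its total size fixed by letting a trailing union absorb whatever space the named members do not use, with a static_assert guarding the arithmetic; the struct shrinks from 0x1000 to 0xFC0, presumably leaving room for the 0x40-byte l1_page_table hinted at in the new comment (0x1000 - 0xFC0 = 0x40). A toy version of the same layout trick, with made-up field names and a made-up target size:

#include <cstdint>

struct SizedContext {
    std::uint32_t magic;
    std::uint8_t  sealed_keys[2][16];
    union {
        /* Reserve whatever remains so the structure is exactly 0x100 bytes. */
        std::uint8_t _misc_data[0x100 - sizeof(std::uint32_t) - sizeof(std::uint8_t[2][16])];
        /* Alternative views of the spare space could be added here later. */
    };
};
static_assert(sizeof(SizedContext) == 0x100, "layout arithmetic must account for every member");
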
@@ -101,14 +101,14 @@ namespace ams::se {
        void ExecuteOperationSingleBlock(volatile SecurityEngineRegisters *SE, void *dst, size_t dst_size, const void *src, size_t src_size) {
            /* Validate sizes. */
            AMS_ABORT_UNLESS(dst_size <= AesBlockSize);
-           AMS_ABORT_UNLESS(src_size == AesBlockSize);
+           AMS_ABORT_UNLESS(src_size <= AesBlockSize);

            /* Set the block count to 1. */
            reg::Write(SE->SE_CRYPTO_LAST_BLOCK, 0);

            /* Create an aligned buffer. */
            util::AlignedBuffer<hw::DataCacheLineSize, AesBlockSize> aligned;
-           std::memcpy(aligned, src, AesBlockSize);
+           std::memcpy(aligned, src, src_size);
            hw::FlushDataCache(aligned, AesBlockSize);
            hw::DataSynchronizationBarrierInnerShareable();

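Relaxing the check to src_size <= AesBlockSize and copying only src_size bytes lets callers pass short inputs without reading past the end of src. The sketch below shows one way to stage such an input into an aligned block buffer; the explicit zero padding and the 64-byte cache-line size are assumptions for illustration and may not match what util::AlignedBuffer does.

#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr std::size_t AesBlockSize      = 16;
constexpr std::size_t DataCacheLineSize = 64;  /* assumed cache-line size */

struct alignas(DataCacheLineSize) StagingBlock {
    std::uint8_t data[AesBlockSize];
};

/* Stage up to one AES block: zero-pad the tail, copy only what the caller provided. */
inline void StageSingleBlock(StagingBlock &staged, const void *src, std::size_t src_size) {
    /* Caller must guarantee src_size <= AesBlockSize. */
    std::memset(staged.data, 0, sizeof(staged.data));
    std::memcpy(staged.data, src, src_size);
}
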
@@ -62,12 +62,6 @@ namespace ams::se {
            void SetPerKeySecure() {
                auto *SE = GetRegisters();

-               /* Clear AES PerKey security. */
-               SE->SE_CRYPTO_SECURITY_PERKEY = 0;
-
-               /* Clear RSA PerKey security. */
-               SE->SE_RSA_SECURITY_PERKEY = 0;
-
                /* Update PERKEY_SETTING to secure. */
                reg::ReadWrite(SE->SE_SE_SECURITY, SE_REG_BITS_ENUM(SECURITY_PERKEY_SETTING, SECURE));
            }

@@ -44,8 +44,8 @@ namespace ams::se {
                if (!TestRegister(SE->SE_CRYPTO_KEYTABLE_ACCESS[i], bits.crypto_keytable_access[i])) { return false; }
            }

-           /* Test RSA_SCEURITY_PERKEY */
-           if (!TestRegister(SE->SE_CRYPTO_SECURITY_PERKEY, bits.rsa_security_perkey)) { return false; }
+           /* Test RSA_SECURITY_PERKEY */
+           if (!TestRegister(SE->SE_RSA_SECURITY_PERKEY, bits.rsa_security_perkey)) { return false; }

            /* Check RSA_KEYTABLE_ACCESS. */
            for (int i = 0; i < RsaKeySlotCount; ++i) {