kern/kldr: fix bugs in physical randomization

Michael Scire 2020-04-22 03:45:21 -07:00
parent 4f50f57bb7
commit 6ad0f0e7f2
2 changed files with 5 additions and 4 deletions

@@ -221,14 +221,14 @@ namespace ams::kern::arch::arm64::init {
     PageTableEntry *src_entry = this->GetMappingEntry(src_virt_addr, block_size);
     const auto src_saved = *src_entry;
     for (size_t i = 0; i < num_mappings; i++) {
-        *src_entry = InvalidPageTableEntry;
+        src_entry[i] = InvalidPageTableEntry;
     }

     /* Unmap the target. */
     PageTableEntry *dst_entry = this->GetMappingEntry(dst_virt_addr, block_size);
     const auto dst_saved = *dst_entry;
     for (size_t i = 0; i < num_mappings; i++) {
-        *dst_entry = InvalidPageTableEntry;
+        dst_entry[i] = InvalidPageTableEntry;
     }

     /* Invalidate the entire tlb. */
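Note on the hunk above: the old loops wrote InvalidPageTableEntry through *src_entry and *dst_entry on every iteration, so only the first of the num_mappings entries was ever cleared; indexing with src_entry[i] / dst_entry[i] invalidates each entry. num_mappings exceeds one for mappings that use the AArch64 contiguous hint, which groups adjacent entries; the shift_for_contig = contig ? 4 : 0 line in the last hunk of this file suggests groups of 16. A minimal sketch of that assumption (the helper name is hypothetical, not part of the commit):

    /* Hypothetical illustration: a mapping created with the contiguous hint is
       described by 1 << 4 = 16 adjacent page table entries, and unmapping it
       must set every one of them to InvalidPageTableEntry, not just the first. */
    constexpr size_t GetNumMappingsForSwap(bool contig) {
        const size_t shift_for_contig = contig ? 4 : 0;
        return size_t(1) << shift_for_contig;   /* 16 when contiguous, otherwise 1 */
    }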
@@ -237,7 +237,7 @@ namespace ams::kern::arch::arm64::init {

     /* Copy data, if we should. */
     const u64 negative_block_size_for_mask = static_cast<u64>(-static_cast<s64>(block_size));
-    const u64 offset_mask = negative_block_size_for_mask & ((1ul << 36) - 1);
+    const u64 offset_mask = negative_block_size_for_mask & ((1ul << 48) - 1);
     const KVirtualAddress copy_src_addr = KVirtualAddress(src_saved.GetRawAttributesUnsafeForSwap() & offset_mask);
     const KVirtualAddress copy_dst_addr = KVirtualAddress(dst_saved.GetRawAttributesUnsafeForSwap() & offset_mask);
     if (block_size && do_copy) {
@@ -250,7 +250,7 @@ namespace ams::kern::arch::arm64::init {
     }

     /* Swap the mappings. */
-    const u64 attr_preserve_mask = (negative_block_size_for_mask | 0xFFFF000000000000ul) ^ ((1ul << 36) - 1);
+    const u64 attr_preserve_mask = (negative_block_size_for_mask | 0xFFFF000000000000ul) ^ ((1ul << 48) - 1);
     const size_t shift_for_contig = contig ? 4 : 0;
     size_t advanced_size = 0;
     const u64 src_attr_val = src_saved.GetRawAttributesUnsafeForSwap() & attr_preserve_mask;
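Both mask changes widen the address field from 36 to 48 bits. In an AArch64 stage-1 block or page descriptor the output address occupies bits [47:n], where n is the log2 of the block size, so masking with (1ul << 36) - 1 dropped any output-address bits above bit 35 from offset_mask and left them set in attr_preserve_mask instead. A minimal sketch of how the corrected masks decompose a descriptor, using the names from the hunks above (the helper functions are hypothetical and assume an LP64 build, so 1ul is 64 bits wide):

    #include <cstdint>

    using u64 = std::uint64_t;
    using s64 = std::int64_t;

    /* offset_mask selects the descriptor's output-address bits, [47:log2(block_size)];
       attr_preserve_mask selects everything else: the attribute bits below the block
       offset plus the upper attribute bits [63:48]. */
    constexpr u64 GetOffsetMask(u64 block_size) {
        const u64 negative_block_size_for_mask = static_cast<u64>(-static_cast<s64>(block_size));
        return negative_block_size_for_mask & ((1ul << 48) - 1);
    }

    constexpr u64 GetAttrPreserveMask(u64 block_size) {
        const u64 negative_block_size_for_mask = static_cast<u64>(-static_cast<s64>(block_size));
        return (negative_block_size_for_mask | 0xFFFF000000000000ul) ^ ((1ul << 48) - 1);
    }

    /* Example for a 2 MiB block: address bits [47:21] vs. attributes [63:48] and [20:0]. */
    static_assert(GetOffsetMask(0x200000)       == 0x0000FFFFFFE00000ul);
    static_assert(GetAttrPreserveMask(0x200000) == 0xFFFF0000001FFFFFul);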

@@ -307,6 +307,7 @@ namespace ams::kern::init::loader {
     /* On 10.0.0+, Physically randomize the kernel region. */
     if (kern::GetTargetFirmware() >= kern::TargetFirmware_10_0_0) {
         ttbr1_table.PhysicallyRandomize(virtual_base_address + rx_offset, bss_end_offset - rx_offset, true);
+        cpu::StoreEntireCacheForInit();
     }

     /* Clear kernel .bss. */
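The added cpu::StoreEntireCacheForInit() call writes the data caches back to memory once the kernel image has been physically rearranged, presumably so the copied pages are visible to agents that do not see dirty cache lines (instruction fetch, later cache invalidation during kernel init) before boot continues. The routine itself is not part of this diff; the sketch below only illustrates the kind of clean involved, done by virtual address over one range rather than over the entire cache by set/way (the function name and the 64-byte line size are assumptions):

    #include <cstddef>
    #include <cstdint>

    /* Illustrative AArch64-only sketch: clean a virtually addressed buffer to the
       point of coherency.  Production code derives the line size from CTR_EL0
       instead of hard-coding 64 bytes. */
    static void StoreDataCacheRange(const void *addr, std::size_t size) {
        constexpr std::uintptr_t CacheLineSize = 64;
        const std::uintptr_t start = reinterpret_cast<std::uintptr_t>(addr) & ~(CacheLineSize - 1);
        const std::uintptr_t end   = reinterpret_cast<std::uintptr_t>(addr) + size;
        for (std::uintptr_t cur = start; cur < end; cur += CacheLineSize) {
            __asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(cur) : "memory");
        }
        __asm__ __volatile__("dsb sy" ::: "memory");   /* wait for the cleans to complete */
    }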