diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
index 59929ffd3..303459627 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
@@ -73,6 +73,7 @@ namespace ams::kern {
                     size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p);
 
                     KPhysicalAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
+                    KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { return m_heap.AllocateAligned(index, num_pages, align_pages); }
                     void Free(KPhysicalAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }
 
                     void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); }
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp
index 97934c534..6f13a1ca8 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp
@@ -20,7 +20,7 @@ namespace ams::kern {
 
     class KPageBitmap {
-        private:
+        public:
            class RandomBitGenerator {
                private:
                    util::TinyMT m_rng;
                    u32 m_entropy;
@@ -42,12 +42,43 @@ namespace ams::kern {
                        --m_bits_available;
                        return rnd_bit;
                    }
+
+                   u64 GenerateRandomBits(u32 num_bits) {
+                       u64 result = 0;
+
+                       /* Iteratively add random bits to our result. */
+                       while (num_bits > 0) {
+                           /* Ensure we have random bits to take from. */
+                           if (m_bits_available == 0) {
+                               this->RefreshEntropy();
+                           }
+
+                           /* Determine how many bits to take this round. */
+                           const auto cur_bits = std::min(num_bits, m_bits_available);
+
+                           /* Generate mask for our current bits. */
+                           const u64 mask = (static_cast<u64>(1) << cur_bits) - 1;
+
+                           /* Add bits to output from our entropy. */
+                           result <<= cur_bits;
+                           result |= (m_entropy & mask);
+
+                           /* Remove bits from our entropy. */
+                           m_entropy >>= cur_bits;
+                           m_bits_available -= cur_bits;
+
+                           /* Advance. */
+                           num_bits -= cur_bits;
+                       }
+
+                       return result;
+                   }
                public:
                    RandomBitGenerator() : m_entropy(), m_bits_available() {
                        m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
                    }
 
-                   size_t SelectRandomBit(u64 bitmap) {
+                   u64 SelectRandomBit(u64 bitmap) {
                        u64 selected = 0;
 
                        for (size_t cur_num_bits = BITSIZEOF(bitmap) / 2; cur_num_bits != 0; cur_num_bits /= 2) {
@@ -66,6 +97,17 @@ namespace ams::kern {
 
                        return selected;
                    }
+
+                   u64 GenerateRandom(u64 max) {
+                       /* Determine the number of bits we need. */
+                       const u64 bits_needed = 1 + (BITSIZEOF(max) - util::CountLeadingZeros(max));
+
+                       /* Generate a random value of the desired bitwidth. */
+                       const u64 rnd = this->GenerateRandomBits(bits_needed);
+
+                       /* Adjust the value to be in range. */
+                       return rnd - ((rnd / max) * max);
+                   }
            };
        public:
            static constexpr size_t MaxDepth = 4;
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
index 8998d5267..d953c481c 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
@@ -27,7 +27,7 @@ namespace ams::kern {
            static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
                const size_t target_pages = std::max(num_pages, align_pages);
                for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
-                   if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+                   if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                        return static_cast<s32>(i);
                    }
                }
@@ -36,7 +36,7 @@ namespace ams::kern {
 
            static constexpr s32 GetBlockIndex(size_t num_pages) {
                for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
-                   if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+                   if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                        return i;
                    }
                }
@@ -44,7 +44,7 @@ namespace ams::kern {
            }
 
            static constexpr size_t GetBlockSize(size_t index) {
-               return size_t(1) << MemoryBlockPageShifts[index];
+               return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
            }
 
            static constexpr size_t GetBlockNumPages(size_t index) {
@@ -128,13 +128,14 @@ namespace ams::kern {
            size_t m_initial_used_size;
            size_t m_num_blocks;
            Block m_blocks[NumMemoryBlockPageShifts];
+           KPageBitmap::RandomBitGenerator m_rng;
        private:
            void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
            size_t GetNumFreePages() const;
 
            void FreeBlock(KPhysicalAddress block, s32 index);
        public:
-           KPageHeap() : m_heap_address(Null<KPhysicalAddress>), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks() { /* ... */ }
+           KPageHeap() : m_heap_address(Null<KPhysicalAddress>), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks(), m_rng() { /* ... */ }
 
            constexpr KPhysicalAddress GetAddress() const { return m_heap_address; }
            constexpr size_t GetSize() const { return m_heap_size; }
@@ -158,9 +159,25 @@ namespace ams::kern {
                m_initial_used_size = m_heap_size - free_size - reserved_size;
            }
 
-           KPhysicalAddress AllocateBlock(s32 index, bool random);
+           KPhysicalAddress AllocateBlock(s32 index, bool random) {
+               if (random) {
+                   const size_t block_pages = m_blocks[index].GetNumPages();
+                   return this->AllocateByRandom(index, block_pages, block_pages);
+               } else {
+                   return this->AllocateByLinearSearch(index);
+               }
+           }
+
+           KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+               /* TODO: linear search support? */
+               return this->AllocateByRandom(index, num_pages, align_pages);
+           }
+
            void Free(KPhysicalAddress addr, size_t num_pages);
        private:
+           KPhysicalAddress AllocateByLinearSearch(s32 index);
+           KPhysicalAddress AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
+
            static size_t CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
        public:
            static size_t CalculateManagementOverheadSize(size_t region_size) {
diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
index 3059a18ab..57dd4787b 100644
--- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
@@ -202,7 +202,7 @@ namespace ams::kern {
        Impl *chosen_manager = nullptr;
        KPhysicalAddress allocated_block = Null<KPhysicalAddress>;
        for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) {
-           allocated_block = chosen_manager->AllocateBlock(heap_index, true);
+           allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
            if (allocated_block != Null<KPhysicalAddress>) {
                break;
            }
        }
@@ -213,12 +213,6 @@ namespace ams::kern {
            return Null<KPhysicalAddress>;
        }
 
-       /* If we allocated more than we need, free some. */
-       const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
-       if (allocated_pages > num_pages) {
-           chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
-       }
-
        /* Maintain the optimized memory bitmap, if we should. */
        if (m_has_optimized_process[pool]) {
            chosen_manager->TrackUnoptimizedAllocation(allocated_block, num_pages);
diff --git a/libraries/libmesosphere/source/kern_k_page_heap.cpp b/libraries/libmesosphere/source/kern_k_page_heap.cpp
index a7c3426cc..2974fd1fc 100644
--- a/libraries/libmesosphere/source/kern_k_page_heap.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_heap.cpp
@@ -51,11 +51,11 @@ namespace ams::kern {
        return num_free;
    }
 
-   KPhysicalAddress KPageHeap::AllocateBlock(s32 index, bool random) {
+   KPhysicalAddress KPageHeap::AllocateByLinearSearch(s32 index) {
        const size_t needed_size = m_blocks[index].GetSize();
 
        for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
-           if (const KPhysicalAddress addr = m_blocks[i].PopBlock(random); addr != Null<KPhysicalAddress>) {
+           if (const KPhysicalAddress addr = m_blocks[i].PopBlock(false); addr != Null<KPhysicalAddress>) {
                if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
                    this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
                }
@@ -66,6 +66,84 @@ namespace ams::kern {
        return Null<KPhysicalAddress>;
    }
 
+   KPhysicalAddress KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
+       /* Get the size and required alignment. */
+       const size_t needed_size = num_pages * PageSize;
+       const size_t align_size  = align_pages * PageSize;
+
+       /* Determine meta-alignment of our desired alignment size. */
+       const size_t align_shift = util::CountTrailingZeros(align_size);
+
+       /* Decide on a block to allocate from. */
+       constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
+       {
+           /* By default, we'll want to look at all blocks larger than our current one. */
+           s32 max_blocks = static_cast<s32>(m_num_blocks);
+
+           /* Determine the maximum block we should try to allocate from. */
+           size_t possible_alignments = 0;
+           for (s32 i = index; i < max_blocks; ++i) {
+               /* Add the possible alignments from blocks at the current size. */
+               possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * m_blocks[i].GetNumFreeBlocks();
+
+               /* If there are enough possible alignments, we don't need to look at larger blocks. */
+               if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
+                   max_blocks = i + 1;
+                   break;
+               }
+           }
+
+           /* If we have any possible alignments which require a larger block, we need to pick one. */
+           if (possible_alignments > 0 && index + 1 < max_blocks) {
+               /* Select a random alignment from the possibilities. */
+               const size_t rnd = m_rng.GenerateRandom(possible_alignments);
+
+               /* Determine which block corresponds to the random alignment we chose. */
+               possible_alignments = 0;
+               for (s32 i = index; i < max_blocks; ++i) {
+                   /* Add the possible alignments from blocks at the current size. */
+                   possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * m_blocks[i].GetNumFreeBlocks();
+
+                   /* If the current block gets us to our random choice, use the current block. */
+                   if (rnd < possible_alignments) {
+                       index = i;
+                       break;
+                   }
+               }
+           }
+       }
+
+       /* Pop a block from the index we selected. */
+       if (KPhysicalAddress addr = m_blocks[index].PopBlock(true); addr != Null<KPhysicalAddress>) {
+           /* Determine how much size we have left over. */
+           if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; leftover_size > 0) {
+               /* Determine how many valid alignments we can have. */
+               const size_t possible_alignments = 1 + (leftover_size >> align_shift);
+
+               /* Select a random valid alignment. */
+               const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;
+
+               /* Free memory before the random offset. */
+               if (random_offset != 0) {
+                   this->Free(addr, random_offset / PageSize);
+               }
+
+               /* Advance our block by the random offset. */
+               addr += random_offset;
+
+               /* Free memory after our allocated block. */
+               if (random_offset != leftover_size) {
+                   this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
+               }
+           }
+
+           /* Return the block we allocated. */
+           return addr;
+       }
+
+       return Null<KPhysicalAddress>;
+   }
+
    void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
        do {
            block = m_blocks[index++].PushBlock(block);
diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index f8f578d09..12daa920f 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -3608,13 +3608,13 @@ namespace ams::kern {
 
        /* Allocate the start page as needed. */
        if (aligned_src_start < mapping_src_start) {
-           start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
+           start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
            R_UNLESS(start_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
        }
 
        /* Allocate the end page as needed. */
        if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
-           end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
+           end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
            R_UNLESS(end_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
        }
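
Note: the GenerateRandom(max) helper added above reduces a bounded-width random draw into [0, max) via rnd - ((rnd / max) * max), which is arithmetically identical to rnd % max. The sketch below is a standalone illustration of the same derivation (hypothetical names; std::mt19937_64 stands in for the kernel's TinyMT-backed entropy pool, and GenerateRandomBits here is a simplified stand-in for the patch's bit-pool version):

    #include <bit>
    #include <cstdint>
    #include <cstdio>
    #include <random>

    // Stand-in for RandomBitGenerator::GenerateRandomBits(): return num_bits
    // random bits (the kernel refills 32 bits at a time from TinyMT instead).
    static uint64_t GenerateRandomBits(std::mt19937_64 &rng, unsigned num_bits) {
        const uint64_t raw = rng();
        return (num_bits >= 64) ? raw : (raw & ((uint64_t(1) << num_bits) - 1));
    }

    static uint64_t GenerateRandom(std::mt19937_64 &rng, uint64_t max) {
        // Width of the draw, mirroring 1 + (BITSIZEOF(max) - CountLeadingZeros(max)).
        const unsigned bits_needed = 1 + (64 - static_cast<unsigned>(std::countl_zero(max)));

        // Draw that many bits, then reduce into [0, max); same as rnd % max.
        const uint64_t rnd = GenerateRandomBits(rng, bits_needed);
        return rnd - ((rnd / max) * max);
    }

    int main() {
        std::mt19937_64 rng(0);
        for (int i = 0; i < 4; ++i) {
            std::printf("%llu\n", static_cast<unsigned long long>(GenerateRandom(rng, 6)));
        }
        return 0;
    }

Because the draw is one bit wider than strictly necessary, the modulo step keeps a small bias toward low values; that is presumably acceptable here, where the goal is allocation unpredictability rather than exact uniformity.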
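
Note: the block-selection weights in AllocateByRandom encode a counting argument. A free block of S bytes can place a needed_size-byte allocation at 1 + ((S - needed_size) >> align_shift) aligned offsets, so weighting each size class by that count times its number of free blocks makes every valid aligned position in the pool equally likely; the random offset chosen inside the popped block (with the slack freed on both sides) then realizes the chosen position. A self-contained illustration of the arithmetic (hypothetical, not part of the patch):

    #include <cstddef>
    #include <cstdio>

    constexpr size_t PageSize = 0x1000;

    // Aligned positions available for an allocation inside one free block.
    constexpr size_t PossibleAlignments(size_t block_size, size_t needed_size, size_t align_shift) {
        return 1 + ((block_size - needed_size) >> align_shift);
    }

    int main() {
        const size_t needed_size = 3 * PageSize;  // three-page allocation...
        const size_t align_shift = 12;            // ...at single-page (0x1000) alignment.

        // A 16-page free block leaves 13 pages of slack: 14 aligned positions.
        const size_t block_size = 16 * PageSize;
        const size_t slots = PossibleAlignments(block_size, needed_size, align_shift);
        std::printf("aligned positions: %zu\n", slots);

        // Choosing slot k (as m_rng.GenerateRandom(slots) would) places the
        // allocation at offset k << align_shift; the patch then frees the
        // pages before and after the chosen window.
        const size_t k = 5;
        const size_t random_offset = k << align_shift;
        std::printf("offset 0x%zx: free %zu page(s) before, %zu page(s) after\n",
                    random_offset, random_offset / PageSize,
                    (block_size - needed_size - random_offset) / PageSize);
        return 0;
    }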