/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern::arch::arm64 {

    namespace {
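
        /* Helper which tracks a page-aligned region and hands out sub-regions honoring a (shrinkable) block alignment. */
        /* The input region is split at its first aligned boundary into a "before" part and an "after" part, and */
        /* FindBlock() carves aligned chunks out of whichever part can still satisfy the current alignment. */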
        class AlignedMemoryBlock {
            private:
                uintptr_t m_before_start;
                uintptr_t m_before_end;
                uintptr_t m_after_start;
                uintptr_t m_after_end;
                size_t m_current_alignment;
            public:
                constexpr AlignedMemoryBlock(uintptr_t start, size_t num_pages, size_t alignment) : m_before_start(0), m_before_end(0), m_after_start(0), m_after_end(0), m_current_alignment(0) {
                    MESOSPHERE_ASSERT(util::IsAligned(start, PageSize));
                    MESOSPHERE_ASSERT(num_pages > 0);

                    /* Find an alignment that allows us to divide into at least two regions. */
                    uintptr_t start_page = start / PageSize;
                    alignment /= PageSize;
                    while (util::AlignUp(start_page, alignment) >= util::AlignDown(start_page + num_pages, alignment)) {
                        alignment = KPageTable::GetSmallerAlignment(alignment * PageSize) / PageSize;
                    }
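
                    /* For example (hypothetical numbers): with start_page = 3 and num_pages = 10, an alignment of 8 pages */
                    /* cannot split the range, but 4 pages can, so the assignments below yield before = [3, 4) and after = [4, 13). */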
                    m_before_start      = start_page;
                    m_before_end        = util::AlignUp(start_page, alignment);
                    m_after_start       = m_before_end;
                    m_after_end         = start_page + num_pages;
                    m_current_alignment = alignment;
                    MESOSPHERE_ASSERT(m_current_alignment > 0);
                }

                constexpr void SetAlignment(size_t alignment) {
                    /* We can only ever decrease the granularity. */
                    MESOSPHERE_ASSERT(m_current_alignment >= alignment / PageSize);
                    m_current_alignment = alignment / PageSize;
                }

                constexpr size_t GetAlignment() const {
                    return m_current_alignment * PageSize;
                }

                constexpr void FindBlock(uintptr_t &out, size_t &num_pages) {
                    if ((m_after_end - m_after_start) >= m_current_alignment) {
                        /* Select aligned memory from after block. */
                        const size_t available_pages = util::AlignDown(m_after_end, m_current_alignment) - m_after_start;
                        if (num_pages == 0 || available_pages < num_pages) {
                            num_pages = available_pages;
                        }
                        out = m_after_start * PageSize;
                        m_after_start += num_pages;
                    } else if ((m_before_end - m_before_start) >= m_current_alignment) {
                        /* Select aligned memory from before block. */
                        const size_t available_pages = m_before_end - util::AlignUp(m_before_start, m_current_alignment);
                        if (num_pages == 0 || available_pages < num_pages) {
                            num_pages = available_pages;
                        }
                        m_before_end -= num_pages;
                        out = m_before_end * PageSize;
                    } else {
                        /* Neither the after nor the before region can provide an aligned block of memory. */
                        out = 0;
                        num_pages = 0;
                    }
                }
        };
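
        /* For reference: in the AArch64 TTBRn_EL1 format the ASID occupies bits [63:48] and the translation table */
        /* base address occupies the low bits; EncodeTtbr() constructs exactly that value, and InitializeForProcess() */
        /* masks those bits back off to recover the table's physical address. */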
        constexpr u64 EncodeTtbr(KPhysicalAddress table, u8 asid) {
            return (static_cast<u64>(asid) << 48) | (static_cast<u64>(GetInteger(table)));
        }

    }

    ALWAYS_INLINE void KPageTable::NoteUpdated() const {
        cpu::DataSynchronizationBarrierInnerShareableStore();

        /* Mark ourselves as in a tlb maintenance operation. */
        GetCurrentThread().SetInTlbMaintenanceOperation();
        ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

        if (this->IsKernel()) {
            this->OnKernelTableUpdated();
        } else {
            this->OnTableUpdated();
        }
    }

    ALWAYS_INLINE void KPageTable::NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
        MESOSPHERE_ASSERT(this->IsKernel());

        cpu::DataSynchronizationBarrierInnerShareableStore();

        /* Mark ourselves as in a tlb maintenance operation. */
        GetCurrentThread().SetInTlbMaintenanceOperation();
        ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

        this->OnKernelTableSinglePageUpdated(virt_addr);
    }

    void KPageTable::Initialize(s32 core_id) {
        /* Nothing actually needed here. */
        MESOSPHERE_UNUSED(core_id);
    }

    Result KPageTable::InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end) {
        /* Initialize basic fields. */
        m_asid    = 0;
        m_manager = Kernel::GetSystemSystemResource().GetPageTableManagerPointer();

        /* Initialize the base page table. */
        MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));

        R_SUCCEED();
    }

    Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) {
        /* Determine our ASID. */
        m_asid = process_index + 1;
        MESOSPHERE_ABORT_UNLESS(0 < m_asid && m_asid < util::size(s_ttbr0_entries));

        /* Set our manager. */
        m_manager = system_resource->GetPageTableManagerPointer();

        /* Get the virtual address of our L1 table. */
        const KPhysicalAddress ttbr0_phys = KPhysicalAddress(s_ttbr0_entries[m_asid] & UINT64_C(0xFFFFFFFFFFFE));
        const KVirtualAddress  ttbr0_virt = KMemoryLayout::GetLinearVirtualAddress(ttbr0_phys);

        /* Initialize our base table. */
        const size_t as_width = GetAddressSpaceWidth(flags);
        const KProcessAddress as_start = 0;
        const KProcessAddress as_end   = (1ul << as_width);
        R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(ttbr0_virt), as_start, as_end, code_address, code_size, system_resource, resource_limit));

        /* Note that we've updated the table (since we created it). */
        this->NoteUpdated();

        R_SUCCEED();
    }

    Result KPageTable::Finalize() {
        /* Only process tables should be finalized. */
        MESOSPHERE_ASSERT(!this->IsKernel());

        /* NOTE: Here Nintendo calls an unknown OnFinalize function. */
        /* this->OnFinalize(); */

        /* Note that we've updated (to ensure we're synchronized). */
        this->NoteUpdated();

        /* NOTE: Here Nintendo calls a second unknown OnFinalize function. */
        /* this->OnFinalize2(); */

        /* Free all pages in the table. */
        {
            /* Get implementation objects. */
            auto &impl = this->GetImpl();
            auto &mm   = Kernel::GetMemoryManager();

            /* Traverse, freeing all pages. */
            {
                /* Begin the traversal. */
                TraversalContext context;
                TraversalEntry entry;

                KPhysicalAddress cur_phys_addr = Null<KPhysicalAddress>;
                size_t cur_size = 0;
                u8 has_attr     = 0;

                bool cur_valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), this->GetAddressSpaceStart());
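
                /* Walk every mapping in the address space, closing our references to heap pages as we go and */
                /* tearing down the page table hierarchy itself as tables become empty. */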
                while (true) {
                    if (cur_valid) {
                        /* Free the actual pages, if there are any. */
                        if (IsHeapPhysicalAddressForFinalize(entry.phys_addr)) {
                            if (cur_size > 0) {
                                /* NOTE: Nintendo really does check next_entry.attr == (cur_entry.attr != 0)...but attr is always zero as of 18.0.0, and this is "probably" for the new console or debug-only anyway, */
                                /* so we'll implement the weird logic verbatim even though it doesn't match the GetContiguousRange logic. */
                                if (entry.phys_addr == cur_phys_addr + cur_size && entry.attr == has_attr) {
                                    /* Just extend the block, since we can. */
                                    cur_size += entry.block_size;
                                } else {
                                    /* Close the block, and begin tracking anew. */
                                    mm.Close(cur_phys_addr, cur_size / PageSize);

                                    cur_phys_addr = entry.phys_addr;
                                    cur_size      = entry.block_size;
                                    has_attr      = entry.attr != 0;
                                }
                            } else {
                                cur_phys_addr = entry.phys_addr;
                                cur_size      = entry.block_size;
                                has_attr      = entry.attr != 0;
                            }
                        }

                        /* Clean up the page table entries. */
                        bool freeing_table = false;
                        while (true) {
                            /* Clear the entries. */
                            const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1;
                            auto *pte = reinterpret_cast<PageTableEntry *>(context.is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)) : reinterpret_cast<uintptr_t>(context.level_entries[context.level]));
                            for (size_t i = 0; i < num_to_clear; ++i) {
                                pte[i] = InvalidPageTableEntry;
                            }

                            /* Remove the entries from the previous table. */
                            if (context.level != KPageTableImpl::EntryLevel_L1) {
                                context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear);
                            }

                            /* If we cleared a table, we need to note that we updated and free the table. */
                            if (freeing_table) {
                                KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
                                if (table == Null<KVirtualAddress>) {
                                    break;
                                }
                                ClearPageTable(table);
                                this->GetPageTableManager().Free(table);
                            }

                            /* Advance; we're no longer contiguous. */
                            context.is_contiguous = false;
                            context.level_entries[context.level] = pte + num_to_clear - 1;

                            /* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
                            if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
                                break;
                            }

                            /* Advance; we will not be working with blocks any more. */
                            context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
                            freeing_table = true;
                        }
                    }

                    /* Continue the traversal. */
                    cur_valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
                    if (entry.block_size == 0) {
                        break;
                    }
                }

                /* Free any remaining pages. */
                if (cur_size > 0) {
                    mm.Close(cur_phys_addr, cur_size / PageSize);
                }
            }

            /* Clear the L1 table. */
            {
                const KVirtualAddress l1_table = reinterpret_cast<uintptr_t>(impl.Finalize());
                ClearPageTable(l1_table);
            }

            /* Perform inherited finalization. */
            KPageTableBase::Finalize();
        }

        R_SUCCEED();
    }

    Result KPageTable::OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
        /* Check validity of parameters. */
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(num_pages > 0);
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
        MESOSPHERE_ASSERT(this->ContainsPages(virt_addr, num_pages));

        if (operation == OperationType_Map) {
            MESOSPHERE_ABORT_UNLESS(is_pa_valid);
            MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
        } else {
            MESOSPHERE_ABORT_UNLESS(!is_pa_valid);
        }

        if (operation == OperationType_Unmap) {
            R_RETURN(this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll));
        } else if (operation == OperationType_Separate) {
            R_RETURN(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));
        } else {
            auto entry_template = this->GetEntryTemplate(properties);

            switch (operation) {
                case OperationType_Map:
                    /* If mapping io or uncached pages, ensure that there is no pending reschedule. */
                    if (properties.io || properties.uncached) {
                        KScopedSchedulerLock sl;
                    }

                    R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
                case OperationType_ChangePermissions:
                    R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, false, page_list, reuse_ll));
                case OperationType_ChangePermissionsAndRefresh:
                    R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, false, page_list, reuse_ll));
                case OperationType_ChangePermissionsAndRefreshAndFlush:
                    R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, true, page_list, reuse_ll));
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }
        }
    }

    Result KPageTable::OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) {
        /* Check validity of parameters. */
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
        MESOSPHERE_ASSERT(num_pages > 0);
        MESOSPHERE_ASSERT(num_pages == page_group.GetNumPages());

        /* Map the page group. */
        auto entry_template = this->GetEntryTemplate(properties);
        switch (operation) {
            case OperationType_MapGroup:
            case OperationType_MapFirstGroup:
                /* If mapping io or uncached pages, ensure that there is no pending reschedule. */
                if (properties.io || properties.uncached) {
                    KScopedSchedulerLock sl;
                }

                R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirstGroup, page_list, reuse_ll));
            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
        }
    }

    Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Ensure there are no pending data writes. */
        cpu::DataSynchronizationBarrier();

        auto &impl = this->GetImpl();

        /* If we're not forcing an unmap, separate pages immediately. */
        if (!force) {
            R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));
        }

        /* Cache initial addresses for use on cleanup. */
        const KProcessAddress orig_virt_addr = virt_addr;
        size_t remaining_pages = num_pages;

        /* Ensure that any pages we track close on exit. */
        KPageGroup pages_to_close(this->GetBlockInfoManager());
        ON_SCOPE_EXIT { pages_to_close.CloseAndReset(); };
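
        /* NOTE: When force is true the range may be only partially mapped and will not have been separated, */
        /* so the traversal below must tolerate invalid entries and blocks larger than the remaining range. */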
        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry next_entry;
        bool next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);

        while (remaining_pages > 0) {
            /* Handle the case where we're not valid. */
            if (!next_valid) {
                MESOSPHERE_ABORT_UNLESS(force);
                const size_t cur_size = std::min(next_entry.block_size - (GetInteger(virt_addr) & (next_entry.block_size - 1)), remaining_pages * PageSize);
                remaining_pages -= cur_size / PageSize;
                virt_addr += cur_size;

                next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
                continue;
            }

            /* Handle the case where the block is bigger than it should be. */
            if (next_entry.block_size > remaining_pages * PageSize) {
                MESOSPHERE_ABORT_UNLESS(force);
                MESOSPHERE_R_ABORT_UNLESS(this->SeparatePagesImpl(std::addressof(next_entry), std::addressof(context), virt_addr, remaining_pages * PageSize, page_list, reuse_ll));
            }

            /* Check that our state is coherent. */
            MESOSPHERE_ASSERT((next_entry.block_size / PageSize) <= remaining_pages);
            MESOSPHERE_ASSERT(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));

            /* Unmap the block. */
            bool freeing_table = false;
            bool need_recalculate_virt_addr = false;
            while (true) {
                /* Clear the entries. */
                const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1;
                auto *pte = reinterpret_cast<PageTableEntry *>(context.is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)) : reinterpret_cast<uintptr_t>(context.level_entries[context.level]));
                for (size_t i = 0; i < num_to_clear; ++i) {
                    pte[i] = InvalidPageTableEntry;
                }

                /* Remove the entries from the previous table. */
                if (context.level != KPageTableImpl::EntryLevel_L1) {
                    context.level_entries[context.level + 1]->RemoveTableEntries(num_to_clear);
                }

                /* If we cleared a table, we need to note that we updated and free the table. */
                if (freeing_table) {
                    /* If there's no table, we also don't need to do a free. */
                    const KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
                    if (table == Null<KVirtualAddress>) {
                        break;
                    }
                    this->NoteUpdated();
                    this->FreePageTable(page_list, table);
                    need_recalculate_virt_addr = true;
                }

                /* Advance; we're no longer contiguous. */
                context.is_contiguous = false;
                context.level_entries[context.level] = pte + num_to_clear - 1;

                /* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
                if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
                    break;
                }

                /* Advance; we will not be working with blocks any more. */
                context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
                freeing_table = true;
            }

            /* Close the blocks. */
            if (!force && IsHeapPhysicalAddress(next_entry.phys_addr)) {
                const size_t block_num_pages = next_entry.block_size / PageSize;
                if (R_FAILED(pages_to_close.AddBlock(next_entry.phys_addr, block_num_pages))) {
                    this->NoteUpdated();
                    Kernel::GetMemoryManager().Close(next_entry.phys_addr, block_num_pages);
                    pages_to_close.CloseAndReset();
                }
            }

            /* Advance. */
            size_t freed_size = next_entry.block_size;
            if (need_recalculate_virt_addr) {
                /* We advanced more than by the block, so we need to calculate the actual advanced size. */
                const size_t block_size = impl.GetBlockSize(context.level, context.is_contiguous);
                const KProcessAddress new_virt_addr = util::AlignDown(GetInteger(impl.GetAddressForContext(std::addressof(context))) + block_size, block_size);
                MESOSPHERE_ABORT_UNLESS(new_virt_addr >= virt_addr + next_entry.block_size);

                freed_size = std::min<size_t>(new_virt_addr - virt_addr, remaining_pages * PageSize);
            }

            /* We can just advance by the block size. */
            virt_addr       += freed_size;
            remaining_pages -= freed_size / PageSize;

            next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
        }

        /* Ensure we remain coherent. */
        if (this->IsKernel() && num_pages == 1) {
            this->NoteSingleKernelPageUpdated(orig_virt_addr);
        } else {
            this->NoteUpdated();
        }

        R_SUCCEED();
    }
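
    /* NOTE: Map() assumes the target range is currently unmapped; page_size is the maximum block granularity */
    /* to map with, so tables are allocated until the traversal reaches a level whose block size does not exceed it. */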
    Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));

        auto &impl = this->GetImpl();

        u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry entry;
        bool valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr);

        /* Iterate, mapping each page. */
        while (num_pages > 0) {
            /* If we're mapping at the address, there must be nothing there. */
            MESOSPHERE_ABORT_UNLESS(!valid);

            /* If we fail, clean up any empty tables we may have allocated. */
            ON_RESULT_FAILURE {
                /* Remove entries for and free any tables. */
                while (context.level < KPageTableImpl::EntryLevel_L1) {
                    /* If the higher-level table has entries, we don't need to do a free. */
                    if (context.level_entries[context.level + 1]->GetTableNumEntries() != 0) {
                        break;
                    }

                    /* If there's no table, we also don't need to do a free. */
                    const KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), PageSize));
                    if (table == Null<KVirtualAddress>) {
                        break;
                    }

                    /* Clear the entry for the table we're removing. */
                    *context.level_entries[context.level + 1] = InvalidPageTableEntry;

                    /* Remove the entry for the table one level higher. */
                    if (context.level + 1 < KPageTableImpl::EntryLevel_L1) {
                        context.level_entries[context.level + 2]->RemoveTableEntries(1);
                    }

                    /* Advance our level. */
                    context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);

                    /* Note that we performed an update and free the table. */
                    this->NoteUpdated();
                    this->FreePageTable(page_list, table);
                }
            };

            /* If necessary, allocate page tables for the entry. */
            size_t mapping_size = entry.block_size;
            while (mapping_size > page_size) {
                /* Allocate the table. */
                const auto table = AllocatePageTable(page_list, reuse_ll);
                R_UNLESS(table != Null<KVirtualAddress>, svc::ResultOutOfResource());

                /* Wait for pending stores to complete. */
                cpu::DataSynchronizationBarrierInnerShareableStore();

                /* Update the block entry to be a table entry. */
                *context.level_entries[context.level] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(table), this->IsKernel(), true, 0);

                /* Add the entry to the table containing this one. */
                if (context.level != KPageTableImpl::EntryLevel_L1) {
                    context.level_entries[context.level + 1]->AddTableEntries(1);
                }

                /* Decrease our level. */
                context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) - 1);

                /* Add our new entry to the context. */
                context.level_entries[context.level] = GetPointer<PageTableEntry>(table) + impl.GetLevelIndex(virt_addr, context.level);

                /* Update our mapping size. */
                mapping_size = impl.GetBlockSize(context.level);
            }

            /* Determine how many pages we can set up on this iteration. */
            const size_t block_size = impl.GetBlockSize(context.level);
            const size_t max_ptes   = (context.level == KPageTableImpl::EntryLevel_L1 ? impl.GetNumL1Entries() : BlocksPerTable) - ((reinterpret_cast<uintptr_t>(context.level_entries[context.level]) / sizeof(PageTableEntry)) & (BlocksPerTable - 1));
            const size_t max_pages  = (block_size * max_ptes) / PageSize;
            const size_t cur_pages  = std::min(max_pages, num_pages);

            /* Determine the new base attribute. */
            const bool contig = page_size >= BlocksPerContiguousBlock * mapping_size;

            const size_t num_ptes = cur_pages / (block_size / PageSize);
            auto *pte = context.level_entries[context.level];
            for (size_t i = 0; i < num_ptes; ++i) {
                pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + i * block_size, entry_template, sw_reserved_bits, contig, context.level == KPageTableImpl::EntryLevel_L3);
                sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
            }

            /* Add the entries to the table containing this one. */
            if (context.level != KPageTableImpl::EntryLevel_L1) {
                context.level_entries[context.level + 1]->AddTableEntries(num_ptes);
            }

            /* Update our context. */
            context.is_contiguous = contig;
            context.level_entries[context.level] = pte + num_ptes - (contig ? BlocksPerContiguousBlock : 1);

            /* Advance our addresses. */
            phys_addr += cur_pages * PageSize;
            virt_addr += cur_pages * PageSize;
            num_pages -= cur_pages;

            /* Continue traversal. */
            valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
        }

        /* We mapped, so wait for our writes to take. */
        cpu::DataSynchronizationBarrierInnerShareableStore();

        R_SUCCEED();
    }

    Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Cache initial addresses for use on cleanup. */
        const KProcessAddress  orig_virt_addr = virt_addr;
        const KPhysicalAddress orig_phys_addr = phys_addr;

        size_t remaining_pages = num_pages;

        /* Map the pages, using a guard to ensure we don't leak. */
        {
            ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };

            if (num_pages < ContiguousPageSize / PageSize) {
                R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));
                remaining_pages -= num_pages;
                virt_addr += num_pages * PageSize;
                phys_addr += num_pages * PageSize;
            } else {
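                /* NOTE: The strategy here is two-phase. First, walk up through progressively larger alignments, */
                /* mapping the unaligned fraction before each boundary, until the next larger block would no */
                /* longer fit in the remaining pages. Then walk back down, mapping the largest aligned runs first. */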
                /* Map the fractional part of the pages. */
                size_t alignment;
                for (alignment = ContiguousPageSize; (virt_addr & (alignment - 1)) == (phys_addr & (alignment - 1)); alignment = GetLargerAlignment(alignment)) {
                    /* Check if this would be our last map. */
                    const size_t pages_to_map = ((alignment - (virt_addr & (alignment - 1))) & (alignment - 1)) / PageSize;
                    if (pages_to_map + (alignment / PageSize) > remaining_pages) {
                        break;
                    }

                    /* Map pages, if we should. */
                    if (pages_to_map > 0) {
                        R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, disable_head_merge && virt_addr == orig_virt_addr, GetSmallerAlignment(alignment), page_list, reuse_ll));
                        remaining_pages -= pages_to_map;
                        virt_addr += pages_to_map * PageSize;
                        phys_addr += pages_to_map * PageSize;
                    }

                    /* Don't go further than L1 block. */
                    if (alignment == L1BlockSize) {
                        break;
                    }
                }

                while (remaining_pages > 0) {
                    /* Select the next smallest alignment. */
                    alignment = GetSmallerAlignment(alignment);
                    MESOSPHERE_ASSERT((virt_addr & (alignment - 1)) == 0);
                    MESOSPHERE_ASSERT((phys_addr & (alignment - 1)) == 0);

                    /* Map pages, if we should. */
                    const size_t pages_to_map = util::AlignDown(remaining_pages, alignment / PageSize);
                    if (pages_to_map > 0) {
                        R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, disable_head_merge && virt_addr == orig_virt_addr, alignment, page_list, reuse_ll));
                        remaining_pages -= pages_to_map;
                        virt_addr += pages_to_map * PageSize;
                        phys_addr += pages_to_map * PageSize;
                    }
                }
            }
        }

        /* Perform what coalescing we can. */
        this->MergePages(orig_virt_addr, num_pages, page_list);

        /* Open references to the pages, if we should. */
        if (IsHeapPhysicalAddress(orig_phys_addr)) {
            Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
        }

        R_SUCCEED();
    }

    Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* We want to maintain a new reference to every page in the group. */
        KScopedPageGroup spg(pg, not_first);

        /* Cache initial address for use on cleanup. */
        const KProcessAddress orig_virt_addr = virt_addr;

        size_t mapped_pages = 0;

        /* Map the pages, using a guard to ensure we don't leak. */
        {
            ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };

            if (num_pages < ContiguousPageSize / PageSize) {
                for (const auto &block : pg) {
                    const KPhysicalAddress block_phys_addr = block.GetAddress();
                    const size_t cur_pages = block.GetNumPages();
                    R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));

                    virt_addr    += cur_pages * PageSize;
                    mapped_pages += cur_pages;
                }
            } else {
                /* Create a block representing our virtual space. */
                AlignedMemoryBlock virt_block(GetInteger(virt_addr), num_pages, L1BlockSize);

                for (const auto &block : pg) {
                    /* Create a block representing this physical group, synchronize its alignment to our virtual block. */
                    const KPhysicalAddress block_phys_addr = block.GetAddress();
                    size_t cur_pages = block.GetNumPages();

                    AlignedMemoryBlock phys_block(GetInteger(block_phys_addr), cur_pages, virt_block.GetAlignment());
                    virt_block.SetAlignment(phys_block.GetAlignment());

                    while (cur_pages > 0) {
                        /* Find a physical region for us to map at. */
                        uintptr_t phys_choice = 0;
                        size_t phys_pages = 0;
                        phys_block.FindBlock(phys_choice, phys_pages);

                        /* If we didn't find a region, try decreasing our alignment. */
                        if (phys_pages == 0) {
                            const size_t next_alignment = KPageTable::GetSmallerAlignment(phys_block.GetAlignment());
                            MESOSPHERE_ASSERT(next_alignment >= PageSize);
                            phys_block.SetAlignment(next_alignment);
                            virt_block.SetAlignment(next_alignment);
                            continue;
                        }

                        /* Begin choosing virtual blocks to map at the region we chose. */
                        while (phys_pages > 0) {
                            /* Find a virtual region for us to map at. */
                            uintptr_t virt_choice = 0;
                            size_t virt_pages = phys_pages;
                            virt_block.FindBlock(virt_choice, virt_pages);

                            /* If we didn't find a region, try decreasing our alignment. */
                            if (virt_pages == 0) {
                                const size_t next_alignment = KPageTable::GetSmallerAlignment(virt_block.GetAlignment());
                                MESOSPHERE_ASSERT(next_alignment >= PageSize);
                                phys_block.SetAlignment(next_alignment);
                                virt_block.SetAlignment(next_alignment);
                                continue;
                            }

                            /* Map! */
                            R_TRY(this->Map(virt_choice, phys_choice, virt_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, virt_block.GetAlignment(), page_list, reuse_ll));

                            /* Advance. */
                            phys_choice  += virt_pages * PageSize;
                            phys_pages   -= virt_pages;
                            cur_pages    -= virt_pages;
                            mapped_pages += virt_pages;
                        }
                    }
                }
            }
        }

        MESOSPHERE_ASSERT(mapped_pages == num_pages);

        /* Perform what coalescing we can. */
        this->MergePages(orig_virt_addr, num_pages, page_list);

        /* We succeeded! We want to persist the reference to the pages. */
        spg.CancelClose();

        R_SUCCEED();
    }

    bool KPageTable::MergePages(TraversalContext *context, PageLinkedList *page_list) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        auto &impl = this->GetImpl();

        /* Iteratively merge, until we can't. */
        bool merged = false;
        while (true) {
            /* Try to merge. */
            KVirtualAddress freed_table = Null<KVirtualAddress>;
            if (!impl.MergePages(std::addressof(freed_table), context)) {
                break;
            }

            /* Note that we updated. */
            this->NoteUpdated();

            /* Free the page. */
            if (freed_table != Null<KVirtualAddress>) {
                ClearPageTable(freed_table);
                this->FreePageTable(page_list, freed_table);
            }

            /* We performed at least one merge. */
            merged = true;
        }

        return merged;
    }

    void KPageTable::MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        auto &impl = this->GetImpl();

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry entry;
        MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr));

        /* Merge start of the range. */
        this->MergePages(std::addressof(context), page_list);

        /* If we have more than one page, do the same for the end of the range. */
        if (num_pages > 1) {
            /* Begin traversal for end of range. */
            const size_t size = num_pages * PageSize;
            const auto end_page  = virt_addr + size;
            const auto last_page = end_page - PageSize;

            MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), last_page));

            /* Merge. */
            this->MergePages(std::addressof(context), page_list);
        }
    }

    Result KPageTable::SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        auto &impl = this->GetImpl();

        /* If at any point we fail, we want to merge. */
        ON_RESULT_FAILURE { this->MergePages(context, page_list); };

        /* Iterate, separating until our block size is small enough. */
        while (entry->block_size > block_size) {
            /* If necessary, allocate a table. */
            KVirtualAddress table = Null<KVirtualAddress>;
            if (!context->is_contiguous) {
                table = this->AllocatePageTable(page_list, reuse_ll);
                R_UNLESS(table != Null<KVirtualAddress>, svc::ResultOutOfResource());
            }

            /* Separate. */
            impl.SeparatePages(entry, context, virt_addr, GetPointer<PageTableEntry>(table));
            this->NoteUpdated();
        }

        R_SUCCEED();
    }
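
    /* NOTE: SeparatePages() splits any large blocks spanning the start and end of the range, so that the */
    /* range [virt_addr, virt_addr + num_pages * PageSize) begins and ends exactly on block boundaries. */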
    Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        auto &impl = this->GetImpl();

        /* Begin traversal. */
        TraversalContext start_context;
        TraversalEntry entry;
        MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(start_context), virt_addr));

        /* Separate pages at the start of the range. */
        const size_t size = num_pages * PageSize;
        R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(start_context), virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));

        /* If necessary, separate pages at the end of the range. */
        if (num_pages > 1) {
            const auto end_page  = virt_addr + size;
            const auto last_page = end_page - PageSize;

            /* Begin traversal. */
            TraversalContext end_context;
            MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(end_context), last_page));

            ON_RESULT_FAILURE { this->MergePages(std::addressof(start_context), page_list); };

            R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(end_context), last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
        }

        R_SUCCEED();
    }

    Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Ensure there are no pending data writes. */
        cpu::DataSynchronizationBarrier();

        /* Separate pages before we change permissions. */
        R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));

        /* ===================================================== */

        /* Define a helper function which will apply our template to entries. */
        enum ApplyOption : u32 {
            ApplyOption_None           = 0,
            ApplyOption_FlushDataCache = (1u << 0),
            ApplyOption_MergeMappings  = (1u << 1),
        };

        auto ApplyEntryTemplate = [this, virt_addr, disable_merge_attr, num_pages, page_list](PageTableEntry entry_template, u32 apply_option) -> void {
            /* Create work variables for us to use. */
            const KProcessAddress orig_virt_addr = virt_addr;
            const KProcessAddress end_virt_addr  = orig_virt_addr + (num_pages * PageSize);
            KProcessAddress cur_virt_addr = virt_addr;
            size_t remaining_pages = num_pages;

            auto &impl = this->GetImpl();

            /* Parse the disable merge attrs. */
            const bool attr_disable_head      = (disable_merge_attr & DisableMergeAttribute_DisableHead) != 0;
            const bool attr_disable_head_body = (disable_merge_attr & DisableMergeAttribute_DisableHeadAndBody) != 0;
            const bool attr_enable_head_body  = (disable_merge_attr & DisableMergeAttribute_EnableHeadAndBody) != 0;
            const bool attr_disable_tail      = (disable_merge_attr & DisableMergeAttribute_DisableTail) != 0;
            const bool attr_enable_tail       = (disable_merge_attr & DisableMergeAttribute_EnableTail) != 0;
            const bool attr_enable_and_merge  = (disable_merge_attr & DisableMergeAttribute_EnableAndMergeHeadBodyTail) != 0;

            /* Begin traversal. */
            TraversalContext context;
            TraversalEntry next_entry;
            MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr));

            /* Continue changing properties until we've changed them for all pages. */
            bool cleared_disable_merge_bits = false;
            while (remaining_pages > 0) {
                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
                MESOSPHERE_ABORT_UNLESS(next_entry.block_size <= remaining_pages * PageSize);

                /* Determine if we're at the start. */
                const bool is_start = (cur_virt_addr == orig_virt_addr);
                const bool is_end   = ((cur_virt_addr + next_entry.block_size) == end_virt_addr);

                /* Determine the relevant merge attributes. */
                bool disable_head_merge, disable_head_body_merge, disable_tail_merge;
                if (next_entry.IsHeadMergeDisabled()) {
                    disable_head_merge = true;
                } else if (attr_disable_head) {
                    disable_head_merge = is_start;
                } else {
                    disable_head_merge = false;
                }
                if (is_start) {
                    if (attr_disable_head_body) {
                        disable_head_body_merge = true;
                    } else if (attr_enable_head_body) {
                        disable_head_body_merge = false;
                    } else {
                        disable_head_body_merge = (!attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled());
                    }
                } else {
                    disable_head_body_merge     = (!attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled());
                    cleared_disable_merge_bits |= (attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled());
                }
                if (is_end) {
                    if (attr_disable_tail) {
                        disable_tail_merge = true;
                    } else if (attr_enable_tail) {
                        disable_tail_merge = false;
                    } else {
                        disable_tail_merge = (!attr_enable_and_merge && next_entry.IsTailMergeDisabled());
                    }
                } else {
                    disable_tail_merge          = (!attr_enable_and_merge && next_entry.IsTailMergeDisabled());
                    cleared_disable_merge_bits |= (attr_enable_and_merge && next_entry.IsTailMergeDisabled());
                }

                /* Encode the merge disable flags into the software reserved bits. */
                u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, disable_head_body_merge, disable_tail_merge);

                /* If we should flush entries, do so. */
                if ((apply_option & ApplyOption_FlushDataCache) != 0) {
                    if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
                        cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size);
                    }
                }

                /* Apply the entry template. */
                {
                    const size_t num_entries = context.is_contiguous ? BlocksPerContiguousBlock : 1;

                    auto * const pte = context.level_entries[context.level];
                    const size_t block_size = impl.GetBlockSize(context.level);
                    for (size_t i = 0; i < num_entries; ++i) {
                        pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + i * block_size, entry_template, sw_reserved_bits, context.is_contiguous, context.level == KPageTableImpl::EntryLevel_L3);
                        sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
                    }
                }

                /* If our option asks us to, try to merge mappings. */
                bool merge = ((apply_option & ApplyOption_MergeMappings) != 0 || cleared_disable_merge_bits) && next_entry.block_size < L1BlockSize;
                if (merge) {
                    const size_t larger_align = GetLargerAlignment(next_entry.block_size);
                    if (util::IsAligned(GetInteger(cur_virt_addr) + next_entry.block_size, larger_align)) {
                        const uintptr_t aligned_start = util::AlignDown(GetInteger(cur_virt_addr), larger_align);
                        if (orig_virt_addr <= aligned_start && aligned_start + larger_align - 1 < GetInteger(orig_virt_addr) + (num_pages * PageSize) - 1) {
                            merge = this->MergePages(std::addressof(context), page_list);
                        } else {
                            merge = false;
                        }
                    } else {
                        merge = false;
                    }
                }

                /* If we merged, correct the traversal to a sane state. */
                if (merge) {
                    /* NOTE: Begin a new traversal, now that we've merged. */
                    MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr));

                    /* The actual size needs to not take into account the portion of the block before our virtual address. */
                    const size_t actual_size = next_entry.block_size - (GetInteger(next_entry.phys_addr) & (next_entry.block_size - 1));
                    remaining_pages -= std::min(remaining_pages, actual_size / PageSize);
                    cur_virt_addr   += actual_size;
                } else {
                    /* If we didn't merge, just advance. */
                    remaining_pages -= next_entry.block_size / PageSize;
                    cur_virt_addr   += next_entry.block_size;
                }

                /* Continue our traversal. */
                if (remaining_pages == 0) {
                    break;
                }
                MESOSPHERE_ABORT_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
            }
        };

        /* ===================================================== */

        /* If we don't need to refresh the pages, we can just apply the mappings. */
        if (!refresh_mapping) {
            ApplyEntryTemplate(entry_template, ApplyOption_None);
            this->NoteUpdated();
        } else {
            /* We need to refresh the mappings. */
            /* First, apply the changes without the mapped bit. This will cause all entries to page fault if accessed. */
            {
                PageTableEntry unmapped_template = entry_template;
                unmapped_template.SetMapped(false);

                ApplyEntryTemplate(unmapped_template, ApplyOption_MergeMappings);
                this->NoteUpdated();
            }

            /* Next, take and immediately release the scheduler lock. This will force a reschedule. */
            {
                KScopedSchedulerLock sl;
            }

            /* Finally, apply the changes as directed, flushing the mappings before they're applied (if we should). */
            ApplyEntryTemplate(entry_template, flush_mapping ? ApplyOption_FlushDataCache : ApplyOption_None);
        }

        /* We've succeeded, now perform what coalescing we can. */
        this->MergePages(virt_addr, num_pages, page_list);

        R_SUCCEED();
    }
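
    /* NOTE: The pages on the list are page table pages that became unused during the preceding update; */
    /* they are returned to the page table manager only once the whole update has completed. */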
    void KPageTable::FinalizeUpdateImpl(PageLinkedList *page_list) {
        while (page_list->Peek()) {
            KVirtualAddress page = KVirtualAddress(page_list->Pop());
            MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
            MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
            this->GetPageTableManager().Free(page);
        }
    }

}