/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern::arch::arm64 {

    namespace {

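        /* Helper that tracks a page-aligned region split around an alignment boundary: an aligned */
        /* "after" sub-region and the unaligned "before" remainder. FindBlock() carves aligned runs */
        /* from the front of "after" (or the back of "before") at the current granularity. */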
        class AlignedMemoryBlock {
            private:
                uintptr_t m_before_start;
                uintptr_t m_before_end;
                uintptr_t m_after_start;
                uintptr_t m_after_end;
                size_t m_current_alignment;
            public:
                constexpr AlignedMemoryBlock(uintptr_t start, size_t num_pages, size_t alignment) : m_before_start(0), m_before_end(0), m_after_start(0), m_after_end(0), m_current_alignment(0) {
                    MESOSPHERE_ASSERT(util::IsAligned(start, PageSize));
                    MESOSPHERE_ASSERT(num_pages > 0);

                    /* Find an alignment that allows us to divide into at least two regions. */
                    uintptr_t start_page = start / PageSize;
                    alignment /= PageSize;
                    while (util::AlignUp(start_page, alignment) >= util::AlignDown(start_page + num_pages, alignment)) {
                        alignment = KPageTable::GetSmallerAlignment(alignment * PageSize) / PageSize;
                    }

                    m_before_start      = start_page;
                    m_before_end        = util::AlignUp(start_page, alignment);
                    m_after_start       = m_before_end;
                    m_after_end         = start_page + num_pages;
                    m_current_alignment = alignment;
                    MESOSPHERE_ASSERT(m_current_alignment > 0);
                }

                constexpr void SetAlignment(size_t alignment) {
                    /* We can only ever decrease the granularity. */
                    MESOSPHERE_ASSERT(m_current_alignment >= alignment / PageSize);
                    m_current_alignment = alignment / PageSize;
                }

                constexpr size_t GetAlignment() const {
                    return m_current_alignment * PageSize;
                }

                constexpr void FindBlock(uintptr_t &out, size_t &num_pages) {
                    if ((m_after_end - m_after_start) >= m_current_alignment) {
                        /* Select aligned memory from after block. */
                        const size_t available_pages = util::AlignDown(m_after_end, m_current_alignment) - m_after_start;
                        if (num_pages == 0 || available_pages < num_pages) {
                            num_pages = available_pages;
                        }
                        out = m_after_start * PageSize;
                        m_after_start += num_pages;
                    } else if ((m_before_end - m_before_start) >= m_current_alignment) {
                        /* Select aligned memory from before block. */
                        const size_t available_pages = m_before_end - util::AlignUp(m_before_start, m_current_alignment);
                        if (num_pages == 0 || available_pages < num_pages) {
                            num_pages = available_pages;
                        }
                        m_before_end -= num_pages;
                        out = m_before_end * PageSize;
                    } else {
                        /* Neither the after nor the before block can provide an aligned chunk of memory. */
                        out = 0;
                        num_pages = 0;
                    }
                }
        };

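        /* Pack a page table base address and ASID into a TTBR value: per the AArch64 TTBRn_EL1 */
        /* layout, the ASID occupies bits [63:48] (only 8 bits are used here) and the table's */
        /* physical base address occupies the low bits. */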
        constexpr u64 EncodeTtbr(KPhysicalAddress table, u8 asid) {
            return (static_cast<u64>(asid) << 48) | (static_cast<u64>(GetInteger(table)));
        }

        class KPageTableAsidManager {
            private:
                using WordType = u32;

                static constexpr u8 ReservedAsids[] = { 0 };
                static constexpr size_t NumReservedAsids = util::size(ReservedAsids);
                static constexpr size_t BitsPerWord = BITSIZEOF(WordType);
                static constexpr size_t AsidCount = 0x100;
                static constexpr size_t NumWords = AsidCount / BitsPerWord;
                static constexpr WordType FullWord = ~WordType(0u);
            private:
                WordType m_state[NumWords];
                KLightLock m_lock;
                u8 m_hint;
            private:
                constexpr bool TestImpl(u8 asid) const {
                    return m_state[asid / BitsPerWord] & (1u << (asid % BitsPerWord));
                }

                constexpr void ReserveImpl(u8 asid) {
                    MESOSPHERE_ASSERT(!this->TestImpl(asid));
                    m_state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord));
                }

                constexpr void ReleaseImpl(u8 asid) {
                    MESOSPHERE_ASSERT(this->TestImpl(asid));
                    m_state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord));
                }

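                /* Find the lowest clear bit (lowest free ASID) across the state words. For a word w, */
                /* (w + 1) ^ w sets every bit up to and including the lowest clear bit of w, e.g. */
                /* w = 0b0111 gives 0b1111; the index of that mask's highest set bit is the free bit. */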
                constexpr u8 FindAvailable() const {
                    for (size_t i = 0; i < util::size(m_state); i++) {
                        if (m_state[i] == FullWord) {
                            continue;
                        }

                        const WordType clear_bit = (m_state[i] + 1) ^ (m_state[i]);
                        return BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit);
                    }

                    if (m_state[util::size(m_state) - 1] == FullWord) {
                        MESOSPHERE_PANIC("Unable to reserve ASID");
                    }

                    __builtin_unreachable();
                }

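                /* Count the leading zero bits of a WordType value using the 64-bit builtin, subtracting */
                /* the width difference. The argument is never zero here (full words are skipped above), */
                /* so the builtin's undefined zero case is not hit. */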
                static constexpr ALWAYS_INLINE WordType ClearLeadingZero(WordType value) {
                    return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType));
                }

            public:
                constexpr KPageTableAsidManager() : m_state(), m_lock(), m_hint() {
                    for (size_t i = 0; i < NumReservedAsids; i++) {
                        this->ReserveImpl(ReservedAsids[i]);
                    }
                }

                u8 Reserve() {
                    KScopedLightLock lk(m_lock);

                    if (this->TestImpl(m_hint)) {
                        m_hint = this->FindAvailable();
                    }

                    this->ReserveImpl(m_hint);

                    return m_hint++;
                }

                void Release(u8 asid) {
                    KScopedLightLock lk(m_lock);
                    this->ReleaseImpl(asid);
                }
        };

        KPageTableAsidManager g_asid_manager;

    }

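    /* Note that the table changed: issue a store barrier, then perform the appropriate TLB */
    /* maintenance (kernel table vs. process table) while the current thread is flagged as */
    /* being in a TLB maintenance operation. */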
    ALWAYS_INLINE void KPageTable::NoteUpdated() const {
        cpu::DataSynchronizationBarrierInnerShareableStore();

        /* Mark ourselves as in a tlb maintenance operation. */
        GetCurrentThread().SetInTlbMaintenanceOperation();
        ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

        if (this->IsKernel()) {
            this->OnKernelTableUpdated();
        } else {
            this->OnTableUpdated();
        }
    }

    ALWAYS_INLINE void KPageTable::NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
        MESOSPHERE_ASSERT(this->IsKernel());

        cpu::DataSynchronizationBarrierInnerShareableStore();

        /* Mark ourselves as in a tlb maintenance operation. */
        GetCurrentThread().SetInTlbMaintenanceOperation();
        ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

        this->OnKernelTableSinglePageUpdated(virt_addr);
    }

    void KPageTable::Initialize(s32 core_id) {
        /* Nothing actually needed here. */
        MESOSPHERE_UNUSED(core_id);
    }

    Result KPageTable::InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end) {
        /* Initialize basic fields. */
        m_asid    = 0;
        m_manager = Kernel::GetSystemSystemResource().GetPageTableManagerPointer();

        /* Allocate a page for ttbr. */
        /* NOTE: It is a postcondition of page table manager allocation that the page is all-zero. */
        const u64 asid_tag = (static_cast<u64>(m_asid) << 48ul);
        const KVirtualAddress page = m_manager->Allocate();
        MESOSPHERE_ASSERT(page != Null<KVirtualAddress>);
        m_ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag;

        /* Initialize the base page table. */
        MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));

        R_SUCCEED();
    }

    Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
        /* Get an ASID */
        m_asid = g_asid_manager.Reserve();
        ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };

        /* Set our manager. */
        m_manager = system_resource->GetPageTableManagerPointer();

        /* Allocate a new table, and set our ttbr value. */
        const KVirtualAddress new_table = m_manager->Allocate();
        R_UNLESS(new_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
        m_ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), m_asid);
        ON_RESULT_FAILURE_2 { m_manager->Free(new_table); };

        /* Initialize our base table. */
        const size_t as_width = GetAddressSpaceWidth(flags);
        const KProcessAddress as_start = 0;
        const KProcessAddress as_end   = (1ul << as_width);
        R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit));

        /* Note that we've updated the table (since we created it). */
        this->NoteUpdated();

        R_SUCCEED();
    }

    Result KPageTable::Finalize() {
        /* Only process tables should be finalized. */
        MESOSPHERE_ASSERT(!this->IsKernel());

        /* NOTE: Here Nintendo calls an unknown OnFinalize function. */
        /* this->OnFinalize(); */

        /* Note that we've updated (to ensure we're synchronized). */
        this->NoteUpdated();

        /* NOTE: Here Nintendo calls a second unknown OnFinalize function. */
        /* this->OnFinalize2(); */

        /* Free all pages in the table. */
        {
            /* Get implementation objects. */
            auto &impl = this->GetImpl();
            auto &mm   = Kernel::GetMemoryManager();

            /* Traverse, freeing all pages. */
            {
                /* Get the address space size. */
                const size_t as_size = this->GetAddressSpaceSize();

                /* Begin the traversal. */
                TraversalContext context;
                TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
                bool cur_valid = false;
                TraversalEntry next_entry;
                bool next_valid;
                size_t tot_size = 0;

                next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), this->GetAddressSpaceStart());

                /* Iterate over entries. */
                while (true) {
                    /* NOTE: Nintendo really does check next_entry.attr == (cur_entry.attr != 0)...but attr is always zero as of 18.0.0, and this is "probably" for the new console or debug-only anyway, */
                    /* so we'll implement the weird logic verbatim even though it doesn't match the GetContiguousRange logic. */
                    if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size && next_entry.attr == (cur_entry.attr ? 1 : 0))) {
                        cur_entry.block_size += next_entry.block_size;
                    } else {
                        if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
                            mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
                        }

                        /* Update tracking variables. */
                        tot_size += cur_entry.block_size;
                        cur_entry = next_entry;
                        cur_valid = next_valid;
                    }

                    if (cur_entry.block_size + tot_size >= as_size) {
                        break;
                    }

                    next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
                }

                /* Handle the last block. */
                if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {
                    mm.Close(cur_entry.phys_addr, cur_entry.block_size / PageSize);
                }
            }

            /* Cache address space extents for convenience. */
            const KProcessAddress as_start = this->GetAddressSpaceStart();
            const KProcessAddress as_last  = as_start + this->GetAddressSpaceSize() - 1;

            /* Free all L3 tables. */
            for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L2BlockSize) {
                L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address);
                if (l1_entry->IsTable()) {
                    L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, cur_address);
                    if (l2_entry->IsTable()) {
                        const KVirtualAddress l3_table = GetPageTableVirtualAddress(l2_entry->GetTable());
                        if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
                            while (!this->GetPageTableManager().Close(l3_table, 1)) { /* ... */ }
                            ClearPageTable(l3_table);
                            this->GetPageTableManager().Free(l3_table);
                        }
                    }
                }
            }

            /* Free all L2 tables. */
            for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L1BlockSize) {
                L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address);
                if (l1_entry->IsTable()) {
                    const KVirtualAddress l2_table = GetPageTableVirtualAddress(l1_entry->GetTable());
                    if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
                        while (!this->GetPageTableManager().Close(l2_table, 1)) { /* ... */ }
                        ClearPageTable(l2_table);
                        this->GetPageTableManager().Free(l2_table);
                    }
                }
            }

            /* Free the L1 table. */
            {
                const KVirtualAddress l1_table = reinterpret_cast<uintptr_t>(impl.Finalize());
                ClearPageTable(l1_table);
                this->GetPageTableManager().Free(l1_table);
            }

            /* Perform inherited finalization. */
            KPageTableBase::Finalize();
        }

        /* Release our asid. */
        g_asid_manager.Release(m_asid);

        R_SUCCEED();
    }

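    /* Operation dispatch for physically contiguous ranges; the page-group overload below handles */
    /* the MapGroup/MapFirstGroup operations. */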
    Result KPageTable::OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
        /* Check validity of parameters. */
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(num_pages > 0);
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
        MESOSPHERE_ASSERT(this->ContainsPages(virt_addr, num_pages));

        if (operation == OperationType_Map) {
            MESOSPHERE_ABORT_UNLESS(is_pa_valid);
            MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
        } else {
            MESOSPHERE_ABORT_UNLESS(!is_pa_valid);
        }

        if (operation == OperationType_Unmap) {
            R_RETURN(this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll));
        } else if (operation == OperationType_Separate) {
            const size_t size = num_pages * PageSize;
            R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
            ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };

            if (num_pages > 1) {
                const auto end_page  = virt_addr + size;
                const auto last_page = end_page - PageSize;

                R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
            }

            R_SUCCEED();
        } else {
            auto entry_template = this->GetEntryTemplate(properties);

            switch (operation) {
                case OperationType_Map:
                    R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
                case OperationType_ChangePermissions:
                    R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, false, page_list, reuse_ll));
                case OperationType_ChangePermissionsAndRefresh:
                    R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, false, page_list, reuse_ll));
                case OperationType_ChangePermissionsAndRefreshAndFlush:
                    R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, true, page_list, reuse_ll));
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }
        }
    }

    Result KPageTable::OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) {
        /* Check validity of parameters. */
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
        MESOSPHERE_ASSERT(num_pages > 0);
        MESOSPHERE_ASSERT(num_pages == page_group.GetNumPages());

        /* Map the page group. */
        auto entry_template = this->GetEntryTemplate(properties);
        switch (operation) {
            case OperationType_MapGroup:
            case OperationType_MapFirstGroup:
                R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirstGroup, page_list, reuse_ll));
            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
        }
    }

    Result KPageTable::MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L1BlockSize));
        MESOSPHERE_ASSERT(util::IsAligned(num_pages * PageSize, L1BlockSize));

        /* Allocation is never needed for L1 block mapping. */
        MESOSPHERE_UNUSED(page_list, reuse_ll);

        auto &impl = this->GetImpl();

        u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);

        /* Iterate, mapping each block. */
        for (size_t i = 0; i < num_pages; i += L1BlockSize / PageSize) {
            /* Map the block. */
            *impl.GetL1Entry(virt_addr) = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);
            sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);

            virt_addr += L1BlockSize;
            phys_addr += L1BlockSize;
        }

        R_SUCCEED();
    }

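    /* For the L2/L3 mappers below, the open-counts track how many newly written entries reference */
    /* each intermediate table, so references on the page table manager can be opened in batches */
    /* when the walk crosses into the next table or finishes. */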
    Result KPageTable::MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L2BlockSize));
        MESOSPHERE_ASSERT(util::IsAligned(num_pages * PageSize, L2BlockSize));

        auto &impl = this->GetImpl();
        KVirtualAddress l2_virt = Null<KVirtualAddress>;
        int l2_open_count = 0;

        u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);

        /* Iterate, mapping each block. */
        for (size_t i = 0; i < num_pages; i += L2BlockSize / PageSize) {
            KPhysicalAddress l2_phys = Null<KPhysicalAddress>;

            /* If we have no L2 table, we should get or allocate one. */
            if (l2_virt == Null<KVirtualAddress>) {
                if (L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); !l1_entry->GetTable(l2_phys)) {
                    /* Allocate table. */
                    l2_virt = AllocatePageTable(page_list, reuse_ll);
                    R_UNLESS(l2_virt != Null<KVirtualAddress>, svc::ResultOutOfResource());

                    /* Set the entry. */
                    l2_phys = GetPageTablePhysicalAddress(l2_virt);
                    PteDataMemoryBarrier();
                    *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
                } else {
                    l2_virt = GetPageTableVirtualAddress(l2_phys);
                }
            }
            MESOSPHERE_ASSERT(l2_virt != Null<KVirtualAddress>);

            /* Map the block. */
            *impl.GetL2EntryFromTable(l2_virt, virt_addr) = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);
            sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
            l2_open_count++;

            virt_addr += L2BlockSize;
            phys_addr += L2BlockSize;

            /* Account for hitting end of table. */
            if (util::IsAligned(GetInteger(virt_addr), L1BlockSize)) {
                if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
                    this->GetPageTableManager().Open(l2_virt, l2_open_count);
                }
                l2_virt = Null<KVirtualAddress>;
                l2_open_count = 0;
            }
        }

        /* Perform any remaining opens. */
        if (l2_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
            this->GetPageTableManager().Open(l2_virt, l2_open_count);
        }

        R_SUCCEED();
    }

    Result KPageTable::MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));

        auto &impl = this->GetImpl();
        KVirtualAddress l2_virt = Null<KVirtualAddress>;
        KVirtualAddress l3_virt = Null<KVirtualAddress>;
        int l2_open_count = 0;
        int l3_open_count = 0;

        u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);

        /* Iterate, mapping each page. */
        for (size_t i = 0; i < num_pages; i++) {
            KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
            bool l2_allocated = false;

            /* If we have no L3 table, we should get or allocate one. */
            if (l3_virt == Null<KVirtualAddress>) {
                KPhysicalAddress l2_phys = Null<KPhysicalAddress>;

                /* If we have no L2 table, we should get or allocate one. */
                if (l2_virt == Null<KVirtualAddress>) {
                    if (L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); !l1_entry->GetTable(l2_phys)) {
                        /* Allocate table. */
                        l2_virt = AllocatePageTable(page_list, reuse_ll);
                        R_UNLESS(l2_virt != Null<KVirtualAddress>, svc::ResultOutOfResource());

                        /* Set the entry. */
                        l2_phys = GetPageTablePhysicalAddress(l2_virt);
                        PteDataMemoryBarrier();
                        *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
                        l2_allocated = true;
                    } else {
                        l2_virt = GetPageTableVirtualAddress(l2_phys);
                    }
                }
                MESOSPHERE_ASSERT(l2_virt != Null<KVirtualAddress>);

                if (L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr); !l2_entry->GetTable(l3_phys)) {
                    /* Allocate table. */
                    l3_virt = AllocatePageTable(page_list, reuse_ll);
                    if (l3_virt == Null<KVirtualAddress>) {
                        /* Cleanup the L2 entry. */
                        if (l2_allocated) {
                            *impl.GetL1Entry(virt_addr) = InvalidL1PageTableEntry;
                            this->NoteUpdated();
                            FreePageTable(page_list, l2_virt);
                        } else if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
                            this->GetPageTableManager().Open(l2_virt, l2_open_count);
                        }

                        R_THROW(svc::ResultOutOfResource());
                    }

                    /* Set the entry. */
                    l3_phys = GetPageTablePhysicalAddress(l3_virt);
                    PteDataMemoryBarrier();
                    *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
                    l2_open_count++;
                } else {
                    l3_virt = GetPageTableVirtualAddress(l3_phys);
                }
            }
            MESOSPHERE_ASSERT(l3_virt != Null<KVirtualAddress>);

            /* Map the page. */
            *impl.GetL3EntryFromTable(l3_virt, virt_addr) = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);
            sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
            l3_open_count++;

            virt_addr += PageSize;
            phys_addr += PageSize;

            /* Account for hitting end of table. */
            if (util::IsAligned(GetInteger(virt_addr), L2BlockSize)) {
                if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
                    this->GetPageTableManager().Open(l3_virt, l3_open_count);
                }
                l3_virt = Null<KVirtualAddress>;
                l3_open_count = 0;

                if (util::IsAligned(GetInteger(virt_addr), L1BlockSize)) {
                    if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
                        this->GetPageTableManager().Open(l2_virt, l2_open_count);
                    }
                    l2_virt = Null<KVirtualAddress>;
                    l2_open_count = 0;
                }
            }
        }

        /* Perform any remaining opens. */
        if (l2_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
            this->GetPageTableManager().Open(l2_virt, l2_open_count);
        }
        if (l3_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
            this->GetPageTableManager().Open(l3_virt, l3_open_count);
        }

        R_SUCCEED();
    }

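    /* Unmap a range. force is used by the failure-cleanup paths in MapContiguous/MapGroup: it skips */
    /* the up-front page separation, tolerates partially valid mappings, and does not close references */
    /* to the unmapped heap pages. */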
    Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        auto &impl = this->GetImpl();

        /* If we're not forcing an unmap, separate pages immediately. */
        if (!force) {
            const size_t size = num_pages * PageSize;
            R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
            ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };

            if (num_pages > 1) {
                const auto end_page  = virt_addr + size;
                const auto last_page = end_page - PageSize;

                R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
            }
        }

        /* Cache initial addresses for use on cleanup. */
        const KProcessAddress orig_virt_addr = virt_addr;
        size_t remaining_pages = num_pages;

        /* Ensure that any pages we track close on exit. */
        KPageGroup pages_to_close(this->GetBlockInfoManager());
        ON_SCOPE_EXIT { pages_to_close.CloseAndReset(); };

        /* Begin traversal. */
        TraversalContext context;
        TraversalEntry next_entry;
        bool next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);

        while (remaining_pages > 0) {
            /* Handle the case where we're not valid. */
            if (!next_valid) {
                MESOSPHERE_ABORT_UNLESS(force);
                const size_t cur_size = std::min(next_entry.block_size - (GetInteger(virt_addr) & (next_entry.block_size - 1)), remaining_pages * PageSize);
                remaining_pages -= cur_size / PageSize;
                virt_addr += cur_size;
                next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
                continue;
            }

            /* Handle the case where the block is bigger than it should be. */
            if (next_entry.block_size > remaining_pages * PageSize) {
                MESOSPHERE_ABORT_UNLESS(force);
                MESOSPHERE_R_ABORT_UNLESS(this->SeparatePages(virt_addr, remaining_pages * PageSize, page_list, reuse_ll));
                const bool new_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
                MESOSPHERE_ASSERT(new_valid);
                MESOSPHERE_UNUSED(new_valid);
            }

            /* Check that our state is coherent. */
            MESOSPHERE_ASSERT((next_entry.block_size / PageSize) <= remaining_pages);
            MESOSPHERE_ASSERT(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));

            /* Unmap the block. */
            L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
            switch (next_entry.block_size) {
                case L1BlockSize:
                    {
                        /* Clear the entry. */
                        *l1_entry = InvalidL1PageTableEntry;
                    }
                    break;
                case L2ContiguousBlockSize:
                case L2BlockSize:
                    {
                        /* Get the number of L2 blocks. */
                        const size_t num_l2_blocks = next_entry.block_size / L2BlockSize;

                        /* Get the L2 entry. */
                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);

                        /* Clear the entry. */
                        for (size_t i = 0; i < num_l2_blocks; i++) {
                            *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry;
                        }
                        PteDataMemoryBarrier();

                        /* Close references to the L2 table. */
                        if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
                            if (this->GetPageTableManager().Close(l2_virt, num_l2_blocks)) {
                                *l1_entry = InvalidL1PageTableEntry;
                                this->NoteUpdated();
                                this->FreePageTable(page_list, l2_virt);
                                pages_to_close.CloseAndReset();
                            }
                        }
                    }
                    break;
                case L3ContiguousBlockSize:
                case L3BlockSize:
                    {
                        /* Get the number of L3 blocks. */
                        const size_t num_l3_blocks = next_entry.block_size / L3BlockSize;

                        /* Get the L2 entry. */
                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
                        L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr);

                        /* Get the L3 entry. */
                        KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
                        MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys));
                        const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys);

                        /* Clear the entry. */
                        for (size_t i = 0; i < num_l3_blocks; i++) {
                            *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry;
                        }
                        PteDataMemoryBarrier();

                        /* Close references to the L3 table. */
                        if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
                            if (this->GetPageTableManager().Close(l3_virt, num_l3_blocks)) {
                                *l2_entry = InvalidL2PageTableEntry;
                                this->NoteUpdated();

                                /* Close reference to the L2 table. */
                                if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
                                    if (this->GetPageTableManager().Close(l2_virt, 1)) {
                                        *l1_entry = InvalidL1PageTableEntry;
                                        this->NoteUpdated();
                                        this->FreePageTable(page_list, l2_virt);
                                    }
                                }

                                this->FreePageTable(page_list, l3_virt);
                                pages_to_close.CloseAndReset();
                            }
                        }
                    }
                    break;
                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
            }

            /* Close the blocks. */
            if (!force && IsHeapPhysicalAddress(next_entry.phys_addr)) {
                const size_t block_num_pages = next_entry.block_size / PageSize;
                if (R_FAILED(pages_to_close.AddBlock(next_entry.phys_addr, block_num_pages))) {
                    this->NoteUpdated();
                    Kernel::GetMemoryManager().Close(next_entry.phys_addr, block_num_pages);
                    pages_to_close.CloseAndReset();
                }
            }

            /* Advance. */
            virt_addr       += next_entry.block_size;
            remaining_pages -= next_entry.block_size / PageSize;
            next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
        }

        /* Ensure we remain coherent. */
        if (this->IsKernel() && num_pages == 1) {
            this->NoteSingleKernelPageUpdated(orig_virt_addr);
        } else {
            this->NoteUpdated();
        }

        R_SUCCEED();
    }

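    /* Map a physically contiguous range. The mapping granularity is first grown while the shared */
    /* virtual/physical alignment allows, then shrunk to cover the tail, so the largest possible */
    /* block sizes are used for the bulk of the range. */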
    Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Cache initial addresses for use on cleanup. */
        const KProcessAddress orig_virt_addr  = virt_addr;
        const KPhysicalAddress orig_phys_addr = phys_addr;

        size_t remaining_pages = num_pages;

        /* Map the pages, using a guard to ensure we don't leak. */
        {
            ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };

            if (num_pages < ContiguousPageSize / PageSize) {
                R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));
                remaining_pages -= num_pages;
                virt_addr += num_pages * PageSize;
                phys_addr += num_pages * PageSize;
            } else {
                /* Map the fractional part of the pages. */
                size_t alignment;
                for (alignment = ContiguousPageSize; (virt_addr & (alignment - 1)) == (phys_addr & (alignment - 1)); alignment = GetLargerAlignment(alignment)) {
                    /* Check if this would be our last map. */
                    const size_t pages_to_map = ((alignment - (virt_addr & (alignment - 1))) & (alignment - 1)) / PageSize;
                    if (pages_to_map + (alignment / PageSize) > remaining_pages) {
                        break;
                    }

                    /* Map pages, if we should. */
                    if (pages_to_map > 0) {
                        R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, disable_head_merge && virt_addr == orig_virt_addr, GetSmallerAlignment(alignment), page_list, reuse_ll));
                        remaining_pages -= pages_to_map;
                        virt_addr += pages_to_map * PageSize;
                        phys_addr += pages_to_map * PageSize;
                    }

                    /* Don't go further than L1 block. */
                    if (alignment == L1BlockSize) {
                        break;
                    }
                }

                while (remaining_pages > 0) {
                    /* Select the next smallest alignment. */
                    alignment = GetSmallerAlignment(alignment);
                    MESOSPHERE_ASSERT((virt_addr & (alignment - 1)) == 0);
                    MESOSPHERE_ASSERT((phys_addr & (alignment - 1)) == 0);

                    /* Map pages, if we should. */
                    const size_t pages_to_map = util::AlignDown(remaining_pages, alignment / PageSize);
                    if (pages_to_map > 0) {
                        R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, disable_head_merge && virt_addr == orig_virt_addr, alignment, page_list, reuse_ll));
                        remaining_pages -= pages_to_map;
                        virt_addr += pages_to_map * PageSize;
                        phys_addr += pages_to_map * PageSize;
                    }
                }
            }
        }

        /* Perform what coalescing we can. */
        this->MergePages(orig_virt_addr, page_list);
        if (num_pages > 1) {
            this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
        }

        /* Wait for pending stores to complete. */
        cpu::DataSynchronizationBarrierInnerShareableStore();

        /* Open references to the pages, if we should. */
        if (IsHeapPhysicalAddress(orig_phys_addr)) {
            Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
        }

        R_SUCCEED();
    }

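    /* Map a (possibly physically discontiguous) page group. For large groups, AlignedMemoryBlock */
    /* views of the virtual range and of each physical block are paired, lowering their shared */
    /* alignment until matching aligned runs can be carved out and mapped. */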
    Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* We want to maintain a new reference to every page in the group. */
        KScopedPageGroup spg(pg, not_first);

        /* Cache initial address for use on cleanup. */
        const KProcessAddress orig_virt_addr = virt_addr;

        size_t mapped_pages = 0;

        /* Map the pages, using a guard to ensure we don't leak. */
        {
            ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };

            if (num_pages < ContiguousPageSize / PageSize) {
                for (const auto &block : pg) {
                    const KPhysicalAddress block_phys_addr = block.GetAddress();
                    const size_t cur_pages = block.GetNumPages();
                    R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));

                    virt_addr    += cur_pages * PageSize;
                    mapped_pages += cur_pages;
                }
            } else {
                /* Create a block representing our virtual space. */
                AlignedMemoryBlock virt_block(GetInteger(virt_addr), num_pages, L1BlockSize);

                for (const auto &block : pg) {
                    /* Create a block representing this physical group, synchronize its alignment to our virtual block. */
                    const KPhysicalAddress block_phys_addr = block.GetAddress();
                    size_t cur_pages = block.GetNumPages();

                    AlignedMemoryBlock phys_block(GetInteger(block_phys_addr), cur_pages, virt_block.GetAlignment());
                    virt_block.SetAlignment(phys_block.GetAlignment());

                    while (cur_pages > 0) {
                        /* Find a physical region for us to map at. */
                        uintptr_t phys_choice = 0;
                        size_t phys_pages = 0;
                        phys_block.FindBlock(phys_choice, phys_pages);

                        /* If we didn't find a region, try decreasing our alignment. */
                        if (phys_pages == 0) {
                            const size_t next_alignment = KPageTable::GetSmallerAlignment(phys_block.GetAlignment());
                            MESOSPHERE_ASSERT(next_alignment >= PageSize);
                            phys_block.SetAlignment(next_alignment);
                            virt_block.SetAlignment(next_alignment);
                            continue;
                        }

                        /* Begin choosing virtual blocks to map at the region we chose. */
                        while (phys_pages > 0) {
                            /* Find a virtual region for us to map at. */
                            uintptr_t virt_choice = 0;
                            size_t virt_pages = phys_pages;
                            virt_block.FindBlock(virt_choice, virt_pages);

                            /* If we didn't find a region, try decreasing our alignment. */
                            if (virt_pages == 0) {
                                const size_t next_alignment = KPageTable::GetSmallerAlignment(virt_block.GetAlignment());
                                MESOSPHERE_ASSERT(next_alignment >= PageSize);
                                phys_block.SetAlignment(next_alignment);
                                virt_block.SetAlignment(next_alignment);
                                continue;
                            }

                            /* Map! */
                            R_TRY(this->Map(virt_choice, phys_choice, virt_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, virt_block.GetAlignment(), page_list, reuse_ll));

                            /* Advance. */
                            phys_choice  += virt_pages * PageSize;
                            phys_pages   -= virt_pages;
                            cur_pages    -= virt_pages;
                            mapped_pages += virt_pages;
                        }
                    }
                }
            }
        }
        MESOSPHERE_ASSERT(mapped_pages == num_pages);

        /* Perform what coalescing we can. */
        this->MergePages(orig_virt_addr, page_list);
        if (num_pages > 1) {
            this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
        }

        /* Wait for pending stores to complete. */
        cpu::DataSynchronizationBarrierInnerShareableStore();

        /* We succeeded! We want to persist the reference to the pages. */
        spg.CancelClose();

        R_SUCCEED();
    }

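    /* Attempt to coalesce the mapping containing virt_addr into progressively larger entries */
    /* (L3 blocks -> contiguous L3 -> L2 block -> contiguous L2 -> L1 block), freeing page tables */
    /* that become redundant. Returns whether any merge was performed. */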
    bool KPageTable::MergePages(KProcessAddress virt_addr, PageLinkedList *page_list) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        auto &impl = this->GetImpl();
        bool merged = false;

        /* If there's no L1 table, don't bother. */
        L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
        if (!l1_entry->IsTable()) {
            /* Ensure the table is not corrupted. */
            MESOSPHERE_ABORT_UNLESS(l1_entry->IsBlock() || l1_entry->IsEmpty());
            return merged;
        }

        /* Examine and try to merge the L2 table. */
        L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr);
        if (l2_entry->IsTable()) {
            /* We have an L3 entry. */
            L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr);
            if (!l3_entry->IsBlock()) {
                return merged;
            }

            /* If it's not contiguous, try to make it so. */
            if (!l3_entry->IsContiguous()) {
                virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize);
                const KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize);
                const u64 entry_template = l3_entry->GetEntryTemplateForMerge();

                /* Validate that we can merge. */
                for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
                    const L3PageTableEntry *check_entry = impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i);
                    if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L3BlockSize * i) | PageTableEntry::Type_L3Block)) {
                        return merged;
                    }
                    if (i > 0 && (check_entry->IsHeadOrHeadAndBodyMergeDisabled())) {
                        return merged;
                    }
                    if ((i < (L3ContiguousBlockSize / L3BlockSize) - 1) && check_entry->IsTailMergeDisabled()) {
                        return merged;
                    }
                }

                /* Determine the new software reserved bits. */
                const L3PageTableEntry *head_entry = impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * 0);
                const L3PageTableEntry *tail_entry = impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * ((L3ContiguousBlockSize / L3BlockSize) - 1));
                auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());

                /* Merge! */
                for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
                    *impl.GetL3Entry(l2_entry, virt_addr + L3BlockSize * i) = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + L3BlockSize * i, PageTableEntry(entry_template), sw_reserved_bits, true);
                    sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
                }

                /* Note that we updated. */
                this->NoteUpdated();
                merged = true;
            }

            /* We might be able to upgrade a contiguous set of L3 entries into an L2 block. */
            virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize);
            KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L2BlockSize);
            const u64 entry_template = l3_entry->GetEntryTemplateForMerge();

            /* Validate that we can merge. */
            for (size_t i = 0; i < L2BlockSize / L3ContiguousBlockSize; i++) {
                const L3PageTableEntry *check_entry = impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * i);
                if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L3ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L3Block)) {
                    return merged;
                }
                if (i > 0 && (check_entry->IsHeadOrHeadAndBodyMergeDisabled())) {
                    return merged;
                }
                if ((i < (L2BlockSize / L3ContiguousBlockSize) - 1) && check_entry->IsTailMergeDisabled()) {
                    return merged;
                }
            }

            /* Determine the new software reserved bits. */
            const L3PageTableEntry *head_entry = impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * 0);
            const L3PageTableEntry *tail_entry = impl.GetL3Entry(l2_entry, virt_addr + L3ContiguousBlockSize * ((L2BlockSize / L3ContiguousBlockSize) - 1));
            auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());

            /* Merge! */
            *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);

            /* Note that we updated. */
            this->NoteUpdated();
            merged = true;

            /* Free the L3 table. */
            KVirtualAddress l3_table = util::AlignDown(reinterpret_cast<uintptr_t>(l3_entry), PageSize);
            if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
                this->GetPageTableManager().Close(l3_table, L2BlockSize / L3BlockSize);
                ClearPageTable(l3_table);
                this->FreePageTable(page_list, l3_table);
            }
        }

        /* If the l2 entry is not a block or we can't make it contiguous, we're done. */
        if (!l2_entry->IsBlock()) {
            return merged;
        }

        /* If it's not contiguous, try to make it so. */
        if (!l2_entry->IsContiguous()) {
            virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
            KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize);
            const u64 entry_template = l2_entry->GetEntryTemplateForMerge();

            /* Validate that we can merge. */
            for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
                const L2PageTableEntry *check_entry = impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i);
                if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L2BlockSize * i) | PageTableEntry::Type_L2Block)) {
                    return merged;
                }
                if (i > 0 && (check_entry->IsHeadOrHeadAndBodyMergeDisabled())) {
                    return merged;
                }
                if ((i < (L2ContiguousBlockSize / L2BlockSize) - 1) && check_entry->IsTailMergeDisabled()) {
                    return merged;
                }
            }

            /* Determine the new software reserved bits. */
            const L2PageTableEntry *head_entry = impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * 0);
            const L2PageTableEntry *tail_entry = impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * ((L2ContiguousBlockSize / L2BlockSize) - 1));
            auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());

            /* Merge! */
            for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
                *impl.GetL2Entry(l1_entry, virt_addr + L2BlockSize * i) = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + L2BlockSize * i, PageTableEntry(entry_template), sw_reserved_bits, true);
                sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
            }

            /* Note that we updated. */
            this->NoteUpdated();
            merged = true;
        }

        /* We might be able to upgrade a contiguous set of L2 entries into an L1 block. */
        virt_addr = util::AlignDown(GetInteger(virt_addr), L1BlockSize);
        KPhysicalAddress phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L1BlockSize);
        const u64 entry_template = l2_entry->GetEntryTemplateForMerge();

        /* Validate that we can merge. */
        for (size_t i = 0; i < L1BlockSize / L2ContiguousBlockSize; i++) {
            const L2PageTableEntry *check_entry = impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * i);
            if (!check_entry->IsForMerge(entry_template | GetInteger(phys_addr + L2ContiguousBlockSize * i) | PageTableEntry::ContigType_Contiguous | PageTableEntry::Type_L2Block)) {
                return merged;
            }
            if (i > 0 && (check_entry->IsHeadOrHeadAndBodyMergeDisabled())) {
                return merged;
            }
            if ((i < (L1ContiguousBlockSize / L2ContiguousBlockSize) - 1) && check_entry->IsTailMergeDisabled()) {
                return merged;
            }
        }

        /* Determine the new software reserved bits. */
        const L2PageTableEntry *head_entry = impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * 0);
        const L2PageTableEntry *tail_entry = impl.GetL2Entry(l1_entry, virt_addr + L2ContiguousBlockSize * ((L1BlockSize / L2ContiguousBlockSize) - 1));
        auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());

        /* Merge! */
        *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);

        /* Note that we updated. */
        this->NoteUpdated();
        merged = true;

        /* Free the L2 table. */
        KVirtualAddress l2_table = util::AlignDown(reinterpret_cast<uintptr_t>(l2_entry), PageSize);
        if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
            this->GetPageTableManager().Close(l2_table, L1BlockSize / L2BlockSize);
            ClearPageTable(l2_table);
            this->FreePageTable(page_list, l2_table);
        }

        return merged;
    }

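    /* Split the mapping containing virt_addr into entries no larger than block_size, walking */
    /* downward (L1 block -> L2 table, contiguous L2 -> L2 blocks, L2 block -> L3 table, */
    /* contiguous L3 -> L3 pages) and allocating intermediate tables as needed. */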
Result KPageTable : : SeparatePagesImpl ( KProcessAddress virt_addr , size_t block_size , PageLinkedList * page_list , bool reuse_ll ) {
MESOSPHERE_ASSERT ( this - > IsLockedByCurrentThread ( ) ) ;
auto & impl = this - > GetImpl ( ) ;
/* First, try to separate an L1 block into contiguous L2 blocks. */
L1PageTableEntry * l1_entry = impl . GetL1Entry ( virt_addr ) ;
if ( l1_entry - > IsBlock ( ) ) {
/* If our block size is too big, don't bother. */
2020-03-08 08:06:23 +00:00
R_SUCCEED_IF ( block_size > = L1BlockSize ) ;
2020-02-18 09:04:44 +00:00
/* Get the addresses we're working with. */
const KProcessAddress block_virt_addr = util : : AlignDown ( GetInteger ( virt_addr ) , L1BlockSize ) ;
2020-12-01 15:22:05 +00:00
const KPhysicalAddress block_phys_addr = l1_entry - > GetBlock ( ) ;
2020-02-18 09:04:44 +00:00
/* Allocate a new page for the L2 table. */
const KVirtualAddress l2_table = this - > AllocatePageTable ( page_list , reuse_ll ) ;
R_UNLESS ( l2_table ! = Null < KVirtualAddress > , svc : : ResultOutOfResource ( ) ) ;
const KPhysicalAddress l2_phys = GetPageTablePhysicalAddress ( l2_table ) ;
/* Set the entries in the L2 table. */
for ( size_t i = 0 ; i < L1BlockSize / L2BlockSize ; i + + ) {
2020-12-01 12:14:58 +00:00
const u64 entry_template = l1_entry - > GetEntryTemplateForL2Block ( i ) ;
* ( impl . GetL2EntryFromTable ( l2_table , block_virt_addr + L2BlockSize * i ) ) = L2PageTableEntry ( PageTableEntry : : BlockTag { } , block_phys_addr + L2BlockSize * i , PageTableEntry ( entry_template ) , PageTableEntry : : SoftwareReservedBit_None , true ) ;
2020-02-18 09:04:44 +00:00
}
/* Open references to the L2 table. */
2020-07-24 15:07:34 +00:00
this - > GetPageTableManager ( ) . Open ( l2_table , L1BlockSize / L2BlockSize ) ;
            /* Replace the L1 entry with one to the new table. */
            PteDataMemoryBarrier();
            *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
            this->NoteUpdated();
        }

        /* If we don't have an l1 table, we're done. */
        MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable() || l1_entry->IsEmpty());
        R_SUCCEED_IF(!l1_entry->IsTable());

        /* We want to separate L2 contiguous blocks into L2 blocks, so check that our size permits that. */
        R_SUCCEED_IF(block_size >= L2ContiguousBlockSize);

        L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr);
        if (l2_entry->IsBlock()) {
            /* If we're contiguous, try to separate. */
            if (l2_entry->IsContiguous()) {
                const KProcessAddress  block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
                const KPhysicalAddress block_phys_addr = util::AlignDown(GetInteger(l2_entry->GetBlock()), L2ContiguousBlockSize);

                /* Mark the entries as non-contiguous. */
                for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
                    L2PageTableEntry *target = impl.GetL2Entry(l1_entry, block_virt_addr + L2BlockSize * i);
                    const u64 entry_template = target->GetEntryTemplateForL2Block(i);
                    *target = L2PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L2BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false);
                }
                this->NoteUpdated();
            }

            /* We want to separate L2 blocks into L3 contiguous blocks, so check that our size permits that. */
            R_SUCCEED_IF(block_size >= L2BlockSize);

            /* Get the addresses we're working with. */
            const KProcessAddress  block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2BlockSize);
            const KPhysicalAddress block_phys_addr = l2_entry->GetBlock();

            /* Allocate a new page for the L3 table. */
            const KVirtualAddress l3_table = this->AllocatePageTable(page_list, reuse_ll);
            R_UNLESS(l3_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
            const KPhysicalAddress l3_phys = GetPageTablePhysicalAddress(l3_table);

            /* Set the entries in the L3 table. */
            for (size_t i = 0; i < L2BlockSize / L3BlockSize; i++) {
                const u64 entry_template = l2_entry->GetEntryTemplateForL3Block(i);
                *(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, true);
            }

            /* Open references to the L3 table. */
            this->GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize);

            /* Replace the L2 entry with one to the new table. */
            PteDataMemoryBarrier();
            *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
            this->NoteUpdated();
        }

        /* If we don't have an L3 table, we're done. */
        MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable() || l2_entry->IsEmpty());
        R_SUCCEED_IF(!l2_entry->IsTable());

        /* We want to separate L3 contiguous blocks into L3 blocks, so check that our size permits that. */
        R_SUCCEED_IF(block_size >= L3ContiguousBlockSize);

        /* If we're contiguous, try to separate. */
        L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr);
        if (l3_entry->IsBlock() && l3_entry->IsContiguous()) {
            const KProcessAddress  block_virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize);
            const KPhysicalAddress block_phys_addr = util::AlignDown(GetInteger(l3_entry->GetBlock()), L3ContiguousBlockSize);

            /* Mark the entries as non-contiguous. */
            for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
                L3PageTableEntry *target = impl.GetL3Entry(l2_entry, block_virt_addr + L3BlockSize * i);
                const u64 entry_template = target->GetEntryTemplateForL3Block(i);
                *target = L3PageTableEntry(PageTableEntry::BlockTag{}, block_phys_addr + L3BlockSize * i, PageTableEntry(entry_template), PageTableEntry::SoftwareReservedBit_None, false);
            }
            this->NoteUpdated();
        }

        /* We're done! */
        R_SUCCEED();
    }

    Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* If we fail while separating, re-merge. */
        ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };

        /* Try to separate pages. */
        R_RETURN(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
    }

    Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll) {
        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

        /* Separate pages before we change permissions. */
        const size_t size = num_pages * PageSize;
        R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
        if (num_pages > 1) {
            const auto end_page  = virt_addr + size;
            const auto last_page = end_page - PageSize;

            ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };

            R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
        }
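
        /* Illustrative note: util::GetAlignment yields the largest power-of-two alignment of the given      */
        /* address, so the block_size passed to SeparatePages above is capped by both the boundary's natural */
        /* alignment and the total size. For example (hypothetical values), changing a 0x10000-byte range    */
        /* whose start address has 2MB alignment requests separation down to 0x10000 granularity, because    */
        /* std::min(0x200000, 0x10000) == 0x10000.                                                            */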

        /* ===================================================== */

        /* Define a helper function which will apply our template to entries. */
        enum ApplyOption : u32 {
            ApplyOption_None           = 0,
            ApplyOption_FlushDataCache = (1u << 0),
            ApplyOption_MergeMappings  = (1u << 1),
        };

        auto ApplyEntryTemplate = [this, virt_addr, disable_merge_attr, num_pages, page_list](PageTableEntry entry_template, u32 apply_option) -> void {
            /* Create work variables for us to use. */
            const KProcessAddress orig_virt_addr = virt_addr;
            const KProcessAddress end_virt_addr  = orig_virt_addr + (num_pages * PageSize);
            KProcessAddress cur_virt_addr        = virt_addr;
            size_t remaining_pages               = num_pages;

            auto &impl = this->GetImpl();

            /* Parse the disable merge attrs. */
            const bool attr_disable_head      = (disable_merge_attr & DisableMergeAttribute_DisableHead) != 0;
            const bool attr_disable_head_body = (disable_merge_attr & DisableMergeAttribute_DisableHeadAndBody) != 0;
            const bool attr_enable_head_body  = (disable_merge_attr & DisableMergeAttribute_EnableHeadAndBody) != 0;
            const bool attr_disable_tail      = (disable_merge_attr & DisableMergeAttribute_DisableTail) != 0;
            const bool attr_enable_tail       = (disable_merge_attr & DisableMergeAttribute_EnableTail) != 0;
            const bool attr_enable_and_merge  = (disable_merge_attr & DisableMergeAttribute_EnableAndMergeHeadBodyTail) != 0;
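
            /* Each mapped entry carries "disable merge" software bits for its head, head+body, and tail.    */
            /* The flags parsed above decide, for the first, middle, and last entries of this range, whether */
            /* those bits are set, cleared, or preserved as the template is applied below.                   */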

            /* Begin traversal. */
            TraversalContext context;
            TraversalEntry   next_entry;
            MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr));

            /* Continue changing properties until we've changed them for all pages. */
            bool cleared_disable_merge_bits = false;
            while (remaining_pages > 0) {
                MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
                MESOSPHERE_ABORT_UNLESS(next_entry.block_size <= remaining_pages * PageSize);

                /* Determine if we're at the start. */
                const bool is_start = (cur_virt_addr == orig_virt_addr);
                const bool is_end   = ((cur_virt_addr + next_entry.block_size) == end_virt_addr);

                /* Determine the relevant merge attributes. */
                bool disable_head_merge, disable_head_body_merge, disable_tail_merge;

                if (next_entry.IsHeadMergeDisabled()) {
                    disable_head_merge = true;
                } else if (attr_disable_head) {
                    disable_head_merge = is_start;
                } else {
                    disable_head_merge = false;
                }

                if (is_start) {
                    if (attr_disable_head_body) {
                        disable_head_body_merge = true;
                    } else if (attr_enable_head_body) {
                        disable_head_body_merge = false;
                    } else {
                        disable_head_body_merge = (!attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled());
                    }
                } else {
                    disable_head_body_merge     = (!attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled());
                    cleared_disable_merge_bits |= (attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled());
                }

                if (is_end) {
                    if (attr_disable_tail) {
                        disable_tail_merge = true;
                    } else if (attr_enable_tail) {
                        disable_tail_merge = false;
                    } else {
                        disable_tail_merge = (!attr_enable_and_merge && next_entry.IsTailMergeDisabled());
                    }
                } else {
                    disable_tail_merge          = (!attr_enable_and_merge && next_entry.IsTailMergeDisabled());
                    cleared_disable_merge_bits |= (attr_enable_and_merge && next_entry.IsTailMergeDisabled());
                }

                /* Encode the merge disable flags into the software reserved bits. */
                u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, disable_head_body_merge, disable_tail_merge);

                /* If we should flush entries, do so. */
                if ((apply_option & ApplyOption_FlushDataCache) != 0) {
                    if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
                        cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size);
                    }
                }

                /* Apply the entry template. */
                L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_virt_addr);
                switch (next_entry.block_size) {
                    case L1BlockSize:
                        {
                            /* Write the updated entry. */
                            *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr, entry_template, sw_reserved_bits, false);
                        }
                        break;
                    case L2ContiguousBlockSize:
                    case L2BlockSize:
                        {
                            /* Get the number of L2 blocks. */
                            const size_t num_l2_blocks = next_entry.block_size / L2BlockSize;

                            /* Get the L2 entry. */
                            KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
                            MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
                            const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);

                            /* Write the updated entry. */
                            const bool contig = next_entry.block_size == L2ContiguousBlockSize;
                            for (size_t i = 0; i < num_l2_blocks; i++) {
                                *impl.GetL2EntryFromTable(l2_virt, cur_virt_addr + L2BlockSize * i) = L2PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L2BlockSize * i, entry_template, sw_reserved_bits, contig);
                                sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
                            }
                        }
                        break;
                    case L3ContiguousBlockSize:
                    case L3BlockSize:
                        {
                            /* Get the number of L3 blocks. */
                            const size_t num_l3_blocks = next_entry.block_size / L3BlockSize;

                            /* Get the L2 entry. */
                            KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
                            MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
                            const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
                            L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, cur_virt_addr);

                            /* Get the L3 entry. */
                            KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
                            MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys));
                            const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys);

                            /* Write the updated entry. */
                            const bool contig = next_entry.block_size == L3ContiguousBlockSize;
                            for (size_t i = 0; i < num_l3_blocks; i++) {
                                *impl.GetL3EntryFromTable(l3_virt, cur_virt_addr + L3BlockSize * i) = L3PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + L3BlockSize * i, entry_template, sw_reserved_bits, contig);
                                sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
                            }
                        }
                        break;
                    MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
                }
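
                /* After writing the template, MergePages is only attempted when the entry just written     */
                /* completes a region aligned to the next larger block size and that region lies entirely   */
                /* within the range being changed, so merging can never disturb mappings outside the range. */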

                /* If our option asks us to, try to merge mappings. */
                bool merge = ((apply_option & ApplyOption_MergeMappings) != 0 || cleared_disable_merge_bits) && next_entry.block_size < L1BlockSize;
                if (merge) {
                    const size_t larger_align = GetLargerAlignment(next_entry.block_size);
                    if (util::IsAligned(GetInteger(cur_virt_addr) + next_entry.block_size, larger_align)) {
                        const uintptr_t aligned_start = util::AlignDown(GetInteger(cur_virt_addr), larger_align);
                        if (orig_virt_addr <= aligned_start && aligned_start + larger_align - 1 < GetInteger(orig_virt_addr) + (num_pages * PageSize) - 1) {
                            merge = this->MergePages(cur_virt_addr, page_list);
                        } else {
                            merge = false;
                        }
                    } else {
                        merge = false;
                    }
                }

                /* If we merged, correct the traversal to a sane state. */
                if (merge) {
                    /* NOTE: Nintendo does not verify the result of this BeginTraversal call. */
                    MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr));

                    /* The actual size needs to not take into account the portion of the block before our virtual address. */
                    const size_t actual_size = next_entry.block_size - (GetInteger(next_entry.phys_addr) & (next_entry.block_size - 1));
                    remaining_pages -= std::min(remaining_pages, actual_size / PageSize);
                    cur_virt_addr   += actual_size;
                } else {
                    /* If we didn't merge, just advance. */
                    remaining_pages -= next_entry.block_size / PageSize;
                    cur_virt_addr   += next_entry.block_size;
                }

                /* Continue our traversal. */
                if (remaining_pages == 0) {
                    break;
                }
                MESOSPHERE_ABORT_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
            }
        };

        /* ===================================================== */
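
        /* In the refresh path below, the template is first applied with the mapped bit cleared so that any */
        /* access to the range faults; the scheduler lock acquire/release that follows presumably acts as a */
        /* cross-core synchronization point before the final, mapped template is applied (assumption).      */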

        /* If we don't need to refresh the pages, we can just apply the mappings. */
        if (!refresh_mapping) {
            ApplyEntryTemplate(entry_template, ApplyOption_None);
            this->NoteUpdated();
        } else {
            /* We need to refresh the mappings. */
            /* First, apply the changes without the mapped bit. This will cause all entries to page fault if accessed. */
            {
                PageTableEntry unmapped_template = entry_template;
                unmapped_template.SetMapped(false);

                ApplyEntryTemplate(unmapped_template, ApplyOption_MergeMappings);
                this->NoteUpdated();
            }

            /* Next, take and immediately release the scheduler lock. This will force a reschedule. */
            {
                KScopedSchedulerLock sl;
            }

            /* Finally, apply the changes as directed, flushing the mappings before they're applied (if we should). */
            ApplyEntryTemplate(entry_template, flush_mapping ? ApplyOption_FlushDataCache : ApplyOption_None);
        }

        /* We've succeeded, now perform what coalescing we can. */
        this->MergePages(virt_addr, page_list);
        if (num_pages > 1) {
            this->MergePages(virt_addr + (num_pages - 1) * PageSize, page_list);
        }

        R_SUCCEED();
    }
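
    /* FinalizeUpdateImpl drains the page list accumulated during an update, freeing each queued page table */
    /* page back to the page table manager (each is expected to already have a reference count of zero).    */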
    void KPageTable::FinalizeUpdateImpl(PageLinkedList *page_list) {
        while (page_list->Peek()) {
            KVirtualAddress page = KVirtualAddress(page_list->Pop());
            MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
            MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
            this->GetPageTableManager().Free(page);
        }
    }

}