Compare commits

...

7 commits

23 changed files with 158 additions and 68 deletions

View file

@@ -109,6 +109,9 @@ namespace ams::kern::arch::arm64 {
KPageTableManager &GetPageTableManager() const { return *m_manager; }
private:
constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
/* Check that the property is not kernel execute. */
MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_KernelExecute) == 0);
/* Set basic attributes. */
PageTableEntry entry{PageTableEntry::ExtensionFlag_Valid};
entry.SetPrivilegedExecuteNever(true);
@@ -122,22 +125,24 @@ namespace ams::kern::arch::arm64 {
/* Set page attribute. */
if (properties.io) {
MESOSPHERE_ABORT_UNLESS(!properties.uncached);
MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0);
entry.SetPageAttribute(PageTableEntry::PageAttribute_Device_nGnRnE)
.SetUserExecuteNever(true);
} else if (properties.uncached) {
MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0);
entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable);
entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable)
.SetUserExecuteNever(true);
} else {
entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemory);
}
/* Set user execute never bit. */
if (properties.perm != KMemoryPermission_UserReadExecute) {
MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
entry.SetUserExecuteNever(true);
if ((properties.perm & KMemoryPermission_UserExecute) != 0) {
/* Check that the permission is either r--/--x or r--/r-x. */
MESOSPHERE_ABORT_UNLESS((properties.perm & ~ams::svc::MemoryPermission_Read) == (KMemoryPermission_KernelRead | KMemoryPermission_UserExecute));
} else {
entry.SetUserExecuteNever(true);
}
}
/* Set AP[1] based on perm. */
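Taken together, these changes reject kernel-executable templates up front, set user-execute-never on device and uncached mappings, and restrict user-executable mappings to the r--/--x and r--/r-x shapes. Condensed, the UXN decision now behaves like the following sketch (an illustration, not the verbatim kernel code):

    /* Sketch: UXN is set unless the mapping is user-executable; the io and */
    /* uncached branches abort if user-execute is requested, so overall: */
    const bool user_exec = (properties.perm & KMemoryPermission_UserExecute) != 0;
    entry.SetUserExecuteNever(!user_exec);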

View file

@@ -21,7 +21,18 @@ namespace ams::kern::arch::arm64 {
void UserspaceAccessFunctionAreaBegin();
class UserspaceAccess {
private:
static bool CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(void *dst, const void *src);
public:
static bool CopyMemoryFromUserSize32BitWithSupervisorAccess(void *dst, const void *src) {
/* Check that the address is within the valid userspace range. */
if (const uintptr_t src_uptr = reinterpret_cast<uintptr_t>(src); src_uptr < ams::svc::AddressNullGuard32Size || (src_uptr + sizeof(u32) - 1) >= ams::svc::AddressMemoryRegion39Size) {
return false;
}
return CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(dst, src);
}
static bool CopyMemoryFromUser(void *dst, const void *src, size_t size);
static bool CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size);
static bool CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size);
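The new wrapper bounds-checks the source pointer before invoking the assembly Impl below: src must lie above the 32-bit null guard, and the full u32 must fit below the end of the 39-bit address region. This matters because the Impl routine reads with supervisor permissions, so the usual EL0 permission checks do not apply.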

View file

@@ -24,4 +24,8 @@ namespace ams::kern {
constexpr inline size_t MainMemorySize = 4_GB;
constexpr inline size_t MainMemorySizeMax = 8_GB;
constexpr inline u32 MinimumMemoryManagerAlignmentShifts[] = {
0, 0, 0, 0
};
}
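This board-specific table supplies one minimum-alignment shift per memory pool (Pool_Count is four, per the static_assert in the initialization code later in this comparison); a shift of s means a granularity of 2^s pages. With all entries zero the minimum degenerates to a single page, so behavior is unchanged on current hardware. Roughly, with hypothetical local names:

    /* Hypothetical illustration of how a table entry becomes an alignment. */
    /* shift == 0 with 4 KB pages yields a minimum alignment of 0x1000 bytes. */
    const size_t min_align_pages = size_t(1) << MinimumMemoryManagerAlignmentShifts[pool];
    const size_t min_align_bytes = min_align_pages * PageSize;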

View file

@@ -164,6 +164,7 @@ namespace ams::kern {
size_t m_num_managers;
u64 m_optimized_process_ids[Pool_Count];
bool m_has_optimized_process[Pool_Count];
s32 m_min_heap_indexes[Pool_Count];
private:
Impl &GetManager(KPhysicalAddress address) {
return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()];
@@ -188,12 +189,12 @@ namespace ams::kern {
Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index);
public:
KMemoryManager()
: m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process()
: m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process(), m_min_heap_indexes()
{
/* ... */
}
NOINLINE void Initialize(KVirtualAddress management_region, size_t management_region_size);
NOINLINE void Initialize(KVirtualAddress management_region, size_t management_region_size, const u32 *min_align_shifts);
NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool);
NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
@@ -299,6 +300,10 @@ namespace ams::kern {
manager->DumpFreeList();
}
}
size_t GetMinimumAlignment(Pool pool) {
return KPageHeap::GetBlockSize(m_min_heap_indexes[pool]);
}
public:
static size_t CalculateManagementOverheadSize(size_t region_size) {
return Impl::CalculateManagementOverheadSize(region_size);
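GetMinimumAlignment converts the stored per-pool heap index back into a byte size via KPageHeap::GetBlockSize; the SVC handlers later in this comparison use it to validate caller-supplied addresses and sizes.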

View file

@@ -100,6 +100,8 @@ namespace ams::kern::arch::arm64 {
u32 insn_value = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc), sizeof(insn_value))) {
insn = insn_value;
} else if (KTargetSystem::IsDebugMode() && (context->pc & 3) == 0 && UserspaceAccess::CopyMemoryFromUserSize32BitWithSupervisorAccess(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc))) {
insn = insn_value;
} else {
insn = 0;
}
@@ -112,33 +114,6 @@ namespace ams::kern::arch::arm64 {
bool should_process_user_exception = KTargetSystem::IsUserExceptionHandlersEnabled();
const u64 ec = (esr >> 26) & 0x3F;
switch (ec) {
case EsrEc_Unknown:
case EsrEc_IllegalExecution:
case EsrEc_Svc32:
case EsrEc_Svc64:
case EsrEc_PcAlignmentFault:
case EsrEc_SpAlignmentFault:
case EsrEc_SErrorInterrupt:
case EsrEc_BreakPointEl0:
case EsrEc_SoftwareStepEl0:
case EsrEc_WatchPointEl0:
case EsrEc_BkptInstruction:
case EsrEc_BrkInstruction:
break;
default:
{
/* If the fault address's state is KMemoryState_Code and the user can't read the address, force processing exception. */
KMemoryInfo info;
ams::svc::PageInfo pi;
if (R_SUCCEEDED(cur_process.GetPageTable().QueryInfo(std::addressof(info), std::addressof(pi), far))) {
if (info.GetState() == KMemoryState_Code && ((info.GetPermission() & KMemoryPermission_UserRead) != KMemoryPermission_UserRead)) {
should_process_user_exception = true;
}
}
}
break;
}
/* In the event that we return from this exception, we want SPSR.SS set so that we advance an instruction if single-stepping. */
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
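Two related changes to this exception handler: when the ordinary user copy of the faulting instruction fails (for example, on an execute-only code page that EL0 cannot read), a debug-mode-only fallback re-reads it with supervisor access, provided pc is 4-byte aligned. The switch that previously forced user exception processing for code pages lacking user read is removed, presumably superseded by that fallback.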

View file

@@ -21,6 +21,15 @@ namespace ams::kern::arch::arm64 {
void UserModeThreadStarter();
void SupervisorModeThreadStarter();
void InvokeSupervisorModeThread(uintptr_t argument, uintptr_t entrypoint) {
/* Invoke the function. */
using SupervisorModeFunctionType = void (*)(uintptr_t);
reinterpret_cast<SupervisorModeFunctionType>(entrypoint)(argument);
/* Wait forever. */
AMS_INFINITE_LOOP();
}
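InvokeSupervisorModeThread gives supervisor-mode threads a C++ entry shim: it calls the entrypoint with its argument and then spins forever, so a supervisor thread that returns can no longer fall through into whatever follows. The assembly starter later in this comparison is updated to branch here instead of jumping to the entrypoint directly.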
void OnThreadStart() {
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
/* Send KDebug event for this thread's creation. */

View file

@@ -154,6 +154,21 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv:
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(void *dst, const void *src) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv
.type _ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv:
/* Just load and store a u32. */
/* NOTE: This is done with supervisor access permissions. */
ldr w2, [x1]
str w2, [x0]
/* We're done. */
mov x0, #1
ret
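Unlike the neighboring UserspaceAccess copy routines, which perform their accesses with unprivileged ldtr/sttr instructions so that EL0 permissions apply, this Impl uses a plain ldr and therefore bypasses user permission checks entirely; that is why the C++ wrapper shown earlier validates the address range first.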
/* ams::kern::arch::arm64::UserspaceAccess::CopyStringFromUser(void *dst, const void *src, size_t size) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm

View file

@@ -35,7 +35,7 @@ namespace ams::kern {
}
void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size, const u32 *min_align_shifts) {
/* Clear the management region to zero. */
const KVirtualAddress management_region_end = management_region + management_region_size;
std::memset(GetVoidPointer(management_region), 0, management_region_size);
@@ -154,6 +154,17 @@ namespace ams::kern {
for (size_t i = 0; i < m_num_managers; ++i) {
m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
}
/* Determine the min heap size for all pools. */
for (size_t i = 0; i < Pool_Count; ++i) {
/* Determine the min alignment for the pool in pages. */
const size_t min_align_pages = 1 << min_align_shifts[i];
/* Determine a heap index. */
if (const auto heap_index = KPageHeap::GetAlignedBlockIndex(min_align_pages, min_align_pages); heap_index >= 0) {
m_min_heap_indexes[i] = heap_index;
}
}
}
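Initialize now records, per pool, the smallest page-heap block index that satisfies the board's minimum alignment; KPageHeap::GetAlignedBlockIndex(min_align_pages, min_align_pages) is presumably asked for a block at least that large and that aligned. With the all-zero NX table this resolves to the smallest block, so the stored indexes are zero on current hardware.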
Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
@@ -192,8 +203,19 @@ namespace ams::kern {
return Null<KPhysicalAddress>;
}
/* Lock the pool that we're allocating from. */
/* Determine the pool and direction we're allocating from. */
const auto [pool, dir] = DecodeOption(option);
/* Check that we're allocating a correctly aligned number of pages. */
const size_t min_align_pages = KPageHeap::GetBlockNumPages(m_min_heap_indexes[pool]);
if (!util::IsAligned(num_pages, min_align_pages)) {
return Null<KPhysicalAddress>;
}
/* Update our alignment. */
align_pages = std::max(align_pages, min_align_pages);
/* Lock the pool that we're allocating from. */
KScopedLightLock lk(m_pool_locks[pool]);
/* Choose a heap based on our page size request. */
@@ -226,6 +248,13 @@ namespace ams::kern {
}
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index) {
/* Check that we're allocating a correctly aligned number of pages. */
const size_t min_align_pages = KPageHeap::GetBlockNumPages(m_min_heap_indexes[pool]);
R_UNLESS(util::IsAligned(num_pages, min_align_pages), svc::ResultInvalidSize());
/* Adjust our min heap index to the pool minimum if needed. */
min_heap_index = std::max(min_heap_index, m_min_heap_indexes[pool]);
/* Choose a heap based on our page size request. */
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
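Both allocation paths now enforce the pool minimum: AllocateAndOpen returns Null for page counts that are not multiples of the minimum block and raises the requested alignment to at least that block, while AllocatePageGroupImpl returns svc::ResultInvalidSize for misaligned counts and clamps min_heap_index up to the pool minimum.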

View file

@@ -1787,6 +1787,11 @@ namespace ams::kern {
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* If we're creating an executable mapping, take and immediately release the scheduler lock. This will force a reschedule. */
if (is_x) {
KScopedSchedulerLock sl;
}
/* Perform mapping operation. */
const KPageProperties properties = { new_perm, false, false, DisableMergeAttribute_None };
const auto operation = was_x ? OperationType_ChangePermissionsAndRefreshAndFlush : OperationType_ChangePermissions;
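The scoped lock above is acquired and immediately destroyed; its only effect is the reschedule triggered on release. This appears to act as a synchronization point so that every core passes through the scheduler before the newly executable mapping can be used.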
@@ -3829,15 +3834,15 @@ namespace ams::kern {
switch (dst_state) {
case KMemoryState_Ipc:
test_state = KMemoryState_FlagCanUseIpc;
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked;
test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_IpcLocked));
break;
case KMemoryState_NonSecureIpc:
test_state = KMemoryState_FlagCanUseNonSecureIpc;
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_IpcLocked));
break;
case KMemoryState_NonDeviceIpc:
test_state = KMemoryState_FlagCanUseNonDeviceIpc;
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_IpcLocked));
break;
default:
R_THROW(svc::ResultInvalidCombination());
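The attribute masks for IPC mapping are now written subtractively: rather than naming the attributes that must be clear, each case requires every attribute clear except an explicit allow-list (IpcLocked and the new PermissionLocked, plus DeviceShared for the non-secure and non-device cases). Attributes added in the future are therefore disallowed by default.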

View file

@@ -924,7 +924,6 @@ namespace ams::kern {
MESOSPHERE_ABORT_UNLESS(m_main_thread_stack_size == 0);
/* Ensure that we're allocating a valid stack. */
stack_size = util::AlignUp(stack_size, PageSize);
R_UNLESS(stack_size + m_code_size <= m_max_process_memory, svc::ResultOutOfMemory());
R_UNLESS(stack_size + m_code_size >= m_code_size, svc::ResultOutOfMemory());

View file

@@ -56,8 +56,9 @@ namespace ams::kern {
{
const auto &management_region = KMemoryLayout::GetPoolManagementRegion();
MESOSPHERE_ABORT_UNLESS(management_region.GetEndAddress() != 0);
static_assert(util::size(MinimumMemoryManagerAlignmentShifts) == KMemoryManager::Pool_Count);
Kernel::GetMemoryManager().Initialize(management_region.GetAddress(), management_region.GetSize());
Kernel::GetMemoryManager().Initialize(management_region.GetAddress(), management_region.GetSize(), MinimumMemoryManagerAlignmentShifts);
}
/* Copy the Initial Process Binary to safe memory. */

View file

@@ -48,10 +48,11 @@ namespace ams::kern::svc {
Result MapPhysicalMemory(uintptr_t address, size_t size) {
/* Validate address / size. */
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
const size_t min_alignment = Kernel::GetMemoryManager().GetMinimumAlignment(GetCurrentProcess().GetMemoryPool());
R_UNLESS(util::IsAligned(address, min_alignment), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, min_alignment), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
/* Verify that the process has system resource. */
auto &process = GetCurrentProcess();
@@ -69,10 +70,11 @@ namespace ams::kern::svc {
Result UnmapPhysicalMemory(uintptr_t address, size_t size) {
/* Validate address / size. */
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
const size_t min_alignment = Kernel::GetMemoryManager().GetMinimumAlignment(GetCurrentProcess().GetMemoryPool());
R_UNLESS(util::IsAligned(address, min_alignment), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, min_alignment), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
/* Verify that the process has system resource. */
auto &process = GetCurrentProcess();
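MapPhysicalMemory and UnmapPhysicalMemory now validate against the pool's minimum alignment instead of a bare PageSize. On current NX hardware the minimum is one page (the shift table shown earlier is all zeros), so existing callers see identical behavior; on a board with larger minimum blocks, both address and size must be multiples of GetMinimumAlignment.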

View file

@@ -296,7 +296,9 @@ namespace ams::kern::svc {
Result StartProcess(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) {
/* Validate stack size. */
R_UNLESS(main_thread_stack_size == static_cast<size_t>(main_thread_stack_size), svc::ResultOutOfMemory());
const uint64_t aligned_stack_size = util::AlignUp(main_thread_stack_size, Kernel::GetMemoryManager().GetMinimumAlignment(GetCurrentProcess().GetMemoryPool()));
R_UNLESS(aligned_stack_size >= main_thread_stack_size, svc::ResultOutOfMemory());
R_UNLESS(aligned_stack_size == static_cast<size_t>(aligned_stack_size), svc::ResultOutOfMemory());
/* Get the target process. */
KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject<KProcess>(process_handle);
@@ -314,7 +316,7 @@ namespace ams::kern::svc {
process->SetIdealCoreId(core_id);
/* Run the process. */
R_RETURN(process->Run(priority, static_cast<size_t>(main_thread_stack_size)));
R_RETURN(process->Run(priority, static_cast<size_t>(aligned_stack_size)));
}
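StartProcess now rounds the requested main-thread stack size up to the pool's minimum alignment before the size checks, and passes the aligned value to KProcess::Run; the aligned_stack_size >= main_thread_stack_size check guards against overflow in the AlignUp, and the static_cast check ensures the result is representable as size_t.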
Result TerminateProcess(ams::svc::Handle process_handle) {

View file

@@ -27,6 +27,7 @@ namespace ams::kern::svc {
case ams::svc::MemoryPermission_Read:
case ams::svc::MemoryPermission_ReadWrite:
case ams::svc::MemoryPermission_ReadExecute:
case ams::svc::MemoryPermission_Execute:
return true;
default:
return false;
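ams::svc::MemoryPermission_Execute (--x) is now accepted by this permission validator; combined with the page-table template change shown earlier, this is what makes execute-only user mappings expressible through the SVC interface.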

View file

@@ -226,6 +226,7 @@ namespace ams::ldr {
MetaFlag_OptimizeMemoryAllocation = (1 << 4),
MetaFlag_DisableDeviceAddressSpaceMerge = (1 << 5),
MetaFlag_EnableAliasRegionExtraSize = (1 << 6),
MetaFlag_PreventCodeReads = (1 << 7),
};
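MetaFlag_PreventCodeReads occupies the next free NPDM meta flag bit; the loader changes later in this comparison consult it to map module .text segments execute-only instead of read-execute.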
enum AddressSpaceType {

View file

@@ -135,14 +135,15 @@ namespace ams::ro {
class NroHeader {
public:
static constexpr u32 Magic = util::FourCC<'N','R','O','0'>::Code;
static constexpr u32 FlagAlignedHeader = 1;
private:
u32 m_entrypoint_insn;
u32 m_mod_offset;
u8 m_reserved_08[0x8];
u32 m_magic;
u8 m_reserved_14[0x4];
u32 m_version;
u32 m_size;
u8 m_reserved_1C[0x4];
u32 m_flags;
u32 m_text_offset;
u32 m_text_size;
u32 m_ro_offset;
@@ -158,10 +159,22 @@ namespace ams::ro {
return m_magic == Magic;
}
u32 GetVersion() const {
return m_version;
}
u32 GetSize() const {
return m_size;
}
u32 GetFlags() const {
return m_flags;
}
bool IsAlignedHeader() const {
return m_flags & FlagAlignedHeader;
}
u32 GetTextOffset() const {
return m_text_offset;
}
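The NRO header repurposes two previously reserved words: offset 0x14 becomes a version field and offset 0x1C a flags word, with bit 0 (FlagAlignedHeader) indicating that the header occupies its own page. SetNroPerms, later in this comparison, uses that flag to start the executable segment at page 1 and map the header page read-only.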

View file

@@ -27,6 +27,7 @@ namespace ams::os::impl {
case os::MemoryPermission_ReadOnly: return svc::MemoryPermission_Read;
case os::MemoryPermission_ReadWrite: return svc::MemoryPermission_ReadWrite;
case os::MemoryPermission_ReadExecute: return svc::MemoryPermission_ReadExecute;
case os::MemoryPermission_ExecuteOnly: return svc::MemoryPermission_Execute;
AMS_UNREACHABLE_DEFAULT_CASE();
}
}
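os::MemoryPermission_ExecuteOnly now lowers to svc::MemoryPermission_Execute, so the userland os:: wrappers can request the execute-only mappings the kernel newly accepts.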

View file

@@ -444,6 +444,7 @@ _ZN3ams4kern4arch5arm6430EL1SynchronousExceptionHandlerEv:
ERET_WITH_SPECULATION_BARRIER
2: /* The exception wasn't triggered by copying memory from userspace. */
/* NOTE: The following is, as of 19.0.0, now ifdef'd out on NX non-debug kernel. */
ldr x0, [sp, #8]
ldr x1, [sp, #16]

View file

@@ -76,6 +76,9 @@ _ZN3ams4kern4arch5arm6427SupervisorModeThreadStarterEv:
/* v */
/* | u64 argument | u64 entrypoint | KThread::StackParameters (size 0x30) | */
/* Clear the link register. */
mov x30, #0
/* Load the argument and entrypoint. */
ldp x0, x1, [sp], #0x10
@@ -84,4 +87,6 @@ _ZN3ams4kern4arch5arm6427SupervisorModeThreadStarterEv:
/* Mask I bit in DAIF */
msr daifclr, #2
br x1
/* Invoke the function (by calling ams::kern::arch::arm64::InvokeSupervisorModeThread(argument, entrypoint)). */
b _ZN3ams4kern4arch5arm6426InvokeSupervisorModeThreadEmm
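Rather than branching directly to the entrypoint, the starter now tail-branches into the C++ shim; the mangled symbol decodes to ams::kern::arch::arm64::InvokeSupervisorModeThread(unsigned long, unsigned long), matching the definition added earlier, which invokes the entrypoint and then loops forever.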

View file

@@ -555,7 +555,7 @@ namespace ams::ldr {
R_SUCCEED();
}
Result LoadAutoLoadModule(os::NativeHandle process_handle, fs::FileHandle file, const NsoHeader *nso_header, uintptr_t nso_address, size_t nso_size) {
Result LoadAutoLoadModule(os::NativeHandle process_handle, fs::FileHandle file, const NsoHeader *nso_header, uintptr_t nso_address, size_t nso_size, bool prevent_code_reads) {
/* Map and read data from file. */
{
/* Map the process memory. */
@@ -594,7 +594,7 @@ namespace ams::ldr {
const size_t ro_size = util::AlignUp(nso_header->ro_size, os::MemoryPageSize);
const size_t rw_size = util::AlignUp(nso_header->rw_size + nso_header->bss_size, os::MemoryPageSize);
if (text_size) {
R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->text_dst_offset, text_size, os::MemoryPermission_ReadExecute));
R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->text_dst_offset, text_size, prevent_code_reads ? os::MemoryPermission_ExecuteOnly : os::MemoryPermission_ReadExecute));
}
if (ro_size) {
R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->ro_dst_offset, ro_size, os::MemoryPermission_ReadOnly));
@@ -606,7 +606,7 @@ namespace ams::ldr {
R_SUCCEED();
}
Result LoadAutoLoadModules(const ProcessInfo *process_info, const NsoHeader *nso_headers, const bool *has_nso, const ArgumentStore::Entry *argument) {
Result LoadAutoLoadModules(const ProcessInfo *process_info, const NsoHeader *nso_headers, const bool *has_nso, const ArgumentStore::Entry *argument, bool prevent_code_reads) {
/* Load each NSO. */
for (size_t i = 0; i < Nso_Count; i++) {
if (has_nso[i]) {
@@ -614,7 +614,7 @@ namespace ams::ldr {
R_TRY(fs::OpenFile(std::addressof(file), GetNsoPath(i), fs::OpenMode_Read));
ON_SCOPE_EXIT { fs::CloseFile(file); };
R_TRY(LoadAutoLoadModule(process_info->process_handle, file, nso_headers + i, process_info->nso_address[i], process_info->nso_size[i]));
R_TRY(LoadAutoLoadModule(process_info->process_handle, file, nso_headers + i, process_info->nso_address[i], process_info->nso_size[i], prevent_code_reads));
}
}
@@ -658,7 +658,7 @@ namespace ams::ldr {
ON_RESULT_FAILURE { svc::CloseHandle(process_handle); };
/* Load all auto load modules. */
R_RETURN(LoadAutoLoadModules(out, nso_headers, has_nso, argument));
R_RETURN(LoadAutoLoadModules(out, nso_headers, has_nso, argument, (meta->npdm->flags & ldr::Npdm::MetaFlag_PreventCodeReads) != 0));
}
}
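The PreventCodeReads decision is made once, from the NPDM meta flags at process creation, and threaded as a bool through LoadAutoLoadModules into LoadAutoLoadModule, where it selects MemoryPermission_ExecuteOnly over MemoryPermission_ReadExecute for each module's .text.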

View file

@@ -51,11 +51,15 @@ namespace ams::ro::impl {
R_SUCCEED();
}
Result SetNroPerms(os::NativeHandle process_handle, u64 base_address, u64 rx_size, u64 ro_size, u64 rw_size) {
const u64 rx_offset = 0;
Result SetNroPerms(os::NativeHandle process_handle, u64 base_address, u64 rx_size, u64 ro_size, u64 rw_size, bool is_aligned_header) {
const u64 rx_offset = is_aligned_header ? os::MemoryPageSize : 0;
const u64 ro_offset = rx_offset + rx_size;
const u64 rw_offset = ro_offset + ro_size;
if (is_aligned_header) {
R_TRY(os::SetProcessMemoryPermission(process_handle, base_address, os::MemoryPageSize, os::MemoryPermission_ReadOnly));
}
R_TRY(os::SetProcessMemoryPermission(process_handle, base_address + rx_offset, rx_size, os::MemoryPermission_ReadExecute));
R_TRY(os::SetProcessMemoryPermission(process_handle, base_address + ro_offset, ro_size, os::MemoryPermission_ReadOnly));
R_TRY(os::SetProcessMemoryPermission(process_handle, base_address + rw_offset, rw_size, os::MemoryPermission_ReadWrite));
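With an aligned header, the first page of the mapped NRO contains only the header: it is mapped read-only, and the rx/ro/rw offsets all shift up by one page. Without the flag the layout is unchanged (rx_offset stays 0).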

View file

@@ -21,7 +21,7 @@ namespace ams::ro::impl {
/* Utilities for working with NROs. */
Result MapNro(u64 *out_base_address, os::NativeHandle process_handle, u64 nro_heap_address, u64 nro_heap_size, u64 bss_heap_address, u64 bss_heap_size);
Result SetNroPerms(os::NativeHandle process_handle, u64 base_address, u64 rx_size, u64 ro_size, u64 rw_size);
Result SetNroPerms(os::NativeHandle process_handle, u64 base_address, u64 rx_size, u64 ro_size, u64 rw_size, bool is_aligned_header);
Result UnmapNro(os::NativeHandle process_handle, u64 base_address, u64 nro_heap_address, u64 nro_heap_size, u64 bss_heap_address, u64 bss_heap_size);
}

View file

@@ -247,7 +247,7 @@ namespace ams::ro::impl {
R_THROW(ro::ResultNotAuthorized());
}
Result ValidateNro(ModuleId *out_module_id, u64 *out_rx_size, u64 *out_ro_size, u64 *out_rw_size, u64 base_address, u64 expected_nro_size, u64 expected_bss_size) {
Result ValidateNro(ModuleId *out_module_id, u64 *out_rx_size, u64 *out_ro_size, u64 *out_rw_size, bool *out_aligned_header, u64 base_address, u64 expected_nro_size, u64 expected_bss_size) {
/* Map the NRO. */
void *mapped_memory = nullptr;
R_TRY_CATCH(os::MapProcessMemory(std::addressof(mapped_memory), m_process_handle, base_address, expected_nro_size, ro::impl::GenerateSecureRandom)) {
@@ -306,6 +306,7 @@ namespace ams::ro::impl {
*out_rx_size = text_size;
*out_ro_size = ro_size;
*out_rw_size = rw_size;
*out_aligned_header = header->IsAlignedHeader();
R_SUCCEED();
}
@@ -557,10 +558,11 @@ namespace ams::ro::impl {
/* Validate the NRO (parsing region extents). */
u64 rx_size = 0, ro_size = 0, rw_size = 0;
R_TRY(context->ValidateNro(std::addressof(nro_info->module_id), std::addressof(rx_size), std::addressof(ro_size), std::addressof(rw_size), nro_info->base_address, nro_size, bss_size));
bool aligned_header = false;
R_TRY(context->ValidateNro(std::addressof(nro_info->module_id), std::addressof(rx_size), std::addressof(ro_size), std::addressof(rw_size), std::addressof(aligned_header), nro_info->base_address, nro_size, bss_size));
/* Set NRO perms. */
R_TRY(SetNroPerms(context->GetProcessHandle(), nro_info->base_address, rx_size, ro_size, rw_size + bss_size));
R_TRY(SetNroPerms(context->GetProcessHandle(), nro_info->base_address, rx_size, ro_size, rw_size + bss_size, aligned_header));
context->SetNroInfoInUse(nro_info, true);
nro_info->code_size = rx_size + ro_size;