kern/ldr: add support for --x executables

Michael Scire 2024-10-09 17:42:02 -07:00
parent dfff4508fa
commit 12f7c95c5d
9 changed files with 54 additions and 40 deletions


@@ -109,6 +109,9 @@ namespace ams::kern::arch::arm64 {
             KPageTableManager &GetPageTableManager() const { return *m_manager; }
         private:
             constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
+                /* Check that the property is not kernel execute. */
+                MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_KernelExecute) == 0);
+
                 /* Set basic attributes. */
                 PageTableEntry entry{PageTableEntry::ExtensionFlag_Valid};
                 entry.SetPrivilegedExecuteNever(true);
@@ -122,22 +125,24 @@ namespace ams::kern::arch::arm64 {
                 /* Set page attribute. */
                 if (properties.io) {
                     MESOSPHERE_ABORT_UNLESS(!properties.uncached);
-                    MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
+                    MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0);
                     entry.SetPageAttribute(PageTableEntry::PageAttribute_Device_nGnRnE)
                          .SetUserExecuteNever(true);
                 } else if (properties.uncached) {
-                    MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
+                    MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0);
-                    entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable);
+                    entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable)
+                         .SetUserExecuteNever(true);
                 } else {
                     entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemory);
                 }
                 /* Set user execute never bit. */
-                if (properties.perm != KMemoryPermission_UserReadExecute) {
-                    MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
-                    entry.SetUserExecuteNever(true);
-                }
+                if ((properties.perm & KMemoryPermission_UserExecute) != 0) {
+                    /* Check that the permission is either r--/--x or r--/r-x. */
+                    MESOSPHERE_ABORT_UNLESS((properties.perm & ~ams::svc::MemoryPermission_Read) == (KMemoryPermission_KernelRead | KMemoryPermission_UserExecute));
+                } else {
+                    entry.SetUserExecuteNever(true);
+                }
                 /* Set AP[1] based on perm. */
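
For orientation, here is a minimal standalone sketch of the user-execute-never decision the new template encodes. The flag values and the helper name below are illustrative assumptions, not the kernel's real definitions: a user-executable mapping must be exactly r--/--x or r--/r-x (kernel-readable, optionally user-readable, never writable), and every other mapping gets UXN set.

    #include <cassert>
    #include <cstdint>

    /* Illustrative stand-ins for the kernel's permission flags (values are assumptions). */
    enum PermFlag : uint32_t {
        Perm_KernelRead  = (1u << 0),
        Perm_UserRead    = (1u << 1),
        Perm_UserWrite   = (1u << 2),
        Perm_UserExecute = (1u << 3),
    };

    /* Hypothetical helper: should a page table entry for `perm` set UXN (user execute never)? */
    bool ShouldSetUserExecuteNever(uint32_t perm) {
        if ((perm & Perm_UserExecute) != 0) {
            /* Only r--/--x and r--/r-x are acceptable user-executable permissions. */
            assert((perm & ~Perm_UserRead) == (Perm_KernelRead | Perm_UserExecute));
            return false;
        }
        return true;
    }

    int main() {
        assert(!ShouldSetUserExecuteNever(Perm_KernelRead | Perm_UserExecute));                 /* r--/--x */
        assert(!ShouldSetUserExecuteNever(Perm_KernelRead | Perm_UserRead | Perm_UserExecute)); /* r--/r-x */
        assert( ShouldSetUserExecuteNever(Perm_KernelRead | Perm_UserRead));                    /* r--/r-- */
        return 0;
    }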


@@ -21,7 +21,18 @@ namespace ams::kern::arch::arm64 {
     void UserspaceAccessFunctionAreaBegin();
 
     class UserspaceAccess {
+        private:
+            static bool CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(void *dst, const void *src);
         public:
+            static bool CopyMemoryFromUserSize32BitWithSupervisorAccess(void *dst, const void *src) {
+                /* Check that the address is within the valid userspace range. */
+                if (const uintptr_t src_uptr = reinterpret_cast<uintptr_t>(src); src_uptr < ams::svc::AddressNullGuard32Size || (src_uptr + sizeof(u32) - 1) >= ams::svc::AddressMemoryRegion39Size) {
+                    return false;
+                }
+
+                return CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(dst, src);
+            }
+
             static bool CopyMemoryFromUser(void *dst, const void *src, size_t size);
             static bool CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size);
             static bool CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size);
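
A minimal sketch of the range guard the new wrapper performs before falling through to the privileged copy. The two bound constants are placeholders standing in for ams::svc::AddressNullGuard32Size and ams::svc::AddressMemoryRegion39Size; their real values are not reproduced here.

    #include <cstdint>

    /* Placeholder bounds (assumed); the real limits come from ams::svc. */
    constexpr uint64_t NullGuardSize      = UINT64_C(0x80000000); /* assumed start of valid userspace */
    constexpr uint64_t AddressSpace39Size = UINT64_C(1) << 39;    /* assumed end of the 39-bit address space */

    /* Accept only sources wholly contained in the valid userspace window, mirroring the
     * check in CopyMemoryFromUserSize32BitWithSupervisorAccess above. */
    bool IsValidUserSourceForU32Read(const void *src) {
        const uint64_t s = reinterpret_cast<uintptr_t>(src);
        return s >= NullGuardSize && (s + sizeof(uint32_t) - 1) < AddressSpace39Size;
    }

The guard matters because the Impl routine reads with supervisor permissions: the wrapper has to refuse pointers outside the userspace window itself, rather than relying on the permission fault an unprivileged access would raise.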


@@ -100,6 +100,8 @@ namespace ams::kern::arch::arm64 {
             u32 insn_value = 0;
             if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc), sizeof(insn_value))) {
                 insn = insn_value;
+            } else if (KTargetSystem::IsDebugMode() && (context->pc & 3) == 0 && UserspaceAccess::CopyMemoryFromUserSize32BitWithSupervisorAccess(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc))) {
+                insn = insn_value;
             } else {
                 insn = 0;
             }
@@ -112,33 +114,6 @@ namespace ams::kern::arch::arm64 {
             bool should_process_user_exception = KTargetSystem::IsUserExceptionHandlersEnabled();
 
             const u64 ec = (esr >> 26) & 0x3F;
-            switch (ec) {
-                case EsrEc_Unknown:
-                case EsrEc_IllegalExecution:
-                case EsrEc_Svc32:
-                case EsrEc_Svc64:
-                case EsrEc_PcAlignmentFault:
-                case EsrEc_SpAlignmentFault:
-                case EsrEc_SErrorInterrupt:
-                case EsrEc_BreakPointEl0:
-                case EsrEc_SoftwareStepEl0:
-                case EsrEc_WatchPointEl0:
-                case EsrEc_BkptInstruction:
-                case EsrEc_BrkInstruction:
-                    break;
-                default:
-                    {
-                        /* If the fault address's state is KMemoryState_Code and the user can't read the address, force processing exception. */
-                        KMemoryInfo info;
-                        ams::svc::PageInfo pi;
-                        if (R_SUCCEEDED(cur_process.GetPageTable().QueryInfo(std::addressof(info), std::addressof(pi), far))) {
-                            if (info.GetState() == KMemoryState_Code && ((info.GetPermission() & KMemoryPermission_UserRead) != KMemoryPermission_UserRead)) {
-                                should_process_user_exception = true;
-                            }
-                        }
-                    }
-                    break;
-            }
 
             /* In the event that we return from this exception, we want SPSR.SS set so that we advance an instruction if single-stepping. */
             #if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
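
The combined effect of the two hunks above on instruction decoding can be summarized with the following sketch; the helper names and stubbed behaviour are stand-ins, not the kernel's real signatures.

    #include <cstdint>
    #include <optional>

    /* Stand-ins for the kernel's copy routines (behaviour stubbed for illustration). */
    static bool CopyU32AsUser(uint32_t *dst, uintptr_t src)       { (void)dst; (void)src; return false; } /* fails on --x pages */
    static bool CopyU32AsSupervisor(uint32_t *dst, uintptr_t src) { (void)dst; (void)src; return true;  } /* kernel read may still work */
    static bool IsDebugModeEnabled()                              { return true; }

    /* Fetch the faulting instruction: try the unprivileged copy first, and only fall back
     * to the privileged copy when debug mode is enabled and the pc is 4-byte aligned. */
    static std::optional<uint32_t> ReadFaultingInstruction(uintptr_t pc) {
        uint32_t insn = 0;
        if (CopyU32AsUser(&insn, pc)) {
            return insn;
        }
        if (IsDebugModeEnabled() && (pc & 3) == 0 && CopyU32AsSupervisor(&insn, pc)) {
            return insn;
        }
        return std::nullopt;
    }

    int main() {
        return ReadFaultingInstruction(0x1000).has_value() ? 0 : 1;
    }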


@@ -154,6 +154,21 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv:
     mov     x0, #1
     ret
 
+/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(void *dst, const void *src) */
+.section    .text._ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv, "ax", %progbits
+.global     _ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv
+.type       _ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv, %function
+.balign 0x10
+_ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv:
+    /* Just load and store a u32. */
+    /* NOTE: This is done with supervisor access permissions. */
+    ldr     w2, [x1]
+    str     w2, [x0]
+
+    /* We're done. */
+    mov     x0, #1
+    ret
+
 /* ams::kern::arch::arm64::UserspaceAccess::CopyStringFromUser(void *dst, const void *src, size_t size) */
 .section    .text._ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm, "ax", %progbits
 .global     _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm


@@ -1787,6 +1787,11 @@ namespace ams::kern {
         /* We're going to perform an update, so create a helper. */
         KScopedPageTableUpdater updater(this);
 
+        /* If we're creating an executable mapping, take and immediately release the scheduler lock. This will force a reschedule. */
+        if (is_x) {
+            KScopedSchedulerLock sl;
+        }
+
         /* Perform mapping operation. */
         const KPageProperties properties = { new_perm, false, false, DisableMergeAttribute_None };
         const auto operation = was_x ? OperationType_ChangePermissionsAndRefreshAndFlush : OperationType_ChangePermissions;
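
The empty scoped-lock block above is a synchronization idiom rather than data protection: taking the scheduler lock and immediately dropping it orders this path after whatever currently holds the lock, and (per the comment in the hunk) forces a reschedule before the mapping becomes executable. A rough analogy in portable C++, purely illustrative and unrelated to the kernel's actual scheduler types:

    #include <mutex>

    std::mutex g_rendezvous; /* stands in for the global scheduler lock */

    /* Acquire and immediately release the lock: nothing is modified, but by the time this
     * returns we are ordered after any critical section that previously held the lock. */
    void ForceRendezvous() {
        std::lock_guard<std::mutex> lk(g_rendezvous);
    }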


@@ -27,6 +27,7 @@ namespace ams::kern::svc {
                 case ams::svc::MemoryPermission_Read:
                 case ams::svc::MemoryPermission_ReadWrite:
                 case ams::svc::MemoryPermission_ReadExecute:
+                case ams::svc::MemoryPermission_Execute:
                     return true;
                 default:
                     return false;
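
A self-contained sketch of the widened validation, using an assumed enum and function name rather than the real ams::svc types; the behavioural change is that --x (Execute) is now accepted alongside the existing permissions.

    #include <cstdint>

    /* Assumed stand-in for ams::svc::MemoryPermission. */
    enum class MemoryPermission : uint32_t { None, Read, Write, ReadWrite, Execute, ReadExecute };

    /* Hypothetical mirror of the validation above (only the visible cases are reproduced). */
    constexpr bool IsValidProcessMemoryPermission(MemoryPermission perm) {
        switch (perm) {
            case MemoryPermission::Read:
            case MemoryPermission::ReadWrite:
            case MemoryPermission::ReadExecute:
            case MemoryPermission::Execute: /* newly accepted: --x */
                return true;
            default:
                return false;
        }
    }

    static_assert( IsValidProcessMemoryPermission(MemoryPermission::Execute));
    static_assert(!IsValidProcessMemoryPermission(MemoryPermission::Write));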


@@ -226,6 +226,7 @@ namespace ams::ldr {
             MetaFlag_OptimizeMemoryAllocation = (1 << 4),
             MetaFlag_DisableDeviceAddressSpaceMerge = (1 << 5),
             MetaFlag_EnableAliasRegionExtraSize = (1 << 6),
+            MetaFlag_PreventCodeReads = (1 << 7),
         };
 
         enum AddressSpaceType {


@@ -27,6 +27,7 @@ namespace ams::os::impl {
             case os::MemoryPermission_ReadOnly: return svc::MemoryPermission_Read;
             case os::MemoryPermission_ReadWrite: return svc::MemoryPermission_ReadWrite;
             case os::MemoryPermission_ReadExecute: return svc::MemoryPermission_ReadExecute;
+            case os::MemoryPermission_ExecuteOnly: return svc::MemoryPermission_Execute;
             AMS_UNREACHABLE_DEFAULT_CASE();
         }
     }
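
With the new mapping in place, code inside the codebase can request an execute-only view of another process's memory through the os wrapper. A hedged usage sketch: the call shape is taken from the loader hunks below, while the namespace and helper name here are hypothetical.

    #include <stratosphere.hpp>

    namespace ams::example {

        /* Hypothetical helper: make a .text region execute-only (--x) in the target process. */
        Result MakeTextExecuteOnly(os::NativeHandle process, uintptr_t address, size_t size) {
            /* os::MemoryPermission_ExecuteOnly now translates to svc::MemoryPermission_Execute. */
            R_RETURN(os::SetProcessMemoryPermission(process, address, size, os::MemoryPermission_ExecuteOnly));
        }

    }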


@@ -555,7 +555,7 @@ namespace ams::ldr {
             R_SUCCEED();
         }
 
-        Result LoadAutoLoadModule(os::NativeHandle process_handle, fs::FileHandle file, const NsoHeader *nso_header, uintptr_t nso_address, size_t nso_size) {
+        Result LoadAutoLoadModule(os::NativeHandle process_handle, fs::FileHandle file, const NsoHeader *nso_header, uintptr_t nso_address, size_t nso_size, bool prevent_code_reads) {
             /* Map and read data from file. */
             {
                 /* Map the process memory. */
@@ -594,7 +594,7 @@ namespace ams::ldr {
             const size_t ro_size = util::AlignUp(nso_header->ro_size, os::MemoryPageSize);
             const size_t rw_size = util::AlignUp(nso_header->rw_size + nso_header->bss_size, os::MemoryPageSize);
             if (text_size) {
-                R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->text_dst_offset, text_size, os::MemoryPermission_ReadExecute));
+                R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->text_dst_offset, text_size, prevent_code_reads ? os::MemoryPermission_ExecuteOnly : os::MemoryPermission_ReadExecute));
             }
             if (ro_size) {
                 R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->ro_dst_offset, ro_size, os::MemoryPermission_ReadOnly));
@@ -606,7 +606,7 @@ namespace ams::ldr {
             R_SUCCEED();
         }
 
-        Result LoadAutoLoadModules(const ProcessInfo *process_info, const NsoHeader *nso_headers, const bool *has_nso, const ArgumentStore::Entry *argument) {
+        Result LoadAutoLoadModules(const ProcessInfo *process_info, const NsoHeader *nso_headers, const bool *has_nso, const ArgumentStore::Entry *argument, bool prevent_code_reads) {
             /* Load each NSO. */
             for (size_t i = 0; i < Nso_Count; i++) {
                 if (has_nso[i]) {
@@ -614,7 +614,7 @@ namespace ams::ldr {
                     R_TRY(fs::OpenFile(std::addressof(file), GetNsoPath(i), fs::OpenMode_Read));
                     ON_SCOPE_EXIT { fs::CloseFile(file); };
 
-                    R_TRY(LoadAutoLoadModule(process_info->process_handle, file, nso_headers + i, process_info->nso_address[i], process_info->nso_size[i]));
+                    R_TRY(LoadAutoLoadModule(process_info->process_handle, file, nso_headers + i, process_info->nso_address[i], process_info->nso_size[i], prevent_code_reads));
                 }
             }
 
@@ -658,7 +658,7 @@ namespace ams::ldr {
             ON_RESULT_FAILURE { svc::CloseHandle(process_handle); };
 
             /* Load all auto load modules. */
-            R_RETURN(LoadAutoLoadModules(out, nso_headers, has_nso, argument));
+            R_RETURN(LoadAutoLoadModules(out, nso_headers, has_nso, argument, (meta->npdm->flags & ldr::Npdm::MetaFlag_PreventCodeReads) != 0));
         }
 
     }
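
To summarize the loader-side plumbing, a compact sketch of how the npdm flag selects the final .text permission; the enum and helper name are illustrative, and the bit position matches MetaFlag_PreventCodeReads above.

    #include <cstdint>

    /* Illustrative stand-ins for the os-level permissions chosen in LoadAutoLoadModule. */
    enum class TextPermission { ReadExecute, ExecuteOnly };

    constexpr uint32_t MetaFlag_PreventCodeReads = (1u << 7); /* mirrors ldr::Npdm::MetaFlag_PreventCodeReads */

    /* Hypothetical helper: pick the permission applied to a module's .text segment. */
    constexpr TextPermission ChooseTextPermission(uint32_t npdm_flags) {
        return (npdm_flags & MetaFlag_PreventCodeReads) != 0 ? TextPermission::ExecuteOnly
                                                             : TextPermission::ReadExecute;
    }

    static_assert(ChooseTextPermission(0)                         == TextPermission::ReadExecute);
    static_assert(ChooseTextPermission(MetaFlag_PreventCodeReads) == TextPermission::ExecuteOnly);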