Commit 0944d4458a by SciresM, 2024-10-10 19:15:47 -07:00 (committed via GitHub).
105 changed files with 2043 additions and 1742 deletions


@ -1,6 +1,6 @@
# Key: debugmode, default: 1.
# Desc: Controls whether kernel is debug mode.
# Disabling this may break Atmosphere's debugger in a future release.
# Disabling this will break Atmosphere.
# Key: debugmode_user, default: 0.
# Desc: Controls whether userland is debug mode.
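A minimal sketch of the resulting settings, assuming these keys sit in the [exosphere] section of the config template (the section header is not shown in this hunk and is an assumption):

    [exosphere]
    # Kernel debug mode must now stay enabled for Atmosphere to function.
    debugmode=1
    # Userland debug mode remains optional.
    debugmode_user=0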

emummc/README.md

@ -2,7 +2,7 @@
*A SDMMC driver replacement for Nintendo's Filesystem Services, by **m4xw***
### Supported Horizon Versions
**1.0.0 - 18.1.0**
**1.0.0 - 19.0.0**
## Features
* Arbitrary SDMMC backend selection


@ -73,6 +73,8 @@
#include "offsets/1800_exfat.h"
#include "offsets/1810.h"
#include "offsets/1810_exfat.h"
#include "offsets/1900.h"
#include "offsets/1900_exfat.h"
#include "../utils/fatal.h"
#define GET_OFFSET_STRUCT_NAME(vers) g_offsets##vers
@ -157,6 +159,8 @@ DEFINE_OFFSET_STRUCT(_1800);
DEFINE_OFFSET_STRUCT(_1800_EXFAT);
DEFINE_OFFSET_STRUCT(_1810);
DEFINE_OFFSET_STRUCT(_1810_EXFAT);
DEFINE_OFFSET_STRUCT(_1900);
DEFINE_OFFSET_STRUCT(_1900_EXFAT);
const fs_offsets_t *get_fs_offsets(enum FS_VER version) {
switch (version) {
@ -274,6 +278,10 @@ const fs_offsets_t *get_fs_offsets(enum FS_VER version) {
return &(GET_OFFSET_STRUCT_NAME(_1810));
case FS_VER_18_1_0_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1810_EXFAT));
case FS_VER_19_0_0:
return &(GET_OFFSET_STRUCT_NAME(_1900));
case FS_VER_19_0_0_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1900_EXFAT));
default:
fatal_abort(Fatal_UnknownVersion);
}


@ -107,6 +107,9 @@ enum FS_VER
FS_VER_18_1_0,
FS_VER_18_1_0_EXFAT,
FS_VER_19_0_0,
FS_VER_19_0_0_EXFAT,
FS_VER_MAX,
};

emummc/source/FS/offsets/1900.h (new file)

@ -0,0 +1,59 @@
/*
* Copyright (c) 2019 m4xw <m4x@m4xw.net>
* Copyright (c) 2019 Atmosphere-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __FS_1900_H__
#define __FS_1900_H__
// Accessor vtable getters
#define FS_OFFSET_1900_SDMMC_ACCESSOR_GC 0x195C00
#define FS_OFFSET_1900_SDMMC_ACCESSOR_SD 0x197F80
#define FS_OFFSET_1900_SDMMC_ACCESSOR_NAND 0x1963B0
// Hooks
#define FS_OFFSET_1900_SDMMC_WRAPPER_READ 0x191A70
#define FS_OFFSET_1900_SDMMC_WRAPPER_WRITE 0x191AD0
#define FS_OFFSET_1900_RTLD 0x275F0
#define FS_OFFSET_1900_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x50)))
#define FS_OFFSET_1900_CLKRST_SET_MIN_V_CLK_RATE 0x1B3880
// Misc funcs
#define FS_OFFSET_1900_LOCK_MUTEX 0x18AC20
#define FS_OFFSET_1900_UNLOCK_MUTEX 0x18AC70
#define FS_OFFSET_1900_SDMMC_WRAPPER_CONTROLLER_OPEN 0x191A30
#define FS_OFFSET_1900_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x191A50
// Misc Data
#define FS_OFFSET_1900_SD_MUTEX 0xFE1408
#define FS_OFFSET_1900_NAND_MUTEX 0xFDCB60
#define FS_OFFSET_1900_ACTIVE_PARTITION 0xFDCBA0
#define FS_OFFSET_1900_SDMMC_DAS_HANDLE 0xFC1908
// NOPs
#define FS_OFFSET_1900_SD_DAS_INIT 0x260C4
// Nintendo Paths
#define FS_OFFSET_1900_NINTENDO_PATHS \
{ \
{.opcode_reg = 3, .adrp_offset = 0x00067FC8, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 3, .adrp_offset = 0x00075D6C, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x0007D1E8, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x00092818, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1900_H__
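For context: each NINTENDO_PATHS entry above plausibly identifies an adrp/add instruction pair (in register opcode_reg, with the add at adrp_offset + add_rel_offset) that builds FS's Nintendo directory path, so emummc can rewrite the pair to point at its redirected path; the all-zero entry terminates the list. A hedged sketch of re-encoding such an adrp (illustrative only, not emummc's actual patcher):

    #include <cstdint>

    /* Encode an AArch64 ADRP so that register rd points at target's 4 KB page
     * when executed at pc. Layout: op=1 | immlo[30:29] | 10000 | immhi[23:5] | Rd[4:0]. */
    constexpr std::uint32_t MakeAdrp(unsigned rd, std::uint64_t pc, std::uint64_t target) {
        const std::int64_t page_delta = static_cast<std::int64_t>(target >> 12) - static_cast<std::int64_t>(pc >> 12);
        const auto immlo = static_cast<std::uint32_t>(page_delta & 0x3);
        const auto immhi = static_cast<std::uint32_t>((page_delta >> 2) & 0x7FFFF);
        return 0x90000000u | (immlo << 29) | (immhi << 5) | (rd & 0x1Fu);
    }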

emummc/source/FS/offsets/1900_exfat.h (new file)

@ -0,0 +1,59 @@
/*
* Copyright (c) 2019 m4xw <m4x@m4xw.net>
* Copyright (c) 2019 Atmosphere-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __FS_1900_EXFAT_H__
#define __FS_1900_EXFAT_H__
// Accessor vtable getters
#define FS_OFFSET_1900_EXFAT_SDMMC_ACCESSOR_GC 0x1A1430
#define FS_OFFSET_1900_EXFAT_SDMMC_ACCESSOR_SD 0x1A37B0
#define FS_OFFSET_1900_EXFAT_SDMMC_ACCESSOR_NAND 0x1A1BE0
// Hooks
#define FS_OFFSET_1900_EXFAT_SDMMC_WRAPPER_READ 0x19D2A0
#define FS_OFFSET_1900_EXFAT_SDMMC_WRAPPER_WRITE 0x19D300
#define FS_OFFSET_1900_EXFAT_RTLD 0x275F0
#define FS_OFFSET_1900_EXFAT_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x50)))
#define FS_OFFSET_1900_EXFAT_CLKRST_SET_MIN_V_CLK_RATE 0x1BF0B0
// Misc funcs
#define FS_OFFSET_1900_EXFAT_LOCK_MUTEX 0x196450
#define FS_OFFSET_1900_EXFAT_UNLOCK_MUTEX 0x1964A0
#define FS_OFFSET_1900_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0x19D260
#define FS_OFFSET_1900_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x19D280
// Misc Data
#define FS_OFFSET_1900_EXFAT_SD_MUTEX 0xFF4408
#define FS_OFFSET_1900_EXFAT_NAND_MUTEX 0xFEFB60
#define FS_OFFSET_1900_EXFAT_ACTIVE_PARTITION 0xFEFBA0
#define FS_OFFSET_1900_EXFAT_SDMMC_DAS_HANDLE 0xFCF908
// NOPs
#define FS_OFFSET_1900_EXFAT_SD_DAS_INIT 0x260C4
// Nintendo Paths
#define FS_OFFSET_1900_EXFAT_NINTENDO_PATHS \
{ \
{.opcode_reg = 3, .adrp_offset = 0x00067FC8, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 3, .adrp_offset = 0x00075D6C, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x0007D1E8, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x00092818, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1900_EXFAT_H__


@ -85,10 +85,10 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
/* We can get away with only including latest because exosphere supports newer-than-expected master key in engine. */
/* TODO: Update on next change of keys. */
/* Mariko Development Master Kek Source. */
.byte 0xE4, 0x45, 0xD0, 0x14, 0xA0, 0xE5, 0xE9, 0x4B, 0xFE, 0x76, 0xF4, 0x29, 0x41, 0xBB, 0x64, 0xED
.byte 0x65, 0x7B, 0x11, 0x46, 0x0E, 0xC2, 0x22, 0x5D, 0xB9, 0xF1, 0xF5, 0x00, 0xF9, 0x3E, 0x1F, 0x70
/* Mariko Production Master Kek Source. */
.byte 0x4F, 0x41, 0x3C, 0x3B, 0xFB, 0x6A, 0x01, 0x2A, 0x68, 0x9F, 0x83, 0xE9, 0x53, 0xBD, 0x16, 0xD2
.byte 0x31, 0xBE, 0x25, 0xFB, 0xDB, 0xB4, 0xEE, 0x49, 0x5C, 0x77, 0x05, 0xC2, 0x36, 0x9F, 0x34, 0x80
/* Development Master Key Vectors. */
.byte 0x46, 0x22, 0xB4, 0x51, 0x9A, 0x7E, 0xA7, 0x7F, 0x62, 0xA1, 0x1F, 0x8F, 0xC5, 0x3A, 0xDB, 0xFE /* Zeroes encrypted with Master Key 00. */
@ -109,6 +109,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 /* Master key 0E encrypted with Master key 0F. */
.byte 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 /* Master key 0F encrypted with Master key 10. */
.byte 0x0C, 0x75, 0x39, 0x15, 0x53, 0xEA, 0x81, 0x11, 0xA3, 0xE0, 0xDC, 0x3D, 0x0E, 0x76, 0xC6, 0xB8 /* Master key 10 encrypted with Master key 11. */
.byte 0x90, 0x64, 0xF9, 0x08, 0x29, 0x88, 0xD4, 0xDC, 0x73, 0xA4, 0xA1, 0x13, 0x9E, 0x59, 0x85, 0xA0 /* Master key 11 encrypted with Master key 12. */
/* Production Master Key Vectors. */
.byte 0x0C, 0xF0, 0x59, 0xAC, 0x85, 0xF6, 0x26, 0x65, 0xE1, 0xE9, 0x19, 0x55, 0xE6, 0xF2, 0x67, 0x3D /* Zeroes encrypted with Master Key 00. */
@ -129,6 +130,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 /* Master key 0E encrypted with Master key 0F. */
.byte 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD /* Master key 0F encrypted with Master key 10. */
.byte 0x58, 0x15, 0xD2, 0xF6, 0x8A, 0xE8, 0x19, 0xAB, 0xFB, 0x2D, 0x52, 0x9D, 0xE7, 0x55, 0xF3, 0x93 /* Master key 10 encrypted with Master key 11. */
.byte 0x4A, 0x01, 0x3B, 0xC7, 0x44, 0x6E, 0x45, 0xBD, 0xE6, 0x5E, 0x2B, 0xEC, 0x07, 0x37, 0x52, 0x86 /* Master key 11 encrypted with Master key 12. */
/* Device Master Key Source Sources. */
.byte 0x8B, 0x4E, 0x1C, 0x22, 0x42, 0x07, 0xC8, 0x73, 0x56, 0x94, 0x08, 0x8B, 0xCC, 0x47, 0x0F, 0x5D /* 4.0.0 Device Master Key Source Source. */
@ -146,6 +148,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C /* 16.0.0 Device Master Key Source Source. */
.byte 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 /* 17.0.0 Device Master Key Source Source. */
.byte 0x14, 0xF5, 0xA5, 0xD0, 0x73, 0x6D, 0x44, 0x80, 0x5F, 0x31, 0x5A, 0x8F, 0x1E, 0xD4, 0x0D, 0x63 /* 18.0.0 Device Master Key Source Source. */
.byte 0x07, 0x38, 0x9A, 0xEC, 0x9C, 0xBD, 0x50, 0x4A, 0x4C, 0x1F, 0x04, 0xDA, 0x40, 0x68, 0x29, 0xE3 /* 19.0.0 Device Master Key Source Source. */
/* Development Device Master Kek Sources. */
.byte 0xD6, 0xBD, 0x9F, 0xC6, 0x18, 0x09, 0xE1, 0x96, 0x20, 0x39, 0x60, 0xD2, 0x89, 0x83, 0x31, 0x34 /* 4.0.0 Device Master Kek Source. */
@ -163,6 +166,7 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F /* 16.0.0 Device Master Kek Source. */
.byte 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 /* 17.0.0 Device Master Kek Source. */
.byte 0x3B, 0x00, 0x89, 0xD7, 0xA9, 0x9E, 0xB7, 0x70, 0x86, 0x00, 0xC3, 0x49, 0x52, 0x8C, 0xA4, 0xAF /* 18.0.0 Device Master Kek Source. */
.byte 0xAE, 0x78, 0x36, 0xB6, 0x91, 0xEB, 0xAF, 0x9C, 0x18, 0xF1, 0xC0, 0xD5, 0x8A, 0x0C, 0x7C, 0xA1 /* 19.0.0 Device Master Kek Source. */
/* Production Device Master Kek Sources. */
.byte 0x88, 0x62, 0x34, 0x6E, 0xFA, 0xF7, 0xD8, 0x3F, 0xE1, 0x30, 0x39, 0x50, 0xF0, 0xB7, 0x5D, 0x5D /* 4.0.0 Device Master Kek Source. */
@ -180,3 +184,4 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F /* 16.0.0 Device Master Kek Source. */
.byte 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E /* 17.0.0 Device Master Kek Source. */
.byte 0xE7, 0x85, 0x8C, 0xA2, 0xF4, 0x49, 0xCB, 0x07, 0xD1, 0x8E, 0x48, 0x1B, 0xE8, 0x1E, 0x28, 0x3B /* 18.0.0 Device Master Kek Source. */
.byte 0x9B, 0xA5, 0xFD, 0x74, 0x7F, 0xCD, 0x23, 0xD1, 0xD9, 0xBD, 0x6C, 0x51, 0x72, 0x5F, 0x3D, 0x1F /* 19.0.0 Device Master Kek Source. */
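These vector tables are what make the "only include latest" comment above workable: each "Master key N encrypted with Master key N+1" block lets every older generation be recovered from the newest key, and the "Zeroes encrypted with Master Key 00" block verifies the chain. A hedged sketch of the usual derivation loop (the AES helper is a stand-in, not Atmosphere's API):

    #include <cstdint>
    using u8 = std::uint8_t;

    /* Stand-in for whatever AES-128-ECB decrypt primitive is available. */
    void Aes128EcbDecrypt(u8 *dst, const u8 *key, const u8 *src);

    /* vectors[0] = zeroes encrypted with master key 00;
     * vectors[i] = master key (i-1) encrypted with master key i. */
    bool DeriveAndCheckMasterKeys(u8 keys[][0x10], int newest_gen, const u8 vectors[][0x10]) {
        for (int gen = newest_gen; gen > 0; --gen) {
            Aes128EcbDecrypt(keys[gen - 1], keys[gen], vectors[gen]);
        }
        u8 zeroes[0x10];
        Aes128EcbDecrypt(zeroes, keys[0], vectors[0]);
        for (int i = 0; i < 0x10; ++i) {
            if (zeroes[i] != 0) {
                return false; /* chain failed to verify */
            }
        }
        return true;
    }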


@ -94,7 +94,7 @@ namespace ams::secmon::boot {
}
/* Check that the key generation is one that we can use. */
static_assert(pkg1::KeyGeneration_Count == 18);
static_assert(pkg1::KeyGeneration_Count == 19);
if (key_generation >= pkg1::KeyGeneration_Count) {
return false;
}


@ -23,17 +23,17 @@ namespace ams::nxboot {
alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSource[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x4F, 0x41, 0x3C, 0x3B, 0xFB, 0x6A, 0x01, 0x2A, 0x68, 0x9F, 0x83, 0xE9, 0x53, 0xBD, 0x16, 0xD2
0x31, 0xBE, 0x25, 0xFB, 0xDB, 0xB4, 0xEE, 0x49, 0x5C, 0x77, 0x05, 0xC2, 0x36, 0x9F, 0x34, 0x80
};
alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSourceDev[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0xE4, 0x45, 0xD0, 0x14, 0xA0, 0xE5, 0xE9, 0x4B, 0xFE, 0x76, 0xF4, 0x29, 0x41, 0xBB, 0x64, 0xED
0x65, 0x7B, 0x11, 0x46, 0x0E, 0xC2, 0x22, 0x5D, 0xB9, 0xF1, 0xF5, 0x00, 0xF9, 0x3E, 0x1F, 0x70
};
alignas(se::AesBlockSize) constexpr inline const u8 EristaMasterKekSource[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x00, 0x04, 0x5D, 0xF0, 0x4D, 0xCD, 0x14, 0xA3, 0x1C, 0xBF, 0xDE, 0x48, 0x55, 0xBA, 0x35, 0xC1
0xD7, 0x63, 0x74, 0x46, 0x4E, 0xBA, 0x78, 0x0A, 0x7C, 0x9D, 0xB3, 0xE8, 0x7A, 0x3D, 0x71, 0xE3
};
alignas(se::AesBlockSize) constexpr inline const u8 KeyblobKeySource[se::AesBlockSize] = {
@ -72,6 +72,7 @@ namespace ams::nxboot {
{ 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C }, /* 16.0.0 Device Master Key Source Source. */
{ 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 }, /* 17.0.0 Device Master Key Source Source. */
{ 0x14, 0xF5, 0xA5, 0xD0, 0x73, 0x6D, 0x44, 0x80, 0x5F, 0x31, 0x5A, 0x8F, 0x1E, 0xD4, 0x0D, 0x63 }, /* 18.0.0 Device Master Key Source Source. */
{ 0x07, 0x38, 0x9A, 0xEC, 0x9C, 0xBD, 0x50, 0x4A, 0x4C, 0x1F, 0x04, 0xDA, 0x40, 0x68, 0x29, 0xE3 }, /* 19.0.0 Device Master Key Source Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSources[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = {
@ -90,6 +91,7 @@ namespace ams::nxboot {
{ 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F }, /* 16.0.0 Device Master Kek Source. */
{ 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E }, /* 17.0.0 Device Master Kek Source. */
{ 0xE7, 0x85, 0x8C, 0xA2, 0xF4, 0x49, 0xCB, 0x07, 0xD1, 0x8E, 0x48, 0x1B, 0xE8, 0x1E, 0x28, 0x3B }, /* 18.0.0 Device Master Kek Source. */
{ 0x9B, 0xA5, 0xFD, 0x74, 0x7F, 0xCD, 0x23, 0xD1, 0xD9, 0xBD, 0x6C, 0x51, 0x72, 0x5F, 0x3D, 0x1F }, /* 19.0.0 Device Master Kek Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSourcesDev[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = {
@ -108,6 +110,7 @@ namespace ams::nxboot {
{ 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F }, /* 16.0.0 Device Master Kek Source. */
{ 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 }, /* 17.0.0 Device Master Kek Source. */
{ 0x3B, 0x00, 0x89, 0xD7, 0xA9, 0x9E, 0xB7, 0x70, 0x86, 0x00, 0xC3, 0x49, 0x52, 0x8C, 0xA4, 0xAF }, /* 18.0.0 Device Master Kek Source. */
{ 0xAE, 0x78, 0x36, 0xB6, 0x91, 0xEB, 0xAF, 0x9C, 0x18, 0xF1, 0xC0, 0xD5, 0x8A, 0x0C, 0x7C, 0xA1 }, /* 19.0.0 Device Master Kek Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySources[pkg1::KeyGeneration_Count][se::AesBlockSize] = {
@ -129,6 +132,7 @@ namespace ams::nxboot {
{ 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 }, /* Master key 0E encrypted with Master key 0F. */
{ 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD }, /* Master key 0F encrypted with Master key 10. */
{ 0x58, 0x15, 0xD2, 0xF6, 0x8A, 0xE8, 0x19, 0xAB, 0xFB, 0x2D, 0x52, 0x9D, 0xE7, 0x55, 0xF3, 0x93 }, /* Master key 10 encrypted with Master key 11. */
{ 0x4A, 0x01, 0x3B, 0xC7, 0x44, 0x6E, 0x45, 0xBD, 0xE6, 0x5E, 0x2B, 0xEC, 0x07, 0x37, 0x52, 0x86 }, /* Master key 11 encrypted with Master key 12. */
};
alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySourcesDev[pkg1::KeyGeneration_Count][se::AesBlockSize] = {
@ -150,6 +154,7 @@ namespace ams::nxboot {
{ 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 }, /* Master key 0E encrypted with Master key 0F. */
{ 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 }, /* Master key 0F encrypted with Master key 10. */
{ 0x0C, 0x75, 0x39, 0x15, 0x53, 0xEA, 0x81, 0x11, 0xA3, 0xE0, 0xDC, 0x3D, 0x0E, 0x76, 0xC6, 0xB8 }, /* Master key 10 encrypted with Master key 11. */
{ 0x90, 0x64, 0xF9, 0x08, 0x29, 0x88, 0xD4, 0xDC, 0x73, 0xA4, 0xA1, 0x13, 0x9E, 0x59, 0x85, 0xA0 }, /* Master key 11 encrypted with Master key 12. */
};
alignas(se::AesBlockSize) constinit u8 MasterKeys[pkg1::OldMasterKeyCount][se::AesBlockSize] = {};


@ -80,7 +80,7 @@ namespace ams::nxboot {
}
/* Check that the key generation is one that we can use. */
static_assert(pkg1::KeyGeneration_Count == 18);
static_assert(pkg1::KeyGeneration_Count == 19);
if (key_generation >= pkg1::KeyGeneration_Count) {
return false;
}


@ -261,6 +261,8 @@ namespace ams::nxboot {
return ams::TargetFirmware_17_0_0;
} else if (std::memcmp(package1 + 0x10, "20240207", 8) == 0) {
return ams::TargetFirmware_18_0_0;
} else if (std::memcmp(package1 + 0x10, "20240808", 8) == 0) {
return ams::TargetFirmware_19_0_0;
}
break;
default:


@ -177,6 +177,9 @@ namespace ams::nxboot {
FsVersion_18_1_0,
FsVersion_18_1_0_Exfat,
FsVersion_19_0_0,
FsVersion_19_0_0_Exfat,
FsVersion_Count,
};
@ -266,6 +269,9 @@ namespace ams::nxboot {
{ 0xA3, 0x39, 0xF0, 0x1C, 0x95, 0xBF, 0xA7, 0x68 }, /* FsVersion_18_1_0 */
{ 0x20, 0x4C, 0xBA, 0x86, 0xDE, 0x08, 0x44, 0x6A }, /* FsVersion_18_1_0_Exfat */
{ 0xD9, 0x4C, 0x68, 0x15, 0xF8, 0xF5, 0x0A, 0x20 }, /* FsVersion_19_0_0 */
{ 0xED, 0xA8, 0x78, 0x68, 0xA4, 0x49, 0x07, 0x50 }, /* FsVersion_19_0_0_Exfat */
};
const InitialProcessBinaryHeader *FindInitialProcessBinary(const pkg2::Package2Header *header, const u8 *data, ams::TargetFirmware target_firmware) {
@ -645,6 +651,16 @@ namespace ams::nxboot {
AddPatch(fs_meta, 0x195FD9, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x16FBE0, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_19_0_0:
AddPatch(fs_meta, 0x195C75, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x195E75, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x16F170, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_19_0_0_Exfat:
AddPatch(fs_meta, 0x1A14A5, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x1A16A5, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x17A9A0, NogcPatch1, sizeof(NogcPatch1));
break;
default:
break;
}


@ -38,6 +38,7 @@ namespace ams::pkg1 {
KeyGeneration_16_0_0 = 0x0F,
KeyGeneration_17_0_0 = 0x10,
KeyGeneration_18_0_0 = 0x11,
KeyGeneration_19_0_0 = 0x12,
KeyGeneration_Count,


@ -24,7 +24,7 @@ namespace ams::pkg2 {
constexpr inline int PayloadCount = 3;
constexpr inline int MinimumValidDataVersion = 0; /* We allow older package2 to load; this value is currently 0x18 in Nintendo's code. */
constexpr inline int CurrentBootloaderVersion = 0x15;
constexpr inline int CurrentBootloaderVersion = 0x16;
struct Package2Meta {
using Magic = util::FourCC<'P','K','2','1'>;
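A hedged sketch of the gate implied by the comment on MinimumValidDataVersion (the check name is hypothetical; the CurrentBootloaderVersion bump to 0x16 simply tracks the package2 shipped with 19.0.0):

    /* Nintendo's code requires data_version >= 0x18; keeping the minimum at 0
     * deliberately lets older package2 images continue to load. */
    constexpr bool IsAcceptableDataVersion(int data_version) {
        return data_version >= MinimumValidDataVersion;
    }
    static_assert(IsAcceptableDataVersion(0x18) && IsAcceptableDataVersion(0));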


@ -177,6 +177,7 @@ namespace ams::fuse {
}
constexpr const TargetFirmware FuseVersionIncrementFirmwares[] = {
TargetFirmware_19_0_0,
TargetFirmware_17_0_0,
TargetFirmware_16_0_0,
TargetFirmware_15_0_0,
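Adding TargetFirmware_19_0_0 here records that 19.0.0 burns another anti-downgrade fuse. A hedged sketch of how such a table is typically consumed (function name and base handling are assumptions):

    /* Assuming TargetFirmware values compare in release order, the expected
     * fuse count is the number of increment firmwares the target has reached. */
    constexpr int CountFuseIncrements(TargetFirmware fw) {
        int count = 0;
        for (const auto increment : FuseVersionIncrementFirmwares) {
            if (fw >= increment) {
                ++count;
            }
        }
        return count;
    }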


@ -110,47 +110,47 @@ namespace ams::kern::arch::arm64::init {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
virt_addr += L1BlockSize;
if (l1_entry->IsBlock() && block_size == L1BlockSize) {
if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) {
count++;
}
continue;
}
/* Non empty and non-block must be table. */
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsBlock() || l2_entry->IsEmpty()) {
const size_t advance_size = (l2_entry->IsBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
virt_addr += advance_size;
if (l2_entry->IsBlock() && block_size == advance_size) {
if (l2_entry->IsMappedBlock() && block_size == advance_size) {
count++;
}
continue;
}
/* Non empty and non-block must be table. */
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* Table, so check if we're mapped in L3. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
/* L3 must be block or empty. */
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock() || l3_entry->IsEmpty());
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
const size_t advance_size = (l3_entry->IsBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
virt_addr += advance_size;
if (l3_entry->IsBlock() && block_size == advance_size) {
if (l3_entry->IsMappedBlock() && block_size == advance_size) {
count++;
}
}
@ -164,10 +164,10 @@ namespace ams::kern::arch::arm64::init {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
if (l1_entry->IsBlock() && block_size == L1BlockSize) {
if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) {
if ((count++) == index) {
return virt_addr;
}
@ -177,16 +177,16 @@ namespace ams::kern::arch::arm64::init {
}
/* Non empty and non-block must be table. */
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsBlock() || l2_entry->IsEmpty()) {
const size_t advance_size = (l2_entry->IsBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
if (l2_entry->IsBlock() && block_size == advance_size) {
if (l2_entry->IsMappedBlock() && block_size == advance_size) {
if ((count++) == index) {
return virt_addr;
}
@ -196,18 +196,18 @@ namespace ams::kern::arch::arm64::init {
}
/* Non empty and non-block must be table. */
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* Table, so check if we're mapped in L3. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
/* L3 must be block or empty. */
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock() || l3_entry->IsEmpty());
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
const size_t advance_size = (l3_entry->IsBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
if (l3_entry->IsBlock() && block_size == advance_size) {
if (l3_entry->IsMappedBlock() && block_size == advance_size) {
if ((count++) == index) {
return virt_addr;
}
@ -220,29 +220,29 @@ namespace ams::kern::arch::arm64::init {
PageTableEntry *GetMappingEntry(KVirtualAddress virt_addr, size_t block_size) {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
if (l1_entry->IsBlock()) {
if (l1_entry->IsMappedBlock()) {
MESOSPHERE_INIT_ABORT_UNLESS(block_size == L1BlockSize);
return l1_entry;
}
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsBlock()) {
if (l2_entry->IsMappedBlock()) {
const size_t real_size = (l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(real_size == block_size);
return l2_entry;
}
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* Table, so check if we're mapped in L3. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
/* L3 must be block. */
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());
const size_t real_size = (l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(real_size == block_size);
@ -340,7 +340,7 @@ namespace ams::kern::arch::arm64::init {
}
/* If we don't already have an L2 table, we need to make a new one. */
if (!l1_entry->IsTable()) {
if (!l1_entry->IsMappedTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
cpu::DataSynchronizationBarrierInnerShareable();
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
@ -371,7 +371,7 @@ namespace ams::kern::arch::arm64::init {
}
/* If we don't already have an L3 table, we need to make a new one. */
if (!l2_entry->IsTable()) {
if (!l2_entry->IsMappedTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
cpu::DataSynchronizationBarrierInnerShareable();
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
@ -416,12 +416,12 @@ namespace ams::kern::arch::arm64::init {
for (size_t l1_index = 0; l1_index < m_num_entries[0]; l1_index++) {
/* Get L1 entry. */
L1PageTableEntry * const l1_entry = l1_table + l1_index;
if (l1_entry->IsBlock()) {
if (l1_entry->IsMappedBlock()) {
/* Unmap the L1 entry, if we should. */
if (ShouldUnmap(l1_entry)) {
*static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
}
} else if (l1_entry->IsTable()) {
} else if (l1_entry->IsMappedTable()) {
/* Get the L2 table. */
L2PageTableEntry * const l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(l1_entry->GetTable()) + phys_to_virt_offset);
@ -430,7 +430,7 @@ namespace ams::kern::arch::arm64::init {
for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) {
/* Get L2 entry. */
L2PageTableEntry * const l2_entry = l2_table + l2_index;
if (l2_entry->IsBlock()) {
if (l2_entry->IsMappedBlock()) {
const size_t num_to_clear = (l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize) / L2BlockSize;
if (ShouldUnmap(l2_entry)) {
@ -442,7 +442,7 @@ namespace ams::kern::arch::arm64::init {
}
l2_index = l2_index + num_to_clear - 1;
} else if (l2_entry->IsTable()) {
} else if (l2_entry->IsMappedTable()) {
/* Get the L3 table. */
L3PageTableEntry * const l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(l2_entry->GetTable()) + phys_to_virt_offset);
@ -450,7 +450,7 @@ namespace ams::kern::arch::arm64::init {
size_t remaining_l3_entries = 0;
for (size_t l3_index = 0; l3_index < MaxPageTableEntries; ++l3_index) {
/* Get L3 entry. */
if (L3PageTableEntry * const l3_entry = l3_table + l3_index; l3_entry->IsBlock()) {
if (L3PageTableEntry * const l3_entry = l3_table + l3_index; l3_entry->IsMappedBlock()) {
const size_t num_to_clear = (l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize) / L3BlockSize;
if (ShouldUnmap(l3_entry)) {
@ -498,25 +498,25 @@ namespace ams::kern::arch::arm64::init {
/* Get the L1 entry. */
const L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
if (l1_entry->IsBlock()) {
if (l1_entry->IsMappedBlock()) {
return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
}
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
/* Get the L2 entry. */
const L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsBlock()) {
if (l2_entry->IsMappedBlock()) {
return l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1));
}
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* Get the L3 entry. */
const L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());
return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1));
}
@ -561,26 +561,26 @@ namespace ams::kern::arch::arm64::init {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped, update. */
if (l1_entry->IsBlock()) {
if (l1_entry->IsMappedBlock()) {
UpdateExtents(l1_entry->GetBlock(), L1BlockSize);
continue;
}
/* Not a block, so we must have a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsBlock()) {
if (l2_entry->IsMappedBlock()) {
UpdateExtents(l2_entry->GetBlock(), l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize);
continue;
}
/* Not a block, so we must have a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* We must have a mapped l3 entry to inspect. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());
UpdateExtents(l3_entry->GetBlock(), l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize);
}
@ -602,11 +602,11 @@ namespace ams::kern::arch::arm64::init {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped, the address isn't free. */
if (l1_entry->IsBlock()) {
if (l1_entry->IsMappedBlock()) {
return false;
}
if (!l1_entry->IsTable()) {
if (!l1_entry->IsMappedTable()) {
/* Not a table, so just move to check the next region. */
virt_addr = util::AlignDown(GetInteger(virt_addr) + L1BlockSize, L1BlockSize);
continue;
@ -615,11 +615,11 @@ namespace ams::kern::arch::arm64::init {
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsBlock()) {
if (l2_entry->IsMappedBlock()) {
return false;
}
if (!l2_entry->IsTable()) {
if (!l2_entry->IsMappedTable()) {
/* Not a table, so just move to check the next region. */
virt_addr = util::AlignDown(GetInteger(virt_addr) + L2BlockSize, L2BlockSize);
continue;
@ -628,7 +628,7 @@ namespace ams::kern::arch::arm64::init {
/* Table, so check if we're mapped in L3. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
if (l3_entry->IsBlock()) {
if (l3_entry->IsMappedBlock()) {
return false;
}
@ -648,7 +648,7 @@ namespace ams::kern::arch::arm64::init {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* Check if an L1 block is present. */
if (l1_entry->IsBlock()) {
if (l1_entry->IsMappedBlock()) {
/* Ensure that we are allowed to have an L1 block here. */
const KPhysicalAddress block = l1_entry->GetBlock();
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
@ -669,10 +669,10 @@ namespace ams::kern::arch::arm64::init {
}
/* Not a block, so we must be a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsBlock()) {
if (l2_entry->IsMappedBlock()) {
const KPhysicalAddress block = l2_entry->GetBlock();
if (l2_entry->IsContiguous()) {
@ -720,11 +720,11 @@ namespace ams::kern::arch::arm64::init {
}
/* Not a block, so we must be a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* We must have a mapped l3 entry to reprotect. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());
const KPhysicalAddress block = l3_entry->GetBlock();
if (l3_entry->IsContiguous()) {


@ -93,9 +93,13 @@ namespace ams::kern::arch::arm64 {
MESOSPHERE_ASSERT(alignment < L1BlockSize);
return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
}
public:
/* TODO: How should this size be determined. Does the KProcess slab count need to go in a header as a define? */
static constexpr size_t NumTtbr0Entries = 81;
private:
static constinit inline const volatile u64 s_ttbr0_entries[NumTtbr0Entries] = {};
private:
KPageTableManager *m_manager;
u64 m_ttbr;
u8 m_asid;
protected:
Result OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll);
@ -105,6 +109,9 @@ namespace ams::kern::arch::arm64 {
KPageTableManager &GetPageTableManager() const { return *m_manager; }
private:
constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
/* Check that the property is not kernel execute. */
MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_KernelExecute) == 0);
/* Set basic attributes. */
PageTableEntry entry{PageTableEntry::ExtensionFlag_Valid};
entry.SetPrivilegedExecuteNever(true);
@ -118,22 +125,24 @@ namespace ams::kern::arch::arm64 {
/* Set page attribute. */
if (properties.io) {
MESOSPHERE_ABORT_UNLESS(!properties.uncached);
MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0);
entry.SetPageAttribute(PageTableEntry::PageAttribute_Device_nGnRnE)
.SetUserExecuteNever(true);
} else if (properties.uncached) {
MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0);
entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable);
entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable)
.SetUserExecuteNever(true);
} else {
entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemory);
}
/* Set user execute never bit. */
if (properties.perm != KMemoryPermission_UserReadExecute) {
MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0);
entry.SetUserExecuteNever(true);
if ((properties.perm & KMemoryPermission_UserExecute) != 0) {
/* Check that the permission is either r--/--x or r--/r-x. */
MESOSPHERE_ABORT_UNLESS((properties.perm & ~ams::svc::MemoryPermission_Read) == (KMemoryPermission_KernelRead | KMemoryPermission_UserExecute));
} else {
entry.SetUserExecuteNever(true);
}
}
/* Set AP[1] based on perm. */
@ -168,53 +177,42 @@ namespace ams::kern::arch::arm64 {
return entry;
}
public:
constexpr explicit KPageTable(util::ConstantInitializeTag) : KPageTableBase(util::ConstantInitialize), m_manager(), m_ttbr(), m_asid() { /* ... */ }
constexpr explicit KPageTable(util::ConstantInitializeTag) : KPageTableBase(util::ConstantInitialize), m_manager(), m_asid() { /* ... */ }
explicit KPageTable() { /* ... */ }
static NOINLINE void Initialize(s32 core_id);
ALWAYS_INLINE void Activate(u32 proc_id) {
cpu::SwitchProcess(m_ttbr, proc_id);
static const volatile u64 &GetTtbr0Entry(size_t index) { return s_ttbr0_entries[index]; }
static ALWAYS_INLINE u64 GetKernelTtbr0() {
return s_ttbr0_entries[0];
}
static ALWAYS_INLINE void ActivateKernel() {
/* Activate, using asid 0 and process id = 0xFFFFFFFF */
cpu::SwitchProcess(GetKernelTtbr0(), 0xFFFFFFFF);
}
static ALWAYS_INLINE void ActivateProcess(size_t proc_idx, u32 proc_id) {
cpu::SwitchProcess(s_ttbr0_entries[proc_idx + 1], proc_id);
}
NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index);
Result Finalize();
private:
Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
Result MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
Result MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);
Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
switch (page_size) {
case L1BlockSize:
R_RETURN(this->MapL1Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
case L2ContiguousBlockSize:
entry_template.SetContiguous(true);
[[fallthrough]];
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
case L2TegraSmmuBlockSize:
#endif
case L2BlockSize:
R_RETURN(this->MapL2Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
case L3ContiguousBlockSize:
entry_template.SetContiguous(true);
[[fallthrough]];
case L3BlockSize:
R_RETURN(this->MapL3Blocks(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll);
Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);
bool MergePages(TraversalContext *context, PageLinkedList *page_list);
void MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list);
ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
Result SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
Result SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll);
Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll);
@ -224,7 +222,6 @@ namespace ams::kern::arch::arm64 {
static ALWAYS_INLINE void ClearPageTable(KVirtualAddress table) {
cpu::ClearPageToZero(GetVoidPointer(table));
cpu::DataSynchronizationBarrierInnerShareable();
}
ALWAYS_INLINE void OnTableUpdated() const {


@ -20,18 +20,22 @@
namespace ams::kern::arch::arm64 {
constexpr size_t BlocksPerContiguousBlock = 0x10;
constexpr size_t BlocksPerTable = PageSize / sizeof(u64);
constexpr size_t L1BlockSize = 1_GB;
constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize;
constexpr size_t L1ContiguousBlockSize = BlocksPerContiguousBlock * L1BlockSize;
constexpr size_t L2BlockSize = 2_MB;
constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
constexpr size_t L2ContiguousBlockSize = BlocksPerContiguousBlock * L2BlockSize;
constexpr size_t L3BlockSize = PageSize;
constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;
constexpr size_t L3ContiguousBlockSize = BlocksPerContiguousBlock * L3BlockSize;
class PageTableEntry {
public:
struct InvalidTag{};
struct TableTag{};
struct BlockTag{};
struct SeparateContiguousTag{};
enum Permission : u64 {
Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
@ -122,6 +126,25 @@ namespace ams::kern::arch::arm64 {
{
/* ... */
}
/* Construct a table. */
constexpr explicit ALWAYS_INLINE PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn, size_t ref_count)
: PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | (ref_count << 2) | 0x3)
{
/* ... */
}
/* Construct a block. */
constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig, bool page)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | (page ? ExtensionFlag_TestTableMask : ExtensionFlag_Valid))
{
/* ... */
}
constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, SeparateContiguousTag)
: PageTableEntry(attr, GetInteger(phys_addr))
{
/* ... */
}
protected:
constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
return (m_attributes >> offset) & ((1ul << count) - 1);
@ -165,14 +188,23 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->SelectBits(8, 2)); }
constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->SelectBits(2, 3)); }
constexpr ALWAYS_INLINE int GetAccessFlagInteger() const { return static_cast<int>(this->GetBits(10, 1)); }
constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
constexpr ALWAYS_INLINE int GetPageAttributeInteger() const { return static_cast<int>(this->GetBits(2, 3)); }
constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
constexpr ALWAYS_INLINE u64 GetTestTableMask() const { return (m_attributes & ExtensionFlag_TestTableMask); }
constexpr ALWAYS_INLINE bool IsBlock() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
constexpr ALWAYS_INLINE bool IsPage() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_TestTableMask; }
constexpr ALWAYS_INLINE bool IsTable() const { return (m_attributes & ExtensionFlag_TestTableMask) == 2; }
constexpr ALWAYS_INLINE bool IsEmpty() const { return (m_attributes & ExtensionFlag_TestTableMask) == 0; }
constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const { return this->SelectBits(12, 36); }
constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 1; }
constexpr ALWAYS_INLINE bool IsMappedTable() const { return this->GetBits(0, 2) == 3; }
constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }
constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
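The new IsMapped* predicates above test the raw ARMv8-A descriptor bits, which is what lets the old IsBlock/IsTable names be repurposed for the kernel's software view of an entry. A standalone restatement of the bit tests:

    #include <cstdint>

    /* bits[1:0] == 0b01: block descriptor (valid at L1/L2);
     * bits[1:0] == 0b11: table descriptor at L1/L2, page descriptor at L3;
     * bit[0]    == 0   : invalid to hardware (software may still mark it mapped). */
    constexpr bool IsMappedBlockBits(std::uint64_t attr) { return (attr & 0b11) == 0b01; }
    constexpr bool IsMappedTableBits(std::uint64_t attr) { return (attr & 0b11) == 0b11; }

    static_assert( IsMappedBlockBits(0x40000000 | 0b01)); /* 1 GB L1 block */
    static_assert( IsMappedTableBits(0x12345000 | 0b11)); /* next-level table */
    static_assert(!IsMappedBlockBits(0x40000000));        /* valid bit clear: unmapped */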
@ -186,8 +218,16 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast<u64>(MappingFlag_Mapped == (1 << 0))); this->SetBit(0, m); return *this; }
constexpr ALWAYS_INLINE size_t GetTableReferenceCount() const { return this->GetBits(2, 10); }
constexpr ALWAYS_INLINE decltype(auto) SetTableReferenceCount(size_t num) { this->SetBits(2, 10, num); return *this; }
constexpr ALWAYS_INLINE decltype(auto) OpenTableReferences(size_t num) { MESOSPHERE_ASSERT(this->GetTableReferenceCount() + num <= BlocksPerTable + 1); return this->SetTableReferenceCount(this->GetTableReferenceCount() + num); }
constexpr ALWAYS_INLINE decltype(auto) CloseTableReferences(size_t num) { MESOSPHERE_ASSERT(this->GetTableReferenceCount() >= num); return this->SetTableReferenceCount(this->GetTableReferenceCount() - num); }
constexpr ALWAYS_INLINE decltype(auto) SetValid() { MESOSPHERE_ASSERT((m_attributes & ExtensionFlag_Valid) == 0); m_attributes |= ExtensionFlag_Valid; return *this; }
constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
return m_attributes & BaseMask;
}
@ -196,10 +236,45 @@ namespace ams::kern::arch::arm64 {
return (m_attributes & BaseMaskForMerge) == attr;
}
constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguousMask(size_t idx) {
constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < BlocksPerContiguousBlock - 1) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}
constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguous(size_t idx) const {
return m_attributes & GetEntryTemplateForSeparateContiguousMask(idx);
}
static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateMask(size_t idx) {
constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < BlocksPerContiguousBlock) {
return BaseMask | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < BlocksPerTable - 1) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}
constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparate(size_t idx) const {
return m_attributes & GetEntryTemplateForSeparateMask(idx);
}
constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafe() const {
return m_attributes;
}
constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
return m_attributes;
}
protected:
constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
return m_attributes;
@ -229,7 +304,7 @@ namespace ams::kern::arch::arm64 {
}
constexpr explicit ALWAYS_INLINE L1PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
{
/* ... */
}
@ -291,7 +366,7 @@ namespace ams::kern::arch::arm64 {
}
constexpr explicit ALWAYS_INLINE L2PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionFlag_Valid)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
{
/* ... */
}
@ -356,12 +431,13 @@ namespace ams::kern::arch::arm64 {
constexpr explicit ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr explicit ALWAYS_INLINE L3PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | static_cast<u64>(ExtensionFlag_TestTableMask))
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr ALWAYS_INLINE bool IsBlock() const { return (GetRawAttributes() & ExtensionFlag_TestTableMask) == ExtensionFlag_TestTableMask; }
constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 3; }
constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
return this->SelectBits(12, 36);


@ -37,10 +37,17 @@ namespace ams::kern::arch::arm64 {
constexpr bool IsTailMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadTail) != 0; }
};
enum EntryLevel : u32 {
EntryLevel_L3 = 0,
EntryLevel_L2 = 1,
EntryLevel_L1 = 2,
EntryLevel_Count = 3,
};
struct TraversalContext {
const L1PageTableEntry *l1_entry;
const L2PageTableEntry *l2_entry;
const L3PageTableEntry *l3_entry;
PageTableEntry *level_entries[EntryLevel_Count];
EntryLevel level;
bool is_contiguous;
};
private:
static constexpr size_t PageBits = util::CountTrailingZeros(PageSize);
@ -53,11 +60,19 @@ namespace ams::kern::arch::arm64 {
return (value >> Offset) & ((1ul << Count) - 1);
}
static constexpr ALWAYS_INLINE u64 GetBits(u64 value, size_t offset, size_t count) {
return (value >> offset) & ((1ul << count) - 1);
}
template<size_t Offset, size_t Count>
constexpr ALWAYS_INLINE u64 SelectBits(u64 value) {
static constexpr ALWAYS_INLINE u64 SelectBits(u64 value) {
return value & (((1ul << Count) - 1) << Offset);
}
static constexpr ALWAYS_INLINE u64 SelectBits(u64 value, size_t offset, size_t count) {
return value & (((1ul << count) - 1) << offset);
}
static constexpr ALWAYS_INLINE uintptr_t GetL0Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 0), LevelBits>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetL1Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 1), LevelBits>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetL2Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 2), LevelBits>(GetInteger(addr)); }
@ -70,13 +85,14 @@ namespace ams::kern::arch::arm64 {
static constexpr ALWAYS_INLINE uintptr_t GetContiguousL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2) + 4>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetContiguousL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3) + 4>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetBlock(const PageTableEntry *pte, EntryLevel level) { return SelectBits(pte->GetRawAttributesUnsafe(), PageBits + LevelBits * level, LevelBits * (NumLevels + 1 - level)); }
static constexpr ALWAYS_INLINE uintptr_t GetOffset(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), 0, PageBits + LevelBits * level); }
static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
return KMemoryLayout::GetLinearVirtualAddress(addr);
}
ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const;
ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
public:
static constexpr ALWAYS_INLINE uintptr_t GetLevelIndex(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), PageBits + LevelBits * level, LevelBits); }
private:
L1PageTableEntry *m_table;
bool m_is_kernel;
@ -105,11 +121,17 @@ namespace ams::kern::arch::arm64 {
ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) const {
return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
}
static constexpr size_t GetBlockSize(EntryLevel level, bool contiguous = false) {
return 1 << (PageBits + LevelBits * level + 4 * contiguous);
}
public:
constexpr explicit KPageTableImpl(util::ConstantInitializeTag) : m_table(), m_is_kernel(), m_num_entries() { /* ... */ }
explicit KPageTableImpl() { /* ... */ }
size_t GetNumL1Entries() const { return m_num_entries; }
NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
L1PageTableEntry *Finalize();
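GetBlockSize above folds the table geometry into one shift; a standalone check of the arithmetic (PageBits = 12, LevelBits = 9, and contiguous runs of 16 entries add 4 bits):

    constexpr unsigned long long BlockSizeSketch(unsigned level, bool contiguous = false) {
        return 1ull << (12 + 9 * level + (contiguous ? 4 : 0));
    }
    static_assert(BlockSizeSketch(0) == 4096);                      /* EntryLevel_L3: one 4 KB page */
    static_assert(BlockSizeSketch(1, true) == 32ull * 1024 * 1024); /* contiguous L2: 32 MB */
    static_assert(BlockSizeSketch(2) == 1024ull * 1024 * 1024);     /* EntryLevel_L1: 1 GB block */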
@ -121,6 +143,17 @@ namespace ams::kern::arch::arm64 {
bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;
static bool MergePages(KVirtualAddress *out, TraversalContext *context);
void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const;
KProcessAddress GetAddressForContext(const TraversalContext *context) const {
KProcessAddress addr = m_is_kernel ? static_cast<uintptr_t>(-GetBlockSize(EntryLevel_L1)) * m_num_entries : 0;
for (u32 level = context->level; level <= EntryLevel_L1; ++level) {
addr += ((reinterpret_cast<uintptr_t>(context->level_entries[level]) / sizeof(PageTableEntry)) & (BlocksPerTable - 1)) << (PageBits + LevelBits * level);
}
return addr;
}
};
}
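GetAddressForContext reconstructs a virtual address purely from the traversal context: each level's entry pointer, reduced modulo BlocksPerTable, is that level's 9-bit index. A worked restatement of the index math (kernel-table bias omitted):

    #include <cstdint>

    /* Each level contributes index << (PageBits + LevelBits * level). */
    constexpr std::uint64_t ReassembleSketch(std::uint64_t l1, std::uint64_t l2, std::uint64_t l3) {
        return (l1 << 30) | (l2 << 21) | (l3 << 12);
    }
    static_assert(ReassembleSketch(1, 2, 3) == 0x40403000); /* L1=1, L2=2, L3=3 */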


@ -23,13 +23,13 @@ namespace ams::kern::arch::arm64 {
private:
KPageTable m_page_table;
public:
void Activate(u64 id) {
void Activate(size_t process_index, u64 id) {
/* Activate the page table with the specified contextidr. */
m_page_table.Activate(id);
m_page_table.ActivateProcess(process_index, id);
}
Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit));
Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) {
R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit, process_index));
}
void Finalize() { m_page_table.Finalize(); }
@ -154,8 +154,8 @@ namespace ams::kern::arch::arm64 {
R_RETURN(m_page_table.InvalidateCurrentProcessDataCache(address, size));
}
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
R_RETURN(m_page_table.ReadDebugMemory(buffer, address, size));
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size, bool force_debug_prod) {
R_RETURN(m_page_table.ReadDebugMemory(buffer, address, size, force_debug_prod));
}
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state) {

View file

@@ -29,8 +29,7 @@ namespace ams::kern::arch::arm64 {
NOINLINE void Initialize(s32 core_id);
void Activate() {
/* Activate, using process id = 0xFFFFFFFF */
m_page_table.Activate(0xFFFFFFFF);
m_page_table.ActivateKernel();
}
void ActivateForInit() {

View file

@@ -21,10 +21,22 @@ namespace ams::kern::arch::arm64 {
void UserspaceAccessFunctionAreaBegin();
class UserspaceAccess {
private:
static bool CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(void *dst, const void *src);
public:
static bool CopyMemoryFromUserSize32BitWithSupervisorAccess(void *dst, const void *src) {
/* Check that the address is within the valid userspace range. */
if (const uintptr_t src_uptr = reinterpret_cast<uintptr_t>(src); src_uptr < ams::svc::AddressNullGuard32Size || (src_uptr + sizeof(u32) - 1) >= ams::svc::AddressMemoryRegion39Size) {
return false;
}
return CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(dst, src);
}
static bool CopyMemoryFromUser(void *dst, const void *src, size_t size);
static bool CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size);
static bool CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size);
static bool CopyMemoryFromUserSize64Bit(void *dst, const void *src);
static bool CopyMemoryFromUserSize32Bit(void *dst, const void *src);
static s32 CopyStringFromUser(void *dst, const void *src, size_t size);

View file

@@ -24,4 +24,8 @@ namespace ams::kern {
constexpr inline size_t MainMemorySize = 4_GB;
constexpr inline size_t MainMemorySizeMax = 8_GB;
constexpr inline u32 MinimumMemoryManagerAlignmentShifts[] = {
0, 0, 0, 0
};
}

View file

@@ -45,6 +45,7 @@ namespace ams::kern::board::nintendo::nx {
};
public:
/* Initialization. */
static NOINLINE void ConfigureKTargetSystem();
static NOINLINE void InitializePhase1();
static NOINLINE void InitializePhase2();
static NOINLINE u32 GetCreateProcessMemoryPool();

View file

@@ -39,14 +39,16 @@ namespace ams::kern {
}
}
Result WaitForAddress(uintptr_t addr, ams::svc::ArbitrationType type, s32 value, s64 timeout) {
Result WaitForAddress(uintptr_t addr, ams::svc::ArbitrationType type, s64 value, s64 timeout) {
switch (type) {
case ams::svc::ArbitrationType_WaitIfLessThan:
R_RETURN(this->WaitIfLessThan(addr, value, false, timeout));
R_RETURN(this->WaitIfLessThan(addr, static_cast<s32>(value), false, timeout));
case ams::svc::ArbitrationType_DecrementAndWaitIfLessThan:
R_RETURN(this->WaitIfLessThan(addr, value, true, timeout));
R_RETURN(this->WaitIfLessThan(addr, static_cast<s32>(value), true, timeout));
case ams::svc::ArbitrationType_WaitIfEqual:
R_RETURN(this->WaitIfEqual(addr, value, timeout));
R_RETURN(this->WaitIfEqual(addr, static_cast<s32>(value), timeout));
case ams::svc::ArbitrationType_WaitIfEqual64:
R_RETURN(this->WaitIfEqual64(addr, value, timeout));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
@@ -56,6 +58,7 @@ namespace ams::kern {
Result SignalAndModifyByWaitingCountIfEqual(uintptr_t addr, s32 value, s32 count);
Result WaitIfLessThan(uintptr_t addr, s32 value, bool decrement, s64 timeout);
Result WaitIfEqual(uintptr_t addr, s32 value, s64 timeout);
Result WaitIfEqual64(uintptr_t addr, s64 value, s64 timeout);
};
}
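Since WaitForAddress now takes its operand as s64, the static_casts above define what the pre-19.0.0 arbitration types observe; a minimal sketch of that semantic (ObservedValue is illustrative, not part of the kernel):
#include <cstdint>
/* The 32-bit types compare against the low 32 bits of the operand (as a signed
   value), while ArbitrationType_WaitIfEqual64 compares the full 64 bits. */
constexpr int64_t ObservedValue(int64_t value, bool is_64bit_type) {
    return is_64bit_type ? value : static_cast<int32_t>(value);
}
/* Two's-complement narrowing assumed (well-defined since C++20): */
static_assert(ObservedValue(INT64_C(0x100000005), false) == 5);
static_assert(ObservedValue(INT64_C(0x100000005), true) != 5);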

View file

@@ -37,8 +37,8 @@ namespace ams::kern {
size_t m_size;
Type m_type;
public:
static uintptr_t GetAddressSpaceStart(size_t width, Type type);
static size_t GetAddressSpaceSize(size_t width, Type type);
static uintptr_t GetAddressSpaceStart(ams::svc::CreateProcessFlag flags, Type type);
static size_t GetAddressSpaceSize(ams::svc::CreateProcessFlag flags, Type type);
static void SetAddressSpaceSize(size_t width, Type type, size_t size);

View file

@@ -168,9 +168,10 @@ namespace ams::kern {
struct DebugFlags {
using IdBits = Field<0, CapabilityId<CapabilityType::DebugFlags> + 1>;
DEFINE_FIELD(AllowDebug, IdBits, 1, bool);
DEFINE_FIELD(ForceDebug, AllowDebug, 1, bool);
DEFINE_FIELD(Reserved, ForceDebug, 13);
DEFINE_FIELD(AllowDebug, IdBits, 1, bool);
DEFINE_FIELD(ForceDebugProd, AllowDebug, 1, bool);
DEFINE_FIELD(ForceDebug, ForceDebugProd, 1, bool);
DEFINE_FIELD(Reserved, ForceDebug, 12);
};
#undef DEFINE_FIELD
@@ -255,6 +256,10 @@ namespace ams::kern {
return m_debug_capabilities.Get<DebugFlags::AllowDebug>();
}
constexpr bool CanForceDebugProd() const {
return m_debug_capabilities.Get<DebugFlags::ForceDebugProd>();
}
constexpr bool CanForceDebug() const {
return m_debug_capabilities.Get<DebugFlags::ForceDebug>();
}
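For reference, assuming CapabilityId<CapabilityType::DebugFlags> == 16 (the usual (1 << id) - 1 descriptor encoding; the value is not shown in this diff), the sequential DEFINE_FIELDs above produce this layout:
/* Illustrative bit positions within the DebugFlags capability word: */
enum DebugFlagsBitSketch {
    DebugFlagsBitSketch_AllowDebug = 17,
    DebugFlagsBitSketch_ForceDebugProd = 18, /* new in this change */
    DebugFlagsBitSketch_ForceDebug = 19,     /* shifted up from 18 */
    /* Bits 20..31 are Reserved (narrowed from 13 bits to 12) and must be zero;
       see the SetDebugFlags validation later in this commit. */
};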

View file

@@ -32,6 +32,7 @@ namespace ams::kern {
KLightLock m_lock;
KProcess::State m_old_process_state;
bool m_is_attached;
bool m_is_force_debug_prod;
public:
explicit KDebugBase() { /* ... */ }
protected:
@@ -62,6 +63,10 @@ namespace ams::kern {
return m_is_attached;
}
ALWAYS_INLINE bool IsForceDebugProd() const {
return m_is_force_debug_prod;
}
ALWAYS_INLINE bool OpenProcess() {
return m_process_holder.Open();
}

View file

@@ -200,7 +200,8 @@ namespace ams::kern {
KMemoryBlockDisableMergeAttribute_DeviceLeft = (1u << 1),
KMemoryBlockDisableMergeAttribute_IpcLeft = (1u << 2),
KMemoryBlockDisableMergeAttribute_Locked = (1u << 3),
KMemoryBlockDisableMergeAttribute_DeviceRight = (1u << 4),
/* ... */
KMemoryBlockDisableMergeAttribute_DeviceRight = (1u << 5),
KMemoryBlockDisableMergeAttribute_AllLeft = KMemoryBlockDisableMergeAttribute_Normal | KMemoryBlockDisableMergeAttribute_DeviceLeft | KMemoryBlockDisableMergeAttribute_IpcLeft | KMemoryBlockDisableMergeAttribute_Locked,
KMemoryBlockDisableMergeAttribute_AllRight = KMemoryBlockDisableMergeAttribute_DeviceRight,
@@ -288,18 +289,18 @@ namespace ams::kern {
class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
private:
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
KProcessAddress m_address;
size_t m_num_pages;
KMemoryState m_memory_state;
u16 m_ipc_lock_count;
u16 m_device_use_count;
u16 m_ipc_disable_merge_count;
KMemoryPermission m_permission;
KMemoryPermission m_original_permission;
KMemoryAttribute m_attribute;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
KProcessAddress m_address;
u32 m_num_pages;
KMemoryState m_memory_state;
u16 m_ipc_lock_count;
u16 m_ipc_disable_merge_count;
u16 m_device_use_count;
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
public:
static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) {
if (lhs.GetAddress() < rhs.GetAddress()) {
@@ -343,6 +344,10 @@ namespace ams::kern {
return m_ipc_disable_merge_count;
}
constexpr u16 GetDeviceUseCount() const {
return m_device_use_count;
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
@@ -374,16 +379,15 @@ namespace ams::kern {
public:
explicit KMemoryBlock() { /* ... */ }
constexpr KMemoryBlock(util::ConstantInitializeTag, KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr)
: util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(util::ConstantInitialize), m_device_disable_merge_left_count(),
m_device_disable_merge_right_count(), m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p), m_original_permission(KMemoryPermission_None),
m_attribute(attr), m_disable_merge_attribute()
constexpr KMemoryBlock(util::ConstantInitializeTag, KProcessAddress addr, u32 np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr)
: util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(util::ConstantInitialize), m_permission(p), m_original_permission(KMemoryPermission_None),
m_attribute(attr), m_disable_merge_attribute(), m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
m_ipc_disable_merge_count(), m_device_use_count(0), m_device_disable_merge_left_count(), m_device_disable_merge_right_count()
{
/* ... */
}
constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) {
constexpr void Initialize(KProcessAddress addr, u32 np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) {
MESOSPHERE_ASSERT_THIS();
m_device_disable_merge_left_count = 0;
m_device_disable_merge_right_count = 0;

View file

@@ -164,6 +164,7 @@ namespace ams::kern {
size_t m_num_managers;
u64 m_optimized_process_ids[Pool_Count];
bool m_has_optimized_process[Pool_Count];
s32 m_min_heap_indexes[Pool_Count];
private:
Impl &GetManager(KPhysicalAddress address) {
return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()];
@@ -188,12 +189,12 @@ namespace ams::kern {
Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index);
public:
KMemoryManager()
: m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process()
: m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process(), m_min_heap_indexes()
{
/* ... */
}
NOINLINE void Initialize(KVirtualAddress management_region, size_t management_region_size);
NOINLINE void Initialize(KVirtualAddress management_region, size_t management_region_size, const u32 *min_align_shifts);
NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool);
NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
@@ -299,6 +300,10 @@ namespace ams::kern {
manager->DumpFreeList();
}
}
size_t GetMinimumAlignment(Pool pool) {
return KPageHeap::GetBlockSize(m_min_heap_indexes[pool]);
}
public:
static size_t CalculateManagementOverheadSize(size_t region_size) {
return Impl::CalculateManagementOverheadSize(region_size);

View file

@@ -318,7 +318,7 @@ namespace ams::kern {
R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr));
}
Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryState(KMemoryBlockManager::const_iterator it, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
@@ -328,6 +328,8 @@ namespace ams::kern {
R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
}
bool CanReadWriteDebugMemory(KProcessAddress addr, size_t size, bool force_debug_prod);
Result LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr);
Result UnlockMemory(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr, const KPageGroup *pg);
@@ -421,7 +423,7 @@ namespace ams::kern {
Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size);
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size, bool force_debug_prod);
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state);
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size);

View file

@@ -206,6 +206,10 @@ namespace ams::kern {
return m_capabilities.IsPermittedDebug();
}
constexpr bool CanForceDebugProd() const {
return m_capabilities.CanForceDebugProd();
}
constexpr bool CanForceDebug() const {
return m_capabilities.CanForceDebug();
}
@@ -360,7 +364,7 @@ namespace ams::kern {
R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
}
Result WaitAddressArbiter(uintptr_t address, ams::svc::ArbitrationType arb_type, s32 value, s64 timeout) {
Result WaitAddressArbiter(uintptr_t address, ams::svc::ArbitrationType arb_type, s64 value, s64 timeout) {
R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}
@@ -374,7 +378,7 @@ namespace ams::kern {
/* Update the current page table. */
if (next_process) {
next_process->GetPageTable().Activate(next_process->GetProcessId());
next_process->GetPageTable().Activate(next_process->GetSlabIndex(), next_process->GetProcessId());
} else {
Kernel::GetKernelPageTable().Activate();
}

View file

@@ -69,6 +69,7 @@ namespace ams::kern {
static NOINLINE void InitializePhase1Base(u64 seed);
public:
/* Initialization. */
static NOINLINE void ConfigureKTargetSystem();
static NOINLINE void InitializePhase1();
static NOINLINE void InitializePhase2();
static NOINLINE u32 GetCreateProcessMemoryPool();

View file

@@ -24,29 +24,36 @@ namespace ams::kern {
friend class KSystemControlBase;
friend class KSystemControl;
private:
static inline constinit bool s_is_debug_mode;
static inline constinit bool s_enable_debug_logging;
static inline constinit bool s_enable_user_exception_handlers;
static inline constinit bool s_enable_debug_memory_fill;
static inline constinit bool s_enable_user_pmu_access;
static inline constinit bool s_enable_kernel_debugging;
static inline constinit bool s_enable_dynamic_resource_limits;
struct KTargetSystemData {
bool is_debug_mode;
bool enable_debug_logging;
bool enable_user_exception_handlers;
bool enable_debug_memory_fill;
bool enable_user_pmu_access;
bool enable_kernel_debugging;
bool enable_dynamic_resource_limits;
};
private:
static ALWAYS_INLINE void SetIsDebugMode(bool en) { s_is_debug_mode = en; }
static ALWAYS_INLINE void EnableDebugLogging(bool en) { s_enable_debug_logging = en; }
static ALWAYS_INLINE void EnableUserExceptionHandlers(bool en) { s_enable_user_exception_handlers = en; }
static ALWAYS_INLINE void EnableDebugMemoryFill(bool en) { s_enable_debug_memory_fill = en; }
static ALWAYS_INLINE void EnableUserPmuAccess(bool en) { s_enable_user_pmu_access = en; }
static ALWAYS_INLINE void EnableKernelDebugging(bool en) { s_enable_kernel_debugging = en; }
static ALWAYS_INLINE void EnableDynamicResourceLimits(bool en) { s_enable_dynamic_resource_limits = en; }
static inline constinit bool s_is_initialized = false;
static inline constinit const volatile KTargetSystemData s_data = {
.is_debug_mode = true,
.enable_debug_logging = true,
.enable_user_exception_handlers = true,
.enable_debug_memory_fill = true,
.enable_user_pmu_access = true,
.enable_kernel_debugging = true,
.enable_dynamic_resource_limits = false,
};
private:
static ALWAYS_INLINE void SetInitialized() { s_is_initialized = true; }
public:
static ALWAYS_INLINE bool IsDebugMode() { return s_is_debug_mode; }
static ALWAYS_INLINE bool IsDebugLoggingEnabled() { return s_enable_debug_logging; }
static ALWAYS_INLINE bool IsUserExceptionHandlersEnabled() { return s_enable_user_exception_handlers; }
static ALWAYS_INLINE bool IsDebugMemoryFillEnabled() { return s_enable_debug_memory_fill; }
static ALWAYS_INLINE bool IsUserPmuAccessEnabled() { return s_enable_user_pmu_access; }
static ALWAYS_INLINE bool IsKernelDebuggingEnabled() { return s_enable_kernel_debugging; }
static ALWAYS_INLINE bool IsDynamicResourceLimitsEnabled() { return s_enable_dynamic_resource_limits; }
static ALWAYS_INLINE bool IsDebugMode() { return s_is_initialized && s_data.is_debug_mode; }
static ALWAYS_INLINE bool IsDebugLoggingEnabled() { return s_is_initialized && s_data.enable_debug_logging; }
static ALWAYS_INLINE bool IsUserExceptionHandlersEnabled() { return s_is_initialized && s_data.enable_user_exception_handlers; }
static ALWAYS_INLINE bool IsDebugMemoryFillEnabled() { return s_is_initialized && s_data.enable_debug_memory_fill; }
static ALWAYS_INLINE bool IsUserPmuAccessEnabled() { return s_is_initialized && s_data.enable_user_pmu_access; }
static ALWAYS_INLINE bool IsKernelDebuggingEnabled() { return s_is_initialized && s_data.enable_kernel_debugging; }
static ALWAYS_INLINE bool IsDynamicResourceLimitsEnabled() { return s_is_initialized && s_data.enable_dynamic_resource_limits; }
};
}
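A minimal sketch of the gating pattern above: every getter ANDs with s_is_initialized, so each flag reads as false until ConfigureKTargetSystem() has filled in s_data and InitializePhase1() calls SetInitialized():
struct TargetSystemSketch {
    bool is_initialized = false;
    bool is_debug_mode = true; /* pre-boot default, mirroring s_data above */
    bool IsDebugMode() const { return is_initialized && is_debug_mode; }
};
/* TargetSystemSketch{}.IsDebugMode() is false no matter how is_debug_mode is
   configured, until is_initialized becomes true. */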

View file

@@ -48,6 +48,22 @@ namespace ams::kern {
KProcess *GetOwner() const { return m_owner; }
KProcessAddress GetSourceAddress() { return m_address; }
size_t GetSize() const { return m_is_initialized ? GetReference(m_page_group).GetNumPages() * PageSize : 0; }
constexpr uintptr_t GetHint() const {
/* Get memory size. */
const size_t size = this->GetSize();
/* TODO: Is this architecture specific? */
if (size >= 2_MB) {
return GetInteger(m_address) & (2_MB - 1);
} else if (size >= 64_KB) {
return GetInteger(m_address) & (64_KB - 1);
} else if (size >= 4_KB) {
return GetInteger(m_address) & (4_KB - 1);
} else {
return 0;
}
}
};
}
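A self-contained mirror of GetHint with the size thresholds spelled out, plus one worked value (the address is hypothetical):
#include <cstddef>
#include <cstdint>
constexpr uintptr_t HintFor(uintptr_t addr, size_t size) {
    if (size >= 2 * 1024 * 1024) { return addr & (2 * 1024 * 1024 - 1); }
    if (size >= 64 * 1024) { return addr & (64 * 1024 - 1); }
    if (size >= 4 * 1024) { return addr & (4 * 1024 - 1); }
    return 0;
}
/* A 3MB shared memory at source address 0x12345000 hints 0x145000: the receiver
   should map at the same offset modulo 2MB, so block/contiguous PTEs stay usable. */
static_assert(HintFor(0x12345000, 3 * 1024 * 1024) == 0x145000);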

View file

@@ -20,7 +20,7 @@ endif
DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE
SETTINGS := $(ATMOSPHERE_SETTINGS) $(ATMOSPHERE_OPTIMIZATION_FLAG) -mgeneral-regs-only -ffixed-x18 -Wextra -Werror -fno-non-call-exceptions
CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE)
CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit -flto
CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit
ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE)
SOURCES += $(foreach v,$(call ALL_SOURCE_DIRS,../libvapours/source),$(if $(findstring ../libvapours/source/sdmmc,$v),,$v))

View file

@@ -79,6 +79,12 @@ namespace ams::kern::arch::arm {
/* Setup all interrupt lines. */
SetupInterruptLines(core_id);
/* Clear pointers, if needed. */
if (core_id == 0) {
m_gicd = nullptr;
m_gicc = nullptr;
}
}
void KInterruptController::SaveCoreLocal(LocalState *state) const {

View file

@@ -100,6 +100,8 @@ namespace ams::kern::arch::arm64 {
u32 insn_value = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc), sizeof(insn_value))) {
insn = insn_value;
} else if (KTargetSystem::IsDebugMode() && (context->pc & 3) == 0 && UserspaceAccess::CopyMemoryFromUserSize32BitWithSupervisorAccess(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc))) {
insn = insn_value;
} else {
insn = 0;
}
@@ -112,33 +114,6 @@ namespace ams::kern::arch::arm64 {
bool should_process_user_exception = KTargetSystem::IsUserExceptionHandlersEnabled();
const u64 ec = (esr >> 26) & 0x3F;
switch (ec) {
case EsrEc_Unknown:
case EsrEc_IllegalExecution:
case EsrEc_Svc32:
case EsrEc_Svc64:
case EsrEc_PcAlignmentFault:
case EsrEc_SpAlignmentFault:
case EsrEc_SErrorInterrupt:
case EsrEc_BreakPointEl0:
case EsrEc_SoftwareStepEl0:
case EsrEc_WatchPointEl0:
case EsrEc_BkptInstruction:
case EsrEc_BrkInstruction:
break;
default:
{
/* If the fault address's state is KMemoryState_Code and the user can't read the address, force processing exception. */
KMemoryInfo info;
ams::svc::PageInfo pi;
if (R_SUCCEEDED(cur_process.GetPageTable().QueryInfo(std::addressof(info), std::addressof(pi), far))) {
if (info.GetState() == KMemoryState_Code && ((info.GetPermission() & KMemoryPermission_UserRead) != KMemoryPermission_UserRead)) {
should_process_user_exception = true;
}
}
}
break;
}
/* In the event that we return from this exception, we want SPSR.SS set so that we advance an instruction if single-stepping. */
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)

View file

@@ -15,6 +15,28 @@
*/
#include <mesosphere.hpp>
/* <stratosphere/rocrt/rocrt.hpp> */
namespace ams::rocrt {
constexpr inline const u32 ModuleHeaderVersion = util::FourCC<'M','O','D','0'>::Code;
struct ModuleHeader {
u32 signature;
u32 dynamic_offset;
u32 bss_start_offset;
u32 bss_end_offset;
u32 exception_info_start_offset;
u32 exception_info_end_offset;
u32 module_offset;
};
struct ModuleHeaderLocation {
u32 pad;
u32 header_offset;
};
}
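A hedged sketch of how such a header is found at runtime, assuming the standard MOD0 convention that the u32 at module_base + 4 holds the offset from the base to the header (FindModuleHeader is illustrative, not part of this commit):
#include <cstdint>
inline const ams::rocrt::ModuleHeader *FindModuleHeader(const uint8_t *module_base) {
    const auto *loc = reinterpret_cast<const ams::rocrt::ModuleHeaderLocation *>(module_base);
    const auto *mod = reinterpret_cast<const ams::rocrt::ModuleHeader *>(module_base + loc->header_offset);
    return (mod->signature == ams::rocrt::ModuleHeaderVersion) ? mod : nullptr;
}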
namespace ams::kern::arch::arm64 {
namespace {
@@ -657,7 +679,7 @@ namespace ams::kern::arch::arm64 {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
dyn_address = module.start_address + mod_offset + temp_32;
dyn_address = base_address + mod_offset + temp_32;
}
/* Locate tables inside .dyn. */

File diff suppressed because it is too large

View file

@@ -27,109 +27,70 @@ namespace ams::kern::arch::arm64 {
m_table = static_cast<L1PageTableEntry *>(tb);
m_is_kernel = false;
m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
/* Page table entries created by KInitialPageTable need to be iterated and modified to ensure KPageTable invariants. */
PageTableEntry *level_entries[EntryLevel_Count] = { nullptr, nullptr, m_table };
u32 level = EntryLevel_L1;
while (level != EntryLevel_L1 || (level_entries[EntryLevel_L1] - static_cast<PageTableEntry *>(m_table)) < m_num_entries) {
/* Get the pte; it must never have the validity-extension flag set. */
auto *pte = level_entries[level];
MESOSPHERE_ASSERT((pte->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_Valid) == 0);
/* While we're a table, recurse, fixing up the reference counts. */
while (level > EntryLevel_L3 && pte->IsMappedTable()) {
/* Count how many references are in the table. */
auto *table = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(pte->GetTable()));
size_t ref_count = 0;
for (size_t i = 0; i < BlocksPerTable; ++i) {
if (table[i].IsMapped()) {
++ref_count;
}
}
/* Set the reference count for our new page, adding one additional uncloseable reference; kernel pages must never be unreferenced. */
pte->SetTableReferenceCount(ref_count + 1).SetValid();
/* Iterate downwards. */
level -= 1;
level_entries[level] = table;
pte = level_entries[level];
/* Check that the entry isn't unexpected. */
MESOSPHERE_ASSERT((pte->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_Valid) == 0);
}
/* We're dealing with some block. If it's mapped, set it valid. */
if (pte->IsMapped()) {
pte->SetValid();
}
/* Advance. */
while (true) {
/* Advance to the next entry at the current level. */
++level_entries[level];
if (!util::IsAligned(reinterpret_cast<uintptr_t>(level_entries[level]), PageSize)) {
break;
}
/* If we're at the end of a level, advance upwards. */
level_entries[level++] = nullptr;
if (level > EntryLevel_L1) {
return;
}
}
}
}
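One numeric consequence of the fix-up rule above, assuming 4KB tables of 8-byte entries (BlocksPerTable = 512): a fully-mapped table leaves the loop with reference count 512 + 1 = 513, the extra count being the uncloseable reference that keeps kernel page tables from ever being freed.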
L1PageTableEntry *KPageTableImpl::Finalize() {
return m_table;
}
bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const {
/* Set the L3 entry. */
out_context->l3_entry = l3_entry;
if (l3_entry->IsBlock()) {
/* Set the output entry. */
out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1));
if (l3_entry->IsContiguous()) {
out_entry->block_size = L3ContiguousBlockSize;
} else {
out_entry->block_size = L3BlockSize;
}
out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
out_entry->attr = 0;
return true;
} else {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L3BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
return false;
}
}
bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const {
/* Set the L2 entry. */
out_context->l2_entry = l2_entry;
if (l2_entry->IsBlock()) {
/* Set the output entry. */
out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1));
if (l2_entry->IsContiguous()) {
out_entry->block_size = L2ContiguousBlockSize;
} else {
out_entry->block_size = L2BlockSize;
}
out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
out_entry->attr = 0;
/* Set the output context. */
out_context->l3_entry = nullptr;
return true;
} else if (l2_entry->IsTable()) {
return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr);
} else {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L2BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
out_context->l3_entry = nullptr;
return false;
}
}
bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const {
/* Set the L1 entry. */
out_context->l1_entry = l1_entry;
if (l1_entry->IsBlock()) {
/* Set the output entry. */
out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1));
if (l1_entry->IsContiguous()) {
out_entry->block_size = L1ContiguousBlockSize;
} else {
out_entry->block_size = L1BlockSize;
}
out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits();
out_entry->attr = 0;
/* Set the output context. */
out_context->l2_entry = nullptr;
out_context->l3_entry = nullptr;
return true;
} else if (l1_entry->IsTable()) {
return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr);
} else {
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L1BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
out_context->l2_entry = nullptr;
out_context->l3_entry = nullptr;
return false;
}
}
bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const {
/* Setup invalid defaults. */
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L1BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
out_context->l1_entry = m_table + m_num_entries;
out_context->l2_entry = nullptr;
out_context->l3_entry = nullptr;
*out_entry = {};
*out_context = {};
/* Validate that we can read the actual entry. */
const size_t l0_index = GetL0Index(address);
@@ -146,125 +107,78 @@ namespace ams::kern::arch::arm64 {
}
}
/* Extract the entry. */
const bool valid = this->ExtractL1Entry(out_entry, out_context, this->GetL1Entry(address), address);
/* Get the L1 entry, and check if it's a table. */
out_context->level_entries[EntryLevel_L1] = this->GetL1Entry(address);
if (out_context->level_entries[EntryLevel_L1]->IsMappedTable()) {
/* Get the L2 entry, and check if it's a table. */
out_context->level_entries[EntryLevel_L2] = this->GetL2EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L1]->GetTable()), address);
if (out_context->level_entries[EntryLevel_L2]->IsMappedTable()) {
/* Get the L3 entry. */
out_context->level_entries[EntryLevel_L3] = this->GetL3EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L2]->GetTable()), address);
/* Update the context for next traversal. */
switch (out_entry->block_size) {
case L1ContiguousBlockSize:
out_context->l1_entry += (L1ContiguousBlockSize / L1BlockSize) - GetContiguousL1Offset(address) / L1BlockSize;
break;
case L1BlockSize:
out_context->l1_entry += 1;
break;
case L2ContiguousBlockSize:
out_context->l1_entry += 1;
out_context->l2_entry += (L2ContiguousBlockSize / L2BlockSize) - GetContiguousL2Offset(address) / L2BlockSize;
break;
case L2BlockSize:
out_context->l1_entry += 1;
out_context->l2_entry += 1;
break;
case L3ContiguousBlockSize:
out_context->l1_entry += 1;
out_context->l2_entry += 1;
out_context->l3_entry += (L3ContiguousBlockSize / L3BlockSize) - GetContiguousL3Offset(address) / L3BlockSize;
break;
case L3BlockSize:
out_context->l1_entry += 1;
out_context->l2_entry += 1;
out_context->l3_entry += 1;
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
/* It's either a page or not. */
out_context->level = EntryLevel_L3;
} else {
/* Not an L2 table, so possibly an L2 block. */
out_context->level = EntryLevel_L2;
}
} else {
/* Not an L1 table, so possibly an L1 block. */
out_context->level = EntryLevel_L1;
}
return valid;
/* Determine other fields. */
const auto *pte = out_context->level_entries[out_context->level];
out_context->is_contiguous = pte->IsContiguous();
out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits();
out_entry->attr = 0;
out_entry->phys_addr = this->GetBlock(pte, out_context->level) + this->GetOffset(address, out_context->level);
out_entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * out_context->level + 4 * out_context->is_contiguous);
return out_context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
}
bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const {
bool valid = false;
/* Advance entry. */
auto *cur_pte = context->level_entries[context->level];
auto *next_pte = reinterpret_cast<PageTableEntry *>(context->is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(cur_pte), BlocksPerContiguousBlock * sizeof(PageTableEntry)) + BlocksPerContiguousBlock * sizeof(PageTableEntry) : reinterpret_cast<uintptr_t>(cur_pte) + sizeof(PageTableEntry));
/* Check if we're not at the end of an L3 table. */
if (!util::IsAligned(reinterpret_cast<uintptr_t>(context->l3_entry), PageSize)) {
valid = this->ExtractL3Entry(out_entry, context, context->l3_entry, Null<KProcessAddress>);
/* Set the pte. */
context->level_entries[context->level] = next_pte;
switch (out_entry->block_size) {
case L3ContiguousBlockSize:
context->l3_entry += (L3ContiguousBlockSize / L3BlockSize);
break;
case L3BlockSize:
context->l3_entry += 1;
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
} else if (!util::IsAligned(reinterpret_cast<uintptr_t>(context->l2_entry), PageSize)) {
/* We're not at the end of an L2 table. */
valid = this->ExtractL2Entry(out_entry, context, context->l2_entry, Null<KProcessAddress>);
/* Advance appropriately. */
while (context->level < EntryLevel_L1 && util::IsAligned(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), PageSize)) {
/* Advance the above table by one entry. */
context->level_entries[context->level + 1]++;
context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) + 1);
}
switch (out_entry->block_size) {
case L2ContiguousBlockSize:
context->l2_entry += (L2ContiguousBlockSize / L2BlockSize);
break;
case L2BlockSize:
context->l2_entry += 1;
break;
case L3ContiguousBlockSize:
context->l2_entry += 1;
context->l3_entry += (L3ContiguousBlockSize / L3BlockSize);
break;
case L3BlockSize:
context->l2_entry += 1;
context->l3_entry += 1;
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
} else {
/* We need to update the l1 entry. */
const size_t l1_index = context->l1_entry - m_table;
if (l1_index < m_num_entries) {
valid = this->ExtractL1Entry(out_entry, context, context->l1_entry, Null<KProcessAddress>);
} else {
/* Invalid, end traversal. */
out_entry->phys_addr = Null<KPhysicalAddress>;
out_entry->block_size = L1BlockSize;
out_entry->sw_reserved_bits = 0;
out_entry->attr = 0;
context->l1_entry = m_table + m_num_entries;
context->l2_entry = nullptr;
context->l3_entry = nullptr;
/* Check if we've hit the end of the L1 table. */
if (context->level == EntryLevel_L1) {
if (context->level_entries[EntryLevel_L1] - static_cast<const PageTableEntry *>(m_table) >= m_num_entries) {
*context = {};
*out_entry = {};
return false;
}
switch (out_entry->block_size) {
case L1ContiguousBlockSize:
context->l1_entry += (L1ContiguousBlockSize / L1BlockSize);
break;
case L1BlockSize:
context->l1_entry += 1;
break;
case L2ContiguousBlockSize:
context->l1_entry += 1;
context->l2_entry += (L2ContiguousBlockSize / L2BlockSize);
break;
case L2BlockSize:
context->l1_entry += 1;
context->l2_entry += 1;
break;
case L3ContiguousBlockSize:
context->l1_entry += 1;
context->l2_entry += 1;
context->l3_entry += (L3ContiguousBlockSize / L3BlockSize);
break;
case L3BlockSize:
context->l1_entry += 1;
context->l2_entry += 1;
context->l3_entry += 1;
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
return valid;
/* We may have advanced to a new table, and if we have we should descend. */
while (context->level > EntryLevel_L3 && context->level_entries[context->level]->IsMappedTable()) {
context->level_entries[context->level - 1] = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(context->level_entries[context->level]->GetTable()));
context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
}
const auto *pte = context->level_entries[context->level];
context->is_contiguous = pte->IsContiguous();
out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits();
out_entry->attr = 0;
out_entry->phys_addr = this->GetBlock(pte, context->level);
out_entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);
return context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
}
bool KPageTableImpl::GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
@@ -283,32 +197,144 @@ namespace ams::kern::arch::arm64 {
}
}
/* Try to get from l1 table. */
const L1PageTableEntry *l1_entry = this->GetL1Entry(address);
if (l1_entry->IsBlock()) {
*out = l1_entry->GetBlock() + GetL1Offset(address);
return true;
} else if (!l1_entry->IsTable()) {
return false;
/* Get the L1 entry, and check if it's a table. */
const PageTableEntry *pte = this->GetL1Entry(address);
EntryLevel level = EntryLevel_L1;
if (pte->IsMappedTable()) {
/* Get the L2 entry, and check if it's a table. */
pte = this->GetL2EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address);
level = EntryLevel_L2;
if (pte->IsMappedTable()) {
pte = this->GetL3EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address);
level = EntryLevel_L3;
}
}
/* Try to get from l2 table. */
const L2PageTableEntry *l2_entry = this->GetL2Entry(l1_entry, address);
if (l2_entry->IsBlock()) {
*out = l2_entry->GetBlock() + GetL2Offset(address);
return true;
} else if (!l2_entry->IsTable()) {
return false;
const bool is_block = level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
if (is_block) {
*out = this->GetBlock(pte, level) + this->GetOffset(address, level);
} else {
*out = Null<KPhysicalAddress>;
}
/* Try to get from l3 table. */
const L3PageTableEntry *l3_entry = this->GetL3Entry(l2_entry, address);
if (l3_entry->IsBlock()) {
*out = l3_entry->GetBlock() + GetL3Offset(address);
return true;
return is_block;
}
bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context) {
/* We want to upgrade the pages by one step. */
if (context->is_contiguous) {
/* We can't merge an L1 table. */
if (context->level == EntryLevel_L1) {
return false;
}
/* We want to upgrade a contiguous mapping in a table to a block. */
PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerTable * sizeof(PageTableEntry)));
const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(static_cast<EntryLevel>(context->level + 1), false));
/* First, check that all entries are valid for us to merge. */
const u64 entry_template = pte->GetEntryTemplateForMerge();
for (size_t i = 0; i < BlocksPerTable; ++i) {
if (!pte[i].IsForMerge(entry_template | GetInteger(phys_addr + (i << (PageBits + LevelBits * context->level))) | PageTableEntry::ContigType_Contiguous | pte->GetTestTableMask())) {
return false;
}
if (i > 0 && pte[i].IsHeadOrHeadAndBodyMergeDisabled()) {
return false;
}
if (i < BlocksPerTable - 1 && pte[i].IsTailMergeDisabled()) {
return false;
}
}
/* The entries are valid for us to merge, so merge them. */
const auto *head_pte = pte;
const auto *tail_pte = pte + BlocksPerTable - 1;
const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled());
*context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false, false);
/* Update our context. */
context->is_contiguous = false;
context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) + 1);
/* Set the output to the table we just freed. */
*out = KVirtualAddress(pte);
} else {
/* We want to upgrade a non-contiguous mapping to a contiguous mapping. */
PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(context->level, true));
/* First, check that all entries are valid for us to merge. */
const u64 entry_template = pte->GetEntryTemplateForMerge();
for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
if (!pte[i].IsForMerge(entry_template | GetInteger(phys_addr + (i << (PageBits + LevelBits * context->level))) | pte->GetTestTableMask())) {
return false;
}
if (i > 0 && pte[i].IsHeadOrHeadAndBodyMergeDisabled()) {
return false;
}
if (i < BlocksPerContiguousBlock - 1 && pte[i].IsTailMergeDisabled()) {
return false;
}
}
/* The entries are valid for us to merge, so merge them. */
const auto *head_pte = pte;
const auto *tail_pte = pte + BlocksPerContiguousBlock - 1;
const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled());
for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + (i << (PageBits + LevelBits * context->level)), PageTableEntry(entry_template), sw_reserved_bits, true, context->level == EntryLevel_L3);
}
/* Update our context. */
context->level_entries[context->level] = pte;
context->is_contiguous = true;
}
return false;
return true;
}
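In numbers (4KB granule assumed): the non-contiguous path above fuses 16 adjacent, compatible entries into one contiguous run (e.g. 16 x 4KB -> 64KB), while the contiguous path fuses a full table of such runs into a single block at the next level up (e.g. 512 x 4KB -> 2MB), and only that second path frees a table page, returned through *out for the caller to release.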
void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const {
/* We want to downgrade the pages by one step. */
if (context->is_contiguous) {
/* We want to downgrade a contiguous mapping to a non-contiguous mapping. */
pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
auto * const first = pte;
const KPhysicalAddress block = this->GetBlock(first, context->level);
for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * context->level)), PageTableEntry(first->GetEntryTemplateForSeparateContiguous(i)), PageTableEntry::SeparateContiguousTag{});
}
context->is_contiguous = false;
context->level_entries[context->level] = pte + (this->GetLevelIndex(address, context->level) & (BlocksPerContiguousBlock - 1));
} else {
/* We want to downgrade a block into a table. */
auto * const first = context->level_entries[context->level];
const KPhysicalAddress block = this->GetBlock(first, context->level);
for (size_t i = 0; i < BlocksPerTable; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * (context->level - 1))), PageTableEntry(first->GetEntryTemplateForSeparate(i)), PageTableEntry::SoftwareReservedBit_None, true, context->level - 1 == EntryLevel_L3);
}
context->is_contiguous = true;
context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
/* Wait for pending stores to complete. */
cpu::DataSynchronizationBarrierInnerShareableStore();
/* Update the block entry to be a table entry. */
*context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(pte)), m_is_kernel, true, BlocksPerTable);
context->level_entries[context->level] = pte + this->GetLevelIndex(address, context->level);
}
entry->sw_reserved_bits = 0;
entry->attr = 0;
entry->phys_addr = this->GetBlock(context->level_entries[context->level], context->level) + this->GetOffset(address, context->level);
entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);
}
void KPageTableImpl::Dump(uintptr_t start, size_t size) const {

View file

@@ -21,6 +21,15 @@ namespace ams::kern::arch::arm64 {
void UserModeThreadStarter();
void SupervisorModeThreadStarter();
void InvokeSupervisorModeThread(uintptr_t argument, uintptr_t entrypoint) {
/* Invoke the function. */
using SupervisorModeFunctionType = void (*)(uintptr_t);
reinterpret_cast<SupervisorModeFunctionType>(entrypoint)(argument);
/* Wait forever. */
AMS_INFINITE_LOOP();
}
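A sketch of what the starter above invokes (ExampleSupervisorFunction is hypothetical):
#include <cstdint>
void ExampleSupervisorFunction(uintptr_t argument) {
    /* ... kernel-mode work using argument ... */
}
/* A supervisor-mode thread created with
   entrypoint = reinterpret_cast<uintptr_t>(ExampleSupervisorFunction) runs it
   once; if the function returns, InvokeSupervisorModeThread parks the thread in
   AMS_INFINITE_LOOP() instead of returning to the starter. */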
void OnThreadStart() {
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
/* Send KDebug event for this thread's creation. */

View file

@@ -126,6 +126,20 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess30CopyMemoryFromUserAligned64BitEPvPKvm:
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize64Bit(void *dst, const void *src) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize64BitEPvPKv, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize64BitEPvPKv
.type _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize64BitEPvPKv, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize64BitEPvPKv:
/* Just load and store a u64. */
ldtr x2, [x1]
str x2, [x0]
/* We're done. */
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize32Bit(void *dst, const void *src) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv
@@ -140,6 +154,21 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess27CopyMemoryFromUserSize32BitEPvPKv:
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(void *dst, const void *src) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv
.type _ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess51CopyMemoryFromUserSize32BitWithSupervisorAccessImplEPvPKv:
/* Just load and store a u32. */
/* NOTE: This is done with supervisor access permissions. */
ldr w2, [x1]
str w2, [x0]
/* We're done. */
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::CopyStringFromUser(void *dst, const void *src, size_t size) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess18CopyStringFromUserEPvPKvm

View file

@@ -0,0 +1,67 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ams::kern::svc::CallWaitForAddress64From32() */
.section .text._ZN3ams4kern3svc26CallWaitForAddress64From32Ev, "ax", %progbits
.global _ZN3ams4kern3svc26CallWaitForAddress64From32Ev
.type _ZN3ams4kern3svc26CallWaitForAddress64From32Ev, %function
_ZN3ams4kern3svc26CallWaitForAddress64From32Ev:
/* Save LR + callee-save registers. */
str x30, [sp, #-16]!
stp x6, x7, [sp, #-16]!
/* Gather the arguments into correct registers. */
/* NOTE: This has to be manually implemented via asm, */
/* in order to avoid breaking ABI with pre-19.0.0. */
orr x2, x2, x5, lsl#32
orr x3, x3, x4, lsl#32
/* Invoke the svc handler. */
bl _ZN3ams4kern3svc22WaitForAddress64From32ENS_3svc7AddressENS2_15ArbitrationTypeEll
/* Clean up registers. */
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
ldp x6, x7, [sp], #0x10
ldr x30, [sp], #0x10
ret
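In C++ terms, the two orr instructions above reassemble the 64-bit arguments that the 32-bit ABI passes split across register pairs (which w register carries which half is taken from the code above; Pack64From32 is illustrative):
#include <cstdint>
constexpr int64_t Pack64From32(uint32_t low, uint32_t high) {
    return static_cast<int64_t>((static_cast<uint64_t>(high) << 32) | low);
}
/* x2 = Pack64From32(w2, w5) re-forms the s64 value, and
   x3 = Pack64From32(w3, w4) re-forms the s64 timeout. */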
/* ams::kern::svc::CallWaitForAddress64() */
.section .text._ZN3ams4kern3svc20CallWaitForAddress64Ev, "ax", %progbits
.global _ZN3ams4kern3svc20CallWaitForAddress64Ev
.type _ZN3ams4kern3svc20CallWaitForAddress64Ev, %function
_ZN3ams4kern3svc20CallWaitForAddress64Ev:
/* Save LR + FP. */
stp x29, x30, [sp, #-16]!
/* Invoke the svc handler. */
bl _ZN3ams4kern3svc22WaitForAddress64From32ENS_3svc7AddressENS2_15ArbitrationTypeEll
/* Clean up registers. */
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
ldp x29, x30, [sp], #0x10
ret

View file

@@ -36,6 +36,10 @@ namespace ams::kern::svc {
/* Declare special prototype for (unsupported) CallCallSecureMonitor64From32. */
void CallCallSecureMonitor64From32();
/* Declare special prototypes for WaitForAddress. */
void CallWaitForAddress64();
void CallWaitForAddress64From32();
namespace {
#ifndef MESOSPHERE_USE_STUBBED_SVC_TABLES
@@ -81,6 +85,8 @@ namespace ams::kern::svc {
table[svc::SvcId_CallSecureMonitor] = CallCallSecureMonitor64From32;
table[svc::SvcId_WaitForAddress] = CallWaitForAddress64From32;
return table;
}();
@@ -97,6 +103,8 @@ namespace ams::kern::svc {
table[svc::SvcId_ReturnFromException] = CallReturnFromException64;
table[svc::SvcId_WaitForAddress] = CallWaitForAddress64;
return table;
}();

View file

@@ -31,7 +31,6 @@ namespace ams::kern::board::nintendo::nx {
/* Struct representing registers saved on wake/sleep. */
class SavedSystemRegisters {
private:
u64 ttbr0_el1;
u64 elr_el1;
u64 sp_el0;
u64 spsr_el1;
@@ -90,7 +89,6 @@ namespace ams::kern::board::nintendo::nx {
void SavedSystemRegisters::Save() {
/* Save system registers. */
this->ttbr0_el1 = cpu::GetTtbr0El1();
this->tpidr_el0 = cpu::GetTpidrEl0();
this->elr_el1 = cpu::GetElrEl1();
this->sp_el0 = cpu::GetSpEl0();
@@ -405,7 +403,7 @@ namespace ams::kern::board::nintendo::nx {
cpu::EnsureInstructionConsistency();
/* Restore system registers. */
cpu::SetTtbr0El1 (this->ttbr0_el1);
cpu::SetTtbr0El1 (KPageTable::GetKernelTtbr0());
cpu::SetTpidrEl0 (this->tpidr_el0);
cpu::SetElrEl1 (this->elr_el1);
cpu::SetSpEl0 (this->sp_el0);

View file

@@ -26,7 +26,7 @@ namespace ams::kern::board::nintendo::nx {
constexpr size_t SecureSizeMax = util::AlignDown(512_MB - 1, SecureAlignment);
/* Global variables for panic. */
constinit bool g_call_smc_on_panic;
constinit const volatile bool g_call_smc_on_panic = false;
/* Global variables for secure memory. */
constinit KSpinLock g_secure_applet_lock;
@@ -401,34 +401,67 @@ namespace ams::kern::board::nintendo::nx {
}
/* System Initialization. */
void KSystemControl::InitializePhase1() {
void KSystemControl::ConfigureKTargetSystem() {
/* Configure KTargetSystem. */
volatile auto *ts = const_cast<volatile KTargetSystem::KTargetSystemData *>(std::addressof(KTargetSystem::s_data));
{
/* Set IsDebugMode. */
{
KTargetSystem::SetIsDebugMode(GetConfigBool(smc::ConfigItem::IsDebugMode));
ts->is_debug_mode = GetConfigBool(smc::ConfigItem::IsDebugMode);
/* If debug mode, we want to initialize uart logging. */
KTargetSystem::EnableDebugLogging(KTargetSystem::IsDebugMode());
ts->enable_debug_logging = ts->is_debug_mode;
}
/* Set Kernel Configuration. */
{
const auto kernel_config = util::BitPack32{GetConfigU32(smc::ConfigItem::KernelConfiguration)};
KTargetSystem::EnableDebugMemoryFill(kernel_config.Get<smc::KernelConfiguration::DebugFillMemory>());
KTargetSystem::EnableUserExceptionHandlers(kernel_config.Get<smc::KernelConfiguration::EnableUserExceptionHandlers>());
KTargetSystem::EnableDynamicResourceLimits(!kernel_config.Get<smc::KernelConfiguration::DisableDynamicResourceLimits>());
KTargetSystem::EnableUserPmuAccess(kernel_config.Get<smc::KernelConfiguration::EnableUserPmuAccess>());
ts->enable_debug_memory_fill = kernel_config.Get<smc::KernelConfiguration::DebugFillMemory>();
ts->enable_user_exception_handlers = kernel_config.Get<smc::KernelConfiguration::EnableUserExceptionHandlers>();
ts->enable_dynamic_resource_limits = !kernel_config.Get<smc::KernelConfiguration::DisableDynamicResourceLimits>();
ts->enable_user_pmu_access = kernel_config.Get<smc::KernelConfiguration::EnableUserPmuAccess>();
g_call_smc_on_panic = kernel_config.Get<smc::KernelConfiguration::UseSecureMonitorPanicCall>();
/* Configure call smc on panic. */
*const_cast<volatile bool *>(std::addressof(g_call_smc_on_panic)) = kernel_config.Get<smc::KernelConfiguration::UseSecureMonitorPanicCall>();
}
/* Set Kernel Debugging. */
{
/* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */
/* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */
KTargetSystem::EnableKernelDebugging(GetConfigBool(smc::ConfigItem::DisableProgramVerification));
ts->enable_kernel_debugging = GetConfigBool(smc::ConfigItem::DisableProgramVerification);
}
}
}
void KSystemControl::InitializePhase1() {
/* Enable KTargetSystem. */
KTargetSystem::SetInitialized();
/* Check KTargetSystem was configured correctly. */
{
/* Check IsDebugMode. */
{
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDebugMode() == GetConfigBool(smc::ConfigItem::IsDebugMode));
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDebugLoggingEnabled() == GetConfigBool(smc::ConfigItem::IsDebugMode));
}
/* Check Kernel Configuration. */
{
const auto kernel_config = util::BitPack32{GetConfigU32(smc::ConfigItem::KernelConfiguration)};
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDebugMemoryFillEnabled() == kernel_config.Get<smc::KernelConfiguration::DebugFillMemory>());
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsUserExceptionHandlersEnabled() == kernel_config.Get<smc::KernelConfiguration::EnableUserExceptionHandlers>());
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled() == !kernel_config.Get<smc::KernelConfiguration::DisableDynamicResourceLimits>());
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsUserPmuAccessEnabled() == kernel_config.Get<smc::KernelConfiguration::EnableUserPmuAccess>());
MESOSPHERE_ABORT_UNLESS(g_call_smc_on_panic == kernel_config.Get<smc::KernelConfiguration::UseSecureMonitorPanicCall>());
}
/* Check Kernel Debugging. */
{
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsKernelDebuggingEnabled() == GetConfigBool(smc::ConfigItem::DisableProgramVerification));
}
}
@@ -590,13 +623,8 @@ namespace ams::kern::board::nintendo::nx {
for (size_t i = 0; i < RebootPayloadSize / sizeof(u32); ++i) {
GetPointer<volatile u32>(iram_address)[i] = GetPointer<volatile u32>(reboot_payload)[i];
}
/* Reboot. */
smc::SetConfig(smc::ConfigItem::ExosphereNeedsReboot, smc::UserRebootType_ToPayload);
} else {
/* If we don't have a payload, reboot to rcm. */
smc::SetConfig(smc::ConfigItem::ExosphereNeedsReboot, smc::UserRebootType_ToRcm);
}
smc::SetConfig(smc::ConfigItem::ExosphereNeedsReboot, smc::UserRebootType_ToFatalError);
}
if (g_call_smc_on_panic) {

View file

@@ -98,9 +98,10 @@ namespace ams::kern::board::nintendo::nx::smc {
};
enum UserRebootType {
UserRebootType_None = 0,
UserRebootType_ToRcm = 1,
UserRebootType_ToPayload = 2,
UserRebootType_None = 0,
UserRebootType_ToRcm = 1,
UserRebootType_ToPayload = 2,
UserRebootType_ToFatalError = 3,
};
void GenerateRandomBytes(void *dst, size_t size);

View file

@@ -23,6 +23,10 @@ namespace ams::kern {
return UserspaceAccess::CopyMemoryFromUserSize32Bit(out, GetVoidPointer(address));
}
ALWAYS_INLINE bool ReadFromUser(s64 *out, KProcessAddress address) {
return UserspaceAccess::CopyMemoryFromUserSize64Bit(out, GetVoidPointer(address));
}
ALWAYS_INLINE bool DecrementIfLessThan(s32 *out, KProcessAddress address, s32 value) {
/* NOTE: If scheduler lock is not held here, interrupt disable is required. */
/* KScopedInterruptDisable di; */
@@ -279,4 +283,51 @@ namespace ams::kern {
R_RETURN(cur_thread->GetWaitResult());
}
Result KAddressArbiter::WaitIfEqual64(uintptr_t addr, s64 value, s64 timeout) {
/* Prepare to wait. */
KThread *cur_thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKAddressArbiter wait_queue(std::addressof(m_tree));
{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);
/* Check that the thread isn't terminating. */
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
R_THROW(svc::ResultTerminationRequested());
}
/* Read the value from userspace. */
s64 user_value;
if (!ReadFromUser(std::addressof(user_value), addr)) {
slp.CancelSleep();
R_THROW(svc::ResultInvalidCurrentMemory());
}
/* Check that the value is equal. */
if (value != user_value) {
slp.CancelSleep();
R_THROW(svc::ResultInvalidState());
}
/* Check that the timeout is non-zero. */
if (timeout == 0) {
slp.CancelSleep();
R_THROW(svc::ResultTimedOut());
}
/* Set the arbiter. */
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);
/* Wait for the thread to finish. */
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
}
/* Get the wait result. */
R_RETURN(cur_thread->GetWaitResult());
}
}

View file

@@ -37,6 +37,24 @@ namespace ams::kern {
{ 39, Invalid, ams::svc::AddressMemoryRegionStack39Size, KAddressSpaceInfo::Type_Stack, },
};
constexpr u8 FlagsToAddressSpaceWidthTable[4] = {
32, 36, 32, 39
};
constexpr size_t GetAddressSpaceWidth(ams::svc::CreateProcessFlag flags) {
/* Convert the input flags to an array index. */
const size_t idx = (flags & ams::svc::CreateProcessFlag_AddressSpaceMask) >> ams::svc::CreateProcessFlag_AddressSpaceShift;
MESOSPHERE_ABORT_UNLESS(idx < sizeof(FlagsToAddressSpaceWidthTable));
/* Return the width. */
return FlagsToAddressSpaceWidthTable[idx];
}
static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace32Bit) == 32);
static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated) == 36);
static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) == 32);
static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace64Bit) == 39);
KAddressSpaceInfo &GetAddressSpaceInfo(size_t width, KAddressSpaceInfo::Type type) {
for (auto &info : AddressSpaceInfos) {
if (info.GetWidth() == width && info.GetType() == type) {
@@ -48,12 +66,12 @@ namespace ams::kern {
}
uintptr_t KAddressSpaceInfo::GetAddressSpaceStart(size_t width, KAddressSpaceInfo::Type type) {
return GetAddressSpaceInfo(width, type).GetAddress();
uintptr_t KAddressSpaceInfo::GetAddressSpaceStart(ams::svc::CreateProcessFlag flags, KAddressSpaceInfo::Type type) {
return GetAddressSpaceInfo(GetAddressSpaceWidth(flags), type).GetAddress();
}
size_t KAddressSpaceInfo::GetAddressSpaceSize(size_t width, KAddressSpaceInfo::Type type) {
return GetAddressSpaceInfo(width, type).GetSize();
size_t KAddressSpaceInfo::GetAddressSpaceSize(ams::svc::CreateProcessFlag flags, KAddressSpaceInfo::Type type) {
return GetAddressSpaceInfo(GetAddressSpaceWidth(flags), type).GetSize();
}
void KAddressSpaceInfo::SetAddressSpaceSize(size_t width, Type type, size_t size) {

View file

@@ -262,7 +262,14 @@ namespace ams::kern {
/* Validate. */
R_UNLESS(cap.Get<DebugFlags::Reserved>() == 0, svc::ResultReservedUsed());
u32 total = 0;
if (cap.Get<DebugFlags::AllowDebug>()) { ++total; }
if (cap.Get<DebugFlags::ForceDebugProd>()) { ++total; }
if (cap.Get<DebugFlags::ForceDebug>()) { ++total; }
R_UNLESS(total <= 1, svc::ResultInvalidCombination());
m_debug_capabilities.Set<DebugFlags::AllowDebug>(cap.Get<DebugFlags::AllowDebug>());
m_debug_capabilities.Set<DebugFlags::ForceDebugProd>(cap.Get<DebugFlags::ForceDebugProd>());
m_debug_capabilities.Set<DebugFlags::ForceDebug>(cap.Get<DebugFlags::ForceDebug>());
R_SUCCEED();
}

View file

@@ -27,7 +27,8 @@ namespace ams::kern {
void KDebugBase::Initialize() {
/* Clear the continue flags. */
m_continue_flags = 0;
m_continue_flags = 0;
m_is_force_debug_prod = GetCurrentProcess().CanForceDebugProd();
}
bool KDebugBase::Is64Bit() const {
@@ -120,8 +121,11 @@ namespace ams::kern {
/* Read the memory. */
if (info.GetSvcState() != ams::svc::MemoryState_Io) {
/* The memory is normal memory. */
R_TRY(target_pt.ReadDebugMemory(GetVoidPointer(buffer), cur_address, cur_size));
R_TRY(target_pt.ReadDebugMemory(GetVoidPointer(buffer), cur_address, cur_size, this->IsForceDebugProd()));
} else {
/* Only allow IO memory to be read if not force debug prod. */
R_UNLESS(!this->IsForceDebugProd(), svc::ResultInvalidCurrentMemory());
/* The memory is IO memory. */
R_TRY(target_pt.ReadDebugIoMemory(GetVoidPointer(buffer), cur_address, cur_size, info.GetState()));
}
@@ -269,6 +273,9 @@ namespace ams::kern {
switch (state) {
case KProcess::State_Created:
case KProcess::State_Running:
/* Created and running processes can only be debugged if the debugger is not ForceDebugProd. */
R_UNLESS(!this->IsForceDebugProd(), svc::ResultInvalidState());
break;
case KProcess::State_Crashed:
break;
case KProcess::State_CreatedAttached:
@@ -408,69 +415,6 @@ namespace ams::kern {
/* Get the process pointer. */
KProcess * const target = this->GetProcessUnsafe();
/* Detach from the process. */
{
/* Lock both ourselves and the target process. */
KScopedLightLock state_lk(target->GetStateLock());
KScopedLightLock list_lk(target->GetListLock());
KScopedLightLock this_lk(m_lock);
/* Check that we're still attached. */
if (this->IsAttached()) {
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Get the process's state. */
const KProcess::State state = target->GetState();
/* Check that the process is in a state where we can terminate it. */
R_UNLESS(state != KProcess::State_Created, svc::ResultInvalidState());
R_UNLESS(state != KProcess::State_CreatedAttached, svc::ResultInvalidState());
/* Decide on a new state for the process. */
KProcess::State new_state;
if (state == KProcess::State_RunningAttached) {
/* If the process is running, transition it accordingly. */
new_state = KProcess::State_Running;
} else if (state == KProcess::State_DebugBreak) {
/* If the process is in debug break, transition it accordingly. */
new_state = KProcess::State_Crashed;
/* Suspend all the threads in the process. */
{
auto end = target->GetThreadList().end();
for (auto it = target->GetThreadList().begin(); it != end; ++it) {
/* Request that we suspend the thread. */
it->RequestSuspend(KThread::SuspendType_Debug);
}
}
} else {
/* Otherwise, don't transition. */
new_state = state;
}
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
/* Clear single step on all threads. */
{
auto end = target->GetThreadList().end();
for (auto it = target->GetThreadList().begin(); it != end; ++it) {
it->ClearHardwareSingleStep();
}
}
#endif
/* Detach from the process. */
target->ClearDebugObject(new_state);
m_is_attached = false;
/* Close the initial reference opened to our process. */
this->CloseProcess();
/* Clear our continue flags. */
m_continue_flags = 0;
}
}
/* Terminate the process. */
target->Terminate();
@@ -962,7 +906,12 @@ namespace ams::kern {
case ams::svc::DebugException_UndefinedInstruction:
{
MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 1);
/* Only save the instruction if the caller is not force debug prod. */
if (this->IsForceDebugProd()) {
out->info.exception.specific.undefined_instruction.insn = 0;
} else {
out->info.exception.specific.undefined_instruction.insn = info->info.exception.exception_data[0];
}
}
break;
case ams::svc::DebugException_BreakPoint:

View file

@@ -193,40 +193,25 @@ namespace ams::kern {
R_UNLESS(this->Is64Bit(), svc::ResultInvalidCombination());
}
using ASType = KAddressSpaceInfo::Type;
const uintptr_t start_address = rx_address;
const uintptr_t end_address = bss_size > 0 ? bss_address + bss_size : rw_address + rw_size;
const size_t as_width = this->Is64BitAddressSpace() ? ((GetTargetFirmware() >= TargetFirmware_2_0_0) ? 39 : 36) : 32;
const ASType as_type = this->Is64BitAddressSpace() ? ((GetTargetFirmware() >= TargetFirmware_2_0_0) ? KAddressSpaceInfo::Type_Map39Bit : KAddressSpaceInfo::Type_MapSmall) : KAddressSpaceInfo::Type_MapSmall;
const uintptr_t map_start = KAddressSpaceInfo::GetAddressSpaceStart(as_width, as_type);
const size_t map_size = KAddressSpaceInfo::GetAddressSpaceSize(as_width, as_type);
const uintptr_t map_end = map_start + map_size;
MESOSPHERE_ABORT_UNLESS(start_address == 0);
/* Default fields in parameter to zero. */
*out = {};
/* Set fields in parameter. */
out->code_address = map_start + start_address;
out->code_address = 0;
out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize;
out->program_id = m_kip_header.GetProgramId();
out->version = m_kip_header.GetVersion();
out->flags = 0;
out->reslimit = ams::svc::InvalidHandle;
out->system_resource_num_pages = 0;
MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize));
/* Copy name field. */
m_kip_header.GetName(out->name, sizeof(out->name));
/* Apply ASLR, if needed. */
if (enable_aslr) {
const size_t choices = (map_end / KernelAslrAlignment) - (util::AlignUp(out->code_address + out->code_num_pages * PageSize, KernelAslrAlignment) / KernelAslrAlignment);
out->code_address += KSystemControl::GenerateRandomRange(0, choices) * KernelAslrAlignment;
out->flags |= ams::svc::CreateProcessFlag_EnableAslr;
}
/* Apply other flags. */
if (this->Is64Bit()) {
out->flags |= ams::svc::CreateProcessFlag_Is64Bit;
@@ -236,10 +221,28 @@ namespace ams::kern {
} else {
out->flags |= ams::svc::CreateProcessFlag_AddressSpace32Bit;
}
if (enable_aslr) {
out->flags |= ams::svc::CreateProcessFlag_EnableAslr;
}
/* All initial processes should disable device address space merge. */
out->flags |= ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge;
/* Set and check code address. */
using ASType = KAddressSpaceInfo::Type;
const ASType as_type = this->Is64BitAddressSpace() ? ((GetTargetFirmware() >= TargetFirmware_2_0_0) ? KAddressSpaceInfo::Type_Map39Bit : KAddressSpaceInfo::Type_MapSmall) : KAddressSpaceInfo::Type_MapSmall;
const uintptr_t map_start = KAddressSpaceInfo::GetAddressSpaceStart(static_cast<ams::svc::CreateProcessFlag>(out->flags), as_type);
const size_t map_size = KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(out->flags), as_type);
const uintptr_t map_end = map_start + map_size;
out->code_address = map_start + start_address;
MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize));
/* Apply ASLR, if needed. */
if (enable_aslr) {
const size_t choices = (map_end / KernelAslrAlignment) - (util::AlignUp(out->code_address + out->code_num_pages * PageSize, KernelAslrAlignment) / KernelAslrAlignment);
out->code_address += KSystemControl::GenerateRandomRange(0, choices) * KernelAslrAlignment;
}
R_SUCCEED();
}
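The reordered logic computes the code address only after the flags are final: count the alignment-sized slots left between the image's end and the map region's end, then slide by a random slot. A worked sketch with made-up region bounds (the 2 MiB alignment here is an assumption standing in for KernelAslrAlignment, and std::mt19937_64 stands in for KSystemControl::GenerateRandomRange):

#include <cstdint>
#include <cstdio>
#include <random>

constexpr uintptr_t PageSize            = 0x1000;
constexpr uintptr_t KernelAslrAlignment = 0x200000; /* assumed: 2 MiB */

constexpr uintptr_t AlignUp(uintptr_t v, uintptr_t a) { return (v + a - 1) & ~(a - 1); }

int main() {
    /* Made-up map region and image size. */
    const uintptr_t map_start      = 0x8000000;
    const uintptr_t map_end        = map_start + 0x40000000;
    const size_t    code_num_pages = 0x100;

    uintptr_t code_address = map_start;

    /* Count the remaining alignment-sized slots past the end of the image... */
    const size_t choices = (map_end / KernelAslrAlignment)
                         - (AlignUp(code_address + code_num_pages * PageSize, KernelAslrAlignment) / KernelAslrAlignment);

    /* ...and slide by a random slot (GenerateRandomRange(0, choices) in the kernel). */
    std::mt19937_64 rng{std::random_device{}()};
    code_address += (rng() % (choices + 1)) * KernelAslrAlignment;

    std::printf("code_address = %#llx\n", static_cast<unsigned long long>(code_address));
    return 0;
}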

View file

@@ -54,11 +54,11 @@ namespace ams::kern {
return "Unknown ";
}
constexpr const char *GetMemoryPermissionString(const KMemoryInfo &info) {
if (info.m_state == KMemoryState_Free) {
constexpr const char *GetMemoryPermissionString(const KMemoryBlock &block) {
if (block.GetState() == KMemoryState_Free) {
return " ";
} else {
switch (info.m_permission) {
switch (block.GetPermission()) {
case KMemoryPermission_UserReadExecute:
return "r-x";
case KMemoryPermission_UserRead:
@@ -71,19 +71,19 @@ namespace ams::kern {
}
}
void DumpMemoryInfo(const KMemoryInfo &info) {
const char *state = GetMemoryStateName(info.m_state);
const char *perm = GetMemoryPermissionString(info);
const uintptr_t start = info.GetAddress();
const uintptr_t end = info.GetLastAddress();
const size_t kb = info.GetSize() / 1_KB;
void DumpMemoryBlock(const KMemoryBlock &block) {
const char *state = GetMemoryStateName(block.GetState());
const char *perm = GetMemoryPermissionString(block);
const uintptr_t start = GetInteger(block.GetAddress());
const uintptr_t end = GetInteger(block.GetLastAddress());
const size_t kb = block.GetSize() / 1_KB;
const char l = (info.m_attribute & KMemoryAttribute_Locked) ? 'L' : '-';
const char i = (info.m_attribute & KMemoryAttribute_IpcLocked) ? 'I' : '-';
const char d = (info.m_attribute & KMemoryAttribute_DeviceShared) ? 'D' : '-';
const char u = (info.m_attribute & KMemoryAttribute_Uncached) ? 'U' : '-';
const char l = (block.GetAttribute() & KMemoryAttribute_Locked) ? 'L' : '-';
const char i = (block.GetAttribute() & KMemoryAttribute_IpcLocked) ? 'I' : '-';
const char d = (block.GetAttribute() & KMemoryAttribute_DeviceShared) ? 'D' : '-';
const char u = (block.GetAttribute() & KMemoryAttribute_Uncached) ? 'U' : '-';
MESOSPHERE_LOG("0x%10lx - 0x%10lx (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, info.m_ipc_lock_count, info.m_device_use_count);
MESOSPHERE_LOG("0x%10lx - 0x%10lx (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, block.GetIpcLockCount(), block.GetDeviceUseCount());
}
}
@@ -123,15 +123,14 @@ namespace ams::kern {
const KProcessAddress region_end = region_start + region_num_pages * PageSize;
const KProcessAddress region_last = region_end - 1;
for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); it++) {
const KMemoryInfo info = it->GetMemoryInfo();
if (region_last < info.GetAddress()) {
if (region_last < it->GetAddress()) {
break;
}
if (info.m_state != KMemoryState_Free) {
if (it->GetState() != KMemoryState_Free) {
continue;
}
KProcessAddress area = (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress();
KProcessAddress area = (it->GetAddress() <= GetInteger(region_start)) ? region_start : it->GetAddress();
area += guard_pages * PageSize;
const KProcessAddress offset_area = util::AlignDown(GetInteger(area), alignment) + offset;
@@ -140,7 +139,7 @@ namespace ams::kern {
const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize;
const KProcessAddress area_last = area_end - 1;
if (info.GetAddress() <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= info.GetLastAddress()) {
if (GetInteger(it->GetAddress()) <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= GetInteger(it->GetLastAddress())) {
return area;
}
}
@@ -171,7 +170,7 @@ namespace ams::kern {
it = prev;
}
if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
if (address + num_pages * PageSize < it->GetEndAddress()) {
break;
}
}
@@ -189,43 +188,39 @@ namespace ams::kern {
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(state, perm, attr)) {
/* If we already have the right properties, just advance. */
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
if (cur_address + remaining_size < it->GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize;
cur_address = it->GetEndAddress();
}
} else {
/* If we need to, create a new block before and insert it. */
if (cur_info.GetAddress() != GetInteger(cur_address)) {
if (it->GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
cur_address = it->GetAddress();
}
/* If we need to, create a new block after and insert it. */
if (cur_info.GetSize() > remaining_size) {
if (it->GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
/* Update block state. */
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
}
it++;
}
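Each of these loops follows the same split-then-update shape: split the block at the range's start if it does not fall on a block boundary, split again at the range's end, then rewrite the properties of everything in between (the manager coalesces mergeable neighbors afterwards). A compact sketch of that shape, using a std::map interval map as a stand-in for the kernel's intrusive red-black tree and a single state field in place of the full property set:

#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>

struct Block { uint64_t size; int state; };

/* Start address -> block; blocks are assumed to tile the space contiguously. */
using BlockMap = std::map<uint64_t, Block>;

/* Ensure a block boundary falls exactly at addr, splitting if needed. */
void SplitAt(BlockMap &m, uint64_t addr) {
    auto it = std::prev(m.upper_bound(addr));
    const uint64_t end = it->first + it->second.size;
    if (it->first == addr || end == addr) { return; } /* already a boundary */
    const uint64_t offset = addr - it->first;
    m[addr] = Block{ it->second.size - offset, it->second.state };
    it->second.size = offset;
}

/* Rewrite [addr, addr + size) to state, splitting at both edges first. */
void Update(BlockMap &m, uint64_t addr, uint64_t size, int state) {
    SplitAt(m, addr);
    SplitAt(m, addr + size);
    for (auto it = m.find(addr); it != m.end() && it->first < addr + size; ++it) {
        it->second.state = state;
    }
}

int main() {
    BlockMap m{ { 0x0, Block{ 0x10000, 0 } } };
    Update(m, 0x4000, 0x2000, 1);
    for (const auto &[addr, blk] : m) {
        std::printf("%#6llx + %#6llx state=%d\n",
                    static_cast<unsigned long long>(addr),
                    static_cast<unsigned long long>(blk.size), blk.state);
    }
    return 0;
}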
@@ -245,42 +240,38 @@ namespace ams::kern {
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(test_state, test_perm, test_attr) && !it->HasProperties(state, perm, attr)) {
/* If we need to, create a new block before and insert it. */
if (cur_info.GetAddress() != GetInteger(cur_address)) {
if (it->GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
cur_address = it->GetAddress();
}
/* If we need to, create a new block after and insert it. */
if (cur_info.GetSize() > remaining_size) {
if (it->GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
/* Update block state. */
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
} else {
/* If we already have the right properties, just advance. */
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
if (cur_address + remaining_size < it->GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize;
cur_address = it->GetEndAddress();
}
}
it++;
@@ -302,34 +293,30 @@ namespace ams::kern {
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
/* If we need to, create a new block before and insert it. */
if (cur_info.m_address != GetInteger(cur_address)) {
if (it->GetAddress() != cur_address) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
cur_address = it->GetAddress();
}
if (cur_info.GetSize() > remaining_size) {
if (it->GetSize() > remaining_size) {
/* If we need to, create a new block after and insert it. */
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
/* Call the locked update function. */
(std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address, cur_info.GetEndAddress() == end_address);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
(std::addressof(*it)->*lock_func)(perm, it->GetAddress() == address, it->GetEndAddress() == end_address);
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
it++;
}
@@ -347,43 +334,39 @@ namespace ams::kern {
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if ((it->GetAttribute() & mask) != attr) {
/* If we need to, create a new block before and insert it. */
if (cur_info.GetAddress() != GetInteger(cur_address)) {
if (it->GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
cur_address = it->GetAddress();
}
/* If we need to, create a new block after and insert it. */
if (cur_info.GetSize() > remaining_size) {
if (it->GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
/* Update block state. */
it->UpdateAttribute(mask, attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
} else {
/* If we already have the right attributes, just advance. */
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
if (cur_address + remaining_size < it->GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize;
cur_address = it->GetEndAddress();
}
}
it++;
@@ -401,8 +384,6 @@ namespace ams::kern {
auto it = m_memory_block_tree.cbegin();
auto prev = it++;
while (it != m_memory_block_tree.cend()) {
const KMemoryInfo prev_info = prev->GetMemoryInfo();
const KMemoryInfo cur_info = it->GetMemoryInfo();
/* Sequential blocks which can be merged should be merged. */
if (prev->CanMergeWith(*it)) {
@@ -410,17 +391,17 @@ namespace ams::kern {
}
/* Sequential blocks should be sequential. */
if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
if (prev->GetEndAddress() != it->GetAddress()) {
return false;
}
/* If the block is ipc locked, it must have a count. */
if ((cur_info.m_attribute & KMemoryAttribute_IpcLocked) != 0 && cur_info.m_ipc_lock_count == 0) {
if ((it->GetAttribute() & KMemoryAttribute_IpcLocked) != 0 && it->GetIpcLockCount() == 0) {
return false;
}
/* If the block is device shared, it must have a count. */
if ((cur_info.m_attribute & KMemoryAttribute_DeviceShared) != 0 && cur_info.m_device_use_count == 0) {
if ((it->GetAttribute() & KMemoryAttribute_DeviceShared) != 0 && it->GetDeviceUseCount() == 0) {
return false;
}
@@ -430,14 +411,13 @@ namespace ams::kern {
/* Our loop will miss checking the last block, potentially, so check it. */
if (prev != m_memory_block_tree.cend()) {
const KMemoryInfo prev_info = prev->GetMemoryInfo();
/* If the block is ipc locked, it must have a count. */
if ((prev_info.m_attribute & KMemoryAttribute_IpcLocked) != 0 && prev_info.m_ipc_lock_count == 0) {
if ((prev->GetAttribute() & KMemoryAttribute_IpcLocked) != 0 && prev->GetIpcLockCount() == 0) {
return false;
}
/* If the block is device shared, it must have a count. */
if ((prev_info.m_attribute & KMemoryAttribute_DeviceShared) != 0 && prev_info.m_device_use_count == 0) {
if ((prev->GetAttribute() & KMemoryAttribute_DeviceShared) != 0 && prev->GetDeviceUseCount() == 0) {
return false;
}
}
@@ -450,7 +430,7 @@ namespace ams::kern {
void KMemoryBlockManager::DumpBlocks() const {
/* Dump each block. */
for (const auto &block : m_memory_block_tree) {
DumpMemoryInfo(block.GetMemoryInfo());
DumpMemoryBlock(block);
}
}
}

View file

@@ -35,7 +35,7 @@ namespace ams::kern {
}
void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size, const u32 *min_align_shifts) {
/* Clear the management region to zero. */
const KVirtualAddress management_region_end = management_region + management_region_size;
std::memset(GetVoidPointer(management_region), 0, management_region_size);
@@ -154,6 +154,17 @@ namespace ams::kern {
for (size_t i = 0; i < m_num_managers; ++i) {
m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
}
/* Determine the min heap size for all pools. */
for (size_t i = 0; i < Pool_Count; ++i) {
/* Determine the min alignment for the pool in pages. */
const size_t min_align_pages = 1 << min_align_shifts[i];
/* Determine a heap index. */
if (const auto heap_index = KPageHeap::GetAlignedBlockIndex(min_align_pages, min_align_pages); heap_index >= 0) {
m_min_heap_indexes[i] = heap_index;
}
}
}
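Here each pool's configured minimum-alignment shift becomes a page count (1 << shift) and then the index of the smallest page-heap block that satisfies it; allocations from that pool never search below this index. A small sketch against a hypothetical block-size table (KPageHeap's real table differs):

#include <cstddef>
#include <cstdio>

/* Hypothetical page-heap block sizes, in pages, smallest to largest. */
constexpr size_t BlockNumPages[] = { 1, 4, 8, 16, 64, 512 };
constexpr int    NumBlocks       = sizeof(BlockNumPages) / sizeof(BlockNumPages[0]);

/* Index of the smallest block that can hold num_pages at align_pages alignment. */
int GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
    for (int i = 0; i < NumBlocks; ++i) {
        if (BlockNumPages[i] >= num_pages && BlockNumPages[i] >= align_pages) {
            return i;
        }
    }
    return -1;
}

int main() {
    const unsigned min_align_shift = 2;             /* e.g. from per-pool config */
    const size_t   min_align_pages = 1u << min_align_shift;
    std::printf("min heap index = %d\n", GetAlignedBlockIndex(min_align_pages, min_align_pages));
    return 0;
}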
Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
@@ -192,8 +203,19 @@ namespace ams::kern {
return Null<KPhysicalAddress>;
}
/* Determine the pool and direction we're allocating from. */
const auto [pool, dir] = DecodeOption(option);
/* Check that we're allocating a correctly aligned number of pages. */
const size_t min_align_pages = KPageHeap::GetBlockNumPages(m_min_heap_indexes[pool]);
if (!util::IsAligned(num_pages, min_align_pages)) {
return Null<KPhysicalAddress>;
}
/* Update our alignment. */
align_pages = std::max(align_pages, min_align_pages);
/* Lock the pool that we're allocating from. */
KScopedLightLock lk(m_pool_locks[pool]);
/* Choose a heap based on our page size request. */
@@ -226,6 +248,13 @@ namespace ams::kern {
}
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index) {
/* Check that we're allocating a correctly aligned number of pages. */
const size_t min_align_pages = KPageHeap::GetBlockNumPages(m_min_heap_indexes[pool]);
R_UNLESS(util::IsAligned(num_pages, min_align_pages), svc::ResultInvalidSize());
/* Adjust our min heap index to the pool minimum if needed. */
min_heap_index = std::max(min_heap_index, m_min_heap_indexes[pool]);
/* Choose a heap based on our page size request. */
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());

View file

@@ -141,10 +141,10 @@ namespace ams::kern {
/* Define helpers. */
auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
return KAddressSpaceInfo::GetAddressSpaceStart(flags, type);
};
auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
return KAddressSpaceInfo::GetAddressSpaceSize(flags, type);
};
/* Default to zero alias region extra size. */
@@ -664,11 +664,11 @@ namespace ams::kern {
}
}
Result KPageTableBase::CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
Result KPageTableBase::CheckMemoryState(KMemoryBlockManager::const_iterator it, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
/* Validate the states match expectation. */
R_UNLESS((info.m_state & state_mask) == state, svc::ResultInvalidCurrentMemory());
R_UNLESS((info.m_permission & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((info.m_attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetState() & state_mask) == state, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetPermission() & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetAttribute() & attr_mask) == attr, svc::ResultInvalidCurrentMemory());
R_SUCCEED();
}
@@ -679,28 +679,26 @@ namespace ams::kern {
/* Get information about the first block. */
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
/* If the start address isn't aligned, we need a block. */
const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) ? 1 : 0;
while (true) {
/* Validate against the provided masks. */
R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
R_TRY(this->CheckMemoryState(it, state_mask, state, perm_mask, perm, attr_mask, attr));
/* Break once we're done. */
if (last_addr <= info.GetLastAddress()) {
if (last_addr <= it->GetLastAddress()) {
break;
}
/* Advance our iterator. */
it++;
MESOSPHERE_ASSERT(it != m_memory_block_manager.cend());
info = it->GetMemoryInfo();
}
/* If the end address isn't aligned, we need a block. */
const size_t blocks_for_end_align = (util::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
const size_t blocks_for_end_align = (util::AlignUp(GetInteger(addr) + size, PageSize) != it->GetEndAddress()) ? 1 : 0;
if (out_blocks_needed != nullptr) {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
@@ -712,31 +710,27 @@ namespace ams::kern {
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Get information about the first block. */
KMemoryInfo info = it->GetMemoryInfo();
/* Validate all blocks in the range have correct state. */
const KMemoryState first_state = info.m_state;
const KMemoryPermission first_perm = info.m_permission;
const KMemoryAttribute first_attr = info.m_attribute;
const KMemoryState first_state = it->GetState();
const KMemoryPermission first_perm = it->GetPermission();
const KMemoryAttribute first_attr = it->GetAttribute();
while (true) {
/* Validate the current block. */
R_UNLESS(info.m_state == first_state, svc::ResultInvalidCurrentMemory());
R_UNLESS(info.m_permission == first_perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory());
R_UNLESS(it->GetState() == first_state, svc::ResultInvalidCurrentMemory());
R_UNLESS(it->GetPermission() == first_perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetAttribute() | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory());
/* Validate against the provided masks. */
R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
R_TRY(this->CheckMemoryState(it, state_mask, state, perm_mask, perm, attr_mask, attr));
/* Break once we're done. */
if (last_addr <= info.GetLastAddress()) {
if (last_addr <= it->GetLastAddress()) {
break;
}
/* Advance our iterator. */
it++;
MESOSPHERE_ASSERT(it != m_memory_block_manager.cend());
info = it->GetMemoryInfo();
}
/* Write output state. */
@@ -752,7 +746,7 @@ namespace ams::kern {
/* If the end address isn't aligned, we need a block. */
if (out_blocks_needed != nullptr) {
const size_t blocks_for_end_align = (util::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) ? 1 : 0;
const size_t blocks_for_end_align = (util::AlignDown(GetInteger(last_addr), PageSize) + PageSize != it->GetEndAddress()) ? 1 : 0;
*out_blocks_needed = blocks_for_end_align;
}
@@ -1176,17 +1170,14 @@ namespace ams::kern {
{
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
while (true) {
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* Check if the memory has code flag. */
if ((info.GetState() & KMemoryState_FlagCode) != 0) {
if ((it->GetState() & KMemoryState_FlagCode) != 0) {
any_code_pages = true;
break;
}
/* Check if we're done. */
if (dst_address + size - 1 <= info.GetLastAddress()) {
if (dst_address + size - 1 <= it->GetLastAddress()) {
break;
}
@@ -1355,14 +1346,13 @@ namespace ams::kern {
const size_t random_offset = KSystemControl::GenerateRandomRange(0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * alignment;
const KProcessAddress candidate = util::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;
KMemoryInfo info;
ams::svc::PageInfo page_info;
MESOSPHERE_R_ABORT_UNLESS(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), candidate));
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(candidate);
MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end());
if (info.m_state != KMemoryState_Free) { continue; }
if (it->GetState() != KMemoryState_Free) { continue; }
if (!(region_start <= candidate)) { continue; }
if (!(info.GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; }
if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= info.GetLastAddress())) { continue; }
if (!(it->GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; }
if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= it->GetLastAddress())) { continue; }
if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= region_start + region_num_pages * PageSize - 1)) { continue; }
address = candidate;
@@ -1393,10 +1383,8 @@ namespace ams::kern {
/* Iterate, counting blocks with the desired state. */
size_t total_size = 0;
for (KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(m_address_space_start); it != m_memory_block_manager.end(); ++it) {
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
if (info.GetState() == state) {
total_size += info.GetSize();
if (it->GetState() == state) {
total_size += it->GetSize();
}
}
@@ -1488,17 +1476,14 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* Determine the range to map. */
KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(start_address));
const KProcessAddress map_end_address = std::min(info.GetEndAddress(), GetInteger(end_address));
KProcessAddress map_address = std::max(GetInteger(it->GetAddress()), GetInteger(start_address));
const KProcessAddress map_end_address = std::min(GetInteger(it->GetEndAddress()), GetInteger(end_address));
MESOSPHERE_ABORT_UNLESS(map_end_address != map_address);
/* Determine if we should disable head merge. */
const bool disable_head_merge = info.GetAddress() >= GetInteger(start_address) && (info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Normal) != 0;
const KPageProperties map_properties = { info.GetPermission(), false, false, disable_head_merge ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
const bool disable_head_merge = it->GetAddress() >= GetInteger(start_address) && (it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Normal) != 0;
const KPageProperties map_properties = { it->GetPermission(), false, false, disable_head_merge ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
/* While we have pages to map, map them. */
size_t map_pages = (map_end_address - map_address) / PageSize;
@@ -1527,7 +1512,7 @@ namespace ams::kern {
}
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
break;
}
@@ -1802,6 +1787,11 @@ namespace ams::kern {
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* If we're creating an executable mapping, take and immediately release the scheduler lock. This will force a reschedule. */
if (is_x) {
KScopedSchedulerLock sl;
}
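The new block takes the scheduler lock and lets it drop immediately; in Mesosphere, releasing that lock triggers a rescheduling pass, so the empty scope exists purely for the release side effect. The scoping shape, sketched with std::mutex (which has no such side effect, so this only demonstrates the idiom):

#include <mutex>

std::mutex g_scheduler_lock; /* stand-in for the kernel's scheduler lock */

void ForceReschedule() {
    /* Take and immediately release the lock: only the release-side effect
     * (a rescheduling pass, in the kernel) is wanted. */
    {
        std::scoped_lock sl(g_scheduler_lock);
    }
}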
/* Perform mapping operation. */
const KPageProperties properties = { new_perm, false, false, DisableMergeAttribute_None };
const auto operation = was_x ? OperationType_ChangePermissionsAndRefreshAndFlush : OperationType_ChangePermissions;
@@ -2032,19 +2022,18 @@ namespace ams::kern {
address = util::AlignDown(GetInteger(address), PageSize);
/* Verify that we can query the address. */
KMemoryInfo info;
ams::svc::PageInfo page_info;
R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address));
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
R_UNLESS(it != m_memory_block_manager.end(), svc::ResultInvalidCurrentMemory());
/* Check the memory state. */
R_TRY(this->CheckMemoryState(info, KMemoryState_FlagCanQueryPhysical, KMemoryState_FlagCanQueryPhysical, KMemoryPermission_UserReadExecute, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
R_TRY(this->CheckMemoryState(it, KMemoryState_FlagCanQueryPhysical, KMemoryState_FlagCanQueryPhysical, KMemoryPermission_UserReadExecute, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
/* Prepare to traverse. */
KPhysicalAddress phys_addr;
size_t phys_size;
KProcessAddress virt_addr = info.GetAddress();
KProcessAddress end_addr = info.GetEndAddress();
KProcessAddress virt_addr = it->GetAddress();
KProcessAddress end_addr = it->GetEndAddress();
/* Perform traversal. */
{
@@ -2711,18 +2700,37 @@ namespace ams::kern {
R_RETURN(cpu::InvalidateDataCache(GetVoidPointer(address), size));
}
Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
bool KPageTableBase::CanReadWriteDebugMemory(KProcessAddress address, size_t size, bool force_debug_prod) {
/* Check pre-conditions. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* If the memory is debuggable and user-readable, we can perform the access. */
if (R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_NotMapped | KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) {
return true;
}
/* If we're in debug mode, and the process isn't force debug prod, check if the memory is debuggable and kernel-readable and user-executable. */
if (KTargetSystem::IsDebugMode() && !force_debug_prod) {
if (R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_KernelRead | KMemoryPermission_UserExecute, KMemoryPermission_KernelRead | KMemoryPermission_UserExecute, KMemoryAttribute_None, KMemoryAttribute_None))) {
return true;
}
}
/* If neither of the above checks passed, we can't access the memory. */
return false;
}
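CanReadWriteDebugMemory expresses a two-tier policy: debuggable, user-readable memory is always accessible, while debuggable code pages (kernel-readable, user-executable) additionally require debug mode and a non-ForceDebugProd debugger. A condensed sketch with simplified state/permission bits:

#include <cstdint>

/* Illustrative state/permission bits; the kernel's flags are richer. */
constexpr uint32_t State_CanDebug   = 1u << 0;
constexpr uint32_t Perm_UserRead    = 1u << 0;
constexpr uint32_t Perm_KernelRead  = 1u << 1;
constexpr uint32_t Perm_UserExecute = 1u << 2;

bool CanReadWriteDebugMemory(uint32_t state, uint32_t perm, bool debug_mode, bool force_debug_prod) {
    /* Tier 1: debuggable, user-readable memory is always accessible. */
    if ((state & State_CanDebug) != 0 && (perm & Perm_UserRead) != 0) {
        return true;
    }
    /* Tier 2: in debug mode, non-ForceDebugProd debuggers may also access
     * kernel-readable, user-executable (i.e. code) pages. */
    if (debug_mode && !force_debug_prod) {
        const uint32_t code_perm = Perm_KernelRead | Perm_UserExecute;
        if ((state & State_CanDebug) != 0 && (perm & code_perm) == code_perm) {
            return true;
        }
    }
    return false;
}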
Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size, bool force_debug_prod) {
/* Lightly validate the region is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Require that the memory either be user readable or debuggable. */
const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
/* Require that the memory either be user-readable-and-mapped or debug-accessible. */
const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_NotMapped | KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
if (!can_read) {
const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory());
R_UNLESS(this->CanReadWriteDebugMemory(address, size, force_debug_prod), svc::ResultInvalidCurrentMemory());
}
/* Get the impl. */
@@ -2804,11 +2812,10 @@ namespace ams::kern {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Require that the memory either be user writable or debuggable. */
const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None));
if (!can_read) {
const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory());
/* Require that the memory either be user-writable-and-mapped or debug-accessible. */
const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_NotMapped | KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None));
if (!can_write) {
R_UNLESS(this->CanReadWriteDebugMemory(address, size, false), svc::ResultInvalidCurrentMemory());
}
/* Get the impl. */
@@ -3827,15 +3834,15 @@ namespace ams::kern {
switch (dst_state) {
case KMemoryState_Ipc:
test_state = KMemoryState_FlagCanUseIpc;
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked;
test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_IpcLocked));
break;
case KMemoryState_NonSecureIpc:
test_state = KMemoryState_FlagCanUseNonSecureIpc;
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_IpcLocked));
break;
case KMemoryState_NonDeviceIpc:
test_state = KMemoryState_FlagCanUseNonDeviceIpc;
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_IpcLocked));
break;
default:
R_THROW(svc::ResultInvalidCombination());
@@ -3854,27 +3861,25 @@ namespace ams::kern {
/* Iterate, mapping as needed. */
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
while (true) {
const KMemoryInfo info = it->GetMemoryInfo();
/* Validate the current block. */
R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, test_attr_mask, KMemoryAttribute_None));
R_TRY(this->CheckMemoryState(it, test_state, test_state, test_perm, test_perm, test_attr_mask, KMemoryAttribute_None));
if (mapping_src_start < mapping_src_end && GetInteger(mapping_src_start) < info.GetEndAddress() && info.GetAddress() < GetInteger(mapping_src_end)) {
const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) ? info.GetAddress() : GetInteger(mapping_src_start);
const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() : GetInteger(mapping_src_end);
if (mapping_src_start < mapping_src_end && GetInteger(mapping_src_start) < GetInteger(it->GetEndAddress()) && GetInteger(it->GetAddress()) < GetInteger(mapping_src_end)) {
const auto cur_start = it->GetAddress() >= GetInteger(mapping_src_start) ? GetInteger(it->GetAddress()) : GetInteger(mapping_src_start);
const auto cur_end = mapping_src_last >= GetInteger(it->GetLastAddress()) ? GetInteger(it->GetEndAddress()) : GetInteger(mapping_src_end);
const size_t cur_size = cur_end - cur_start;
if (info.GetAddress() < GetInteger(mapping_src_start)) {
if (GetInteger(it->GetAddress()) < GetInteger(mapping_src_start)) {
++blocks_needed;
}
if (mapping_src_last < info.GetLastAddress()) {
if (mapping_src_last < GetInteger(it->GetLastAddress())) {
++blocks_needed;
}
/* Set the permissions on the block, if we need to. */
if ((info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != src_perm) {
const DisableMergeAttribute head_body_attr = (GetInteger(mapping_src_start) >= info.GetAddress()) ? DisableMergeAttribute_DisableHeadAndBody : DisableMergeAttribute_None;
const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end)) ? DisableMergeAttribute_DisableTail : DisableMergeAttribute_None;
if ((it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != src_perm) {
const DisableMergeAttribute head_body_attr = (GetInteger(mapping_src_start) >= GetInteger(it->GetAddress())) ? DisableMergeAttribute_DisableHeadAndBody : DisableMergeAttribute_None;
const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end)) ? DisableMergeAttribute_DisableTail : DisableMergeAttribute_None;
const KPageProperties properties = { src_perm, false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}
@@ -3884,7 +3889,7 @@ namespace ams::kern {
}
/* If the block is at the end, we're done. */
if (aligned_src_last <= info.GetLastAddress()) {
if (aligned_src_last <= GetInteger(it->GetLastAddress())) {
break;
}
@@ -4248,56 +4253,50 @@ namespace ams::kern {
const auto mapped_last = mapped_end - 1;
/* Get current and next iterators. */
KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = start_it;
KMemoryBlockManager::const_iterator cur_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = cur_it;
++next_it;
/* Get the current block info. */
KMemoryInfo cur_info = start_it->GetMemoryInfo();
/* Create tracking variables. */
KProcessAddress cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
bool first = cur_info.GetIpcDisableMergeCount() == 1 && (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
KProcessAddress cur_address = cur_it->GetAddress();
size_t cur_size = cur_it->GetSize();
bool cur_perm_eq = cur_it->GetPermission() == cur_it->GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_it->GetIpcLockCount() == 1;
bool first = cur_it->GetIpcDisableMergeCount() == 1 && (cur_it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
/* Check that we have a next block. */
MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());
/* Get the next info. */
const KMemoryInfo next_info = next_it->GetMemoryInfo();
/* Check if we can consolidate the next block's permission set with the current one. */
const bool next_perm_eq = next_info.GetPermission() == next_info.GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
const bool next_perm_eq = next_it->GetPermission() == next_it->GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_it->GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_it->GetOriginalPermission() == next_it->GetOriginalPermission()) {
/* We can consolidate the reprotection for the current and next block into a single call. */
cur_size += next_info.GetSize();
cur_size += next_it->GetSize();
} else {
/* We have to operate on the current block. */
if ((cur_needs_set_perm || first) && !cur_perm_eq) {
const KPageProperties properties = { cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
const KPageProperties properties = { cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
}
/* Advance. */
cur_address = next_info.GetAddress();
cur_size = next_info.GetSize();
cur_address = next_it->GetAddress();
cur_size = next_it->GetSize();
first = false;
}
/* Advance. */
cur_info = next_info;
cur_perm_eq = next_perm_eq;
cur_needs_set_perm = next_needs_set_perm;
++next_it;
cur_it = next_it++;
}
/* Process the last block. */
if ((first || cur_needs_set_perm) && !cur_perm_eq) {
const KPageProperties properties = { cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
const KPageProperties properties = { cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
}
}
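Both of these reprotection loops batch their work: as long as the next block needs the same decision, the current run is extended and a single Operate call covers the whole run. The batching alone, reduced to a sketch where one bool stands in for the kernel's full comparison (permission equality, lock counts, original permissions):

#include <cstdio>
#include <vector>

struct Range { unsigned long long addr; unsigned long long size; bool needs_set_perm; };

/* Coalesce adjacent ranges with the same decision into one operation. */
void ApplyBatched(const std::vector<Range> &blocks) {
    for (size_t i = 0; i < blocks.size(); ) {
        unsigned long long addr = blocks[i].addr;
        unsigned long long size = blocks[i].size;
        const bool needs = blocks[i].needs_set_perm;
        /* Extend the run while the next block's decision matches. */
        while (i + 1 < blocks.size() && blocks[i + 1].needs_set_perm == needs) {
            size += blocks[++i].size;
        }
        if (needs) {
            /* Stand-in for the kernel's Operate call: one per run. */
            std::printf("Operate(%#llx, size=%#llx)\n", addr, size);
        }
        ++i;
    }
}

int main() {
    ApplyBatched({ { 0x1000, 0x1000, true  },
                   { 0x2000, 0x3000, true  },   /* merged with the previous */
                   { 0x5000, 0x1000, false } });
    return 0;
}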
@@ -4306,41 +4305,37 @@ namespace ams::kern {
/* Iterate, reprotecting as needed. */
{
/* Get current and next iterators. */
KMemoryBlockManager::const_iterator start_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = start_it;
KMemoryBlockManager::const_iterator cur_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = cur_it;
++next_it;
/* Validate the current block. */
KMemoryInfo cur_info = start_it->GetMemoryInfo();
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(cur_it, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
/* Create tracking variables. */
KProcessAddress cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
bool first = cur_info.GetIpcDisableMergeCount() == 1 && (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
KProcessAddress cur_address = cur_it->GetAddress();
size_t cur_size = cur_it->GetSize();
bool cur_perm_eq = cur_it->GetPermission() == cur_it->GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_it->GetIpcLockCount() == 1;
bool first = cur_it->GetIpcDisableMergeCount() == 1 && (cur_it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
while ((cur_address + cur_size - 1) < mapping_last) {
/* Check that we have a next block. */
MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());
/* Get the next info. */
const KMemoryInfo next_info = next_it->GetMemoryInfo();
/* Validate the next block. */
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(next_info, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(next_it, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
/* Check if we can consolidate the next block's permission set with the current one. */
const bool next_perm_eq = next_info.GetPermission() == next_info.GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
const bool next_perm_eq = next_it->GetPermission() == next_it->GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_it->GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_it->GetOriginalPermission() == next_it->GetOriginalPermission()) {
/* We can consolidate the reprotection for the current and next block into a single call. */
cur_size += next_info.GetSize();
cur_size += next_it->GetSize();
} else {
/* We have to operate on the current block. */
if ((cur_needs_set_perm || first) && !cur_perm_eq) {
const KPageProperties properties = { cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), false, false, first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None };
const KPageProperties properties = { cur_needs_set_perm ? cur_it->GetOriginalPermission() : cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}
@@ -4348,24 +4343,24 @@ namespace ams::kern {
mapped_size += cur_size;
/* Advance. */
cur_address = next_info.GetAddress();
cur_size = next_info.GetSize();
cur_address = next_it->GetAddress();
cur_size = next_it->GetSize();
first = false;
}
/* Advance. */
cur_info = next_info;
cur_perm_eq = next_perm_eq;
cur_needs_set_perm = next_needs_set_perm;
++next_it;
cur_it = next_it++;
}
/* Process the last block. */
const auto lock_count = cur_info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
const auto lock_count = cur_it->GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
const DisableMergeAttribute head_body_attr = first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
const DisableMergeAttribute tail_attr = lock_count == 1 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
const KPageProperties properties = { cur_needs_set_perm ? cur_info.GetOriginalPermission() : cur_info.GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
const KPageProperties properties = { cur_needs_set_perm ? cur_it->GetOriginalPermission() : cur_it->GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}
}
@@ -4398,38 +4393,36 @@ namespace ams::kern {
/* Iterate over blocks, fixing permissions. */
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
while (true) {
const KMemoryInfo info = it->GetMemoryInfo();
const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) ? info.GetAddress() : GetInteger(src_map_start);
const auto cur_end = src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
const auto cur_start = it->GetAddress() >= GetInteger(src_map_start) ? it->GetAddress() : GetInteger(src_map_start);
const auto cur_end = src_map_last <= it->GetLastAddress() ? src_map_end : it->GetEndAddress();
/* If we can, fix the protections on the block. */
if ((info.GetIpcLockCount() == 0 && (info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) ||
(info.GetIpcLockCount() != 0 && (info.GetOriginalPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm))
if ((it->GetIpcLockCount() == 0 && (it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) ||
(it->GetIpcLockCount() != 0 && (it->GetOriginalPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm))
{
/* Check if we actually need to fix the protections on the block. */
if (cur_end == src_map_end || info.GetAddress() <= GetInteger(src_map_start) || (info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) {
const bool start_nc = (info.GetAddress() == GetInteger(src_map_start)) ? ((info.GetDisableMergeAttribute() & (KMemoryBlockDisableMergeAttribute_Locked | KMemoryBlockDisableMergeAttribute_IpcLeft)) == 0) : info.GetAddress() <= GetInteger(src_map_start);
if (cur_end == src_map_end || it->GetAddress() <= GetInteger(src_map_start) || (it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) {
const bool start_nc = (it->GetAddress() == GetInteger(src_map_start)) ? ((it->GetDisableMergeAttribute() & (KMemoryBlockDisableMergeAttribute_Locked | KMemoryBlockDisableMergeAttribute_IpcLeft)) == 0) : it->GetAddress() <= GetInteger(src_map_start);
const DisableMergeAttribute head_body_attr = start_nc ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
DisableMergeAttribute tail_attr;
if (cur_end == src_map_end && info.GetEndAddress() == src_map_end) {
if (cur_end == src_map_end && it->GetEndAddress() == src_map_end) {
auto next_it = it;
++next_it;
const auto lock_count = info.GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
const auto lock_count = it->GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
tail_attr = lock_count == 0 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
} else {
tail_attr = DisableMergeAttribute_None;
}
const KPageProperties properties = { info.GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
const KPageProperties properties = { it->GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
}
}
/* If we're past the end of the region, we're done. */
if (src_map_last <= info.GetLastAddress()) {
if (src_map_last <= it->GetLastAddress()) {
break;
}
@@ -4468,24 +4461,21 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (info.GetState() != KMemoryState_Free) {
if (last_address <= it->GetLastAddress()) {
if (it->GetState() != KMemoryState_Free) {
mapped_size += (last_address + 1 - cur_address);
}
break;
}
/* Track the memory if it's mapped. */
if (info.GetState() != KMemoryState_Free) {
mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
if (it->GetState() != KMemoryState_Free) {
mapped_size += it->GetEndAddress() - cur_address;
}
/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}
@@ -4527,21 +4517,18 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
const bool is_free = info.GetState() == KMemoryState_Free;
const bool is_free = it->GetState() == KMemoryState_Free;
if (is_free) {
if (info.GetAddress() < GetInteger(address)) {
if (it->GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
if (last_address < it->GetLastAddress()) {
++num_allocator_blocks;
}
}
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
if (!is_free) {
checked_mapped_size += (last_address + 1 - cur_address);
}
@@ -4550,11 +4537,11 @@ namespace ams::kern {
/* Track the memory if it's mapped. */
if (!is_free) {
checked_mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
checked_mapped_size += it->GetEndAddress() - cur_address;
}
/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}
@@ -4594,26 +4581,23 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* If the memory state is free, we mapped it and need to unmap it. */
if (info.GetState() == KMemoryState_Free) {
if (it->GetState() == KMemoryState_Free) {
/* Determine the range to unmap. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_unmap_address + 1 - cur_address) / PageSize;
const size_t cur_pages = std::min(it->GetEndAddress() - cur_address, last_unmap_address + 1 - cur_address) / PageSize;
/* Unmap. */
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
}
/* Check if we're done. */
if (last_unmap_address <= info.GetLastAddress()) {
if (last_unmap_address <= it->GetLastAddress()) {
break;
}
/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}
}
@@ -4632,14 +4616,11 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* If it's unmapped, we need to map it. */
if (info.GetState() == KMemoryState_Free) {
if (it->GetState() == KMemoryState_Free) {
/* Determine the range to map. */
const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, cur_address == this->GetAliasRegionStart() ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;
size_t map_pages = std::min(it->GetEndAddress() - cur_address, last_address + 1 - cur_address) / PageSize;
/* While we have pages to map, map them. */
{
@ -4680,12 +4661,12 @@ namespace ams::kern {
}
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
break;
}
/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}
@ -4737,26 +4718,23 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* Verify the memory's state. */
const bool is_normal = info.GetState() == KMemoryState_Normal && info.GetAttribute() == 0;
const bool is_free = info.GetState() == KMemoryState_Free;
const bool is_normal = it->GetState() == KMemoryState_Normal && it->GetAttribute() == 0;
const bool is_free = it->GetState() == KMemoryState_Free;
R_UNLESS(is_normal || is_free, svc::ResultInvalidCurrentMemory());
if (is_normal) {
R_UNLESS(info.GetAttribute() == KMemoryAttribute_None, svc::ResultInvalidCurrentMemory());
R_UNLESS(it->GetAttribute() == KMemoryAttribute_None, svc::ResultInvalidCurrentMemory());
if (map_start_address == Null<KProcessAddress>) {
map_start_address = cur_address;
}
map_last_address = (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
map_last_address = (last_address >= it->GetLastAddress()) ? it->GetLastAddress() : last_address;
if (info.GetAddress() < GetInteger(address)) {
if (it->GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < info.GetLastAddress()) {
if (last_address < it->GetLastAddress()) {
++num_allocator_blocks;
}
@ -4764,12 +4742,12 @@ namespace ams::kern {
}
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
break;
}
/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}
@ -4802,26 +4780,23 @@ namespace ams::kern {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Get the memory info. */
const KMemoryInfo info = it->GetMemoryInfo();
/* If the memory state is normal, we need to unmap it. */
if (info.GetState() == KMemoryState_Normal) {
if (it->GetState() == KMemoryState_Normal) {
/* Determine the range to unmap. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;
const size_t cur_pages = std::min(it->GetEndAddress() - cur_address, last_address + 1 - cur_address) / PageSize;
/* Unmap. */
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
}
/* Check if we're done. */
if (last_address <= info.GetLastAddress()) {
if (last_address <= it->GetLastAddress()) {
break;
}
/* Advance. */
cur_address = info.GetEndAddress();
cur_address = it->GetEndAddress();
++it;
}
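
The hunks above replace the materialized KMemoryInfo copy with direct iterator accessors. A minimal standalone sketch of the resulting traversal pattern, using stand-in types rather than Mesosphere's (Block, State, and GetMappedSize are illustrative):

#include <cstdint>
#include <vector>

enum class State { Free, Mapped };

struct Block {
    uint64_t end; /* exclusive end address of the block */
    State state;
    uint64_t GetEndAddress()  const { return end; }
    uint64_t GetLastAddress() const { return end - 1; }
    State    GetState()       const { return state; }
};

/* Walk contiguous blocks starting at cur_address, accumulate the size of
   non-free memory, and stop at the block containing last_address. */
uint64_t GetMappedSize(const std::vector<Block> &blocks, uint64_t cur_address, uint64_t last_address) {
    uint64_t mapped_size = 0;
    for (auto it = blocks.begin(); it != blocks.end(); ++it) {
        if (last_address <= it->GetLastAddress()) {
            if (it->GetState() != State::Free) {
                mapped_size += last_address + 1 - cur_address;
            }
            break;
        }
        if (it->GetState() != State::Free) {
            mapped_size += it->GetEndAddress() - cur_address;
        }
        cur_address = it->GetEndAddress();
    }
    return mapped_size;
}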

View file

@ -220,18 +220,8 @@ namespace ams::kern {
m_running_thread_switch_counts[i] = 0;
}
/* Set max memory based on address space type. */
switch ((params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask)) {
case ams::svc::CreateProcessFlag_AddressSpace32Bit:
case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated:
case ams::svc::CreateProcessFlag_AddressSpace64Bit:
m_max_process_memory = m_page_table.GetHeapRegionSize();
break;
case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias:
m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize();
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
/* Set max memory. */
m_max_process_memory = m_page_table.GetHeapRegionSize();
/* Generate random entropy. */
KSystemControl::GenerateRandom(m_entropy, util::size(m_entropy));
@ -299,7 +289,7 @@ namespace ams::kern {
/* Setup page table. */
{
const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetSlabIndex()));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
@ -378,7 +368,7 @@ namespace ams::kern {
/* Setup page table. */
{
const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit));
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit, this->GetSlabIndex()));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
@ -934,7 +924,6 @@ namespace ams::kern {
MESOSPHERE_ABORT_UNLESS(m_main_thread_stack_size == 0);
/* Ensure that we're allocating a valid stack. */
stack_size = util::AlignUp(stack_size, PageSize);
R_UNLESS(stack_size + m_code_size <= m_max_process_memory, svc::ResultOutOfMemory());
R_UNLESS(stack_size + m_code_size >= m_code_size, svc::ResultOutOfMemory());
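
The surviving check above is an unsigned-wraparound guard: for unsigned a and b, a + b overflows exactly when the sum comes out smaller than an operand. A small sketch under that assumption:

#include <cstddef>

/* For unsigned arithmetic, a + b wraps iff a + b < b. */
constexpr bool AddWouldOverflow(size_t a, size_t b) {
    return a + b < b;
}

static_assert(!AddWouldOverflow(0x1000, 0x2000));
static_assert(AddWouldOverflow(~static_cast<size_t>(0), 1));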

View file

@ -124,31 +124,14 @@ namespace ams::kern {
}
/* System Initialization. */
void KSystemControlBase::ConfigureKTargetSystem() {
/* By default, use the default config set in the KTargetSystem header. */
}
void KSystemControlBase::InitializePhase1() {
/* Configure KTargetSystem. */
/* Enable KTargetSystem. */
{
/* Set IsDebugMode. */
{
KTargetSystem::SetIsDebugMode(true);
/* If debug mode, we want to initialize uart logging. */
KTargetSystem::EnableDebugLogging(true);
}
/* Set Kernel Configuration. */
{
KTargetSystem::EnableDebugMemoryFill(false);
KTargetSystem::EnableUserExceptionHandlers(true);
KTargetSystem::EnableDynamicResourceLimits(true);
KTargetSystem::EnableUserPmuAccess(false);
}
/* Set Kernel Debugging. */
{
/* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */
/* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */
KTargetSystem::EnableKernelDebugging(true);
}
KTargetSystem::SetInitialized();
}
/* Initialize random and resource limit. */
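
With this change the configuration lives in an overridable ConfigureKTargetSystem() hook instead of inline in InitializePhase1(); a later hunk in this diff invokes it from a constructor-attribute function. A minimal standalone analogue of that shape (class names and the puts call are illustrative, not the kernel's):

#include <cstdio>

struct KSystemControlBase {
    static void ConfigureKTargetSystem() {
        /* By default, keep the defaults baked into the KTargetSystem header. */
    }
};

struct KSystemControl : KSystemControlBase {
    static void ConfigureKTargetSystem() {
        std::puts("board-specific target-system configuration goes here");
    }
};

/* Runs before main(), mirroring the constructor-attribute hook added to
   Kernel.cpp later in this diff. */
__attribute__((constructor)) static void RunConfigure() {
    KSystemControl::ConfigureKTargetSystem();
}

int main() { return 0; }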

View file

@ -1438,7 +1438,10 @@ namespace ams::kern {
this->SetState(ThreadState_Waiting);
/* Set our wait queue. */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdangling-pointer"
m_wait_queue = queue;
#pragma GCC diagnostic pop
}
void KThread::NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result) {
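
The pragmas above scope a suppression of GCC's -Wdangling-pointer diagnostic to a single statement. A generic sketch of the same pattern (the function and its rationale are illustrative, not KThread's logic):

/* GCC 12+ may emit -Wdangling-pointer when a pointer it suspects of pointing
   to shorter-lived storage escapes; push/pop keeps the suppression local. */
void StoreEscapingPointer(int **slot, int *pointer_gcc_suspects) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdangling-pointer"
    *slot = pointer_gcc_suspects;
#pragma GCC diagnostic pop
}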

View file

@ -56,8 +56,9 @@ namespace ams::kern {
{
const auto &management_region = KMemoryLayout::GetPoolManagementRegion();
MESOSPHERE_ABORT_UNLESS(management_region.GetEndAddress() != 0);
static_assert(util::size(MinimumMemoryManagerAlignmentShifts) == KMemoryManager::Pool_Count);
Kernel::GetMemoryManager().Initialize(management_region.GetAddress(), management_region.GetSize());
Kernel::GetMemoryManager().Initialize(management_region.GetAddress(), management_region.GetSize(), MinimumMemoryManagerAlignmentShifts);
}
/* Copy the Initial Process Binary to safe memory. */
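
The new Initialize signature takes a per-pool minimum-alignment-shift table whose length is pinned to the pool count at compile time. A standalone sketch of that contract (pool names and shift values are placeholders):

#include <cstddef>

enum Pool { Pool_Application, Pool_Applet, Pool_System, Pool_SystemNonSecure, Pool_Count };

/* log2 of the minimum allocation alignment per pool; 12 == 4 KiB pages. */
constexpr size_t MinimumMemoryManagerAlignmentShifts[] = { 12, 12, 12, 12 };

static_assert(sizeof(MinimumMemoryManagerAlignmentShifts) / sizeof(MinimumMemoryManagerAlignmentShifts[0]) == Pool_Count);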

View file

@ -41,17 +41,22 @@ namespace ams::kern::svc {
case ams::svc::ArbitrationType_WaitIfLessThan:
case ams::svc::ArbitrationType_DecrementAndWaitIfLessThan:
case ams::svc::ArbitrationType_WaitIfEqual:
case ams::svc::ArbitrationType_WaitIfEqual64:
return true;
default:
return false;
}
}
Result WaitForAddress(uintptr_t address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) {
Result WaitForAddress(uintptr_t address, ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) {
/* Validate input. */
R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory());
R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress());
R_UNLESS(IsValidArbitrationType(arb_type), svc::ResultInvalidEnumValue());
R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory());
if (arb_type == ams::svc::ArbitrationType_WaitIfEqual64) {
R_UNLESS(util::IsAligned(address, sizeof(int64_t)), svc::ResultInvalidAddress());
} else {
R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress());
}
R_UNLESS(IsValidArbitrationType(arb_type), svc::ResultInvalidEnumValue());
/* Convert timeout from nanoseconds to ticks. */
s64 timeout;
@ -85,7 +90,7 @@ namespace ams::kern::svc {
/* ============================= 64 ABI ============================= */
Result WaitForAddress64(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) {
Result WaitForAddress64(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) {
R_RETURN(WaitForAddress(address, arb_type, value, timeout_ns));
}
@ -95,7 +100,7 @@ namespace ams::kern::svc {
/* ============================= 64From32 ABI ============================= */
Result WaitForAddress64From32(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) {
Result WaitForAddress64From32(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) {
R_RETURN(WaitForAddress(address, arb_type, value, timeout_ns));
}
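
The widened SVC takes a 64-bit value, and the address-alignment requirement now depends on the arbitration type. A standalone sketch of the validation (stand-in enum; real result codes omitted):

#include <cstddef>
#include <cstdint>

enum class ArbitrationType { WaitIfLessThan, DecrementAndWaitIfLessThan, WaitIfEqual, WaitIfEqual64 };

constexpr bool IsAligned(uintptr_t value, size_t align) { return (value & (align - 1)) == 0; }

/* WaitIfEqual64 compares a 64-bit value, so it demands 8-byte alignment;
   the 32-bit arbitration types keep the 4-byte requirement. */
constexpr bool HasValidAlignment(uintptr_t address, ArbitrationType type) {
    const size_t required = (type == ArbitrationType::WaitIfEqual64) ? sizeof(int64_t) : sizeof(int32_t);
    return IsAligned(address, required);
}

static_assert(HasValidAlignment(0x1004, ArbitrationType::WaitIfEqual));
static_assert(!HasValidAlignment(0x1004, ArbitrationType::WaitIfEqual64));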

View file

@ -24,6 +24,9 @@ namespace ams::kern::svc {
constexpr inline int32_t MaximumDebuggableThreadCount = 0x60;
Result DebugActiveProcess(ams::svc::Handle *out_handle, uint64_t process_id) {
/* Check that the SVC can be used. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Get the process from its id. */
KProcess *process = KProcess::GetProcessFromId(process_id);
R_UNLESS(process != nullptr, svc::ResultInvalidProcessId());
@ -32,9 +35,8 @@ namespace ams::kern::svc {
ON_SCOPE_EXIT { process->Close(); };
/* Check that the debugging is allowed. */
if (!process->IsPermittedDebug()) {
R_UNLESS(GetCurrentProcess().CanForceDebug(), svc::ResultInvalidState());
}
const bool allowable = process->IsPermittedDebug() || GetCurrentProcess().CanForceDebug() || GetCurrentProcess().CanForceDebugProd();
R_UNLESS(allowable, svc::ResultInvalidState());
/* Disallow debugging one's own process, to prevent softlocks. */
R_UNLESS(process != GetCurrentProcessPointer(), svc::ResultInvalidState());
@ -92,6 +94,9 @@ namespace ams::kern::svc {
template<typename EventInfoType>
Result GetDebugEvent(KUserPointer<EventInfoType *> out_info, ams::svc::Handle debug_handle) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
@ -164,6 +169,9 @@ namespace ams::kern::svc {
}
Result GetDebugThreadContext(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Validate the context flags. */
R_UNLESS((context_flags | ams::svc::ThreadContextFlag_All) == ams::svc::ThreadContextFlag_All, svc::ResultInvalidEnumValue());
@ -220,6 +228,9 @@ namespace ams::kern::svc {
}
Result QueryDebugProcessMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, uintptr_t address) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
@ -261,6 +272,9 @@ namespace ams::kern::svc {
}
Result ReadDebugProcessMemory(uintptr_t buffer, ams::svc::Handle debug_handle, uintptr_t address, size_t size) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Validate address / size. */
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
@ -306,6 +320,9 @@ namespace ams::kern::svc {
}
Result GetDebugThreadParam(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
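
Each debug SVC above gains the same gate: usable in debug mode, or by a caller holding the new ForceDebugProd capability; DebugActiveProcess additionally accepts targets that permit debug or callers with ForceDebug. A boolean sketch of both predicates (parameter names are illustrative):

/* Gate shared by the debug SVCs above. */
constexpr bool CanInvokeDebugSvc(bool is_debug_mode, bool caller_force_debug_prod) {
    return is_debug_mode || caller_force_debug_prod;
}

/* Per-target check used by DebugActiveProcess. */
constexpr bool CanDebugProcess(bool target_permits_debug, bool caller_force_debug, bool caller_force_debug_prod) {
    return target_permits_debug || caller_force_debug || caller_force_debug_prod;
}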

View file

@ -314,6 +314,19 @@ namespace ams::kern::svc {
*out = io_region->GetHint();
}
break;
case ams::svc::InfoType_TransferMemoryHint:
{
/* Verify the sub-type is valid. */
R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination());
/* Get the transfer memory from its handle. */
KScopedAutoObject transfer_memory = GetCurrentProcess().GetHandleTable().GetObject<KTransferMemory>(handle);
R_UNLESS(transfer_memory.IsNotNull(), svc::ResultInvalidHandle());
/* Get the transfer memory's address hint. */
*out = transfer_memory->GetHint();
}
break;
case ams::svc::InfoType_MesosphereMeta:
{
/* Verify the handle is invalid. */
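
The new info type follows the usual pattern: resolve the handle, fail with an invalid-handle result if it is absent, then report the object's address hint. A minimal analogue with an illustrative map-based handle table (not the kernel's KHandleTable):

#include <cstdint>
#include <optional>
#include <unordered_map>

struct TransferMemory { uint64_t hint; };

/* An absent entry plays the role of svc::ResultInvalidHandle(). */
std::optional<uint64_t> QueryTransferMemoryHint(const std::unordered_map<uint32_t, TransferMemory> &handle_table, uint32_t handle) {
    const auto it = handle_table.find(handle);
    if (it == handle_table.end()) {
        return std::nullopt;
    }
    return it->second.hint;
}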

View file

@ -48,10 +48,11 @@ namespace ams::kern::svc {
Result MapPhysicalMemory(uintptr_t address, size_t size) {
/* Validate address / size. */
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
const size_t min_alignment = Kernel::GetMemoryManager().GetMinimumAlignment(GetCurrentProcess().GetMemoryPool());
R_UNLESS(util::IsAligned(address, min_alignment), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, min_alignment), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
/* Verify that the process has system resource. */
auto &process = GetCurrentProcess();
@ -69,10 +70,11 @@ namespace ams::kern::svc {
Result UnmapPhysicalMemory(uintptr_t address, size_t size) {
/* Validate address / size. */
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
const size_t min_alignment = Kernel::GetMemoryManager().GetMinimumAlignment(GetCurrentProcess().GetMemoryPool());
R_UNLESS(util::IsAligned(address, min_alignment), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, min_alignment), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion());
/* Verify that the process has system resource. */
auto &process = GetCurrentProcess();
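
Both paths above swap the fixed PageSize checks for a pool-dependent minimum alignment. A standalone sketch of the validation (the final condition mirrors the address < address + size wraparound guard):

#include <cstddef>
#include <cstdint>

constexpr bool IsAligned(uint64_t value, size_t align) { return (value % align) == 0; }

constexpr bool ValidatePhysicalMemoryArgs(uint64_t address, uint64_t size, size_t min_alignment) {
    return IsAligned(address, min_alignment)
        && IsAligned(size, min_alignment)
        && size > 0
        && address < address + size; /* rejects wraparound of the end address */
}

static_assert(ValidatePhysicalMemoryArgs(0x200000, 0x200000, 0x200000));
static_assert(!ValidatePhysicalMemoryArgs(0x201000, 0x200000, 0x200000));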

View file

@ -71,6 +71,9 @@ namespace ams::kern::svc {
}
Result GetProcessList(int32_t *out_num_processes, KUserPointer<uint64_t *> out_process_ids, int32_t max_out_count) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Validate that the out count is valid. */
R_UNLESS((0 <= max_out_count && max_out_count <= static_cast<int32_t>(std::numeric_limits<int32_t>::max() / sizeof(u64))), svc::ResultOutOfRange());
@ -110,8 +113,8 @@ namespace ams::kern::svc {
case ams::svc::CreateProcessFlag_AddressSpace32Bit:
case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias:
{
map_start = KAddressSpaceInfo::GetAddressSpaceStart(32, KAddressSpaceInfo::Type_MapSmall);
map_size = KAddressSpaceInfo::GetAddressSpaceSize(32, KAddressSpaceInfo::Type_MapSmall);
map_start = KAddressSpaceInfo::GetAddressSpaceStart(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_MapSmall);
map_size = KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_MapSmall);
map_end = map_start + map_size;
}
break;
@ -120,8 +123,8 @@ namespace ams::kern::svc {
/* 64-bit address space requires 64-bit process. */
R_UNLESS(is_64_bit, svc::ResultInvalidCombination());
map_start = KAddressSpaceInfo::GetAddressSpaceStart(36, KAddressSpaceInfo::Type_MapSmall);
map_size = KAddressSpaceInfo::GetAddressSpaceSize(36, KAddressSpaceInfo::Type_MapSmall);
map_start = KAddressSpaceInfo::GetAddressSpaceStart(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_MapSmall);
map_size = KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_MapSmall);
map_end = map_start + map_size;
}
break;
@ -130,10 +133,10 @@ namespace ams::kern::svc {
/* 64-bit address space requires 64-bit process. */
R_UNLESS(is_64_bit, svc::ResultInvalidCombination());
map_start = KAddressSpaceInfo::GetAddressSpaceStart(39, KAddressSpaceInfo::Type_Map39Bit);
map_end = map_start + KAddressSpaceInfo::GetAddressSpaceSize(39, KAddressSpaceInfo::Type_Map39Bit);
map_start = KAddressSpaceInfo::GetAddressSpaceStart(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_Map39Bit);
map_end = map_start + KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_Map39Bit);
map_size = KAddressSpaceInfo::GetAddressSpaceSize(39, KAddressSpaceInfo::Type_Heap);
map_size = KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(params.flags), KAddressSpaceInfo::Type_Heap);
}
break;
default:
@ -293,7 +296,9 @@ namespace ams::kern::svc {
Result StartProcess(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) {
/* Validate stack size. */
R_UNLESS(main_thread_stack_size == static_cast<size_t>(main_thread_stack_size), svc::ResultOutOfMemory());
const uint64_t aligned_stack_size = util::AlignUp(main_thread_stack_size, Kernel::GetMemoryManager().GetMinimumAlignment(GetCurrentProcess().GetMemoryPool()));
R_UNLESS(aligned_stack_size >= main_thread_stack_size, svc::ResultOutOfMemory());
R_UNLESS(aligned_stack_size == static_cast<size_t>(aligned_stack_size), svc::ResultOutOfMemory());
/* Get the target process. */
KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject<KProcess>(process_handle);
@ -311,7 +316,7 @@ namespace ams::kern::svc {
process->SetIdealCoreId(core_id);
/* Run the process. */
R_RETURN(process->Run(priority, static_cast<size_t>(main_thread_stack_size)));
R_RETURN(process->Run(priority, static_cast<size_t>(aligned_stack_size)));
}
Result TerminateProcess(ams::svc::Handle process_handle) {
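
StartProcess now aligns the requested stack size up to the pool's minimum alignment and rejects wraparound from the align-up itself. A sketch under the assumption that the alignment is a power of two:

#include <cstdint>

/* Align v up to a power-of-two alignment. */
constexpr uint64_t AlignUp(uint64_t v, uint64_t align) { return (v + align - 1) & ~(align - 1); }

/* Returns false when the align-up wraps around, analogous to the
   svc::ResultOutOfMemory() rejection above. */
constexpr bool ComputeAlignedStackSize(uint64_t requested, uint64_t min_alignment, uint64_t &out) {
    const uint64_t aligned = AlignUp(requested, min_alignment);
    if (aligned < requested) {
        return false;
    }
    out = aligned;
    return true;
}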

View file

@ -27,6 +27,7 @@ namespace ams::kern::svc {
case ams::svc::MemoryPermission_Read:
case ams::svc::MemoryPermission_ReadWrite:
case ams::svc::MemoryPermission_ReadExecute:
case ams::svc::MemoryPermission_Execute:
return true;
default:
return false;

View file

@ -217,6 +217,9 @@ namespace ams::kern::svc {
}
Result GetThreadList(int32_t *out_num_threads, KUserPointer<uint64_t *> out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Validate that the out count is valid. */
R_UNLESS((0 <= max_out_count && max_out_count <= static_cast<int32_t>(std::numeric_limits<int32_t>::max() / sizeof(u64))), svc::ResultOutOfRange());
@ -225,30 +228,34 @@ namespace ams::kern::svc {
R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(out_thread_ids.GetUnsafePointer()), max_out_count * sizeof(u64)), svc::ResultInvalidCurrentMemory());
}
if (debug_handle == ams::svc::InvalidHandle) {
/* If passed invalid handle, we should return the global thread list. */
R_TRY(KThread::GetThreadList(out_num_threads, out_thread_ids, max_out_count));
/* Get the handle table. */
auto &handle_table = GetCurrentProcess().GetHandleTable();
/* Try to get as a debug object. */
KScopedAutoObject debug = handle_table.GetObject<KDebug>(debug_handle);
if (debug.IsNotNull()) {
/* Check that the debug object has a process. */
R_UNLESS(debug->IsAttached(), svc::ResultProcessTerminated());
R_UNLESS(debug->OpenProcess(), svc::ResultProcessTerminated());
ON_SCOPE_EXIT { debug->CloseProcess(); };
/* Get the thread list. */
R_TRY(debug->GetProcessUnsafe()->GetThreadList(out_num_threads, out_thread_ids, max_out_count));
} else {
/* Get the handle table. */
auto &handle_table = GetCurrentProcess().GetHandleTable();
/* Try to get as a debug object. */
KScopedAutoObject debug = handle_table.GetObject<KDebug>(debug_handle);
if (debug.IsNotNull()) {
/* Check that the debug object has a process. */
R_UNLESS(debug->IsAttached(), svc::ResultProcessTerminated());
R_UNLESS(debug->OpenProcess(), svc::ResultProcessTerminated());
ON_SCOPE_EXIT { debug->CloseProcess(); };
/* Get the thread list. */
R_TRY(debug->GetProcessUnsafe()->GetThreadList(out_num_threads, out_thread_ids, max_out_count));
} else {
/* Try to get as a process. */
KScopedAutoObject process = handle_table.GetObjectWithoutPseudoHandle<KProcess>(debug_handle);
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Only allow getting as a process (or global) if the caller does not have ForceDebugProd. */
R_UNLESS(!GetCurrentProcess().CanForceDebugProd(), svc::ResultInvalidHandle());
/* Try to get as a process. */
KScopedAutoObject process = handle_table.GetObjectWithoutPseudoHandle<KProcess>(debug_handle);
if (process.IsNotNull()) {
/* Get the thread list. */
R_TRY(process->GetThreadList(out_num_threads, out_thread_ids, max_out_count));
} else {
/* If the object is not a process, the caller may want the global thread list. */
R_UNLESS(debug_handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle());
/* If passed invalid handle, we should return the global thread list. */
R_TRY(KThread::GetThreadList(out_num_threads, out_thread_ids, max_out_count));
}
}
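
A standalone sketch of the reordered handle resolution above: try the debug object first, then a process handle, and fall back to the global thread list only for the invalid handle (stand-in types; the nullopt cases stand in for svc::ResultInvalidHandle()):

#include <optional>

enum class Source { DebugObjectProcess, Process, GlobalList };

std::optional<Source> ResolveThreadListSource(bool is_debug_object, bool is_process, bool is_invalid_handle, bool caller_can_force_debug_prod) {
    if (is_debug_object) {
        return Source::DebugObjectProcess;
    }
    if (caller_can_force_debug_prod) {
        return std::nullopt; /* such callers may only use debug handles */
    }
    if (is_process) {
        return Source::Process;
    }
    if (is_invalid_handle) {
        return Source::GlobalList;
    }
    return std::nullopt; /* anything else is an invalid handle */
}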

View file

@ -81,8 +81,8 @@
HANDLER(NetworkErrorInfo, 40 ) \
HANDLER(FileAccessPathInfo, 41 ) \
HANDLER(GameCardCIDInfo, 42 ) \
HANDLER(NANDCIDInfo, 43 ) \
HANDLER(MicroSDCIDInfo, 44 ) \
HANDLER(NANDCIDInfoDeprecated, 43 ) \
HANDLER(MicroSDCIDInfoDeprecated, 44 ) \
HANDLER(NANDSpeedModeInfo, 45 ) \
HANDLER(MicroSDSpeedModeInfo, 46 ) \
HANDLER(GameCardSpeedModeInfo, 47 ) \
@ -112,7 +112,7 @@
HANDLER(FocusedAppletHistoryInfo, 71 ) \
HANDLER(CompositorInfo, 72 ) \
HANDLER(BatteryChargeInfo, 73 ) \
HANDLER(NANDExtendedCsd, 74 ) \
HANDLER(NANDExtendedCsdDeprecated, 74 ) \
HANDLER(NANDPatrolInfo, 75 ) \
HANDLER(NANDErrorInfo, 76 ) \
HANDLER(NANDDriverLog, 77 ) \
@ -178,9 +178,12 @@
HANDLER(BuiltInWirelessOUIInfo, 137 ) \
HANDLER(WirelessAPOUIInfo, 138 ) \
HANDLER(EthernetAdapterOUIInfo, 139 ) \
HANDLER(NANDTypeInfo, 140 ) \
HANDLER(NANDTypeInfoDeprecated, 140 ) \
HANDLER(MicroSDTypeInfo, 141 ) \
HANDLER(TestNx, 1000)
HANDLER(AttachmentFileInfo, 142 ) \
HANDLER(TestNx, 1000) \
HANDLER(NANDTypeInfo, 1001) \
HANDLER(NANDExtendedCsd, 1002) \
#define AMS_ERPT_FOREACH_FIELD(HANDLER) \
HANDLER(TestU64, 0, Test, FieldType_NumericU64, FieldFlag_None ) \
@ -280,8 +283,8 @@
HANDLER(CDNContentPath, 94, NetworkErrorInfo, FieldType_String, FieldFlag_None ) \
HANDLER(FileAccessPath, 95, FileAccessPathInfo, FieldType_String, FieldFlag_None ) \
HANDLER(GameCardCID, 96, GameCardCIDInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(NANDCID, 97, NANDCIDInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(MicroSDCID, 98, MicroSDCIDInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(NANDCIDDeprecated, 97, NANDCIDInfoDeprecated, FieldType_U8Array, FieldFlag_None ) \
HANDLER(MicroSDCIDDeprecated, 98, MicroSDCIDInfoDeprecated, FieldType_U8Array, FieldFlag_None ) \
HANDLER(NANDSpeedMode, 99, NANDSpeedModeInfo, FieldType_String, FieldFlag_None ) \
HANDLER(MicroSDSpeedMode, 100, MicroSDSpeedModeInfo, FieldType_String, FieldFlag_None ) \
HANDLER(GameCardSpeedMode, 101, GameCardSpeedModeInfo, FieldType_String, FieldFlag_None ) \
@ -373,9 +376,9 @@
HANDLER(FastBatteryChargingEnabled, 187, BatteryChargeInfo, FieldType_Bool, FieldFlag_None ) \
HANDLER(ControllerPowerSupplyAcquiredDeprecated, 188, BatteryChargeInfo, FieldType_Bool, FieldFlag_None ) \
HANDLER(OtgRequestedDeprecated, 189, BatteryChargeInfo, FieldType_Bool, FieldFlag_None ) \
HANDLER(NANDPreEolInfo, 190, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDDeviceLifeTimeEstTypA, 191, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDDeviceLifeTimeEstTypB, 192, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDPreEolInfoDeprecated, 190, NANDExtendedCsdDeprecated, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDDeviceLifeTimeEstTypADeprecated, 191, NANDExtendedCsdDeprecated, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDDeviceLifeTimeEstTypBDeprecated, 192, NANDExtendedCsdDeprecated, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDPatrolCount, 193, NANDPatrolInfo, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDNumActivationFailures, 194, NANDErrorInfo, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDNumActivationErrorCorrections, 195, NANDErrorInfo, FieldType_NumericU32, FieldFlag_None ) \
@ -446,21 +449,21 @@
HANDLER(AdspExceptionStackDumpDeprecated, 260, AdspErrorInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(AdspExceptionReasonDeprecated, 261, AdspErrorInfo, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(OscillatorClock, 262, PowerClockInfo, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(CpuDvfsTableClocks, 263, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(CpuDvfsTableVoltages, 264, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(GpuDvfsTableClocks, 265, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(GpuDvfsTableVoltages, 266, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(EmcDvfsTableClocks, 267, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(EmcDvfsTableVoltages, 268, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(CpuDvfsTableClocksDeprecated, 263, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(CpuDvfsTableVoltagesDeprecated, 264, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(GpuDvfsTableClocksDeprecated, 265, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(GpuDvfsTableVoltagesDeprecated, 266, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(EmcDvfsTableClocksDeprecated, 267, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(EmcDvfsTableVoltagesDeprecated, 268, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(ModuleClockFrequencies, 269, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(ModuleClockEnableFlags, 270, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(ModulePowerEnableFlags, 271, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(ModuleResetAssertFlags, 272, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(ModuleMinimumVoltageClockRates, 273, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(PowerDomainEnableFlags, 274, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(PowerDomainVoltages, 275, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(PowerDomainEnableFlagsDeprecated, 274, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(PowerDomainVoltagesDeprecated, 275, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(AccessPointRssi, 276, RadioStrengthInfo, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(FuseInfo, 277, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(FuseInfoDeprecated, 277, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(VideoLog, 278, VideoInfo, FieldType_String, FieldFlag_None ) \
HANDLER(GameCardDeviceId, 279, GameCardCIDInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(GameCardAsicReinitializeCount, 280, GameCardErrorInfo, FieldType_NumericU16, FieldFlag_None ) \
@ -830,9 +833,9 @@
HANDLER(RuntimeLimitedApplicationLicenseUpgrade, 644, RunningApplicationInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(ServiceProfileRevisionKey, 645, ServiceProfileInfo, FieldType_NumericU64, FieldFlag_None ) \
HANDLER(BluetoothAudioConnectionCount, 646, BluetoothAudioInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(BluetoothHidPairingInfoCount, 647, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(BluetoothAudioPairingInfoCount, 648, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(BluetoothLePairingInfoCount, 649, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(BluetoothHidPairingInfoCountDeprecated, 647, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(BluetoothAudioPairingInfoCountDeprecated, 648, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(BluetoothLePairingInfoCountDeprecated, 649, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(FatFsBisSystemFilePeakOpenCount, 650, FsProxyErrorInfo, FieldType_NumericU16, FieldFlag_None ) \
HANDLER(FatFsBisSystemDirectoryPeakOpenCount, 651, FsProxyErrorInfo, FieldType_NumericU16, FieldFlag_None ) \
HANDLER(FatFsBisUserFilePeakOpenCount, 652, FsProxyErrorInfo, FieldType_NumericU16, FieldFlag_None ) \
@ -860,11 +863,24 @@
HANDLER(FatFsBisUserFatErrorNumber, 674, FsProxyErrorInfo2, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(FatFsBisUserFatSafeErrorNumber, 675, FsProxyErrorInfo2, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(GpuCrashDump2, 676, GpuCrashInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(NANDType, 677, NANDTypeInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(NANDTypeDeprecated, 677, NANDTypeInfoDeprecated, FieldType_U8Array, FieldFlag_None ) \
HANDLER(MicroSDType, 678, MicroSDTypeInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(GameCardLastDeactivateReasonResult, 679, GameCardErrorInfo, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(GameCardLastDeactivateReason, 680, GameCardErrorInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(InvalidErrorCode, 681, ErrorInfo, FieldType_String, FieldFlag_None ) \
HANDLER(AppletId, 682, ApplicationInfo, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(PrevReportIdentifier, 683, ErrorInfoAuto, FieldType_String, FieldFlag_None ) \
HANDLER(SyslogStartupTimeBase, 684, ErrorInfoAuto, FieldType_NumericI64, FieldFlag_None ) \
HANDLER(NxdmpIsAttached, 685, AttachmentFileInfo, FieldType_Bool, FieldFlag_None ) \
HANDLER(ScreenshotIsAttached, 686, AttachmentFileInfo, FieldType_Bool, FieldFlag_None ) \
HANDLER(SyslogIsAttached, 687, AttachmentFileInfo, FieldType_Bool, FieldFlag_None ) \
HANDLER(SaveSyslogResult, 688, AttachmentFileInfo, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(EncryptionKeyGeneration, 689, ErrorInfo, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(FsBufferManagerNonBlockingRetriedCount, 690, FsMemoryInfo, FieldType_NumericU64, FieldFlag_None ) \
HANDLER(FsPooledBufferNonBlockingRetriedCount, 691, FsMemoryInfo, FieldType_NumericU64, FieldFlag_None ) \
HANDLER(LastConnectionTestDownloadSpeed64, 692, ConnectionInfo, FieldType_NumericU64, FieldFlag_None ) \
HANDLER(LastConnectionTestUploadSpeed64, 693, ConnectionInfo, FieldType_NumericU64, FieldFlag_None ) \
HANDLER(EncryptionKeyV1, 694, ErrorInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(TestStringNx, 1000, TestNx, FieldType_String, FieldFlag_None ) \
HANDLER(BoostModeCurrentLimit, 1001, BatteryChargeInfo, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(ChargeConfiguration, 1002, BatteryChargeInfo, FieldType_NumericI32, FieldFlag_None ) \
@ -877,5 +893,21 @@
HANDLER(AdspExceptionArmModeRegisters, 1009, AdspErrorInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(AdspExceptionStackAddress, 1010, AdspErrorInfo, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(AdspExceptionStackDump, 1011, AdspErrorInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(AdspExceptionReason, 1012, AdspErrorInfo, FieldType_NumericU32, FieldFlag_None )
HANDLER(AdspExceptionReason, 1012, AdspErrorInfo, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(CpuDvfsTableClocks, 1013, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(CpuDvfsTableVoltages, 1014, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(GpuDvfsTableClocks, 1015, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(GpuDvfsTableVoltages, 1016, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(EmcDvfsTableClocks, 1017, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(EmcDvfsTableVoltages, 1018, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(PowerDomainEnableFlags, 1019, PowerClockInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(PowerDomainVoltages, 1020, PowerClockInfo, FieldType_I32Array, FieldFlag_None ) \
HANDLER(FuseInfo, 1021, PowerClockInfo, FieldType_U32Array, FieldFlag_None ) \
HANDLER(NANDType, 1022, NANDTypeInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(BluetoothHidPairingInfoCount, 1023, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(BluetoothAudioPairingInfoCount, 1024, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(BluetoothLePairingInfoCount, 1025, BluetoothPairingCountInfo, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(NANDPreEolInfo, 1026, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDDeviceLifeTimeEstTypA, 1027, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(NANDDeviceLifeTimeEstTypB, 1028, NANDExtendedCsd, FieldType_NumericU32, FieldFlag_None )
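
The renames above follow a stable-ID deprecation convention: a numeric ID is never reassigned a new meaning; the old name gains a Deprecated suffix in place, and the live name reappears at a fresh ID (1001 and up here). A tiny illustrative analogue:

/* An entry's numeric ID keeps its old meaning forever: the stale name is
   suffixed Deprecated in place, and the live definition moves to a new ID. */
enum FieldId {
    FieldId_NANDPreEolInfoDeprecated = 190,  /* old wire ID, frozen */
    FieldId_NANDPreEolInfo           = 1026, /* current definition */
};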

View file

@ -84,6 +84,7 @@ namespace ams::hos {
Version_17_0_1 = ::ams::TargetFirmware_17_0_1,
Version_18_0_0 = ::ams::TargetFirmware_18_0_0,
Version_18_1_0 = ::ams::TargetFirmware_18_1_0,
Version_19_0_0 = ::ams::TargetFirmware_19_0_0,
Version_Current = ::ams::TargetFirmware_Current,

View file

@ -34,9 +34,10 @@ namespace ams::ldr {
u32 aci_sac_size;
u32 acid_fac_size;
u32 aci_fah_size;
u8 unused_20[0x10];
u8 ac_buffer[0x3E0];
};
static_assert(util::is_pod<ProgramInfo>::value && sizeof(ProgramInfo) == 0x400, "ProgramInfo definition!");
static_assert(util::is_pod<ProgramInfo>::value && sizeof(ProgramInfo) == 0x410, "ProgramInfo definition!");
enum ProgramInfoFlag {
ProgramInfoFlag_SystemModule = (0 << 0),
@ -226,6 +227,7 @@ namespace ams::ldr {
MetaFlag_OptimizeMemoryAllocation = (1 << 4),
MetaFlag_DisableDeviceAddressSpaceMerge = (1 << 5),
MetaFlag_EnableAliasRegionExtraSize = (1 << 6),
MetaFlag_PreventCodeReads = (1 << 7),
};
enum AddressSpaceType {

View file

@ -19,8 +19,10 @@
#include <stratosphere/pm/pm_types.hpp>
#include <stratosphere/sf.hpp>
#define AMS_PM_I_BOOT_MODE_INTERFACE_INTERFACE_INFO(C, H) \
AMS_SF_METHOD_INFO(C, H, 0, void, GetBootMode, (sf::Out<u32> out), (out)) \
AMS_SF_METHOD_INFO(C, H, 1, void, SetMaintenanceBoot, (), ())
#define AMS_PM_I_BOOT_MODE_INTERFACE_INTERFACE_INFO(C, H) \
AMS_SF_METHOD_INFO(C, H, 0, void, GetBootMode, (sf::Out<u32> out), (out)) \
AMS_SF_METHOD_INFO(C, H, 1, void, SetMaintenanceBoot, (), ()) \
AMS_SF_METHOD_INFO(C, H, 2, void, GetUnknown, (sf::Out<u32> out), (out)) \
AMS_SF_METHOD_INFO(C, H, 3, Result, SetUnknown, (u32 val), (val))
AMS_SF_DEFINE_INTERFACE(ams::pm::impl, IBootModeInterface, AMS_PM_I_BOOT_MODE_INTERFACE_INTERFACE_INFO, 0x96D01649)

View file

@ -135,14 +135,15 @@ namespace ams::ro {
class NroHeader {
public:
static constexpr u32 Magic = util::FourCC<'N','R','O','0'>::Code;
static constexpr u32 FlagAlignedHeader = 1;
private:
u32 m_entrypoint_insn;
u32 m_mod_offset;
u8 m_reserved_08[0x8];
u32 m_magic;
u8 m_reserved_14[0x4];
u32 m_version;
u32 m_size;
u8 m_reserved_1C[0x4];
u32 m_flags;
u32 m_text_offset;
u32 m_text_size;
u32 m_ro_offset;
@ -158,10 +159,22 @@ namespace ams::ro {
return m_magic == Magic;
}
u32 GetVersion() const {
return m_version;
}
u32 GetSize() const {
return m_size;
}
u32 GetFlags() const {
return m_flags;
}
bool IsAlignedHeader() const {
return m_flags & FlagAlignedHeader;
}
u32 GetTextOffset() const {
return m_text_offset;
}
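
IsAlignedHeader() is a plain bit test against the new flags word. A standalone usage sketch with a cut-down header view (MiniNroView and its field values are fabricated for illustration):

#include <cstdint>

constexpr uint32_t NroMagic = 0x304F524E; /* 'N','R','O','0' read little-endian */
constexpr uint32_t FlagAlignedHeader = 1;

struct MiniNroView {
    uint32_t magic;
    uint32_t flags;
    constexpr bool IsValid() const { return magic == NroMagic; }
    constexpr bool IsAlignedHeader() const { return (flags & FlagAlignedHeader) != 0; }
};

static_assert(MiniNroView{ NroMagic, 1 }.IsAlignedHeader());
static_assert(!MiniNroView{ NroMagic, 0 }.IsAlignedHeader());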

View file

@ -231,7 +231,7 @@
R_RETURN(::svcGetThreadContext3(reinterpret_cast<::ThreadContext *>(out_context.GetPointerUnsafe()), thread_handle));
}
ALWAYS_INLINE Result WaitForAddress(::ams::svc::Address address, ::ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) {
ALWAYS_INLINE Result WaitForAddress(::ams::svc::Address address, ::ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) {
R_RETURN(::svcWaitForAddress(reinterpret_cast<void *>(static_cast<uintptr_t>(address)), arb_type, value, timeout_ns));
}

View file

@ -21,7 +21,7 @@ namespace ams::fs::impl {
#define ADD_ENUM_CASE(v) case v: return #v
template<> const char *IdString::ToString<pkg1::KeyGeneration>(pkg1::KeyGeneration id) {
static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_18_0_0);
static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_19_0_0);
switch (id) {
using enum pkg1::KeyGeneration;
case KeyGeneration_1_0_0: return "1.0.0-2.3.0";
@ -41,7 +41,8 @@ namespace ams::fs::impl {
case KeyGeneration_15_0_0: return "15.0.0-15.0.1";
case KeyGeneration_16_0_0: return "16.0.0-16.0.3";
case KeyGeneration_17_0_0: return "17.0.0-17.0.1";
case KeyGeneration_18_0_0: return "18.0.0-";
case KeyGeneration_18_0_0: return "18.0.0-18.1.0";
case KeyGeneration_19_0_0: return "19.0.0-";
default: return "Unknown";
}
}

View file

@ -24,6 +24,7 @@ namespace ams::ldr::pm {
}
Result GetProgramInfo(ProgramInfo *out, const ncm::ProgramLocation &loc) {
static_assert(sizeof(*out) == sizeof(LoaderProgramInfo));
R_RETURN(ldrPmGetProgramInfo(reinterpret_cast<const NcmProgramLocation *>(std::addressof(loc)), reinterpret_cast<LoaderProgramInfo *>(out)));
}
@ -42,6 +43,7 @@ namespace ams::ldr::pm {
Result AtmosphereGetProgramInfo(ProgramInfo *out, cfg::OverrideStatus *out_status, const ncm::ProgramLocation &loc) {
static_assert(sizeof(*out_status) == sizeof(CfgOverrideStatus), "CfgOverrideStatus definition!");
static_assert(sizeof(*out) == sizeof(LoaderProgramInfo));
R_RETURN(ldrPmAtmosphereGetProgramInfo(reinterpret_cast<LoaderProgramInfo *>(out), reinterpret_cast<CfgOverrideStatus *>(out_status), reinterpret_cast<const NcmProgramLocation *>(std::addressof(loc))));
}

View file

@ -27,6 +27,7 @@ namespace ams::os::impl {
case os::MemoryPermission_ReadOnly: return svc::MemoryPermission_Read;
case os::MemoryPermission_ReadWrite: return svc::MemoryPermission_ReadWrite;
case os::MemoryPermission_ReadExecute: return svc::MemoryPermission_ReadExecute;
case os::MemoryPermission_ExecuteOnly: return svc::MemoryPermission_Execute;
AMS_UNREACHABLE_DEFAULT_CASE();
}
}

View file

@ -16,11 +16,11 @@
#pragma once
#define ATMOSPHERE_RELEASE_VERSION_MAJOR 1
#define ATMOSPHERE_RELEASE_VERSION_MINOR 7
#define ATMOSPHERE_RELEASE_VERSION_MICRO 1
#define ATMOSPHERE_RELEASE_VERSION_MINOR 8
#define ATMOSPHERE_RELEASE_VERSION_MICRO 0
#define ATMOSPHERE_RELEASE_VERSION ATMOSPHERE_RELEASE_VERSION_MAJOR, ATMOSPHERE_RELEASE_VERSION_MINOR, ATMOSPHERE_RELEASE_VERSION_MICRO
#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR 18
#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR 1
#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR 19
#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR 0
#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MICRO 0

View file

@ -82,8 +82,9 @@
#define ATMOSPHERE_TARGET_FIRMWARE_17_0_1 ATMOSPHERE_TARGET_FIRMWARE(17, 0, 1)
#define ATMOSPHERE_TARGET_FIRMWARE_18_0_0 ATMOSPHERE_TARGET_FIRMWARE(18, 0, 0)
#define ATMOSPHERE_TARGET_FIRMWARE_18_1_0 ATMOSPHERE_TARGET_FIRMWARE(18, 1, 0)
#define ATMOSPHERE_TARGET_FIRMWARE_19_0_0 ATMOSPHERE_TARGET_FIRMWARE(19, 0, 0)
#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_18_1_0
#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_19_0_0
#define ATMOSPHERE_TARGET_FIRMWARE_MIN ATMOSPHERE_TARGET_FIRMWARE(0, 0, 0)
#define ATMOSPHERE_TARGET_FIRMWARE_MAX ATMOSPHERE_TARGET_FIRMWARE_CURRENT
@ -158,6 +159,7 @@ namespace ams {
TargetFirmware_17_0_1 = ATMOSPHERE_TARGET_FIRMWARE_17_0_1,
TargetFirmware_18_0_0 = ATMOSPHERE_TARGET_FIRMWARE_18_0_0,
TargetFirmware_18_1_0 = ATMOSPHERE_TARGET_FIRMWARE_18_1_0,
TargetFirmware_19_0_0 = ATMOSPHERE_TARGET_FIRMWARE_19_0_0,
TargetFirmware_Current = ATMOSPHERE_TARGET_FIRMWARE_CURRENT,

View file

@ -27,5 +27,6 @@ namespace ams::pm {
R_DEFINE_ERROR_RESULT(DebugHookInUse, 4);
R_DEFINE_ERROR_RESULT(ApplicationRunning, 5);
R_DEFINE_ERROR_RESULT(InvalidSize, 6);
R_DEFINE_ERROR_RESULT(Unknown7, 7);
}

View file

@ -67,7 +67,7 @@
HANDLER(0x31, Result, GetResourceLimitCurrentValue, OUTPUT(int64_t, out_current_value), INPUT(::ams::svc::Handle, resource_limit_handle), INPUT(::ams::svc::LimitableResource, which)) \
HANDLER(0x32, Result, SetThreadActivity, INPUT(::ams::svc::Handle, thread_handle), INPUT(::ams::svc::ThreadActivity, thread_activity)) \
HANDLER(0x33, Result, GetThreadContext3, OUTPTR(::ams::svc::ThreadContext, out_context), INPUT(::ams::svc::Handle, thread_handle)) \
HANDLER(0x34, Result, WaitForAddress, INPUT(::ams::svc::Address, address), INPUT(::ams::svc::ArbitrationType, arb_type), INPUT(int32_t, value), INPUT(int64_t, timeout_ns)) \
HANDLER(0x34, Result, WaitForAddress, INPUT(::ams::svc::Address, address), INPUT(::ams::svc::ArbitrationType, arb_type), INPUT(int64_t, value), INPUT(int64_t, timeout_ns)) \
HANDLER(0x35, Result, SignalToAddress, INPUT(::ams::svc::Address, address), INPUT(::ams::svc::SignalType, signal_type), INPUT(int32_t, value), INPUT(int32_t, count)) \
HANDLER(0x36, void, SynchronizePreemptionState) \
HANDLER(0x37, Result, GetResourceLimitPeakValue, OUTPUT(int64_t, out_peak_value), INPUT(::ams::svc::Handle, resource_limit_handle), INPUT(::ams::svc::LimitableResource, which)) \

View file

@ -191,6 +191,8 @@ namespace ams::svc {
InfoType_IsSvcPermitted = 26,
InfoType_IoRegionHint = 27,
InfoType_AliasRegionExtraSize = 28,
/* ... */
InfoType_TransferMemoryHint = 34,
InfoType_MesosphereMeta = 65000,
InfoType_MesosphereCurrentProcess = 65001,
@ -261,6 +263,7 @@ namespace ams::svc {
ArbitrationType_WaitIfLessThan = 0,
ArbitrationType_DecrementAndWaitIfLessThan = 1,
ArbitrationType_WaitIfEqual = 2,
ArbitrationType_WaitIfEqual64 = 3,
};
enum YieldType : s64 {

View file

@ -125,6 +125,8 @@ SECTIONS
.gnu.version_r : { *(.gnu.version_r) } :rodata
.note.gnu.build-id : { *(.note.gnu.build-id) } :rodata
__rodata_end = .;
/* =========== DATA section =========== */
. = ALIGN(0x1000);
__data_start = . ;

View file

@ -15,6 +15,9 @@
*/
#include <mesosphere.hpp>
extern "C" void __rodata_start();
extern "C" void __rodata_end();
extern "C" void __bin_start__();
extern "C" void __bin_end__();
@ -220,6 +223,31 @@ namespace ams::kern::init {
};
static_assert(kern::arch::arm64::init::IsInitialPageAllocator<KInitialPageAllocatorForFinalizeIdentityMapping>);
void SetupAllTtbr0Entries(KInitialPageTable &init_pt, KInitialPageAllocator &allocator) {
/* Validate that the ttbr0 array is in rodata. */
const uintptr_t rodata_start = reinterpret_cast<uintptr_t>(__rodata_start);
const uintptr_t rodata_end = reinterpret_cast<uintptr_t>(__rodata_end);
MESOSPHERE_INIT_ABORT_UNLESS(rodata_start < rodata_end);
MESOSPHERE_INIT_ABORT_UNLESS(rodata_start <= reinterpret_cast<uintptr_t>(std::addressof(KPageTable::GetTtbr0Entry(0))));
MESOSPHERE_INIT_ABORT_UNLESS(reinterpret_cast<uintptr_t>(std::addressof(KPageTable::GetTtbr0Entry(KPageTable::NumTtbr0Entries))) < rodata_end);
/* Allocate pages for all ttbr0 entries. */
for (size_t i = 0; i < KPageTable::NumTtbr0Entries; ++i) {
/* Allocate a page. */
KPhysicalAddress page = allocator.Allocate(PageSize);
MESOSPHERE_INIT_ABORT_UNLESS(page != Null<KPhysicalAddress>);
/* Check that the page is allowed to be a ttbr0 entry. */
MESOSPHERE_INIT_ABORT_UNLESS((GetInteger(page) & UINT64_C(0xFFFF000000000001)) == 0);
/* Get the physical address of the ttbr0 entry. */
const auto ttbr0_phys_ptr = init_pt.GetPhysicalAddress(KVirtualAddress(std::addressof(KPageTable::GetTtbr0Entry(i))));
/* Set the entry to the newly allocated page. */
*reinterpret_cast<volatile u64 *>(GetInteger(ttbr0_phys_ptr)) = (static_cast<u64>(i) << 48) | GetInteger(page);
}
}
void FinalizeIdentityMapping(KInitialPageTable &init_pt, KInitialPageAllocator &allocator, u64 phys_to_virt_offset) {
/* Create an allocator for identity mapping finalization. */
KInitialPageAllocatorForFinalizeIdentityMapping finalize_allocator(allocator, phys_to_virt_offset);
@ -591,6 +619,9 @@ namespace ams::kern::init {
/* Create page table object for use during remaining initialization. */
KInitialPageTable init_pt;
/* Setup all ttbr0 pages. */
SetupAllTtbr0Entries(init_pt, g_initial_page_allocator);
/* Unmap the identity mapping. */
FinalizeIdentityMapping(init_pt, g_initial_page_allocator, g_phase2_linear_region_phys_to_virt_diff);
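
A sketch of the TTBR0 value composed in the loop above: the ASID lands in bits [63:48] and the translation-table base address sits below it (a standalone helper, not kernel code):

#include <cstdint>

/* The base address must be suitably aligned, and bit 0 (CnP) is left clear,
   matching the abort check on the allocated page above. */
constexpr uint64_t MakeTtbr0(uint16_t asid, uint64_t table_phys_addr) {
    return (static_cast<uint64_t>(asid) << 48) | table_phys_addr;
}

static_assert(MakeTtbr0(1, 0x80000000) == UINT64_C(0x0001000080000000));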

View file

@ -444,6 +444,7 @@ _ZN3ams4kern4arch5arm6430EL1SynchronousExceptionHandlerEv:
ERET_WITH_SPECULATION_BARRIER
2: /* The exception wasn't triggered by copying memory from userspace. */
/* NOTE: As of 19.0.0, the following is ifdef'd out on the NX non-debug kernel. */
ldr x0, [sp, #8]
ldr x1, [sp, #16]

View file

@ -76,6 +76,9 @@ _ZN3ams4kern4arch5arm6427SupervisorModeThreadStarterEv:
/* v */
/* | u64 argument | u64 entrypoint | KThread::StackParameters (size 0x30) | */
/* Clear the link register. */
mov x30, #0
/* Load the argument and entrypoint. */
ldp x0, x1, [sp], #0x10
@ -84,4 +87,6 @@ _ZN3ams4kern4arch5arm6427SupervisorModeThreadStarterEv:
/* Mask I bit in DAIF */
msr daifclr, #2
br x1
/* Invoke the function (by calling ams::kern::arch::arm64::InvokeSupervisorModeThread(argument, entrypoint)). */
b _ZN3ams4kern4arch5arm6426InvokeSupervisorModeThreadEmm

View file

@ -111,4 +111,8 @@ namespace ams::kern {
KThread &Kernel::GetMainThread(s32 core_id) { return g_main_threads.m_arr[core_id]; }
KThread &Kernel::GetIdleThread(s32 core_id) { return g_idle_threads.m_arr[core_id]; }
__attribute__((constructor)) void ConfigureKTargetSystem() {
KSystemControl::ConfigureKTargetSystem();
}
}

View file

@ -48,6 +48,29 @@ static Result _nsGetApplicationContentPath(Service *s, void* out, size_t out_siz
);
}
static Result _nsGetApplicationContentPath2(Service *s, void* out_path, size_t out_size, u64* out_program_id, u8 *out_attr, u64 app_id, NcmContentType content_type) {
const struct {
u8 content_type;
u64 app_id;
} in = { content_type, app_id };
struct {
u8 attr;
u64 program_id;
} out;
Result rc = serviceDispatchInOut(s, 2524, in, out,
.buffer_attrs = { SfBufferAttr_HipcMapAlias | SfBufferAttr_Out },
.buffers = { { out_path, out_size } },
);
if (R_SUCCEEDED(rc)) {
*out_program_id = out.program_id;
*out_attr = out.attr;
}
return rc;
}
static Result _nsResolveApplicationContentPath(Service* s, u64 app_id, NcmContentType content_type) {
const struct {
u8 content_type;
@ -93,6 +116,10 @@ Result nswebGetRunningApplicationProgramId(NsDocumentInterface* doc, u64* out_pr
return _nsGetRunningApplicationProgramId(&doc->s, out_program_id, app_id);
}
Result nswebGetApplicationContentPath2(NsDocumentInterface* doc, void* out, size_t out_size, u64* out_program_id, u8 *out_attr, u64 app_id, NcmContentType content_type) {
return _nsGetApplicationContentPath2(&doc->s, out, out_size, out_program_id, out_attr, app_id, content_type);
}
void nsDocumentInterfaceClose(NsDocumentInterface* doc) {
serviceClose(&doc->s);
}

View file

@ -25,6 +25,7 @@ Result nsamGetRunningApplicationProgramIdFwd(Service* s, u64* out_program_id, u6
Result nswebGetApplicationContentPath(NsDocumentInterface* doc, void* out, size_t out_size, u8 *out_attr, u64 app_id, NcmContentType content_type);
Result nswebResolveApplicationContentPath(NsDocumentInterface* doc, u64 app_id, NcmContentType content_type);
Result nswebGetRunningApplicationProgramId(NsDocumentInterface* doc, u64* out_program_id, u64 app_id);
Result nswebGetApplicationContentPath2(NsDocumentInterface* doc, void* out, size_t out_size, u64* out_program_id, u8 *out_attr, u64 app_id, NcmContentType content_type);
void nsDocumentInterfaceClose(NsDocumentInterface* doc);

View file

@ -38,6 +38,11 @@ namespace ams::mitm::ns {
R_RETURN(nswebGetRunningApplicationProgramId(m_srv.get(), reinterpret_cast<u64 *>(out.GetPointer()), static_cast<u64>(application_id)));
}
Result NsDocumentService::GetApplicationContentPath2(const sf::OutBuffer &out_path, sf::Out<ncm::ProgramId> out_program_id, sf::Out<ams::fs::ContentAttributes> out_attr, ncm::ProgramId application_id, u8 content_type) {
static_assert(sizeof(*out_attr.GetPointer()) == sizeof(u8));
R_RETURN(nswebGetApplicationContentPath2(m_srv.get(), out_path.GetPointer(), out_path.GetSize(), reinterpret_cast<u64 *>(out_program_id.GetPointer()), reinterpret_cast<u8 *>(out_attr.GetPointer()), static_cast<u64>(application_id), static_cast<NcmContentType>(content_type)));
}
Result NsWebMitmService::GetDocumentInterface(sf::Out<sf::SharedPointer<impl::IDocumentInterface>> out) {
/* Open a document interface. */
NsDocumentInterface doc;

View file

@ -18,10 +18,11 @@
#include "ns_shim.h"
#define AMS_NS_DOCUMENT_MITM_INTERFACE_INFO(C, H) \
AMS_SF_METHOD_INFO(C, H, 21, Result, GetApplicationContentPath, (const sf::OutBuffer &out_path, sf::Out<ams::fs::ContentAttributes> out_attr, ncm::ProgramId application_id, u8 content_type), (out_path, out_attr, application_id, content_type)) \
AMS_SF_METHOD_INFO(C, H, 23, Result, ResolveApplicationContentPath, (ncm::ProgramId application_id, u8 content_type), (application_id, content_type)) \
AMS_SF_METHOD_INFO(C, H, 92, Result, GetRunningApplicationProgramId, (sf::Out<ncm::ProgramId> out, ncm::ProgramId application_id), (out, application_id), hos::Version_6_0_0)
#define AMS_NS_DOCUMENT_MITM_INTERFACE_INFO(C, H) \
AMS_SF_METHOD_INFO(C, H, 21, Result, GetApplicationContentPath, (const sf::OutBuffer &out_path, sf::Out<ams::fs::ContentAttributes> out_attr, ncm::ProgramId application_id, u8 content_type), (out_path, out_attr, application_id, content_type)) \
AMS_SF_METHOD_INFO(C, H, 23, Result, ResolveApplicationContentPath, (ncm::ProgramId application_id, u8 content_type), (application_id, content_type)) \
AMS_SF_METHOD_INFO(C, H, 92, Result, GetRunningApplicationProgramId, (sf::Out<ncm::ProgramId> out, ncm::ProgramId application_id), (out, application_id), hos::Version_6_0_0) \
AMS_SF_METHOD_INFO(C, H, 2524, Result, GetApplicationContentPath2, (const sf::OutBuffer &out_path, sf::Out<ncm::ProgramId> out_program_id, sf::Out<ams::fs::ContentAttributes> out_attr, ncm::ProgramId application_id, u8 content_type), (out_path, out_program_id, out_attr, application_id, content_type), hos::Version_19_0_0)
AMS_SF_DEFINE_INTERFACE(ams::mitm::ns::impl, IDocumentInterface, AMS_NS_DOCUMENT_MITM_INTERFACE_INFO, 0x0F9B1C00)
@ -47,6 +48,7 @@ namespace ams::mitm::ns {
Result GetApplicationContentPath(const sf::OutBuffer &out_path, sf::Out<ams::fs::ContentAttributes> out_attr, ncm::ProgramId application_id, u8 content_type);
Result ResolveApplicationContentPath(ncm::ProgramId application_id, u8 content_type);
Result GetRunningApplicationProgramId(sf::Out<ncm::ProgramId> out, ncm::ProgramId application_id);
Result GetApplicationContentPath2(const sf::OutBuffer &out_path, sf::Out<ncm::ProgramId> out_program_id, sf::Out<ams::fs::ContentAttributes> out_attr, ncm::ProgramId application_id, u8 content_type);
};
static_assert(impl::IsIDocumentInterface<NsDocumentService>);

View file

@ -110,6 +110,7 @@
"type": "debug_flags",
"value": {
"allow_debug": false,
"force_debug_prod": false,
"force_debug": true
}
}

View file

@ -105,6 +105,7 @@
"type": "debug_flags",
"value": {
"allow_debug": false,
"force_debug_prod": false,
"force_debug": true
}
}]

View file

@ -104,6 +104,7 @@
"type": "debug_flags",
"value": {
"allow_debug": false,
"force_debug_prod": false,
"force_debug": true
}
}]

View file

@ -308,10 +308,19 @@ namespace ams::ldr {
);
DEFINE_CAPABILITY_CLASS(DebugFlags,
DEFINE_CAPABILITY_FIELD(AllowDebug, IdBits, 1, bool);
DEFINE_CAPABILITY_FIELD(ForceDebug, AllowDebug, 1, bool);
DEFINE_CAPABILITY_FIELD(AllowDebug, IdBits, 1, bool);
DEFINE_CAPABILITY_FIELD(ForceDebugProd, AllowDebug, 1, bool);
DEFINE_CAPABILITY_FIELD(ForceDebug, ForceDebugProd, 1, bool);
bool IsValid(const util::BitPack32 *kac, size_t kac_count) const {
u32 total = 0;
if (this->GetAllowDebug()) { ++total; }
if (this->GetForceDebugProd()) { ++total; }
if (this->GetForceDebug()) { ++total; }
if (total > 1) {
return false;
}
for (size_t i = 0; i < kac_count; i++) {
if (GetCapabilityId(kac[i]) == Id) {
const auto restriction = Decode(kac[i]);
@ -319,12 +328,14 @@ namespace ams::ldr {
return (restriction.GetValue() & this->GetValue()) == this->GetValue();
}
}
return false;
}
static constexpr util::BitPack32 Encode(bool allow_debug, bool force_debug) {
static constexpr util::BitPack32 Encode(bool allow_debug, bool force_debug_prod, bool force_debug) {
util::BitPack32 encoded{IdBitsValue};
encoded.Set<AllowDebug>(allow_debug);
encoded.Set<ForceDebugProd>(force_debug_prod);
encoded.Set<ForceDebug>(force_debug);
return encoded;
}
@ -406,7 +417,21 @@ namespace ams::ldr {
kac[i] = CapabilityApplicationType::Encode(flags & ProgramInfoFlag_ApplicationTypeMask);
break;
case CapabilityId::DebugFlags:
kac[i] = CapabilityDebugFlags::Encode((flags & ProgramInfoFlag_AllowDebug) != 0, CapabilityDebugFlags::Decode(cap).GetForceDebug());
kac[i] = CapabilityDebugFlags::Encode((flags & ProgramInfoFlag_AllowDebug) != 0, CapabilityDebugFlags::Decode(cap).GetForceDebugProd(), CapabilityDebugFlags::Decode(cap).GetForceDebug());
break;
default:
break;
}
}
}
void FixDebugCapabilityForHbl(util::BitPack32 *kac, size_t count) {
for (size_t i = 0; i < count; ++i) {
const auto cap = kac[i];
switch (GetCapabilityId(cap)) {
case CapabilityId::DebugFlags:
/* 19.0.0+ disallows more than one flag set; we are always DebugMode for kernel, so ForceDebug is the most powerful/flexible flag to set. */
kac[i] = CapabilityDebugFlags::Encode(false, false, true);
break;
default:
break;
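
FixDebugCapabilityForHbl rewrites any DebugFlags descriptor to set only ForceDebug, and IsValid now rejects descriptors with more than one of the three bits set. A standalone sketch of that mutual-exclusion rule (bit positions are illustrative, not the real KAC layout):

#include <cstdint>

constexpr uint32_t EncodeDebugFlags(bool allow_debug, bool force_debug_prod, bool force_debug) {
    return (allow_debug ? 1u : 0u) | (force_debug_prod ? 2u : 0u) | (force_debug ? 4u : 0u);
}

constexpr bool IsValidDebugFlags(uint32_t encoded) {
    /* Zero or a power of two has at most one bit set. */
    return (encoded & (encoded - 1)) == 0;
}

static_assert(IsValidDebugFlags(EncodeDebugFlags(false, false, true)));
static_assert(!IsValidDebugFlags(EncodeDebugFlags(true, false, true)));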

View file

@ -23,6 +23,8 @@ namespace ams::ldr {
u16 MakeProgramInfoFlag(const util::BitPack32 *kac, size_t count);
void UpdateProgramInfoFlag(u16 flags, util::BitPack32 *kac, size_t count);
void FixDebugCapabilityForHbl(util::BitPack32 *kac, size_t count);
void PreProcessCapability(util::BitPack32 *kac, size_t count);
}

View file

@ -46,7 +46,7 @@ namespace ams {
namespace {
struct ServerOptions {
static constexpr size_t PointerBufferSize = 0x400;
static constexpr size_t PointerBufferSize = 0x420;
static constexpr size_t MaxDomains = 0;
static constexpr size_t MaxDomainObjects = 0;
static constexpr bool CanDeferInvokeRequest = false;

View file

@ -252,6 +252,10 @@ namespace ams::ldr {
meta->npdm->main_thread_priority = HblMainThreadPriorityApplet;
}
}
/* Fix the debug capabilities, to prevent needing a hbl recompilation. */
FixDebugCapabilityForHbl(static_cast<util::BitPack32 *>(meta->acid_kac), meta->acid->kac_size / sizeof(util::BitPack32));
FixDebugCapabilityForHbl(static_cast<util::BitPack32 *>(meta->aci_kac), meta->aci->kac_size / sizeof(util::BitPack32));
} else if (hos::GetVersion() >= hos::Version_10_0_0) {
/* If storage id is none, there is no base code filesystem, and thus it is impossible for us to validate. */
/* However, if we're an application, we are guaranteed a base code filesystem. */

View file

@ -555,7 +555,7 @@ namespace ams::ldr {
R_SUCCEED();
}
Result LoadAutoLoadModule(os::NativeHandle process_handle, fs::FileHandle file, const NsoHeader *nso_header, uintptr_t nso_address, size_t nso_size) {
Result LoadAutoLoadModule(os::NativeHandle process_handle, fs::FileHandle file, const NsoHeader *nso_header, uintptr_t nso_address, size_t nso_size, bool prevent_code_reads) {
/* Map and read data from file. */
{
/* Map the process memory. */
@ -594,7 +594,7 @@ namespace ams::ldr {
const size_t ro_size = util::AlignUp(nso_header->ro_size, os::MemoryPageSize);
const size_t rw_size = util::AlignUp(nso_header->rw_size + nso_header->bss_size, os::MemoryPageSize);
if (text_size) {
R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->text_dst_offset, text_size, os::MemoryPermission_ReadExecute));
R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->text_dst_offset, text_size, prevent_code_reads ? os::MemoryPermission_ExecuteOnly : os::MemoryPermission_ReadExecute));
}
if (ro_size) {
R_TRY(os::SetProcessMemoryPermission(process_handle, nso_address + nso_header->ro_dst_offset, ro_size, os::MemoryPermission_ReadOnly));
@ -606,7 +606,7 @@ namespace ams::ldr {
R_SUCCEED();
}
Result LoadAutoLoadModules(const ProcessInfo *process_info, const NsoHeader *nso_headers, const bool *has_nso, const ArgumentStore::Entry *argument) {
Result LoadAutoLoadModules(const ProcessInfo *process_info, const NsoHeader *nso_headers, const bool *has_nso, const ArgumentStore::Entry *argument, bool prevent_code_reads) {
/* Load each NSO. */
for (size_t i = 0; i < Nso_Count; i++) {
if (has_nso[i]) {
@ -614,7 +614,7 @@ namespace ams::ldr {
R_TRY(fs::OpenFile(std::addressof(file), GetNsoPath(i), fs::OpenMode_Read));
ON_SCOPE_EXIT { fs::CloseFile(file); };
R_TRY(LoadAutoLoadModule(process_info->process_handle, file, nso_headers + i, process_info->nso_address[i], process_info->nso_size[i]));
R_TRY(LoadAutoLoadModule(process_info->process_handle, file, nso_headers + i, process_info->nso_address[i], process_info->nso_size[i], prevent_code_reads));
}
}
@ -658,7 +658,7 @@ namespace ams::ldr {
ON_RESULT_FAILURE { svc::CloseHandle(process_handle); };
/* Load all auto load modules. */
R_RETURN(LoadAutoLoadModules(out, nso_headers, has_nso, argument));
R_RETURN(LoadAutoLoadModules(out, nso_headers, has_nso, argument, (meta->npdm->flags & ldr::Npdm::MetaFlag_PreventCodeReads) != 0));
}
}
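
The flag plumbed through above selects the text segment's permission: execute-only when the NPDM sets MetaFlag_PreventCodeReads, read-execute otherwise. A one-function sketch (the enum values are stand-ins for os::MemoryPermission):

enum class MemoryPermission { ReadExecute, ExecuteOnly };

constexpr MemoryPermission TextSegmentPermission(bool prevent_code_reads) {
    return prevent_code_reads ? MemoryPermission::ExecuteOnly : MemoryPermission::ReadExecute;
}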

View file

@ -85,6 +85,7 @@
"type": "debug_flags",
"value": {
"allow_debug": false,
"force_debug_prod": false,
"force_debug": true
}
}

View file

@ -22,6 +22,7 @@ namespace ams::pm {
/* Global bootmode. */
constinit BootMode g_boot_mode = BootMode::Normal;
constinit u32 g_unknown = 0;
}
@ -47,4 +48,14 @@ namespace ams::pm {
pm::bm::SetMaintenanceBoot();
}
void BootModeService::GetUnknown(sf::Out<u32> out) {
out.SetValue(g_unknown);
}
Result BootModeService::SetUnknown(u32 val) {
R_UNLESS(val <= 3, pm::ResultUnknown7());
g_unknown = val;
R_SUCCEED();
}
}

Some files were not shown because too many files have changed in this diff